| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M) ⌀ |
|---|---|---|---|---|
andreaso/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/digital_ocean/digital_ocean_sshkey.py
|
37
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Ensure an SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True
- digital_ocean_sshkey:
state: present
name: my_ssh_key
ssh_pub_key: 'ssh-rsa AAAA...'
client_id: XXX
api_key: XXX
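# Ensure an SSH key is absent (illustrative sketch; assumes the same key name as above)
- digital_ocean_sshkey:
state: absent
name: my_ssh_key
client_id: XXX
api_key: XXX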
'''
import os
import traceback
try:
from dopy.manager import DoError, DoManager
HAS_DOPY = True
except ImportError:
HAS_DOPY = False
from ansible.module_utils.basic import AnsibleModule
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
class SSH(JsonfyMixIn):
manager = None
def __init__(self, ssh_key_json):
self.__dict__.update(ssh_key_json)
update_attr = __init__
def destroy(self):
self.manager.destroy_ssh_key(self.id)
return True
@classmethod
def setup(cls, client_id, api_key):
cls.manager = DoManager(client_id, api_key)
@classmethod
def find(cls, name):
if not name:
return False
keys = cls.list_all()
for key in keys:
if key.name == name:
return key
return False
@classmethod
def list_all(cls):
json = cls.manager.all_ssh_keys()
return map(cls, json)
@classmethod
def add(cls, name, key_pub):
json = cls.manager.new_ssh_key(name, key_pub)
return cls(json)
def core(module):
def getkeyordie(k):
v = module.params[k]
if v is None:
module.fail_json(msg='Unable to load %s' % k)
return v
try:
# params['client_id'] will be None even if client_id is not passed in
client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
api_key = module.params['api_key'] or os.environ['DO_API_KEY']
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message)
state = module.params['state']
SSH.setup(client_id, api_key)
name = getkeyordie('name')
if state in ('present',):
key = SSH.find(name)
if key:
module.exit_json(changed=False, ssh_key=key.to_json())
key = SSH.add(name, getkeyordie('ssh_pub_key'))
module.exit_json(changed=True, ssh_key=key.to_json())
elif state in ('absent',):
key = SSH.find(name)
if not key:
module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
key.destroy()
module.exit_json(changed=True)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent'], default='present'),
client_id=dict(aliases=['CLIENT_ID'], no_log=True),
api_key=dict(aliases=['API_KEY'], no_log=True),
name=dict(type='str'),
id=dict(aliases=['droplet_id'], type='int'),
ssh_pub_key=dict(type='str'),
),
required_one_of=(
['id', 'name'],
),
)
if not HAS_DOPY:
module.fail_json(msg='dopy required for this module')
try:
core(module)
except (DoError, Exception) as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
neurodata/ndstore
|
refs/heads/master
|
django/ndauth/models.py
|
792
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
|
foursquare/pants
|
refs/heads/master
|
src/python/pants/backend/project_info/tasks/dependencies.py
|
2
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import str
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.exceptions import TaskError
from pants.base.payload_field import JarsField, PythonRequirementsField
from pants.task.console_task import ConsoleTask
class Dependencies(ConsoleTask):
"""Print the target's dependencies."""
@staticmethod
def _is_jvm(target):
return isinstance(target, (JarLibrary, JvmTarget, JvmApp))
@classmethod
def register_options(cls, register):
super(Dependencies, cls).register_options(register)
register('--internal-only', type=bool,
help='Specifies that only internal dependencies should be included in the graph '
'output (no external jars).')
register('--external-only', type=bool,
help='Specifies that only external dependencies should be included in the graph '
'output (only external jars).')
register('--transitive', default=True, type=bool,
help='List transitive dependencies. Disable to only list dependencies defined '
'in target BUILD file(s).')
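# Illustrative invocation sketch (hypothetical target address, not part of this file):
# with the options registered above, a v1 pants run might look like
#   ./pants dependencies --internal-only src/python/myproject:bin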
def __init__(self, *args, **kwargs):
super(Dependencies, self).__init__(*args, **kwargs)
self.is_internal_only = self.get_options().internal_only
self.is_external_only = self.get_options().external_only
self._transitive = self.get_options().transitive
if self.is_internal_only and self.is_external_only:
raise TaskError('At most one of --internal-only or --external-only can be selected.')
def console_output(self, unused_method_argument):
ordered_closure = OrderedSet()
for target in self.context.target_roots:
if self._transitive:
target.walk(ordered_closure.add)
else:
ordered_closure.update(target.dependencies)
for tgt in ordered_closure:
if not self.is_external_only:
yield tgt.address.spec
if not self.is_internal_only:
# TODO(John Sirois): We need an external payload abstraction at which point knowledge
# of jar and requirement payloads can go and this hairball will be untangled.
if isinstance(tgt.payload.get_field('requirements'), PythonRequirementsField):
for requirement in tgt.payload.requirements:
yield str(requirement.requirement)
elif isinstance(tgt.payload.get_field('jars'), JarsField):
for jar in tgt.payload.jars:
data = dict(org=jar.org, name=jar.name, rev=jar.rev)
yield ('{org}:{name}:{rev}' if jar.rev else '{org}:{name}').format(**data)
|
TheTacoScott/GoAtThrottleUp
|
refs/heads/master
|
ServerRelay/cherrypy/lib/auth.py
|
40
|
import cherrypy
from cherrypy.lib import httpauth
def check_auth(users, encrypt=None, realm=None):
"""If an authorization header contains credentials, return True, else False."""
request = cherrypy.serving.request
if 'authorization' in request.headers:
# make sure the provided credentials are correctly set
ah = httpauth.parseAuthorization(request.headers['authorization'])
if ah is None:
raise cherrypy.HTTPError(400, 'Bad Request')
if not encrypt:
encrypt = httpauth.DIGEST_AUTH_ENCODERS[httpauth.MD5]
if hasattr(users, '__call__'):
try:
# backward compatibility
users = users() # expect it to return a dictionary
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
# fetch the user password
password = users.get(ah["username"], None)
except TypeError:
# returns a password (encrypted or clear text)
password = users(ah["username"])
else:
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
# fetch the user password
password = users.get(ah["username"], None)
# validate the authorization by re-computing it here
# and compare it with what the user-agent provided
if httpauth.checkResponse(ah, password, method=request.method,
encrypt=encrypt, realm=realm):
request.login = ah["username"]
return True
request.login = False
return False
def basic_auth(realm, users, encrypt=None, debug=False):
"""If auth fails, raise 401 with a basic authentication header.
realm
A string containing the authentication realm.
users
A dict of the form: {username: password} or a callable returning a dict.
encrypt
callable used to encrypt the password returned from the user-agent.
If None, it defaults to md5 encryption.
"""
if check_auth(users, encrypt):
if debug:
cherrypy.log('Auth successful', 'TOOLS.BASIC_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.basicAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
def digest_auth(realm, users, debug=False):
"""If auth fails, raise 401 with a digest authentication header.
realm
A string containing the authentication realm.
users
A dict of the form: {username: password} or a callable returning a dict.
"""
if check_auth(users, realm=realm):
if debug:
cherrypy.log('Auth successful', 'TOOLS.DIGEST_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.digestAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
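# Illustrative configuration sketch (hypothetical paths and credentials): these
# helpers are normally enabled through CherryPy's tool config rather than called
# directly, e.g.
#   config = {'/protected': {
#       'tools.digest_auth.on': True,
#       'tools.digest_auth.realm': 'Protected Area',
#       'tools.digest_auth.users': {'admin': 'secret'},
#   }}
#   cherrypy.quickstart(Root(), '/', config)   # Root is a hypothetical app class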
|
gautamkrishnar/hatter
|
refs/heads/master
|
scripts/acronym.py
|
2
|
#!/usr/bin/python
"""
Acronym Decoder v1.0
usage: acronym.py [-m N | --max N] [-f] A.C.R.O.N.Y.M.
Prints the known expansions of the specified acronym. If the optional -m/--max
parameter is specified, at most N matches will be output. If -f is passed,
the output is printed in "friendly" mode; otherwise, the matches will be output
one per line.
"""
def usage(): print __doc__
import getopt
import os
import re
import string
import sys
import urllib
### Compiled regular expressions
# Removes all unwanted characters from an acronym.
acroFormatRE = re.compile(r'[^A-Z\-]')
# matches the line BEFORE a definition
firstLineRE = re.compile(r'<td valign="middle" width="15%".*><b>(?P<acro>[A-Z\-]+)</b></td>')
# matches the definition of an acronym, excluding any parenthetical elaborations
secondLineRE = re.compile(r'<td valign="middle" width="75%".*>(<b>)?(?P<def>[^<(]+)(\([^<]+\))?(</b>)?$')
# matches the string indicating that no definitions were found
failureRE = re.compile(r'Sorry, <b>[A-Z\-]+</b> was not found in the database')
def formatAcro(acro):
return acroFormatRE.sub("", acro.upper())
def parse(f, acro, max=-1):
defOnNextLine = False
linenum = 0
found = []
for line in f.readlines():
# If we've found the maximum number of matches,
# stop now.
if max >= 0 and len(found) >= max:
break
# if we haven't found anything yet, check for failure
if len(found) == 0 and not defOnNextLine:
match = failureRE.search(line)
if match is not None:
break
# check this line against the appropriate RE.
# If the first line has already matched, look for an actual
# definition on the second line.
line = line.strip()
linenum += 1
if defOnNextLine:
defOnNextLine = False
match = secondLineRE.search(line)
if match is None:
# This is bad; there should be a definition here...
print "WARNING: did not find expected definition on line", linenum
continue
# add this def to the found list.
found.append( match.group("def").strip() )
else:
match = firstLineRE.search(line)
if match is not None and match.group("acro") == acro:
defOnNextLine = True
return found
if __name__ == "__main__":
# process command-line args
try:
opts, args = getopt.getopt(sys.argv[1:], "m:f", ["max="])
except getopt.GetoptError:
usage()
sys.exit(2)
maxMatches = -1
friendlyMode = False
for o,a in opts:
if o in ["-m", "-max"]:
maxMatches = int(a)
elif o == "-f":
friendlyMode = True
if len(args) != 1:
usage()
sys.exit(2)
# format the acronym to search for
acro = formatAcro(args[0])
# Submit the query and open a file handle to the results page.
class AppURLopener(urllib.FancyURLopener):
def __init__(self, *args):
self.version = "Mozilla/4.0"
urllib.FancyURLopener.__init__(self, *args)
urllib._urlopener = AppURLopener()
f = urllib.urlopen("http://www.acro" + "nymfind"+"er.com/af-q"+"uery.asp?Str"+"ing=exact&A"+"cronym=%s" % acro)
# Parse the results page to find a list of definitions. The list
# will be empty if none were found.
defs = parse(f, acro, maxMatches)
# Print the definitions.
if len(defs) == 0:
if friendlyMode: print "I don't know what %s stands for." % acro
else:
if friendlyMode:
print acro, "stands for:",
for d in defs[:-1]:
print d + ",",
print defs[-1] + "."
else:
for d in defs: print d
|
jimi-c/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_banner.py
|
9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_banner
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage multiline banners on Cisco NXOS devices
description:
- This will configure both exec and motd banners on remote devices
running Cisco NXOS. It allows playbooks to add or remove
banner text from the active running configuration.
options:
banner:
description:
- Specifies which banner should be
configured on the remote device.
required: true
choices: ['exec', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
state:
description:
- Specifies whether or not the configuration is present in the current
device's active running configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: nxos
"""
EXAMPLES = """
- name: configure the exec banner
nxos_banner:
banner: exec
text: |
this is my exec banner
that contains a multiline
string
state: present
- name: remove the motd banner
nxos_banner:
banner: motd
state: absent
- name: Configure banner from file
nxos_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner exec
- this is my exec banner
- that contains a multiline
- string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
import re
def execute_show_command(module, command):
format = 'json'
cmds = [{
'command': command,
'output': format,
}]
output = run_commands(module, cmds, False)
if len(output) == 0 or len(output[0]) == 0:
# If we get here the platform does not
# support structured output. Resend as
# text.
cmds[0]['output'] = 'text'
output = run_commands(module, cmds, False)
return output
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
platform_regex = 'Nexus.*Switch'
if state == 'absent':
if (have.get('text') and not ((have.get('text') == 'User Access Verification') or re.match(platform_regex, have.get('text')))):
commands.append('no banner %s' % module.params['banner'])
elif state == 'present' and want.get('text') != have.get('text'):
banner_cmd = 'banner %s @\n%s\n@' % (module.params['banner'], want['text'].strip())
commands.append(banner_cmd)
return commands
def map_config_to_obj(module):
command = 'show banner %s' % module.params['banner']
output = execute_show_command(module, command)[0]
if "Invalid command" in output:
module.fail_json(msg="banner: exec may not be supported on this platform. Possible values are : exec | motd")
if isinstance(output, dict):
output = list(output.values())
if output != []:
output = output[0]
else:
output = ''
if isinstance(output, dict):
output = list(output.values())
if output != []:
output = output[0]
else:
output = ''
else:
output = output.rstrip()
obj = {'banner': module.params['banner'], 'state': 'absent'}
if output:
obj['text'] = output
obj['state'] = 'present'
return obj
def map_params_to_obj(module):
text = module.params['text']
if text:
text = str(text).strip()
return {
'banner': module.params['banner'],
'text': text,
'state': module.params['state']
}
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['exec', 'motd']),
text=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(nxos_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
msgs = load_config(module, commands, True)
if msgs:
for item in msgs:
if item:
if isinstance(item, dict):
err_str = item['clierror']
else:
err_str = item
if 'more than 40 lines' in err_str or 'buffer overflowed' in err_str:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
sirpercival/kivy
|
refs/heads/master
|
examples/canvas/lines_extended.py
|
17
|
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.widget import Widget
from kivy.lang import Builder
Builder.load_string('''
<LineEllipse1>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height)
Label:
center: root.center
text: 'Ellipse'
<LineEllipse2>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height, 90, 180)
Label:
center: root.center
text: 'Ellipse from 90 to 180'
# fun result with low segments!
<LineEllipse3>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height, 90, 720, 10)
Label:
center: root.center
text: 'Ellipse from 90 to 720\\n10 segments'
halign: 'center'
<LineCircle1>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2)
Label:
center: root.center
text: 'Circle'
<LineCircle2>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2, 90, 180)
Label:
center: root.center
text: 'Circle from 90 to 180'
<LineCircle3>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2, 90, 180, 10)
Label:
center: root.center
text: 'Circle from 90 to 180\\n10 segments'
halign: 'center'
<LineCircle4>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2, 0, 360)
Label:
center: root.center
text: 'Circle from 0 to 360'
halign: 'center'
<LineRectangle>:
canvas:
Color:
rgba: .1, .1, 1, .9
Line:
width: 2.
rectangle: (self.x, self.y, self.width, self.height)
Label:
center: root.center
text: 'Rectangle'
<LineBezier>:
canvas:
Color:
rgba: .1, .1, 1, .9
Line:
width: 2.
bezier: (self.x, self.y, self.center_x - 40, self.y + 100, self.center_x + 40, self.y - 100, self.right, self.y)
Label:
center: root.center
text: 'Bezier'
''')
class LineEllipse1(Widget):
pass
class LineEllipse2(Widget):
pass
class LineEllipse3(Widget):
pass
class LineCircle1(Widget):
pass
class LineCircle2(Widget):
pass
class LineCircle3(Widget):
pass
class LineCircle4(Widget):
pass
class LineRectangle(Widget):
pass
class LineBezier(Widget):
pass
class LineExtendedApp(App):
def build(self):
root = GridLayout(cols=2, padding=50, spacing=50)
root.add_widget(LineEllipse1())
root.add_widget(LineEllipse2())
root.add_widget(LineEllipse3())
root.add_widget(LineCircle1())
root.add_widget(LineCircle2())
root.add_widget(LineCircle3())
root.add_widget(LineCircle4())
root.add_widget(LineRectangle())
root.add_widget(LineBezier())
return root
if __name__ == '__main__':
LineExtendedApp().run()
|
toniher/biodb-benchmark
|
refs/heads/master
|
batch-add/batch-redis-add.py
|
1
|
import sys
import os
import redis
from Bio import SeqIO
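# Usage sketch (illustrative file name): load FASTA records into a local Redis
# instance, pipelining SET commands in batches, e.g.
#   python batch-redis-add.py sequences.fasta 5000
# where argv[0] is the FASTA path and the optional argv[1] overrides the batch size.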
def main(argv):
# Put stuff in JSON config file
r=redis.Redis()
batch = 1000
if len( argv ) > 1:
batch = int(argv[1])
itera = 0
r.flushdb()
pipeline=r.pipeline()
handle = open( argv[0], "r")
for record in SeqIO.parse(handle, "fasta") :
pipeline.set( str( record.id ), str( record.seq ) )
itera = itera + 1
if itera > batch :
pipeline.execute()
itera = 0
if itera > 0 :
pipeline.execute()
handle.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
crast/grpc
|
refs/heads/master
|
src/python/grpcio/grpc/framework/alpha/utilities.py
|
39
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for use with GRPC."""
from grpc.framework.alpha import interfaces
class _RpcMethodDescription(
interfaces.RpcMethodInvocationDescription,
interfaces.RpcMethodServiceDescription):
def __init__(
self, cardinality, unary_unary, unary_stream, stream_unary,
stream_stream, request_serializer, request_deserializer,
response_serializer, response_deserializer):
self._cardinality = cardinality
self._unary_unary = unary_unary
self._unary_stream = unary_stream
self._stream_unary = stream_unary
self._stream_stream = stream_stream
self._request_serializer = request_serializer
self._request_deserializer = request_deserializer
self._response_serializer = response_serializer
self._response_deserializer = response_deserializer
def cardinality(self):
"""See interfaces.RpcMethodDescription.cardinality for specification."""
return self._cardinality
def serialize_request(self, request):
"""See interfaces.RpcMethodInvocationDescription.serialize_request."""
return self._request_serializer(request)
def deserialize_request(self, serialized_request):
"""See interfaces.RpcMethodServiceDescription.deserialize_request."""
return self._request_deserializer(serialized_request)
def serialize_response(self, response):
"""See interfaces.RpcMethodServiceDescription.serialize_response."""
return self._response_serializer(response)
def deserialize_response(self, serialized_response):
"""See interfaces.RpcMethodInvocationDescription.deserialize_response."""
return self._response_deserializer(serialized_response)
def service_unary_unary(self, request, context):
"""See interfaces.RpcMethodServiceDescription.service_unary_unary."""
return self._unary_unary(request, context)
def service_unary_stream(self, request, context):
"""See interfaces.RpcMethodServiceDescription.service_unary_stream."""
return self._unary_stream(request, context)
def service_stream_unary(self, request_iterator, context):
"""See interfaces.RpcMethodServiceDescription.service_stream_unary."""
return self._stream_unary(request_iterator, context)
def service_stream_stream(self, request_iterator, context):
"""See interfaces.RpcMethodServiceDescription.service_stream_stream."""
return self._stream_stream(request_iterator, context)
def unary_unary_invocation_description(
request_serializer, response_deserializer):
"""Creates an interfaces.RpcMethodInvocationDescription for an RPC method.
Args:
request_serializer: A callable that when called on a request
value returns a bytestring corresponding to that value.
response_deserializer: A callable that when called on a
bytestring returns the response value corresponding to
that bytestring.
Returns:
An interfaces.RpcMethodInvocationDescription constructed from the given
arguments representing a unary-request/unary-response RPC method.
"""
return _RpcMethodDescription(
interfaces.Cardinality.UNARY_UNARY, None, None, None, None,
request_serializer, None, None, response_deserializer)
def unary_stream_invocation_description(
request_serializer, response_deserializer):
"""Creates an interfaces.RpcMethodInvocationDescription for an RPC method.
Args:
request_serializer: A callable that when called on a request
value returns a bytestring corresponding to that value.
response_deserializer: A callable that when called on a
bytestring returns the response value corresponding to
that bytestring.
Returns:
An interfaces.RpcMethodInvocationDescription constructed from the given
arguments representing a unary-request/streaming-response RPC method.
"""
return _RpcMethodDescription(
interfaces.Cardinality.UNARY_STREAM, None, None, None, None,
request_serializer, None, None, response_deserializer)
def stream_unary_invocation_description(
request_serializer, response_deserializer):
"""Creates an interfaces.RpcMethodInvocationDescription for an RPC method.
Args:
request_serializer: A callable that when called on a request
value returns a bytestring corresponding to that value.
response_deserializer: A callable that when called on a
bytestring returns the response value corresponding to
that bytestring.
Returns:
An interfaces.RpcMethodInvocationDescription constructed from the given
arguments representing a streaming-request/unary-response RPC method.
"""
return _RpcMethodDescription(
interfaces.Cardinality.STREAM_UNARY, None, None, None, None,
request_serializer, None, None, response_deserializer)
def stream_stream_invocation_description(
request_serializer, response_deserializer):
"""Creates an interfaces.RpcMethodInvocationDescription for an RPC method.
Args:
request_serializer: A callable that when called on a request
value returns a bytestring corresponding to that value.
response_deserializer: A callable that when called on a
bytestring returns the response value corresponding to
that bytestring.
Returns:
An interfaces.RpcMethodInvocationDescription constructed from the given
arguments representing a streaming-request/streaming-response RPC
method.
"""
return _RpcMethodDescription(
interfaces.Cardinality.STREAM_STREAM, None, None, None, None,
request_serializer, None, None, response_deserializer)
def unary_unary_service_description(
behavior, request_deserializer, response_serializer):
"""Creates an interfaces.RpcMethodServiceDescription for the given behavior.
Args:
behavior: A callable that implements a unary-unary RPC
method that accepts a single request and an interfaces.RpcContext and
returns a single response.
request_deserializer: A callable that when called on a
bytestring returns the request value corresponding to that
bytestring.
response_serializer: A callable that when called on a
response value returns the bytestring corresponding to
that value.
Returns:
An interfaces.RpcMethodServiceDescription constructed from the given
arguments representing a unary-request/unary-response RPC
method.
"""
return _RpcMethodDescription(
interfaces.Cardinality.UNARY_UNARY, behavior, None, None, None,
None, request_deserializer, response_serializer, None)
def unary_stream_service_description(
behavior, request_deserializer, response_serializer):
"""Creates an interfaces.RpcMethodServiceDescription for the given behavior.
Args:
behavior: A callable that implements a unary-stream RPC
method that accepts a single request and an interfaces.RpcContext
and returns an iterator of zero or more responses.
request_deserializer: A callable that when called on a
bytestring returns the request value corresponding to that
bytestring.
response_serializer: A callable that when called on a
response value returns the bytestring corresponding to
that value.
Returns:
An interfaces.RpcMethodServiceDescription constructed from the given
arguments representing a unary-request/streaming-response
RPC method.
"""
return _RpcMethodDescription(
interfaces.Cardinality.UNARY_STREAM, None, behavior, None, None,
None, request_deserializer, response_serializer, None)
def stream_unary_service_description(
behavior, request_deserializer, response_serializer):
"""Creates an interfaces.RpcMethodServiceDescription for the given behavior.
Args:
behavior: A callable that implements a stream-unary RPC
method that accepts an iterator of zero or more requests
and an interfaces.RpcContext and returns a single response.
request_deserializer: A callable that when called on a
bytestring returns the request value corresponding to that
bytestring.
response_serializer: A callable that when called on a
response value returns the bytestring corresponding to
that value.
Returns:
An interfaces.RpcMethodServiceDescription constructed from the given
arguments representing a streaming-request/unary-response
RPC method.
"""
return _RpcMethodDescription(
interfaces.Cardinality.STREAM_UNARY, None, None, behavior, None,
None, request_deserializer, response_serializer, None)
def stream_stream_service_description(
behavior, request_deserializer, response_serializer):
"""Creates an interfaces.RpcMethodServiceDescription for the given behavior.
Args:
behavior: A callable that implements a stream-stream RPC
method that accepts an iterator of zero or more requests
and an interfaces.RpcContext and returns an iterator of
zero or more responses.
request_deserializer: A callable that when called on a
bytestring returns the request value corresponding to that
bytestring.
response_serializer: A callable that when called on a
response value returns the bytestring corresponding to
that value.
Returns:
An interfaces.RpcMethodServiceDescription constructed from the given
arguments representing a
streaming-request/streaming-response RPC method.
"""
return _RpcMethodDescription(
interfaces.Cardinality.STREAM_STREAM, None, None, None, behavior,
None, request_deserializer, response_serializer, None)
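# Illustrative sketch (hypothetical identity serializers): building a service-side
# description for a unary-unary method with the helper defined above.
#   echo_description = unary_unary_service_description(
#       behavior=lambda request, context: request,
#       request_deserializer=lambda raw_bytes: raw_bytes,
#       response_serializer=lambda response: response)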
|
JasonCormie/ansible-modules-extras
|
refs/heads/devel
|
cloud/profitbricks/profitbricks_datacenter.py
|
31
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: profitbricks_datacenter
short_description: Create or destroy a ProfitBricks Virtual Datacenter.
description:
- This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency on profitbricks >= 1.0.0.
version_added: "2.0"
options:
name:
description:
- The name of the virtual datacenter.
required: true
description:
description:
- The description of the virtual datacenter.
required: false
location:
description:
- The datacenter location.
required: false
default: us/las
choices: [ "us/las", "de/fra", "de/fkb" ]
subscription_user:
description:
- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
- The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
- wait for the datacenter to be created before returning
required: false
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
state:
description:
- create or terminate datacenters
required: false
default: 'present'
choices: [ "present", "absent" ]
requirements: [ "profitbricks" ]
author: Matt Baldwin (baldwin@stackpointcloud.com)
'''
EXAMPLES = '''
# Create a Datacenter
- profitbricks_datacenter:
name: Tardis One
wait_timeout: 500
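# Create a datacenter, passing credentials explicitly (illustrative values)
- profitbricks_datacenter:
name: Tardis One
location: de/fkb
subscription_user: user@example.com
subscription_password: secret
wait_timeout: 500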
# Destroy a Datacenter. This will remove all servers, volumes, and other objects in the datacenter.
- profitbricks_datacenter:
name: Tardis One
wait_timeout: 500
state: absent
'''
import re
import uuid
import time
import sys
HAS_PB_SDK = True
try:
from profitbricks.client import ProfitBricksService, Datacenter
except ImportError:
HAS_PB_SDK = False
LOCATIONS = ['us/las',
'de/fra',
'de/fkb']
uuid_match = re.compile(
'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
if not promise: return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = profitbricks.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
raise Exception(
'Request failed to complete ' + msg + ' "' + str(
promise['requestId']) + '".')
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
def _remove_datacenter(module, profitbricks, datacenter):
try:
profitbricks.delete_datacenter(datacenter)
except Exception as e:
module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
def create_datacenter(module, profitbricks):
"""
Creates a Datacenter
This will create a new Datacenter in the specified location.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if a new datacenter was created, false otherwise
"""
name = module.params.get('name')
location = module.params.get('location')
description = module.params.get('description')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
virtual_datacenters = []
i = Datacenter(
name=name,
location=location,
description=description
)
try:
datacenter_response = profitbricks.create_datacenter(datacenter=i)
if wait:
_wait_for_completion(profitbricks, datacenter_response,
wait_timeout, "_create_datacenter")
results = {
'datacenter_id': datacenter_response['id']
}
return results
except Exception as e:
module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
def remove_datacenter(module, profitbricks):
"""
Removes a Datacenter.
This will remove a datacenter.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the datacenter was deleted, false otherwise
"""
name = module.params.get('name')
changed = False
if(uuid_match.match(name)):
_remove_datacenter(module, profitbricks, name)
changed = True
else:
datacenters = profitbricks.list_datacenters()
for d in datacenters['items']:
vdc = profitbricks.get_datacenter(d['id'])
if name == vdc['properties']['name']:
name = d['id']
_remove_datacenter(module, profitbricks, name)
changed = True
return changed
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
description=dict(),
location=dict(choices=LOCATIONS, default='us/las'),
subscription_user=dict(),
subscription_password=dict(),
wait=dict(type='bool', default=True),
wait_timeout=dict(default=600),
state=dict(default='present'),
)
)
if not HAS_PB_SDK:
module.fail_json(msg='profitbricks required for this module')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is required')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is required')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
profitbricks = ProfitBricksService(
username=subscription_user,
password=subscription_password)
state = module.params.get('state')
if state == 'absent':
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
try:
(changed) = remove_datacenter(module, profitbricks)
module.exit_json(
changed=changed)
except Exception as e:
module.fail_json(msg='failed to set datacenter state: %s' % str(e))
elif state == 'present':
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for a new datacenter')
if not module.params.get('location'):
module.fail_json(msg='location parameter is required for a new datacenter')
try:
(datacenter_dict_array) = create_datacenter(module, profitbricks)
module.exit_json(**datacenter_dict_array)
except Exception as e:
module.fail_json(msg='failed to set datacenter state: %s' % str(e))
from ansible.module_utils.basic import *
main()
|
apanju/odoo
|
refs/heads/8.0
|
addons/google_calendar/google_calendar.py
|
23
|
# -*- coding: utf-8 -*-
import operator
import simplejson
import urllib2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, exception_to_unicode
from openerp.tools.translate import _
from openerp.http import request
from datetime import datetime, timedelta
from dateutil import parser
import pytz
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
def status_response(status, substr=False):
if substr:
return int(str(status)[0])
else:
return status_response(status, substr=True) == 2
class Meta(type):
""" This Meta class allow to define class as a structure, and so instancied variable
in __init__ to avoid to have side effect alike 'static' variable """
def __new__(typ, name, parents, attrs):
methods = dict((k, v) for k, v in attrs.iteritems()
if callable(v))
attrs = dict((k, v) for k, v in attrs.iteritems()
if not callable(v))
def init(self, **kw):
for k, v in attrs.iteritems():
setattr(self, k, v)
for k, v in kw.iteritems():
assert k in attrs
setattr(self, k, v)
methods['__init__'] = init
methods['__getitem__'] = getattr
return type.__new__(typ, name, parents, methods)
class Struct(object):
__metaclass__ = Meta
class OpenerpEvent(Struct):
event = False
found = False
event_id = False
isRecurrence = False
isInstance = False
update = False
status = False
attendee_id = False
synchro = False
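# Illustrative note: thanks to the Meta/Struct machinery above, these classes act as
# lightweight records whose fields must be declared on the class, e.g.
#   oe = OpenerpEvent(found=True, status=True)   # unknown keyword arguments would fail the assert in init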
class GmailEvent(Struct):
event = False
found = False
isRecurrence = False
isInstance = False
update = False
status = False
class SyncEvent(object):
def __init__(self):
self.OE = OpenerpEvent()
self.GG = GmailEvent()
self.OP = None
def __getitem__(self, key):
return getattr(self, key)
def compute_OP(self, modeFull=True):
# If the event already exists in both Gmail and OpenERP
if self.OE.found and self.GG.found:
# If the event has been deleted on one side, delete it on the other side
if self.OE.status != self.GG.status:
self.OP = Delete((self.OE.status and "OE") or (self.GG.status and "GG"),
'The event has been deleted from one side, we delete on other side !')
# If the event is not deleted
elif self.OE.status and self.GG.status:
if self.OE.update.split('.')[0] != self.GG.update.split('.')[0]:
if self.OE.update < self.GG.update:
tmpSrc = 'GG'
elif self.OE.update > self.GG.update:
tmpSrc = 'OE'
assert tmpSrc in ['GG', 'OE']
#if self.OP.action == None:
if self[tmpSrc].isRecurrence:
if self[tmpSrc].status:
self.OP = Update(tmpSrc, 'Only need to update, because i\'m active')
else:
self.OP = Exclude(tmpSrc, 'Need to Exclude (Me = First event from recurrence) from recurrence')
elif self[tmpSrc].isInstance:
self.OP = Update(tmpSrc, 'Only need to update, because already an exclu')
else:
self.OP = Update(tmpSrc, 'Simply Update... I\'m a single event')
else:
if not self.OE.synchro or self.OE.synchro.split('.')[0] < self.OE.update.split('.')[0]:
self.OP = Update('OE', 'Event already updated by another user, but not synchro with my google calendar')
else:
self.OP = NothingToDo("", 'Not update needed')
else:
self.OP = NothingToDo("", "Both are already deleted")
# New in OpenERP: such events are created on the Google side by create_events in the synchronize function
elif self.OE.found and not self.GG.found:
if self.OE.status:
self.OP = Delete('OE', 'Update or delete from GOOGLE')
else:
if not modeFull:
self.OP = Delete('GG', 'Deleted from Odoo, need to delete it from Gmail if already created')
else:
self.OP = NothingToDo("", "Already Deleted in gmail and unlinked in Odoo")
elif self.GG.found and not self.OE.found:
tmpSrc = 'GG'
if not self.GG.status and not self.GG.isInstance:
# nothing to do: the event was created and deleted before the synchronization
self.OP = NothingToDo("", 'Nothing to do... Create and Delete directly')
else:
if self.GG.isInstance:
if self[tmpSrc].status:
self.OP = Exclude(tmpSrc, 'Need to create the new exclu')
else:
self.OP = Exclude(tmpSrc, 'Need to copy and Exclude')
else:
self.OP = Create(tmpSrc, 'New EVENT CREATE from GMAIL')
def __str__(self):
return self.__repr__()
def __repr__(self):
myPrint = "\n\n---- A SYNC EVENT ---"
myPrint += "\n ID OE: %s " % (self.OE.event and self.OE.event.id)
myPrint += "\n ID GG: %s " % (self.GG.event and self.GG.event.get('id', False))
myPrint += "\n Name OE: %s " % (self.OE.event and self.OE.event.name.encode('utf8'))
myPrint += "\n Name GG: %s " % (self.GG.event and self.GG.event.get('summary', '').encode('utf8'))
myPrint += "\n Found OE:%5s vs GG: %5s" % (self.OE.found, self.GG.found)
myPrint += "\n Recurrence OE:%5s vs GG: %5s" % (self.OE.isRecurrence, self.GG.isRecurrence)
myPrint += "\n Instance OE:%5s vs GG: %5s" % (self.OE.isInstance, self.GG.isInstance)
myPrint += "\n Synchro OE: %10s " % (self.OE.synchro)
myPrint += "\n Update OE: %10s " % (self.OE.update)
myPrint += "\n Update GG: %10s " % (self.GG.update)
myPrint += "\n Status OE:%5s vs GG: %5s" % (self.OE.status, self.GG.status)
if (self.OP is None):
myPrint += "\n Action %s" % "---!!!---NONE---!!!---"
else:
myPrint += "\n Action %s" % type(self.OP).__name__
myPrint += "\n Source %s" % (self.OP.src)
myPrint += "\n comment %s" % (self.OP.info)
return myPrint
class SyncOperation(object):
def __init__(self, src, info, **kw):
self.src = src
self.info = info
for k, v in kw.items():
setattr(self, k, v)
def __str__(self):
return 'in__STR__'
class Create(SyncOperation):
pass
class Update(SyncOperation):
pass
class Delete(SyncOperation):
pass
class NothingToDo(SyncOperation):
pass
class Exclude(SyncOperation):
pass
class google_calendar(osv.AbstractModel):
STR_SERVICE = 'calendar'
_name = 'google.%s' % STR_SERVICE
def generate_data(self, cr, uid, event, isCreating=False, context=None):
if event.allday:
start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T').split('T')[0]
final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=event.duration) + timedelta(days=isCreating and 1 or 0), context=context).isoformat('T').split('T')[0]
type = 'date'
vstype = 'dateTime'
else:
start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
type = 'dateTime'
vstype = 'date'
attendee_list = []
for attendee in event.attendee_ids:
attendee_list.append({
'email': attendee.email or 'NoEmail@mail.com',
'displayName': attendee.partner_id.name,
'responseStatus': attendee.state or 'needsAction',
})
reminders = []
for alarm in event.alarm_ids:
reminders.append({
"method": "email" if alarm.type == "email" else "popup",
"minutes": alarm.duration_minutes
})
data = {
"summary": event.name or '',
"description": event.description or '',
"start": {
type: start_date,
vstype: None,
'timeZone': 'UTC'
},
"end": {
type: final_date,
vstype: None,
'timeZone': 'UTC'
},
"attendees": attendee_list,
"reminders": {
"overrides": reminders,
"useDefault": "false"
},
"location": event.location or '',
"visibility": event['class'] or 'public',
}
if event.recurrency and event.rrule:
data["recurrence"] = ["RRULE:" + event.rrule]
if not event.active:
data["state"] = "cancelled"
if not self.get_need_synchro_attendee(cr, uid, context=context):
data.pop("attendees")
return data
def create_an_event(self, cr, uid, event, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event, isCreating=True, context=context)
url = "/calendar/v3/calendars/%s/events?fields=%s&access_token=%s" % ('primary', urllib2.quote('id,updated'), self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='POST', context=context)
def delete_an_event(self, cr, uid, event_id, context=None):
gs_pool = self.pool['google.service']
params = {
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event_id)
return gs_pool._do_request(cr, uid, url, params, headers, type='DELETE', context=context)
def get_calendar_primary_id(self, cr, uid, context=None):
params = {
'fields': 'id',
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/primary"
try:
st, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except Exception, e:
if (e.code == 401): # Token invalid / Acces unauthorized
error_msg = "Your token is invalid or has been revoked !"
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_token': False, 'google_calendar_token_validity': False}, context=context)
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
raise
return (status_response(st), content['id'] or False, ask_time)
def get_event_synchro_dict(self, cr, uid, lastSync=False, token=False, nextPageToken=False, context=None):
if not token:
token = self.get_token(cr, uid, context)
params = {
'fields': 'items,nextPageToken',
'access_token': token,
'maxResults': 1000,
#'timeMin': self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz"),
}
if lastSync:
params['updatedMin'] = lastSync.strftime("%Y-%m-%dT%H:%M:%S.%fz")
params['showDeleted'] = True
else:
params['timeMin'] = self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz")
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events" % 'primary'
if nextPageToken:
params['pageToken'] = nextPageToken
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
google_events_dict = {}
for google_event in content['items']:
google_events_dict[google_event['id']] = google_event
if content.get('nextPageToken'):
google_events_dict.update(
self.get_event_synchro_dict(cr, uid, lastSync=lastSync, token=token, nextPageToken=content['nextPageToken'], context=context)
)
return google_events_dict
def get_one_event_synchro(self, cr, uid, google_id, context=None):
token = self.get_token(cr, uid, context)
params = {
'access_token': token,
'maxResults': 1000,
'showDeleted': True,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', google_id)
try:
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except:
_logger.info("Calendar Synchro - In except of get_one_event_synchro")
pass
return status_response(status) and content or False
def update_to_google(self, cr, uid, oe_event, google_event, context):
calendar_event = self.pool['calendar.event']
url = "/calendar/v3/calendars/%s/events/%s?fields=%s&access_token=%s" % ('primary', google_event['id'], 'id,updated', self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = self.generate_data(cr, uid, oe_event, context)
data['sequence'] = google_event.get('sequence', 0)
data_json = simplejson.dumps(data)
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, data_json, headers, type='PATCH', context=context)
update_date = datetime.strptime(content['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
calendar_event.write(cr, uid, [oe_event.id], {'oe_update_date': update_date})
if context['curr_attendee']:
self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date}, context)
def update_an_event(self, cr, uid, event, context=None):
data = self.generate_data(cr, uid, event, context=context)
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event.google_internal_event_id)
headers = {}
data['access_token'] = self.get_token(cr, uid, context)
status, response, ask_time = self.pool['google.service']._do_request(cr, uid, url, data, headers, type='GET', context=context)
#TO_CHECK : , if http fail, no event, do DELETE ?
return response
def update_recurrent_event_exclu(self, cr, uid, instance_id, event_ori_google_id, event_new, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event_new, context=context)
data['recurringEventId'] = event_ori_google_id
data['originalStartTime'] = event_new.recurrent_id_date
url = "/calendar/v3/calendars/%s/events/%s?access_token=%s" % ('primary', instance_id, self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json'}
data['sequence'] = self.get_sequence(cr, uid, instance_id, context)
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='PUT', context=context)
def update_from_google(self, cr, uid, event, single_event_dict, type, context):
if context is None:
context = {}
calendar_event = self.pool['calendar.event']
res_partner_obj = self.pool['res.partner']
calendar_attendee_obj = self.pool['calendar.attendee']
calendar_alarm_obj = self.pool['calendar.alarm']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context).partner_id.id
attendee_record = []
alarm_record = []
partner_record = [(4, myPartnerID)]
result = {}
if self.get_need_synchro_attendee(cr, uid, context=context):
for google_attendee in single_event_dict.get('attendees', []):
if type == "write":
for oe_attendee in event['attendee_ids']:
if oe_attendee.email == google_attendee['email']:
calendar_attendee_obj.write(cr, uid, [oe_attendee.id], {'state': google_attendee['responseStatus']}, context=context)
google_attendee['found'] = True
continue
if google_attendee.get('found'):
continue
attendee_id = res_partner_obj.search(cr, uid, [('email', '=', google_attendee['email'])], context=context)
if not attendee_id:
data = {
'email': google_attendee['email'],
'customer': False,
'name': google_attendee.get("displayName", False) or google_attendee['email']
}
attendee_id = [res_partner_obj.create(cr, uid, data, context=context)]
attendee = res_partner_obj.read(cr, uid, attendee_id[0], ['email'], context=context)
partner_record.append((4, attendee.get('id')))
attendee['partner_id'] = attendee.pop('id')
attendee['state'] = google_attendee['responseStatus']
attendee_record.append((0, 0, attendee))
for google_alarm in single_event_dict.get('reminders', {}).get('overrides', []):
alarm_id = calendar_alarm_obj.search(
cr,
uid,
[
('type', '=', google_alarm['method'] if google_alarm['method'] == 'email' else 'notification'),
('duration_minutes', '=', google_alarm['minutes'])
],
context=context
)
if not alarm_id:
data = {
'type': google_alarm['method'] if google_alarm['method'] == 'email' else 'notification',
'duration': google_alarm['minutes'],
'interval': 'minutes',
'name': "%s minutes - %s" % (google_alarm['minutes'], google_alarm['method'])
}
alarm_id = [calendar_alarm_obj.create(cr, uid, data, context=context)]
alarm_record.append(alarm_id[0])
UTC = pytz.timezone('UTC')
if single_event_dict.get('start') and single_event_dict.get('end'): # If not cancelled
if single_event_dict['start'].get('dateTime', False) and single_event_dict['end'].get('dateTime', False):
date = parser.parse(single_event_dict['start']['dateTime'])
stop = parser.parse(single_event_dict['end']['dateTime'])
date = str(date.astimezone(UTC))[:-6]
stop = str(stop.astimezone(UTC))[:-6]
allday = False
else:
date = (single_event_dict['start']['date'])
stop = (single_event_dict['end']['date'])
d_end = datetime.strptime(stop, DEFAULT_SERVER_DATE_FORMAT)
allday = True
d_end = d_end + timedelta(days=-1)
stop = d_end.strftime(DEFAULT_SERVER_DATE_FORMAT)
update_date = datetime.strptime(single_event_dict['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
result.update({
'start': date,
'stop': stop,
'allday': allday
})
result.update({
'attendee_ids': attendee_record,
'partner_ids': list(set(partner_record)),
'alarm_ids': [(6, 0, alarm_record)],
'name': single_event_dict.get('summary', 'Event'),
'description': single_event_dict.get('description', False),
'location': single_event_dict.get('location', False),
'class': single_event_dict.get('visibility', 'public'),
'oe_update_date': update_date,
})
if single_event_dict.get("recurrence", False):
rrule = [rule for rule in single_event_dict["recurrence"] if rule.startswith("RRULE:")][0][6:]
result['rrule'] = rrule
context = dict(context or {}, no_mail_to_attendees=True)
if type == "write":
res = calendar_event.write(cr, uid, event['id'], result, context=context)
elif type == "copy":
result['recurrence'] = True
res = calendar_event.write(cr, uid, [event['id']], result, context=context)
elif type == "create":
res = calendar_event.create(cr, uid, result, context=context)
if context['curr_attendee']:
self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date, 'google_internal_event_id': single_event_dict.get('id', False)}, context)
return res
def remove_references(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
reset_data = {
'google_calendar_rtoken': False,
'google_calendar_token': False,
'google_calendar_token_validity': False,
'google_calendar_last_sync_date': False,
'google_calendar_cal_id': False,
}
all_my_attendees = self.pool['calendar.attendee'].search(cr, uid, [('partner_id', '=', current_user.partner_id.id)], context=context)
self.pool['calendar.attendee'].write(cr, uid, all_my_attendees, {'oe_synchro_date': False, 'google_internal_event_id': False}, context=context)
current_user.write(reset_data)
return True
def synchronize_events_cron(self, cr, uid, context=None):
ids = self.pool['res.users'].search(cr, uid, [('google_calendar_last_sync_date', '!=', False)], context=context)
_logger.info("Calendar Synchro - Started by cron")
for user_to_sync in ids:
_logger.info("Calendar Synchro - Starting synchronization for a new user [%s] " % user_to_sync)
try:
resp = self.synchronize_events(cr, user_to_sync, False, lastSync=True, context=None)
if resp.get("status") == "need_reset":
_logger.info("[%s] Calendar Synchro - Failed - NEED RESET !" % user_to_sync)
else:
_logger.info("[%s] Calendar Synchro - Done with status : %s !" % (user_to_sync, resp.get("status")))
except Exception, e:
_logger.info("[%s] Calendar Synchro - Exception : %s !" % (user_to_sync, exception_to_unicode(e)))
_logger.info("Calendar Synchro - Ended by cron")
def synchronize_events(self, cr, uid, ids, lastSync=True, context=None):
if context is None:
context = {}
# def isValidSync(syncToken):
# gs_pool = self.pool['google.service']
# params = {
# 'maxResults': 1,
# 'fields': 'id',
# 'access_token': self.get_token(cr, uid, context),
# 'syncToken': syncToken,
# }
# url = "/calendar/v3/calendars/primary/events"
# status, response = gs_pool._do_request(cr, uid, url, params, type='GET', context=context)
# return int(status) != 410
user_to_sync = ids and ids[0] or uid
current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_to_sync, context=context)
st, current_google, ask_time = self.get_calendar_primary_id(cr, user_to_sync, context=context)
if current_user.google_calendar_cal_id:
if current_google != current_user.google_calendar_cal_id:
return {
"status": "need_reset",
"info": {
"old_name": current_user.google_calendar_cal_id,
"new_name": current_google
},
"url": ''
}
if lastSync and self.get_last_sync_date(cr, user_to_sync, context=context) and not self.get_disable_since_synchro(cr, user_to_sync, context=context):
lastSync = self.get_last_sync_date(cr, user_to_sync, context)
_logger.info("[%s] Calendar Synchro - MODE SINCE_MODIFIED : %s !" % (user_to_sync, lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT)))
else:
lastSync = False
_logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO FORCED" % user_to_sync)
else:
current_user.write({'google_calendar_cal_id': current_google})
lastSync = False
_logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO - NEW CAL ID" % user_to_sync)
new_ids = []
new_ids += self.create_new_events(cr, user_to_sync, context=context)
new_ids += self.bind_recurring_events_to_google(cr, user_to_sync, context)
res = self.update_events(cr, user_to_sync, lastSync, context)
current_user.write({'google_calendar_last_sync_date': ask_time})
return {
"status": res and "need_refresh" or "no_new_event_from_google",
"url": ''
}
def create_new_events(self, cr, uid, context=None):
if context is None:
context = {}
new_ids = []
ev_obj = self.pool['calendar.event']
att_obj = self.pool['calendar.attendee']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID),
('google_internal_event_id', '=', False),
'|',
('event_id.stop', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('event_id.final_date', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
], context=context_norecurrent)
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
if not att.event_id.recurrent_id or att.event_id.recurrent_id == 0:
st, response, ask_time = self.create_an_event(cr, uid, att.event_id, context=context)
if status_response(st):
update_date = datetime.strptime(response['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
ev_obj.write(cr, uid, att.event_id.id, {'oe_update_date': update_date})
new_ids.append(response['id'])
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': response['id'], 'oe_synchro_date': update_date})
cr.commit()
else:
_logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
_logger.warning("Response : %s" % response)
return new_ids
def get_context_no_virtual(self, context):
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
context_norecurrent['active_test'] = False
return context_norecurrent
def bind_recurring_events_to_google(self, cr, uid, context=None):
if context is None:
context = {}
new_ids = []
ev_obj = self.pool['calendar.event']
att_obj = self.pool['calendar.attendee']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_norecurrent = self.get_context_no_virtual(context)
my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('google_internal_event_id', '=', False)], context=context_norecurrent)
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
if att.event_id.recurrent_id and att.event_id.recurrent_id > 0:
new_google_internal_event_id = False
source_event_record = ev_obj.browse(cr, uid, att.event_id.recurrent_id, context)
source_attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', source_event_record.id)], context=context)
if not source_attendee_record_id:
continue
source_attendee_record = att_obj.browse(cr, uid, source_attendee_record_id, context)[0]
if att.event_id.recurrent_id_date and source_event_record.allday and source_attendee_record.google_internal_event_id:
new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.split(' ')[0].replace('-', '')
elif att.event_id.recurrent_id_date and source_attendee_record.google_internal_event_id:
new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.replace('-', '').replace(' ', 'T').replace(':', '') + 'Z'
if new_google_internal_event_id:
# TODO: WARNING - check that the event and all of its instances have not been deleted in Gmail before doing this!
try:
st, response, ask_time = self.update_recurrent_event_exclu(cr, uid, new_google_internal_event_id, source_attendee_record.google_internal_event_id, att.event_id, context=context)
if status_response(st):
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': new_google_internal_event_id}, context=context)
new_ids.append(new_google_internal_event_id)
cr.commit()
else:
_logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
_logger.warning("Response : %s" % response)
except:
pass
return new_ids
def update_events(self, cr, uid, lastSync=False, context=None):
context = dict(context or {})
calendar_event = self.pool['calendar.event']
user_obj = self.pool['res.users']
att_obj = self.pool['calendar.attendee']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_novirtual = self.get_context_no_virtual(context)
if lastSync:
try:
all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=lastSync, context=context)
except urllib2.HTTPError, e:
if e.code == 410: # GONE, Google is lost.
# We need to force a rollback on this cursor because it holds a lock on res_users, and we must write to that row before raising.
cr.rollback()
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_last_sync_date': False}, context=context)
error_key = simplejson.loads(str(e))
error_key = error_key.get('error', {}).get('message', 'nc')
error_msg = "Google is lost... the next synchro will be a full synchro. \n\n %s" % error_key
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
my_google_att_ids = att_obj.search(cr, uid, [
('partner_id', '=', myPartnerID),
('google_internal_event_id', 'in', all_event_from_google.keys())
], context=context_novirtual)
my_openerp_att_ids = att_obj.search(cr, uid, [
('partner_id', '=', myPartnerID),
('event_id.oe_update_date', '>', lastSync and lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('google_internal_event_id', '!=', False),
], context=context_novirtual)
my_openerp_googleinternal_ids = att_obj.read(cr, uid, my_openerp_att_ids, ['google_internal_event_id', 'event_id'], context=context_novirtual)
if self.get_print_log(cr, uid, context=context):
_logger.info("Calendar Synchro - \n\nUPDATE IN GOOGLE\n%s\n\nRETRIEVE FROM OE\n%s\n\nUPDATE IN OE\n%s\n\nRETRIEVE FROM GG\n%s\n\n" % (all_event_from_google, my_google_att_ids, my_openerp_att_ids, my_openerp_googleinternal_ids))
for giid in my_openerp_googleinternal_ids:
active = True # if not sure, we request google
if giid.get('event_id'):
active = calendar_event.browse(cr, uid, int(giid.get('event_id')[0]), context=context_novirtual).active
if giid.get('google_internal_event_id') and not all_event_from_google.get(giid.get('google_internal_event_id')) and active:
one_event = self.get_one_event_synchro(cr, uid, giid.get('google_internal_event_id'), context=context)
if one_event:
all_event_from_google[one_event['id']] = one_event
my_att_ids = list(set(my_google_att_ids + my_openerp_att_ids))
else:
domain = [
('partner_id', '=', myPartnerID),
('google_internal_event_id', '!=', False),
'|',
('event_id.stop', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('event_id.final_date', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
]
# Select all events from OpenERP which have already been synchronized with Gmail
my_att_ids = att_obj.search(cr, uid, domain, context=context_novirtual)
all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=False, context=context)
event_to_synchronize = {}
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
event = att.event_id
base_event_id = att.google_internal_event_id.rsplit('_', 1)[0]
if base_event_id not in event_to_synchronize:
event_to_synchronize[base_event_id] = {}
if att.google_internal_event_id not in event_to_synchronize[base_event_id]:
event_to_synchronize[base_event_id][att.google_internal_event_id] = SyncEvent()
ev_to_sync = event_to_synchronize[base_event_id][att.google_internal_event_id]
ev_to_sync.OE.attendee_id = att.id
ev_to_sync.OE.event = event
ev_to_sync.OE.found = True
ev_to_sync.OE.event_id = event.id
ev_to_sync.OE.isRecurrence = event.recurrency
ev_to_sync.OE.isInstance = bool(event.recurrent_id and event.recurrent_id > 0)
ev_to_sync.OE.update = event.oe_update_date
ev_to_sync.OE.status = event.active
ev_to_sync.OE.synchro = att.oe_synchro_date
for event in all_event_from_google.values():
event_id = event.get('id')
base_event_id = event_id.rsplit('_', 1)[0]
if base_event_id not in event_to_synchronize:
event_to_synchronize[base_event_id] = {}
if event_id not in event_to_synchronize[base_event_id]:
event_to_synchronize[base_event_id][event_id] = SyncEvent()
ev_to_sync = event_to_synchronize[base_event_id][event_id]
ev_to_sync.GG.event = event
ev_to_sync.GG.found = True
ev_to_sync.GG.isRecurrence = bool(event.get('recurrence', ''))
ev_to_sync.GG.isInstance = bool(event.get('recurringEventId', 0))
ev_to_sync.GG.update = event.get('updated', None) # if deleted, no date without browse event
if ev_to_sync.GG.update:
ev_to_sync.GG.update = ev_to_sync.GG.update.replace('T', ' ').replace('Z', '')
ev_to_sync.GG.status = (event.get('status') != 'cancelled')
######################
# PRE-PROCESSING #
######################
for base_event in event_to_synchronize:
for current_event in event_to_synchronize[base_event]:
event_to_synchronize[base_event][current_event].compute_OP(modeFull=not lastSync)
if self.get_print_log(cr, uid, context=context):
if not isinstance(event_to_synchronize[base_event][current_event].OP, NothingToDo):
_logger.info(event_to_synchronize[base_event])
######################
# DO ACTION #
######################
for base_event in event_to_synchronize:
event_to_synchronize[base_event] = sorted(event_to_synchronize[base_event].iteritems(), key=operator.itemgetter(0))
for current_event in event_to_synchronize[base_event]:
cr.commit()
event = current_event[1] # event is a SyncEvent!
actToDo = event.OP
actSrc = event.OP.src
context['curr_attendee'] = event.OE.attendee_id
if isinstance(actToDo, NothingToDo):
continue
elif isinstance(actToDo, Create):
context_tmp = context.copy()
context_tmp['NewMeeting'] = True
if actSrc == 'GG':
res = self.update_from_google(cr, uid, False, event.GG.event, "create", context=context_tmp)
event.OE.event_id = res
meeting = calendar_event.browse(cr, uid, res, context=context)
attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', res)], context=context)
self.pool['calendar.attendee'].write(cr, uid, attendee_record_id, {'oe_synchro_date': meeting.oe_update_date, 'google_internal_event_id': event.GG.event['id']}, context=context_tmp)
elif actSrc == 'OE':
raise "Should be never here, creation for OE is done before update !"
#TODO Add to batch
elif isinstance(actToDo, Update):
if actSrc == 'GG':
self.update_from_google(cr, uid, event.OE.event, event.GG.event, 'write', context)
elif actSrc == 'OE':
self.update_to_google(cr, uid, event.OE.event, event.GG.event, context)
elif isinstance(actToDo, Exclude):
if actSrc == 'OE':
self.delete_an_event(cr, uid, current_event[0], context=context)
elif actSrc == 'GG':
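# Google ids for instances of a recurring event look like
# '<base_id>_YYYYMMDDTHHMMSSZ' (or '<base_id>_YYYYMMDD' for all-day
# instances); the suffix is normalized below to a bare 'YYYYMMDDHHMMSS'
# string so it can be appended to the Odoo virtual event id.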
new_google_event_id = event.GG.event['id'].rsplit('_', 1)[1]
if 'T' in new_google_event_id:
new_google_event_id = new_google_event_id.replace('T', '')[:-1]
else:
new_google_event_id = new_google_event_id + "000000"
if event.GG.status:
parent_event = {}
if not event_to_synchronize[base_event][0][1].OE.event_id:
main_ev = att_obj.search_read(cr, uid, [('google_internal_event_id', '=', event.GG.event['id'].rsplit('_', 1)[0])], fields=['event_id'], context=context_novirtual)
event_to_synchronize[base_event][0][1].OE.event_id = main_ev[0].get('event_id')[0]
parent_event['id'] = "%s-%s" % (event_to_synchronize[base_event][0][1].OE.event_id, new_google_event_id)
res = self.update_from_google(cr, uid, parent_event, event.GG.event, "copy", context)
else:
parent_oe_id = event_to_synchronize[base_event][0][1].OE.event_id
if parent_oe_id:
calendar_event.unlink(cr, uid, "%s-%s" % (parent_oe_id, new_google_event_id), can_be_deleted=True, context=context)
elif isinstance(actToDo, Delete):
if actSrc == 'GG':
try:
self.delete_an_event(cr, uid, current_event[0], context=context)
except Exception, e:
error = simplejson.loads(e.read())
error_nr = error.get('error', {}).get('code')
# if already deleted from gmail or never created
if error_nr in (404, 410,):
pass
else:
raise e
elif actSrc == 'OE':
calendar_event.unlink(cr, uid, event.OE.event_id, can_be_deleted=False, context=context)
return True
def check_and_sync(self, cr, uid, oe_event, google_event, context):
if datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") > datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_to_google(cr, uid, oe_event, google_event, context)
elif datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") < datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_from_google(cr, uid, oe_event, google_event, 'write', context)
def get_sequence(self, cr, uid, instance_id, context=None):
gs_pool = self.pool['google.service']
params = {
'fields': 'sequence',
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', instance_id)
st, content, ask_time = gs_pool._do_request(cr, uid, url, params, headers, type='GET', context=context)
return content.get('sequence', 0)
#################################
## MANAGE CONNECTION TO GMAIL ##
#################################
def get_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if not current_user.google_calendar_token_validity or \
datetime.strptime(current_user.google_calendar_token_validity.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) < (datetime.now() + timedelta(minutes=1)):
self.do_refresh_token(cr, uid, context=context)
current_user.refresh()
return current_user.google_calendar_token
def get_last_sync_date(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_last_sync_date and datetime.strptime(current_user.google_calendar_last_sync_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(minutes=0) or False
def do_refresh_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
gs_pool = self.pool['google.service']
all_token = gs_pool._refresh_google_token_json(cr, uid, current_user.google_calendar_rtoken, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def need_authorize(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_rtoken is False
def get_calendar_scope(self, RO=False):
readonly = RO and '.readonly' or ''
return 'https://www.googleapis.com/auth/calendar%s' % (readonly)
def authorize_google_uri(self, cr, uid, from_url='http://www.openerp.com', context=None):
url = self.pool['google.service']._get_authorize_uri(cr, uid, from_url, self.STR_SERVICE, scope=self.get_calendar_scope(), context=context)
return url
def can_authorize_google(self, cr, uid, context=None):
return self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager')
def set_all_tokens(self, cr, uid, authorization_code, context=None):
gs_pool = self.pool['google.service']
all_token = gs_pool._get_google_token_json(cr, uid, authorization_code, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_rtoken' % self.STR_SERVICE] = all_token.get('refresh_token')
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def get_minTime(self, cr, uid, context=None):
number_of_week = self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.week_synchro', default=13)
return datetime.now() - timedelta(weeks=number_of_week)
def get_need_synchro_attendee(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_synchro_attendee', default=True)
def get_disable_since_synchro(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_since_synchro', default=False)
def get_print_log(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.debug_print', default=False)
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'google_calendar_rtoken': fields.char('Refresh Token'),
'google_calendar_token': fields.char('User token'),
'google_calendar_token_validity': fields.datetime('Token Validity'),
'google_calendar_last_sync_date': fields.datetime('Last synchro date'),
'google_calendar_cal_id': fields.char('Calendar ID', help='Last Calendar ID that has been synchronized. If it changes, we remove \
all links between the Google ID and the Odoo Google internal ID')
}
class calendar_event(osv.Model):
_inherit = "calendar.event"
def get_fields_need_update_google(self, cr, uid, context=None):
return ['name', 'description', 'allday', 'date', 'date_end', 'stop', 'attendee_ids', 'alarm_ids', 'location', 'class', 'active']
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
sync_fields = set(self.get_fields_need_update_google(cr, uid, context))
if (set(vals.keys()) & sync_fields) and 'oe_update_date' not in vals.keys() and 'NewMeeting' not in context:
vals['oe_update_date'] = datetime.now()
return super(calendar_event, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
if default.get('write_type', False):
del default['write_type']
elif default.get('recurrent_id', False):
default['oe_update_date'] = datetime.now()
else:
default['oe_update_date'] = False
return super(calendar_event, self).copy(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, can_be_deleted=False, context=None):
return super(calendar_event, self).unlink(cr, uid, ids, can_be_deleted=can_be_deleted, context=context)
_columns = {
'oe_update_date': fields.datetime('Odoo Update Date'),
}
class calendar_attendee(osv.Model):
_inherit = 'calendar.attendee'
_columns = {
'google_internal_event_id': fields.char('Google Calendar Event Id'),
'oe_synchro_date': fields.datetime('Odoo Synchro Date'),
}
_sql_constraints = [('google_id_uniq', 'unique(google_internal_event_id,partner_id,event_id)', 'Google ID should be unique!')]
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
for id in ids:
ref = vals.get('event_id', self.browse(cr, uid, id, context=context).event_id.id)
# If attendees are updated, we need to mark that the next synchro requires an action,
# except when the change comes from update_from_google.
if not context.get('curr_attendee', False) and not context.get('NewMeeting', False):
self.pool['calendar.event'].write(cr, uid, ref, {'oe_update_date': datetime.now()}, context)
return super(calendar_attendee, self).write(cr, uid, ids, vals, context=context)
|
foreni-packages/golismero
|
refs/heads/master
|
tools/theHarvester/discovery/DNS/win32dns.py
|
46
|
"""
$Id: win32dns.py,v 1.3.2.1 2007/05/22 20:26:49 customdesigned Exp $
Extract a list of TCP/IP name servers from the registry 0.1
0.1 Strobl 2001-07-19
Usage:
RegistryResolve() returns a list of ip numbers (dotted quads), by
scouring the registry for addresses of name servers
Tested on Windows NT4 Server SP6a, Windows 2000 Pro SP2 and
Whistler Pro (XP) Build 2462 and Windows ME
... all having a different registry layout wrt name servers :-/
Todo:
Program doesn't check whether an interface is up or down
(c) 2001 Copyright by Wolfgang Strobl ws@mystrobl.de,
License analog to the current Python license
"""
import string, re
import _winreg
def binipdisplay(s):
"convert a binary array of ip adresses to a python list"
if len(s) % 4 != 0:
raise EnvironmentError # well ...
ol=[]
for i in range(len(s)/4):
s1=s[:4]
s=s[4:]
ip=[]
for j in s1:
ip.append(str(ord(j)))
ol.append(string.join(ip,'.'))
return ol
def stringdisplay(s):
'''convert "d.d.d.d,d.d.d.d" to ["d.d.d.d","d.d.d.d"].
also handle u'd.d.d.d d.d.d.d', as reported on SF
'''
import re
return map(str, re.split("[ ,]",s))
def RegistryResolve():
nameservers=[]
x=_winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
try:
y= _winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters")
except EnvironmentError: # so it isn't NT/2000/XP
# windows ME, perhaps?
try: # for Windows ME
y= _winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\VxD\MSTCP")
nameserver,dummytype=_winreg.QueryValueEx(y,'NameServer')
if nameserver and not (nameserver in nameservers):
nameservers.extend(stringdisplay(nameserver))
except EnvironmentError:
pass
return nameservers # no idea
try:
nameserver = _winreg.QueryValueEx(y, "DhcpNameServer")[0].split()
except:
nameserver = _winreg.QueryValueEx(y, "NameServer")[0].split()
if nameserver:
nameservers=nameserver
nameserver = _winreg.QueryValueEx(y,"NameServer")[0]
_winreg.CloseKey(y)
try: # for win2000
y= _winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\DNSRegisteredAdapters")
for i in range(1000):
try:
n=_winreg.EnumKey(y,i)
z=_winreg.OpenKey(y,n)
dnscount,dnscounttype=_winreg.QueryValueEx(z,
'DNSServerAddressCount')
dnsvalues,dnsvaluestype=_winreg.QueryValueEx(z,
'DNSServerAddresses')
nameservers.extend(binipdisplay(dnsvalues))
_winreg.CloseKey(z)
except EnvironmentError:
break
_winreg.CloseKey(y)
except EnvironmentError:
pass
#
try: # for whistler
y= _winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces")
for i in range(1000):
try:
n=_winreg.EnumKey(y,i)
z=_winreg.OpenKey(y,n)
try:
nameserver,dummytype=_winreg.QueryValueEx(z,'NameServer')
if nameserver and not (nameserver in nameservers):
nameservers.extend(stringdisplay(nameserver))
except EnvironmentError:
pass
_winreg.CloseKey(z)
except EnvironmentError:
break
_winreg.CloseKey(y)
except EnvironmentError:
#print "Key Interfaces not found, just do nothing"
pass
#
_winreg.CloseKey(x)
return nameservers
if __name__=="__main__":
print "Name servers:",RegistryResolve()
#
# $Log: win32dns.py,v $
# Revision 1.3.2.1 2007/05/22 20:26:49 customdesigned
# Fix win32 nameserver discovery.
#
# Revision 1.3 2002/05/06 06:15:31 anthonybaxter
# apparently some versions of windows return servers as unicode
# string with space sep, rather than strings with comma sep.
# *sigh*
#
# Revision 1.2 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.1 2001/08/09 09:22:28 anthonybaxter
# added what I hope is win32 resolver lookup support. I'll need to try
# and figure out how to get the CVS checkout onto my windows machine to
# make sure it works (wow, doing something other than games on the
# windows machine :)
#
# Code from Wolfgang.Strobl@gmd.de
# win32dns.py from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260
#
# Really, ParseResolvConf() should be renamed "FindNameServers" or
# some such.
#
#
|
catesandrew/Subler
|
refs/heads/master
|
Utilities/tvdb_py/cache.py
|
8
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:Creative Commons GNU GPL v2
# (http://creativecommons.org/licenses/GPL/2.0/)
"""
urllib2 caching handler
Modified from http://code.activestate.com/recipes/491261/
"""
from __future__ import with_statement
__author__ = "dbr/Ben"
__version__ = "1.5"
import os
import time
import errno
import httplib
import urllib2
import StringIO
from hashlib import md5
from threading import RLock
cache_lock = RLock()
def locked_function(origfunc):
"""Decorator to execute function under lock"""
def wrapped(*args, **kwargs):
cache_lock.acquire()
try:
return origfunc(*args, **kwargs)
finally:
cache_lock.release()
return wrapped
def calculate_cache_path(cache_location, url):
"""Checks if [cache_location]/[hash_of_url].headers and .body exist
"""
thumb = md5(url).hexdigest()
header = os.path.join(cache_location, thumb + ".headers")
body = os.path.join(cache_location, thumb + ".body")
return header, body
def check_cache_time(path, max_age):
"""Checks if a file has been created/modified in the [last max_age] seconds.
False means the file is too old (or doesn't exist), True means it is
up-to-date and valid"""
if not os.path.isfile(path):
return False
cache_modified_time = os.stat(path).st_mtime
time_now = time.time()
if cache_modified_time < time_now - max_age:
# Cache is old
return False
else:
return True
@locked_function
def exists_in_cache(cache_location, url, max_age):
"""Returns if header AND body cache file exist (and are up-to-date)"""
hpath, bpath = calculate_cache_path(cache_location, url)
if os.path.exists(hpath) and os.path.exists(bpath):
return(
check_cache_time(hpath, max_age)
and check_cache_time(bpath, max_age)
)
else:
# File does not exist
return False
@locked_function
def store_in_cache(cache_location, url, response):
"""Tries to store response in cache."""
hpath, bpath = calculate_cache_path(cache_location, url)
try:
outf = open(hpath, "w")
headers = str(response.info())
outf.write(headers)
outf.close()
outf = open(bpath, "w")
outf.write(response.read())
outf.close()
except IOError:
return True
else:
return False
class CacheHandler(urllib2.BaseHandler):
"""Stores responses in a persistant on-disk cache.
If a subsequent GET request is made for the same URL, the stored
response is returned, saving time, resources and bandwidth
"""
@locked_function
def __init__(self, cache_location, max_age = 21600):
"""The location of the cache directory"""
self.max_age = max_age
self.cache_location = cache_location
if not os.path.exists(self.cache_location):
try:
os.mkdir(self.cache_location)
except OSError, e:
if e.errno == errno.EEXIST and os.path.isdir(self.cache_location):
# File exists, and it's a directory,
# another process beat us to creating this dir, that's OK.
pass
else:
# Our target dir is already a file, or different error,
# relay the error!
raise
def default_open(self, request):
"""Handles GET requests, if the response is cached it returns it
"""
if request.get_method() != "GET":
return None # let the next handler try to handle the request
if exists_in_cache(
self.cache_location, request.get_full_url(), self.max_age
):
return CachedResponse(
self.cache_location,
request.get_full_url(),
set_cache_header = True
)
else:
return None
def http_response(self, request, response):
"""Gets a HTTP response, if it was a GET request and the status code
starts with 2 (200 OK etc) it caches it and returns a CachedResponse
"""
if (request.get_method() == "GET"
and str(response.code).startswith("2")
):
if 'x-local-cache' not in response.info():
# Response is not cached
set_cache_header = store_in_cache(
self.cache_location,
request.get_full_url(),
response
)
else:
set_cache_header = True
#end if x-cache in response
return CachedResponse(
self.cache_location,
request.get_full_url(),
set_cache_header = set_cache_header
)
else:
return response
class CachedResponse(StringIO.StringIO):
"""An urllib2.response-like object for cached responses.
To determine if a response is cached or coming directly from
the network, check the x-local-cache header rather than the object type.
"""
@locked_function
def __init__(self, cache_location, url, set_cache_header=True):
self.cache_location = cache_location
hpath, bpath = calculate_cache_path(cache_location, url)
StringIO.StringIO.__init__(self, file(bpath).read())
self.url = url
self.code = 200
self.msg = "OK"
headerbuf = file(hpath).read()
if set_cache_header:
headerbuf += "x-local-cache: %s\r\n" % (bpath)
self.headers = httplib.HTTPMessage(StringIO.StringIO(headerbuf))
def info(self):
"""Returns headers
"""
return self.headers
def geturl(self):
"""Returns original URL
"""
return self.url
@locked_function
def recache(self):
new_request = urllib2.urlopen(self.url)
set_cache_header = store_in_cache(
self.cache_location,
new_request.url,
new_request
)
CachedResponse.__init__(self, self.cache_location, self.url, True)
if __name__ == "__main__":
def main():
"""Quick test/example of CacheHandler"""
opener = urllib2.build_opener(CacheHandler("/tmp/"))
response = opener.open("http://google.com")
print response.headers
print "Response:", response.read()
response.recache()
print response.headers
print "After recache:", response.read()
# Test usage in threads
from threading import Thread
class CacheThreadTest(Thread):
lastdata = None
def run(self):
req = opener.open("http://google.com")
newdata = req.read()
if self.lastdata is None:
self.lastdata = newdata
assert self.lastdata == newdata, "Data was not consistent, uhoh"
req.recache()
threads = [CacheThreadTest() for x in range(50)]
print "Starting threads"
[t.start() for t in threads]
print "..done"
print "Joining threads"
[t.join() for t in threads]
print "..done"
main()
|
apyrgio/snf-ganeti
|
refs/heads/stable-2.10-bpo2
|
test/py/ganeti.ssh_unittest.py
|
1
|
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the ssh module"""
import os
import tempfile
import unittest
import shutil
import testutils
import mocks
from ganeti import constants
from ganeti import utils
from ganeti import ssh
from ganeti import errors
class TestKnownHosts(testutils.GanetiTestCase):
"""Test case for function writing the known_hosts file"""
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.tmpfile = self._CreateTempFile()
def test(self):
cfg = mocks.FakeConfig()
ssh.WriteKnownHostsFile(cfg, self.tmpfile)
self.assertFileContent(self.tmpfile,
"%s ssh-rsa %s\n%s ssh-dss %s\n" %
(cfg.GetClusterName(), mocks.FAKE_CLUSTER_KEY,
cfg.GetClusterName(), mocks.FAKE_CLUSTER_KEY))
class TestGetUserFiles(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
@staticmethod
def _GetNoHomedir(_):
return None
def _GetTempHomedir(self, _):
return self.tmpdir
def testNonExistantUser(self):
for kind in constants.SSHK_ALL:
self.assertRaises(errors.OpExecError, ssh.GetUserFiles, "example",
kind=kind, _homedir_fn=self._GetNoHomedir)
def testUnknownKind(self):
kind = "something-else"
assert kind not in constants.SSHK_ALL
self.assertRaises(errors.ProgrammerError, ssh.GetUserFiles, "example4645",
kind=kind, _homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
def testNoSshDirectory(self):
for kind in constants.SSHK_ALL:
self.assertRaises(errors.OpExecError, ssh.GetUserFiles, "example29694",
kind=kind, _homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
def testSshIsFile(self):
utils.WriteFile(os.path.join(self.tmpdir, ".ssh"), data="")
for kind in constants.SSHK_ALL:
self.assertRaises(errors.OpExecError, ssh.GetUserFiles, "example26237",
kind=kind, _homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
def testMakeSshDirectory(self):
sshdir = os.path.join(self.tmpdir, ".ssh")
self.assertEqual(os.listdir(self.tmpdir), [])
for kind in constants.SSHK_ALL:
ssh.GetUserFiles("example20745", mkdir=True, kind=kind,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(os.stat(sshdir).st_mode & 0777, 0700)
def testFilenames(self):
sshdir = os.path.join(self.tmpdir, ".ssh")
os.mkdir(sshdir)
for kind in constants.SSHK_ALL:
result = ssh.GetUserFiles("example15103", mkdir=False, kind=kind,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(result, [
os.path.join(self.tmpdir, ".ssh", "id_%s" % kind),
os.path.join(self.tmpdir, ".ssh", "id_%s.pub" % kind),
os.path.join(self.tmpdir, ".ssh", "authorized_keys"),
])
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(os.listdir(sshdir), [])
def testNoDirCheck(self):
self.assertEqual(os.listdir(self.tmpdir), [])
for kind in constants.SSHK_ALL:
ssh.GetUserFiles("example14528", mkdir=False, dircheck=False, kind=kind,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
def testGetAllUserFiles(self):
result = ssh.GetAllUserFiles("example7475", mkdir=False, dircheck=False,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(result,
(os.path.join(self.tmpdir, ".ssh", "authorized_keys"), {
constants.SSHK_RSA:
(os.path.join(self.tmpdir, ".ssh", "id_rsa"),
os.path.join(self.tmpdir, ".ssh", "id_rsa.pub")),
constants.SSHK_DSA:
(os.path.join(self.tmpdir, ".ssh", "id_dsa"),
os.path.join(self.tmpdir, ".ssh", "id_dsa.pub")),
}))
self.assertEqual(os.listdir(self.tmpdir), [])
def testGetAllUserFilesNoDirectoryNoMkdir(self):
self.assertRaises(errors.OpExecError, ssh.GetAllUserFiles,
"example17270", mkdir=False, dircheck=True,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
gonboy/sl4a
|
refs/heads/master
|
python/src/Mac/Modules/ah/ahscan.py
|
34
|
# Scan an Apple header file, generating a Python file of generator calls.
import sys
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner_OSX
LONG = "AppleHelp"
SHORT = "ah"
OBJECT = "NOTUSED"
def main():
input = LONG + ".h"
output = SHORT + "gen.py"
defsoutput = TOOLBOXDIR + LONG + ".py"
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Testing definitions output code ==="
execfile(defsoutput, {}, {})
print "=== Done scanning and generating, now importing the generated code... ==="
exec "import " + SHORT + "support"
print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner_OSX):
def destination(self, type, name, arglist):
classname = "Function"
listname = "functions"
if arglist:
t, n, m = arglist[0]
# This is non-functional today
if t == OBJECT and m == "InMode":
classname = "Method"
listname = "methods"
return classname, listname
def makeblacklistnames(self):
return [
]
def makeblacklisttypes(self):
return [
]
def makerepairinstructions(self):
return [
]
if __name__ == "__main__":
main()
|
nesdis/djongo
|
refs/heads/master
|
tests/django_tests/tests/v21/tests/generic_views/models.py
|
112
|
from django.db import models
from django.db.models import QuerySet
from django.db.models.manager import BaseManager
from django.urls import reverse
class Artist(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ['name']
verbose_name = 'professional artist'
verbose_name_plural = 'professional artists'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('artist_detail', kwargs={'pk': self.id})
class Author(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class DoesNotExistQuerySet(QuerySet):
def get(self, *args, **kwargs):
raise Author.DoesNotExist
DoesNotExistBookManager = BaseManager.from_queryset(DoesNotExistQuerySet)
class Book(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField()
pages = models.IntegerField()
authors = models.ManyToManyField(Author)
pubdate = models.DateField()
objects = models.Manager()
does_not_exist = DoesNotExistBookManager()
class Meta:
ordering = ['-pubdate']
def __str__(self):
return self.name
class Page(models.Model):
content = models.TextField()
template = models.CharField(max_length=255)
class BookSigning(models.Model):
event_date = models.DateTimeField()
|
erikabarros/naguil
|
refs/heads/master
|
backend/apps/cliente_app/cliente_facade.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaegraph.business_base import NodeSearch, DeleteNode
from cliente_app.cliente_commands import ListClienteCommand, SaveClienteCommand, UpdateClienteCommand, ClienteForm,\
GetClienteCommand, DeleteClienteCommand
from gaepermission.decorator import login_not_required
@login_not_required
def save_cliente_cmd(**cliente_properties):
"""
Command to save Cliente entity
:param cliente_properties: a dict of properties to save on model
:return: a Command that saves Cliente, validating and localizing properties received as strings
"""
return SaveClienteCommand(**cliente_properties)
@login_not_required
def update_cliente_cmd(cliente_id, **cliente_properties):
"""
Command to update Cliente entity with id equals 'cliente_id'
:param cliente_properties: a dict of properties to update model
:return: a Command that updates Cliente, validating and localizing properties received as strings
"""
return UpdateClienteCommand(cliente_id, **cliente_properties)
@login_not_required
def list_clientes_cmd():
"""
Command to list Cliente entities ordered by their creation dates
:return: a Command that performs the db operations when executed
"""
return ListClienteCommand()
@login_not_required
def cliente_form(**kwargs):
"""
Function to get Cliente's detail form.
:param kwargs: form properties
:return: Form
"""
return ClienteForm(**kwargs)
@login_not_required
def get_cliente_cmd(cliente_id):
"""
Find a Cliente by its id
:param cliente_id: the cliente id
:return: Command
"""
return GetClienteCommand(cliente_id)
@login_not_required
def delete_cliente_cmd(cliente_id):
"""
Construct a command to delete a Cliente
:param cliente_id: cliente's id
:return: Command
"""
return DeleteClienteCommand(cliente_id)
|
youprofit/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/QueueStatusServer/model/queuelog.py
|
122
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from time import time
from datetime import datetime
from google.appengine.ext import db
from model.workitems import WorkItems
from model.activeworkitems import ActiveWorkItems
class QueueLog(db.Model):
date = db.DateTimeProperty()
# duration specifies in seconds the time period these log values apply to.
duration = db.IntegerProperty()
queue_name = db.StringProperty()
bot_ids_seen = db.StringListProperty()
max_patches_waiting = db.IntegerProperty(default=0)
patch_wait_durations = db.ListProperty(int)
patch_process_durations = db.ListProperty(int)
patch_retry_count = db.IntegerProperty(default=0)
status_update_count = db.IntegerProperty(default=0)
@staticmethod
def create_key(queue_name, duration, timestamp):
return "%s-%s-%s" % (queue_name, duration, timestamp)
@classmethod
def get_at(cls, queue_name, duration, timestamp):
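# Round the timestamp down to the start of its bucket so that every request
# in the same window (e.g. the same hour when duration is 3600) maps to the
# same QueueLog entity.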
timestamp = int(timestamp / duration) * duration
date = datetime.utcfromtimestamp(timestamp)
key = cls.create_key(queue_name, duration, timestamp)
return cls.get_or_create(key, date=date, duration=duration, queue_name=queue_name)
@classmethod
def get_current(cls, queue_name, duration):
return cls.get_at(queue_name, duration, time())
# This is to prevent page requests from generating lots of rows in the database.
@classmethod
def get_or_create(cls, key_name, **kwargs):
return db.run_in_transaction(cls._get_or_create_txn, key_name, **kwargs)
def update_max_patches_waiting(self):
patches_waiting = self._get_patches_waiting(self.queue_name)
if patches_waiting > self.max_patches_waiting:
self.max_patches_waiting = patches_waiting
return True
return False
@classmethod
def _get_or_create_txn(cls, key_name, **kwargs):
entity = cls.get_by_key_name(key_name, parent=kwargs.get('parent'))
if entity is None:
entity = cls(key_name=key_name, **kwargs)
return entity
@classmethod
def _get_patches_waiting(cls, queue_name):
work_items = WorkItems.lookup_by_queue(queue_name)
active_work_items = ActiveWorkItems.lookup_by_queue(queue_name)
return len(set(work_items.item_ids) - set(active_work_items.item_ids))
|
openconnectome/ndobjectdetect
|
refs/heads/master
|
maca/packages/theano_membrane_segmentation/em_train.py
|
5
|
################################################################################
# (c) [2013] The Johns Hopkins University / Applied Physics Laboratory All Rights Reserved.
# Contact the JHU/APL Office of Technology Transfer for any additional rights. www.jhuapl.edu/ott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
""" Solves a membrane detection/classification problem.
This module provides the top-level interface for solving a binary
"membrane vs non-membrane" classification problem for EM data sets
(e.g. [1]) using convolutional neural networks.
The overall approach is based on Dan Ciresan's paper [2] and the code
is derived from a LeNet example included in the Theano code base for
MNIST classification.
References:
[1] http://brainiac2.mit.edu/isbi_challenge/
[2] Ciresan, Dan, et al. "Deep neural networks segment neuronal membranes
in electron microscopy images." Advances in neural information
processing systems. 2012.
December 2013, mjp
"""
import os, os.path
import sys, time
import socket
import argparse
import numpy
from PIL import Image
import pdb
import theano
import theano.tensor as T
import em_networks as EMN
from em_utils import *
from tiles import *
def load_membrane_data(trainDataFile, trainLabelsFile,
tileSize,
trainSlices, validSlices,
nZeeChannels=0):
"""Loads data set and creates corresponding tile managers.
"""
# load the volume and the labels
if trainDataFile.endswith('.tif'):
X = load_tiff_data(trainDataFile)
# Assumes raw conference data (i.e. not preprocessed).
#for ii in range(X.shape[0]):
# X[ii,:,:] = X[ii,:,:] - numpy.mean(X[ii,:,:])
#X = X / numpy.max(numpy.abs(X))
print '[%s]: Warning: no longer zero-meaning and scaling data' % __name__
elif trainDataFile.endswith('.npz'):
# assumes volume data is stored as the tensor X and is suitably preprocessed
X = numpy.load(trainDataFile)['X']
else:
raise RuntimeError('unexpected data file extension')
Y = load_tiff_data(trainLabelsFile)
# mirror edges
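# (pad each slice by half a tile on every side so that tiles centered on
# border pixels still have a full neighborhood of pixels)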
border = numpy.floor(tileSize/2.)
X = mirror_edges_tensor(X, border)
Y = mirror_edges_tensor(Y, border)
# Use 0 and 1 as class labels. This is actually important because
# the neural network code will use class labels as indices into
# the outputs of the last network layer.
#
# 0 := non-membrane
# 1 := membrane
Y[Y==0] = 1; Y[Y==255] = 0
assert(Y.max() == 1)
X_train = X[trainSlices,:,:]
Y_train = Y[trainSlices,:,:]
X_valid = X[validSlices,:,:]
Y_valid = Y[validSlices,:,:]
# tile managers will put the images into GPU memory via Theano shared vars.
train = TileManager(X_train, Y_train, tileSize=tileSize, nZeeChannels=nZeeChannels)
valid = TileManager(X_valid, Y_valid, tileSize=tileSize, nZeeChannels=nZeeChannels)
return (train, valid, (X, Y))
def random_image_modifiers(flipProb=.6, rotProb=.6):
"""Randomly applies certain transforms to a 2d image.
As of this writing, these transforms are some
combination of flips and rotations.
"""
# clip probabilities to [0,1]
flipProb = max(min(flipProb,1),0)
rotProb = max(min(rotProb,1),0)
flipDim = 0; rotDir = 0
if numpy.random.rand() < flipProb:
flipDim = numpy.sign(numpy.random.rand() - .5)
if numpy.random.rand() < rotProb:
rotDir = numpy.sign(numpy.random.rand() - .5)
return flipDim, rotDir
def train_network(nn, trainMgr, validMgr,
nEpochs=30, learningRate=.001, decay=.995,
maxNumTilesPerEpoch=sys.maxint,
outDir="."):
"""Learns parameters for the given neural network.
"""
p2 = int(numpy.floor(nn.p/2.0))
# compute number of minibatches
nTrainBatches = int(numpy.ceil(trainMgr.batchSize / nn.miniBatchSize))
nValidBatches = int(numpy.ceil(validMgr.batchSize / nn.miniBatchSize))
print '[%s]: # of training batches is %d' % (__name__, nTrainBatches)
# allocate symbolic variables
indexT = T.lscalar() # index to a [mini]batch
learningRateT = T.scalar() # learning rate, theano variable
print '[%s]: initializing Theano...' % __name__
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# functions for the validation data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
predict_validation_data = theano.function([indexT], nn.layers[-1].p_y_given_x,
givens={
nn.x: validMgr.X_batch_GPU[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize]})
#nn.x: validMgr.X_batch_GPU[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize],
#nn.y: validMgr.y_batch_int[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize]})
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# functions for the training data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The cost we minimize during training is the NLL of the model
# Assumes the last layer is the logistic regression layer.
cost = nn.layers[-1].negative_log_likelihood(nn.y)
# create a list of all model parameters to be fit by gradient descent
#params = layer3.params + layer2.params + layer1.params + layer0.params
params = reduce(lambda a,b: a+b, [l.params for l in nn.layers])
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters via
# SGD. Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i],grads[i]) pairs.
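# In other words, plain (non-momentum) SGD: param <- param - learningRate * d(cost)/d(param)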
updates = []
for param_i, grad_i in zip(params, grads):
updates.append((param_i, param_i - learningRateT * grad_i))
train_model = theano.function([indexT, learningRateT], [cost, nn.layers[-1].p_y_given_x], updates=updates,
givens={
nn.x: trainMgr.X_batch_GPU[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize],
nn.y: trainMgr.y_batch_int[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize]})
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Do the training
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
startTime = time.clock()
trainTime = 0
validTime = 0
lastChatter = -1
nTilesProcessed = 0
nTilesFlipped = 0
nTilesRotated = 0
print '[%s]: Training network.' % __name__
for epoch in xrange(nEpochs):
print '[%s]: Starting epoch %d / %d (net time: %0.2f m)' % (__name__, epoch, nEpochs, (time.clock()-startTime)/60.)
sys.stdout.flush()
prevParams = EMN.save_network_parameters(nn, None) # params just before learning
predictions = numpy.zeros(trainMgr.y_batch_local.shape)
nErrors = 0
for slices,rows,cols,pct in trainMgr.make_balanced_pixel_generator():
# reset predictions
predictions[:] = -1;
# transform images and update GPU memory
flipDim,rotDir = random_image_modifiers()
trainMgr.update_gpu(slices, rows, cols, flipDim=flipDim, rotDir=rotDir)
if flipDim != 0: nTilesFlipped += len(slices)
if rotDir != 0: nTilesRotated += len(slices)
# process all mini-batches
for minibatchIdx in xrange(nTrainBatches):
tic = time.clock()
[costij, probij] = train_model(minibatchIdx, learningRate)
trainTime += time.clock()-tic
predij = numpy.argmax(probij,axis=1)
predictions[(minibatchIdx*nn.miniBatchSize):(minibatchIdx+1)*nn.miniBatchSize] = predij
nTilesProcessed += len(slices)
nErrors = numpy.sum(predictions != trainMgr.y_batch_local)
# periodically report progress (e.g. every 30 min)
netTime = time.clock()-startTime
if numpy.floor(netTime/1800) > lastChatter:
print '[%s]: epoch %d; processed %0.2e tiles (%0.2f %%); net time %0.2f m' % (__name__, epoch, nTilesProcessed, pct, netTime/60.)
lastChatter = numpy.floor(netTime/1800)
sys.stdout.flush()
# check for early epoch termination
if nTilesProcessed >= maxNumTilesPerEpoch:
print '[%s]: epoch %d: quitting early after %d tiles processed (%0.2f %%)' % (__name__, epoch, nTilesProcessed, pct)
break
#----------------------------------------
# update learning rate after each training epoch
#----------------------------------------
if decay < 1:
learningRate *= decay
#----------------------------------------
# save result (even though it may just be an intermediate result)
#----------------------------------------
fn = 'params_epoch%02d' % epoch
newParams = EMN.save_network_parameters(nn, os.path.join(outDir, fn), verbose=False)
# report how much the network parameters changed
keys = newParams.keys(); keys.sort()
for key in keys:
delta = numpy.ndarray.flatten(numpy.abs(newParams[key] - prevParams[key]))
print '[%s]: %s (%d params)\n %0.2e / %0.2e / %0.2e / %0.2e' % (__name__, key, len(delta), numpy.min(delta), numpy.max(delta), numpy.mean(delta), numpy.median(delta))
#----------------------------------------
# validation performance
#----------------------------------------
print '[%s]: validating performance ...' % __name__
Y_hat = numpy.zeros(validMgr.Y_local.shape)
for slices,rows,cols in validMgr.make_all_pixel_generator():
# update tiles on the GPU
validMgr.update_gpu(slices,rows,cols,flipDim=0,rotDir=0)
for ii in range(nValidBatches):
# predictions is a (nTiles x 2) matrix
# grab the second output (y=1)
# (i.e. we store probability of membrane)
tic = time.clock()
pMembrane = predict_validation_data(ii)[:,1]
validTime += time.clock() - tic
# Be careful - on the last iteration, there may be
# less than batchSize tiles remaining.
a = ii*nn.miniBatchSize
b = min((ii+1)*nn.miniBatchSize, len(slices))
if a > len(slices): break
Y_hat[slices[a:b], rows[a:b], cols[a:b]] = pMembrane[0:b-a]
# Validation statistics are based on a simple threshold
# (without any other postprocessing).
#
# note: throw away the border before evaluating
Y_true = validMgr.Y_local[:,p2:-p2,p2:-p2]
Y_hat = Y_hat[:,p2:-p2,p2:-p2]
eval_performance(Y_true, Y_hat, 0.5, verbose=True)
eval_performance(Y_true, Y_hat, 0.7, verbose=True)
# statistics for this epoch
print '[%s]: epoch %d complete!' % (__name__, epoch)
print '[%s]: learning rate: %0.2e' % (__name__, learningRate)
print '[%s]: # errors: %d' % (__name__, nErrors)
print '[%s]: net elapsed time: %0.2f m' % (__name__, ((time.clock() - startTime) / 60.))
print '[%s]: net gpu train time: %0.2f m' % (__name__, (trainTime/60.))
print '[%s]: net validation time: %0.2f m' % (__name__, (validTime/60.))
print '[%s]: processed tiles: %0.2e' % (__name__, nTilesProcessed)
print '[%s]: flipped tiles: %0.2e' % (__name__, nTilesFlipped)
print '[%s]: rotated tiles: %0.2e' % (__name__, nTilesRotated)
endTime = time.clock()
print('[%s]: Optimization complete.' % __name__)
print '[%s]: The code for file "%s" ran for %0.2fm' % (__name__, os.path.split(__file__)[1], ((endTime - startTime) / 60.))
print "[%s]: GPU train time: %0.2fm" % (__name__, (trainTime/60.0))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
parser = argparse.ArgumentParser('Train a neural network on the EM data set')
#
# Parameters for defining and training the neural network
#
parser.add_argument('-n', dest='network', type=str, default='LeNetMembraneN3',
help='neural network architecture (use a class name here)')
parser.add_argument('-e', dest='nEpochs', type=int, default=30,
help='number of training epochs')
parser.add_argument('-r', dest='learnRate', type=float, default=0.001,
help='starting learning rate')
parser.add_argument('-d', dest='decay', type=float, default=0.995,
help='learning rate decay')
parser.add_argument('-m', dest='maxNumTilesPerEpoch', type=int, default=sys.maxint,
help='Maximum number of tiles used per epoch. Use this if there are too many tiles to process them all each epoch.')
#
# Data set parameters. Assuming here a data cube, where each xy-plane is a "slice" of the cube.
#
parser.add_argument('-X', dest='trainFileName', type=str, default='train-volume-raw.npz',
help='Name of the file containing the membrane data (i.e. X)')
parser.add_argument('-Y', dest='labelsFileName', type=str, default='train-labels.tif',
help='This is the file containing the class labels (i.e. Y)')
parser.add_argument('--train-slices', dest='trainSlicesExpr', type=str, default='range(1,30)',
help='A python-evaluatable string indicating which slices should be used for training')
parser.add_argument('--valid-slices', dest='validSliceExpr', type=str, default='range(27,30)',
help='A python-evaluatable string indicating which slices should be used for validation')
#
# Some special-case flags
#
parser.add_argument('--redirect-stdout', dest='redirectStdout', type=int, default=0,
help='set to 1 to send stdout to log.txt')
parser.add_argument('-c', dest='nZeeChannels', type=int, default=0,
help='number of "mirror" channels')
args = parser.parse_args()
# define and create output directory
host = socket.gethostname()
deviceAndDate = theano.config.device + '_' + time.strftime('%d-%m-%Y')
outDir = os.path.join(host, deviceAndDate, '%s_%03d_%0.4f_%0.4f' % (args.network, args.nEpochs, args.learnRate, args.decay))
if not os.path.isdir(outDir): os.makedirs(outDir)
# Redirect stdout, if asked to do so
if args.redirectStdout:
fn = os.path.join(outDir, 'log.txt')
sys.stdout = open(fn, 'w')
# Set up train/valid slices. Using eval() might not be ideal, but
# provides an easy way for the caller to define train/validation.
trainSlices = eval(args.trainSlicesExpr)
validSlices = eval(args.validSliceExpr)
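    # e.g. (hypothetical invocation) --train-slices "range(0,27)" --valid-slices "range(27,30)"
    # keeps the training and validation slices disjoint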
# create a neural network instance
clazz = getattr(EMN, args.network)
nn = clazz(nChannels=1+2*args.nZeeChannels)
print '[%s]: Using the following parameters:' % __name__
print ' start time: %s' % time.ctime()
print ' host: %s' % host
print ' device: %s' % theano.config.device
print ' pid: %s' % os.getpid()
print ' train data: %s' % args.trainFileName
print ' train labels: %s' % args.labelsFileName
print ' train slices: %s' % trainSlices
print ' valid slices: %s' % validSlices
print ' network: %s' % nn.__class__.__name__
print ' # epochs: %d' % args.nEpochs
print ' max # tiles/epoch: %d' % args.maxNumTilesPerEpoch
print ' learn rate: %0.3f' % args.learnRate
print ' decay: %0.3f' % args.decay
print ' tile size: %d' % nn.p
for idx,l in enumerate(nn.layers):
print ' layer %d: ' % idx,
print str(l.W.get_value().shape)
print ' z-channels: %d' % args.nZeeChannels
print ' output dir: %s' % outDir
print '[%s]: Loading data...' % __name__
(train,valid,membraneData) = load_membrane_data(args.trainFileName, args.labelsFileName,
tileSize=nn.p,
trainSlices=trainSlices,
validSlices=validSlices,
nZeeChannels=args.nZeeChannels)
print ' train dim: %d x %d x %d' % (train.X_local.shape)
print ' valid dim: %d x %d x %d' % (valid.X_local.shape)
print ' valid slices: %s' % (validSlices)
#--------------------------------------------------
# Do the work
#--------------------------------------------------
# train the neural network
train_network(nn, train, valid,
learningRate=args.learnRate,
decay=args.decay,
nEpochs=args.nEpochs,
maxNumTilesPerEpoch=args.maxNumTilesPerEpoch,
outDir=outDir)
|
issi5862/ishida_jbd2_linux-2.0
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
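# These mirror the futex(2) op codes and flags from <linux/futex.h>; masking an
# op with FUTEX_CMD_MASK strips the flag bits to recover the base command, e.g.
# (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) & FUTEX_CMD_MASK == FUTEX_WAIT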
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
    return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
    # ESC[H moves the cursor to the home position; ESC[2J clears the screen
    print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/managementpartner/azure-mgmt-managementpartner/azure/mgmt/managementpartner/operations/__init__.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._partner_operations import PartnerOperations
from ._operation_operations import OperationOperations
from ._partners_operations import PartnersOperations
__all__ = [
'PartnerOperations',
'OperationOperations',
'PartnersOperations',
]
|
Francis-Liu/animated-broccoli
|
refs/heads/master
|
nova/virt/xenapi/client/objects.py
|
97
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import utils
class XenAPISessionObject(object):
"""Wrapper to make calling and mocking the session easier
The XenAPI protocol is an XML RPC API that is based around the
XenAPI database, and operations you can do on each of the objects
stored in the database, such as VM, SR, VDI, etc.
For more details see the XenAPI docs:
http://docs.vmd.citrix.com/XenServer/6.2.0/1.0/en_gb/api/
    Most objects, like VM, SR, VDI, etc., share a common set of methods:
* vm_ref = session.VM.create(vm_rec)
* vm_ref = session.VM.get_by_uuid(uuid)
* session.VM.destroy(vm_ref)
* vm_refs = session.VM.get_all()
Each object also has specific messages, or functions, such as:
* session.VM.clean_reboot(vm_ref)
Each object has fields, like "VBDs" that can be fetched like this:
* vbd_refs = session.VM.get_VBDs(vm_ref)
You can get all the fields by fetching the full record.
    However, please note this is much more expensive than just
fetching the field you require:
* vm_rec = session.VM.get_record(vm_ref)
When searching for particular objects, you may be tempted
to use get_all(), but this often leads to races as objects
get deleted under your feet. It is preferable to use the undocumented:
* vms = session.VM.get_all_records_where(
'field "is_control_domain"="true"')
"""
def __init__(self, session, name):
self.session = session
self.name = name
def _call_method(self, method_name, *args):
call = "%s.%s" % (self.name, method_name)
return self.session.call_xenapi(call, *args)
def __getattr__(self, method_name):
return lambda *params: self._call_method(method_name, *params)
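    # e.g. VM(session).get_by_uuid(uuid) resolves via __getattr__ to
    # _call_method("get_by_uuid", uuid), which in turn issues
    # session.call_xenapi("VM.get_by_uuid", uuid)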
class VM(XenAPISessionObject):
"""Virtual Machine."""
def __init__(self, session):
super(VM, self).__init__(session, "VM")
class VBD(XenAPISessionObject):
"""Virtual block device."""
def __init__(self, session):
super(VBD, self).__init__(session, "VBD")
def plug(self, vbd_ref, vm_ref):
@utils.synchronized('xenapi-vbd-' + vm_ref)
def synchronized_plug():
self._call_method("plug", vbd_ref)
# NOTE(johngarbutt) we need to ensure there is only ever one
# VBD.unplug or VBD.plug happening at once per VM
# due to a bug in XenServer 6.1 and 6.2
synchronized_plug()
def unplug(self, vbd_ref, vm_ref):
@utils.synchronized('xenapi-vbd-' + vm_ref)
def synchronized_unplug():
self._call_method("unplug", vbd_ref)
# NOTE(johngarbutt) we need to ensure there is only ever one
# VBD.unplug or VBD.plug happening at once per VM
# due to a bug in XenServer 6.1 and 6.2
synchronized_unplug()
class VDI(XenAPISessionObject):
"""Virtual disk image."""
def __init__(self, session):
super(VDI, self).__init__(session, "VDI")
class SR(XenAPISessionObject):
"""Storage Repository."""
def __init__(self, session):
super(SR, self).__init__(session, "SR")
class PBD(XenAPISessionObject):
"""Physical block device."""
def __init__(self, session):
super(PBD, self).__init__(session, "PBD")
class PIF(XenAPISessionObject):
"""Physical Network Interface."""
def __init__(self, session):
super(PIF, self).__init__(session, "PIF")
class VLAN(XenAPISessionObject):
"""VLAN."""
def __init__(self, session):
super(VLAN, self).__init__(session, "VLAN")
class Host(XenAPISessionObject):
"""XenServer hosts."""
def __init__(self, session):
super(Host, self).__init__(session, "host")
class Network(XenAPISessionObject):
"""Networks that VIFs are attached to."""
def __init__(self, session):
super(Network, self).__init__(session, "network")
class Pool(XenAPISessionObject):
"""Pool of hosts."""
def __init__(self, session):
super(Pool, self).__init__(session, "pool")
|
mtanski/samba
|
refs/heads/master
|
source4/dsdb/tests/python/passwords.py
|
27
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This tests the password changes over LDAP for AD implementations
#
# Copyright Matthias Dieter Wallnoefer 2010
#
# Notice: These tests will also work against Windows Server if the connection is
# secured enough (SASL with a minimum of 128 Bit encryption) - consider
# MS-ADTS 3.1.1.3.1.5
import optparse
import sys
import base64
import time
import os
sys.path.insert(0, "bin/python")
import samba
from samba.tests.subunitrun import SubunitOptions, TestProgram
import samba.getopt as options
from samba.auth import system_session
from samba.credentials import Credentials
from ldb import SCOPE_BASE, LdbError
from ldb import ERR_ATTRIBUTE_OR_VALUE_EXISTS
from ldb import ERR_UNWILLING_TO_PERFORM, ERR_INSUFFICIENT_ACCESS_RIGHTS
from ldb import ERR_NO_SUCH_ATTRIBUTE
from ldb import ERR_CONSTRAINT_VIOLATION
from ldb import Message, MessageElement, Dn
from ldb import FLAG_MOD_ADD, FLAG_MOD_REPLACE, FLAG_MOD_DELETE
from samba import gensec
from samba.samdb import SamDB
import samba.tests
from samba.tests import delete_force
parser = optparse.OptionParser("passwords.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args[0]
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
# Force an encrypted connection
creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
#
# Tests start here
#
class PasswordTests(samba.tests.TestCase):
def setUp(self):
super(PasswordTests, self).setUp()
self.ldb = SamDB(url=host, session_info=system_session(lp), credentials=creds, lp=lp)
# Gets back the basedn
base_dn = self.ldb.domain_dn()
# Gets back the configuration basedn
configuration_dn = self.ldb.get_config_basedn().get_linearized()
# Get the old "dSHeuristics" if it was set
dsheuristics = self.ldb.get_dsheuristics()
# Set the "dSHeuristics" to activate the correct "userPassword" behaviour
self.ldb.set_dsheuristics("000000001")
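        # (character 9 of "dSHeuristics", fUserPwdSupport, is what makes
        # "userPassword" behave as a real password attribute - see MS-ADTS)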
# Reset the "dSHeuristics" as they were before
self.addCleanup(self.ldb.set_dsheuristics, dsheuristics)
# Get the old "minPwdAge"
minPwdAge = self.ldb.get_minPwdAge()
        # Set it temporarily to "0"
self.ldb.set_minPwdAge("0")
self.base_dn = self.ldb.domain_dn()
# Reset the "minPwdAge" as it was before
self.addCleanup(self.ldb.set_minPwdAge, minPwdAge)
# (Re)adds the test user "testuser" with no password atm
delete_force(self.ldb, "cn=testuser,cn=users," + self.base_dn)
self.ldb.add({
"dn": "cn=testuser,cn=users," + self.base_dn,
"objectclass": "user",
"sAMAccountName": "testuser"})
        # Tests a password change with a wrong old password while no password
        # has been set yet
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: noPassword
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, msg):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
# Windows (2008 at least) seems to have some small bug here: it
# returns "0000056A" on longer (always wrong) previous passwords.
self.assertTrue('00000056' in msg)
# Sets the initial user password with a "special" password change
        # I think that this internally is a password set operation and it can
        # only be performed by someone who has password set privileges on the
        # account (at least in s4 we handle it like that).
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
add: userPassword
userPassword: thatsAcomplPASS1
""")
# But in the other way around this special syntax doesn't work
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
# Enables the user account
self.ldb.enable_account("(sAMAccountName=testuser)")
        # Open a second LDB connection with the user credentials. Use the
        # command line credentials for information like the domain, the realm
        # and the workstation.
creds2 = Credentials()
creds2.set_username("testuser")
creds2.set_password("thatsAcomplPASS1")
creds2.set_domain(creds.get_domain())
creds2.set_realm(creds.get_realm())
creds2.set_workstation(creds.get_workstation())
creds2.set_gensec_features(creds2.get_gensec_features()
| gensec.FEATURE_SEAL)
self.ldb2 = SamDB(url=host, credentials=creds2, lp=lp)
def test_unicodePwd_hash_set(self):
"""Performs a password hash set operation on 'unicodePwd' which should be prevented"""
# Notice: Direct hash password sets should never work
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement("XXXXXXXXXXXXXXXX", FLAG_MOD_REPLACE,
"unicodePwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
def test_unicodePwd_hash_change(self):
"""Performs a password hash change operation on 'unicodePwd' which should be prevented"""
# Notice: Direct hash password changes should never work
# Hash password changes should never work
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd: XXXXXXXXXXXXXXXX
add: unicodePwd
unicodePwd: YYYYYYYYYYYYYYYY
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
def test_unicodePwd_clear_set(self):
"""Performs a password cleartext set operation on 'unicodePwd'"""
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement("\"thatsAcomplPASS2\"".encode('utf-16-le'),
FLAG_MOD_REPLACE, "unicodePwd")
self.ldb.modify(m)
def test_unicodePwd_clear_change(self):
"""Performs a password cleartext change operation on 'unicodePwd'"""
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS1\"".encode('utf-16-le')) + """
add: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS2\"".encode('utf-16-le')) + """
""")
# Wrong old password
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS3\"".encode('utf-16-le')) + """
add: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS4\"".encode('utf-16-le')) + """
""")
self.fail()
except LdbError, (num, msg):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('00000056' in msg)
# A change to the same password again will not work (password history)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS2\"".encode('utf-16-le')) + """
add: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS2\"".encode('utf-16-le')) + """
""")
self.fail()
except LdbError, (num, msg):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('0000052D' in msg)
def test_dBCSPwd_hash_set(self):
"""Performs a password hash set operation on 'dBCSPwd' which should be prevented"""
# Notice: Direct hash password sets should never work
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["dBCSPwd"] = MessageElement("XXXXXXXXXXXXXXXX", FLAG_MOD_REPLACE,
"dBCSPwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
def test_dBCSPwd_hash_change(self):
"""Performs a password hash change operation on 'dBCSPwd' which should be prevented"""
# Notice: Direct hash password changes should never work
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: dBCSPwd
dBCSPwd: XXXXXXXXXXXXXXXX
add: dBCSPwd
dBCSPwd: YYYYYYYYYYYYYYYY
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
def test_userPassword_clear_set(self):
"""Performs a password cleartext set operation on 'userPassword'"""
# Notice: This works only against Windows if "dSHeuristics" has been set
# properly
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("thatsAcomplPASS2", FLAG_MOD_REPLACE,
"userPassword")
self.ldb.modify(m)
def test_userPassword_clear_change(self):
"""Performs a password cleartext change operation on 'userPassword'"""
# Notice: This works only against Windows if "dSHeuristics" has been set
# properly
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
# Wrong old password
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS3
add: userPassword
userPassword: thatsAcomplPASS4
""")
self.fail()
except LdbError, (num, msg):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('00000056' in msg)
# A change to the same password again will not work (password history)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS2
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, msg):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('0000052D' in msg)
def test_clearTextPassword_clear_set(self):
"""Performs a password cleartext set operation on 'clearTextPassword'"""
# Notice: This never works against Windows - only supported by us
try:
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["clearTextPassword"] = MessageElement("thatsAcomplPASS2".encode('utf-16-le'),
FLAG_MOD_REPLACE, "clearTextPassword")
self.ldb.modify(m)
# this passes against s4
except LdbError, (num, msg):
# "NO_SUCH_ATTRIBUTE" is returned by Windows -> ignore it
if num != ERR_NO_SUCH_ATTRIBUTE:
raise LdbError(num, msg)
def test_clearTextPassword_clear_change(self):
"""Performs a password cleartext change operation on 'clearTextPassword'"""
# Notice: This never works against Windows - only supported by us
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS1".encode('utf-16-le')) + """
add: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS2".encode('utf-16-le')) + """
""")
# this passes against s4
except LdbError, (num, msg):
# "NO_SUCH_ATTRIBUTE" is returned by Windows -> ignore it
if num != ERR_NO_SUCH_ATTRIBUTE:
raise LdbError(num, msg)
# Wrong old password
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS3".encode('utf-16-le')) + """
add: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS4".encode('utf-16-le')) + """
""")
self.fail()
except LdbError, (num, msg):
# "NO_SUCH_ATTRIBUTE" is returned by Windows -> ignore it
if num != ERR_NO_SUCH_ATTRIBUTE:
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('00000056' in msg)
# A change to the same password again will not work (password history)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS2".encode('utf-16-le')) + """
add: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS2".encode('utf-16-le')) + """
""")
self.fail()
except LdbError, (num, msg):
# "NO_SUCH_ATTRIBUTE" is returned by Windows -> ignore it
if num != ERR_NO_SUCH_ATTRIBUTE:
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('0000052D' in msg)
def test_failures(self):
"""Performs some failure testing"""
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
add: userPassword
userPassword: thatsAcomplPASS1
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
add: userPassword
userPassword: thatsAcomplPASS1
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
replace: userPassword
userPassword: thatsAcomplPASS3
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
replace: userPassword
userPassword: thatsAcomplPASS3
""")
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
# Reverse order does work
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
add: userPassword
userPassword: thatsAcomplPASS2
delete: userPassword
userPassword: thatsAcomplPASS1
""")
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS2
add: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS3\"".encode('utf-16-le')) + """
""")
# this passes against s4
except LdbError, (num, _):
self.assertEquals(num, ERR_ATTRIBUTE_OR_VALUE_EXISTS)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS3\"".encode('utf-16-le')) + """
add: userPassword
userPassword: thatsAcomplPASS4
""")
# this passes against s4
except LdbError, (num, _):
self.assertEquals(num, ERR_NO_SUCH_ATTRIBUTE)
# Several password changes at once are allowed
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
replace: userPassword
userPassword: thatsAcomplPASS1
userPassword: thatsAcomplPASS2
""")
# Several password changes at once are allowed
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
replace: userPassword
userPassword: thatsAcomplPASS1
userPassword: thatsAcomplPASS2
replace: userPassword
userPassword: thatsAcomplPASS3
replace: userPassword
userPassword: thatsAcomplPASS4
""")
# This surprisingly should work
delete_force(self.ldb, "cn=testuser2,cn=users," + self.base_dn)
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"userPassword": ["thatsAcomplPASS1", "thatsAcomplPASS2"] })
# This surprisingly should work
delete_force(self.ldb, "cn=testuser2,cn=users," + self.base_dn)
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"userPassword": ["thatsAcomplPASS1", "thatsAcomplPASS1"] })
def test_empty_passwords(self):
print "Performs some empty passwords testing"
try:
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"unicodePwd": [] })
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"dBCSPwd": [] })
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"userPassword": [] })
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"clearTextPassword": [] })
self.fail()
except LdbError, (num, _):
self.assertTrue(num == ERR_CONSTRAINT_VIOLATION or
num == ERR_NO_SUCH_ATTRIBUTE) # for Windows
delete_force(self.ldb, "cn=testuser2,cn=users," + self.base_dn)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement([], FLAG_MOD_ADD, "unicodePwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["dBCSPwd"] = MessageElement([], FLAG_MOD_ADD, "dBCSPwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement([], FLAG_MOD_ADD, "userPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["clearTextPassword"] = MessageElement([], FLAG_MOD_ADD, "clearTextPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertTrue(num == ERR_CONSTRAINT_VIOLATION or
num == ERR_NO_SUCH_ATTRIBUTE) # for Windows
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement([], FLAG_MOD_REPLACE, "unicodePwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["dBCSPwd"] = MessageElement([], FLAG_MOD_REPLACE, "dBCSPwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement([], FLAG_MOD_REPLACE, "userPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["clearTextPassword"] = MessageElement([], FLAG_MOD_REPLACE, "clearTextPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertTrue(num == ERR_UNWILLING_TO_PERFORM or
num == ERR_NO_SUCH_ATTRIBUTE) # for Windows
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement([], FLAG_MOD_DELETE, "unicodePwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["dBCSPwd"] = MessageElement([], FLAG_MOD_DELETE, "dBCSPwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement([], FLAG_MOD_DELETE, "userPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["clearTextPassword"] = MessageElement([], FLAG_MOD_DELETE, "clearTextPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError, (num, _):
self.assertTrue(num == ERR_CONSTRAINT_VIOLATION or
num == ERR_NO_SUCH_ATTRIBUTE) # for Windows
def test_plain_userPassword(self):
print "Performs testing about the standard 'userPassword' behaviour"
# Delete the "dSHeuristics"
self.ldb.set_dsheuristics(None)
time.sleep(1) # This switching time is strictly needed!
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("myPassword", FLAG_MOD_ADD,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertTrue("userPassword" in res[0])
self.assertEquals(res[0]["userPassword"][0], "myPassword")
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("myPassword2", FLAG_MOD_REPLACE,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertTrue("userPassword" in res[0])
self.assertEquals(res[0]["userPassword"][0], "myPassword2")
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement([], FLAG_MOD_DELETE,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertFalse("userPassword" in res[0])
# Set the test "dSHeuristics" to deactivate "userPassword" pwd changes
self.ldb.set_dsheuristics("000000000")
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("myPassword3", FLAG_MOD_REPLACE,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertTrue("userPassword" in res[0])
self.assertEquals(res[0]["userPassword"][0], "myPassword3")
# Set the test "dSHeuristics" to deactivate "userPassword" pwd changes
self.ldb.set_dsheuristics("000000002")
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("myPassword4", FLAG_MOD_REPLACE,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertTrue("userPassword" in res[0])
self.assertEquals(res[0]["userPassword"][0], "myPassword4")
# Reset the test "dSHeuristics" (reactivate "userPassword" pwd changes)
self.ldb.set_dsheuristics("000000001")
def test_zero_length(self):
# Get the old "minPwdLength"
minPwdLength = self.ldb.get_minPwdLength()
        # Set it temporarily to "0"
self.ldb.set_minPwdLength("0")
# Get the old "pwdProperties"
pwdProperties = self.ldb.get_pwdProperties()
        # Set them temporarily to "0" (to deactivate the password complexity check, if enabled)
self.ldb.set_pwdProperties("0")
self.ldb.setpassword("(sAMAccountName=testuser)", "")
# Reset the "pwdProperties" as they were before
self.ldb.set_pwdProperties(pwdProperties)
# Reset the "minPwdLength" as it was before
self.ldb.set_minPwdLength(minPwdLength)
def tearDown(self):
super(PasswordTests, self).tearDown()
delete_force(self.ldb, "cn=testuser,cn=users," + self.base_dn)
delete_force(self.ldb, "cn=testuser2,cn=users," + self.base_dn)
# Close the second LDB connection (with the user credentials)
self.ldb2 = None
if not "://" in host:
if os.path.isfile(host):
host = "tdb://%s" % host
else:
host = "ldap://%s" % host
TestProgram(module=__name__, opts=subunitopts)
|
azureplus/hue
|
refs/heads/master
|
desktop/core/ext-py/Pygments-1.3.1/pygments/lexers/functional.py
|
55
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.functional
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for functional languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, \
String, Number, Punctuation, Literal, Generic
__all__ = ['SchemeLexer', 'CommonLispLexer', 'HaskellLexer', 'LiterateHaskellLexer',
'OcamlLexer', 'ErlangLexer', 'ErlangShellLexer']
class SchemeLexer(RegexLexer):
"""
A Scheme lexer, parsing a stream and outputting the tokens
needed to highlight scheme code.
    This lexer can most probably easily be subclassed to parse
    other Lisp dialects like Common Lisp, Emacs Lisp or AutoLisp.
This parser is checked with pastes from the LISP pastebin
at http://paste.lisp.org/ to cover as much syntax as possible.
It supports the full Scheme syntax as defined in R5RS.
*New in Pygments 0.6.*
"""
name = 'Scheme'
aliases = ['scheme', 'scm']
filenames = ['*.scm']
mimetypes = ['text/x-scheme', 'application/x-scheme']
    # list of known keywords and builtins taken from vim 6.4 scheme.vim
    # syntax file.
keywords = [
'lambda', 'define', 'if', 'else', 'cond', 'and', 'or', 'case', 'let',
'let*', 'letrec', 'begin', 'do', 'delay', 'set!', '=>', 'quote',
'quasiquote', 'unquote', 'unquote-splicing', 'define-syntax',
'let-syntax', 'letrec-syntax', 'syntax-rules'
]
builtins = [
'*', '+', '-', '/', '<', '<=', '=', '>', '>=', 'abs', 'acos', 'angle',
'append', 'apply', 'asin', 'assoc', 'assq', 'assv', 'atan',
'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr',
'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr',
'cadr', 'call-with-current-continuation', 'call-with-input-file',
'call-with-output-file', 'call-with-values', 'call/cc', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr',
'cdr', 'ceiling', 'char->integer', 'char-alphabetic?', 'char-ci<=?',
'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
'char-lower-case?', 'char-numeric?', 'char-ready?', 'char-upcase',
'char-upper-case?', 'char-whitespace?', 'char<=?', 'char<?', 'char=?',
'char>=?', 'char>?', 'char?', 'close-input-port', 'close-output-port',
'complex?', 'cons', 'cos', 'current-input-port', 'current-output-port',
'denominator', 'display', 'dynamic-wind', 'eof-object?', 'eq?',
'equal?', 'eqv?', 'eval', 'even?', 'exact->inexact', 'exact?', 'exp',
'expt', 'floor', 'for-each', 'force', 'gcd', 'imag-part',
'inexact->exact', 'inexact?', 'input-port?', 'integer->char',
'integer?', 'interaction-environment', 'lcm', 'length', 'list',
'list->string', 'list->vector', 'list-ref', 'list-tail', 'list?',
'load', 'log', 'magnitude', 'make-polar', 'make-rectangular',
'make-string', 'make-vector', 'map', 'max', 'member', 'memq', 'memv',
'min', 'modulo', 'negative?', 'newline', 'not', 'null-environment',
'null?', 'number->string', 'number?', 'numerator', 'odd?',
'open-input-file', 'open-output-file', 'output-port?', 'pair?',
'peek-char', 'port?', 'positive?', 'procedure?', 'quotient',
'rational?', 'rationalize', 'read', 'read-char', 'real-part', 'real?',
'remainder', 'reverse', 'round', 'scheme-report-environment',
'set-car!', 'set-cdr!', 'sin', 'sqrt', 'string', 'string->list',
'string->number', 'string->symbol', 'string-append', 'string-ci<=?',
'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?',
'string-copy', 'string-fill!', 'string-length', 'string-ref',
'string-set!', 'string<=?', 'string<?', 'string=?', 'string>=?',
'string>?', 'string?', 'substring', 'symbol->string', 'symbol?',
'tan', 'transcript-off', 'transcript-on', 'truncate', 'values',
'vector', 'vector->list', 'vector-fill!', 'vector-length',
'vector-ref', 'vector-set!', 'vector?', 'with-input-from-file',
'with-output-to-file', 'write', 'write-char', 'zero?'
]
# valid names for identifiers
    # well, names just cannot consist entirely of numbers,
    # but this should be good enough for now
valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+'
tokens = {
'root' : [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
#(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\".'_!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join([
re.escape(entry) + ' ' for entry in keywords]),
Keyword
),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join([
re.escape(entry) + ' ' for entry in builtins]),
Name.Builtin
),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
class CommonLispLexer(RegexLexer):
"""
A Common Lisp lexer.
*New in Pygments 0.9.*
"""
name = 'Common Lisp'
aliases = ['common-lisp', 'cl']
filenames = ['*.cl', '*.lisp', '*.el'] # use for Elisp too
mimetypes = ['text/x-common-lisp']
flags = re.IGNORECASE | re.MULTILINE
### couple of useful regexes
# characters that are not macro-characters and can be used to begin a symbol
nonmacro = r'\\.|[a-zA-Z0-9!$%&*+-/<=>?@\[\]^_{}~]'
constituent = nonmacro + '|[#.:]'
terminated = r'(?=[ "()\'\n,;`])' # whitespace or terminating macro characters
### symbol token, reverse-engineered from hyperspec
# Take a deep breath...
symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent)
def __init__(self, **options):
from pygments.lexers._clbuiltins import BUILTIN_FUNCTIONS, \
SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
BUILTIN_TYPES, BUILTIN_CLASSES
self.builtin_function = BUILTIN_FUNCTIONS
self.special_forms = SPECIAL_FORMS
self.macros = MACROS
self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
self.declarations = DECLARATIONS
self.builtin_types = BUILTIN_TYPES
self.builtin_classes = BUILTIN_CLASSES
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Variable:
if value in self.builtin_function:
yield index, Name.Builtin, value
continue
if value in self.special_forms:
yield index, Keyword, value
continue
if value in self.macros:
yield index, Name.Builtin, value
continue
if value in self.lambda_list_keywords:
yield index, Keyword, value
continue
if value in self.declarations:
yield index, Keyword, value
continue
if value in self.builtin_types:
yield index, Keyword.Type, value
continue
if value in self.builtin_classes:
yield index, Name.Class, value
continue
yield index, token, value
tokens = {
'root' : [
('', Text, 'body'),
],
'multiline-comment' : [
(r'#\|', Comment.Multiline, '#push'), # (cf. Hyperspec 2.4.8.19)
(r'\|#', Comment.Multiline, '#pop'),
(r'[^|#]+', Comment.Multiline),
(r'[|#]', Comment.Multiline),
],
'commented-form' : [
(r'\(', Comment.Preproc, '#push'),
(r'\)', Comment.Preproc, '#pop'),
(r'[^()]+', Comment.Preproc),
],
'body' : [
# whitespace
(r'\s+', Text),
# single-line comment
(r';.*$', Comment.Single),
# multi-line comment
(r'#\|', Comment.Multiline, 'multiline-comment'),
# encoding comment (?)
(r'#\d*Y.*$', Comment.Special),
# strings and characters
(r'"(\\.|[^"\\])*"', String),
# quoting
(r":" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
# decimal numbers
(r'[-+]?\d+\.?' + terminated, Number.Integer),
(r'[-+]?\d+/\d+' + terminated, Number),
(r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' \
+ terminated, Number.Float),
# sharpsign strings and characters
(r"#\\." + terminated, String.Char),
(r"#\\" + symbol, String.Char),
# vector
(r'#\(', Operator, 'body'),
# bitstring
(r'#\d*\*[01]*', Literal.Other),
# uninterned symbol
(r'#:' + symbol, String.Symbol),
# read-time and load-time evaluation
(r'#[.,]', Operator),
# function shorthand
(r'#\'', Name.Function),
# binary rational
(r'#[bB][+-]?[01]+(/[01]+)?', Number),
# octal rational
(r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct),
# hex rational
(r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex),
# radix rational
(r'#\d+[rR][+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number),
# complex
(r'(#[cC])(\()', bygroups(Number, Punctuation), 'body'),
# array
(r'(#\d+[aA])(\()', bygroups(Literal.Other, Punctuation), 'body'),
# structure
(r'(#[sS])(\()', bygroups(Literal.Other, Punctuation), 'body'),
# path
(r'#[pP]?"(\\.|[^"])*"', Literal.Other),
# reference
(r'#\d+=', Operator),
(r'#\d+#', Operator),
# read-time comment
(r'#+nil' + terminated + '\s*\(', Comment.Preproc, 'commented-form'),
# read-time conditional
(r'#[+-]', Operator),
# special operators that should have been parsed already
(r'(,@|,|\.)', Operator),
# special constants
(r'(t|nil)' + terminated, Name.Constant),
# functions and variables
(r'\*' + symbol + '\*', Name.Variable.Global),
(symbol, Name.Variable),
# parentheses
(r'\(', Punctuation, 'body'),
(r'\)', Punctuation, '#pop'),
],
}
class HaskellLexer(RegexLexer):
"""
A Haskell lexer based on the lexemes defined in the Haskell 98 Report.
*New in Pygments 0.8.*
"""
name = 'Haskell'
aliases = ['haskell', 'hs']
filenames = ['*.hs']
mimetypes = ['text/x-haskell']
reserved = ['case','class','data','default','deriving','do','else',
'if','in','infix[lr]?','instance',
'let','newtype','of','then','type','where','_']
ascii = ['NUL','SOH','[SE]TX','EOT','ENQ','ACK',
'BEL','BS','HT','LF','VT','FF','CR','S[OI]','DLE',
'DC[1-4]','NAK','SYN','ETB','CAN',
'EM','SUB','ESC','[FGRU]S','SP','DEL']
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
#(r'--\s*|.*$', Comment.Doc),
(r'--(?![!#$%&*+./<=>?@\^|_~]).*?$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
# Lexemes:
# Identifiers
(r'\bimport\b', Keyword.Reserved, 'import'),
(r'\bmodule\b', Keyword.Reserved, 'module'),
(r'\berror\b', Name.Exception),
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'^[_a-z][\w\']*', Name.Function),
(r'[_a-z][\w\']*', Name),
(r'[A-Z][\w\']*', Keyword.Type),
# Operators
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator
(r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
(r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Character/String Literals
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
# Special
(r'\[\]', Keyword.Type),
(r'\(\)', Name.Builtin),
(r'[][(),;`{}]', Punctuation),
],
'import': [
# Import statements
(r'\s+', Text),
# after "funclist" state
(r'\)', Punctuation, '#pop'),
(r'qualified\b', Keyword),
# import X as Y
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(as)(\s+)([A-Z][a-zA-Z0-9_.]*)',
bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'),
# import X hiding (functions)
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(hiding)(\s+)(\()',
bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'),
# import X (functions)
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
# import X
(r'[a-zA-Z0-9_.]+', Name.Namespace, '#pop'),
],
'module': [
(r'\s+', Text),
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
(r'[A-Z][a-zA-Z0-9_.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Text),
(r'[A-Z][a-zA-Z0-9_]*', Keyword.Type),
(r'[_a-z][\w\']+', Name.Function),
(r'--.*$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
'comment': [
# Multiline Comments
(r'[^-{}]+', Comment.Multiline),
(r'{-', Comment.Multiline, '#push'),
(r'-}', Comment.Multiline, '#pop'),
(r'[-{}]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']", String.Char),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][A-Z@\^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'\s+\\', String.Escape, '#pop'),
],
}
line_re = re.compile('.*?\n')
bird_re = re.compile(r'(>[ \t]*)(.*\n)')
class LiterateHaskellLexer(Lexer):
"""
For Literate Haskell (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
*New in Pygments 0.9.*
"""
name = 'Literate Haskell'
aliases = ['lhs', 'literate-haskell']
filenames = ['*.lhs']
mimetypes = ['text/x-literate-haskell']
def get_tokens_unprocessed(self, text):
hslexer = HaskellLexer(**self.options)
style = self.options.get('litstyle')
if style is None:
style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird'
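        # i.e. 'latex' when the first non-whitespace character is '%' or '\',
        # otherwise 'bird' (the and/or pair is the pre-2.5 conditional idiom)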
code = ''
insertions = []
if style == 'bird':
# bird-style
for match in line_re.finditer(text):
line = match.group()
m = bird_re.match(line)
if m:
insertions.append((len(code),
[(0, Comment.Special, m.group(1))]))
code += m.group(2)
else:
insertions.append((len(code), [(0, Text, line)]))
else:
# latex-style
from pygments.lexers.text import TexLexer
lxlexer = TexLexer(**self.options)
codelines = 0
latex = ''
for match in line_re.finditer(text):
line = match.group()
if codelines:
if line.lstrip().startswith('\\end{code}'):
codelines = 0
latex += line
else:
code += line
elif line.lstrip().startswith('\\begin{code}'):
codelines = 1
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
latex = ''
else:
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
for item in do_insertions(insertions, hslexer.get_tokens_unprocessed(code)):
yield item
class OcamlLexer(RegexLexer):
"""
For the OCaml language.
*New in Pygments 0.7.*
"""
name = 'OCaml'
aliases = ['ocaml']
filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
mimetypes = ['text/x-ocaml']
keywords = [
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
'downto', 'else', 'end', 'exception', 'external', 'false',
'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
'type', 'val', 'virtual', 'when', 'while', 'with'
]
keyopts = [
'!=','#','&','&&','\(','\)','\*','\+',',','-',
'-\.','->','\.','\.\.',':','::',':=',':>',';',';;','<',
'<-','=','>','>]','>}','\?','\?\?','\[','\[<','\[>','\[\|',
']','_','`','{','{<','\|','\|]','}','~'
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array']
tokens = {
'escape-sequence': [
(r'\\[\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class),
(r'\(\*', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Binary),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'),
(r'[a-z][a-z0-9_\']*', Name, '#pop'),
],
}
class ErlangLexer(RegexLexer):
"""
For the Erlang functional programming language.
Blame Jeremy Thurgood (http://jerith.za.net/).
*New in Pygments 0.9.*
"""
name = 'Erlang'
aliases = ['erlang']
filenames = ['*.erl', '*.hrl']
mimetypes = ['text/x-erlang']
keywords = [
'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
'let', 'of', 'query', 'receive', 'try', 'when',
]
builtins = [ # See erlang(3) man page
'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
'float', 'float_to_list', 'fun_info', 'fun_to_list',
'function_exported', 'garbage_collect', 'get', 'get_keys',
'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
'pid_to_list', 'port_close', 'port_command', 'port_connect',
'port_control', 'port_call', 'port_info', 'port_to_list',
'process_display', 'process_flag', 'process_info', 'purge_module',
'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
'spawn_opt', 'split_binary', 'start_timer', 'statistics',
'suspend_process', 'system_flag', 'system_info', 'system_monitor',
'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
]
operators = r'(\+|-|\*|/|<|>|=|==|/=|=:=|=/=|=<|>=|\+\+|--|<-|!)'
word_operators = [
'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
'div', 'not', 'or', 'orelse', 'rem', 'xor'
]
atom_re = r"(?:[a-z][a-zA-Z0-9_]*|'[^\n']*[^\\]')"
variable_re = r'(?:[A-Z_][a-zA-Z0-9_]*)'
escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))'
macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'
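    # The regexes above are shared building blocks: atoms, variables, escape
    # sequences, macro names, and the 2..36 radix prefix used in based
    # integer literals such as 16#ff.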
tokens = {
'root': [
(r'\s+', Text),
(r'%.*\n', Comment),
('(' + '|'.join(keywords) + r')\b', Keyword),
('(' + '|'.join(builtins) + r')\b', Name.Builtin),
('(' + '|'.join(word_operators) + r')\b', Operator.Word),
(r'^-', Punctuation, 'directive'),
(operators, Operator),
(r'"', String, 'string'),
(r'<<', Name.Label),
(r'>>', Name.Label),
(r'('+atom_re+')(:)', bygroups(Name.Namespace, Punctuation)),
(r'^('+atom_re+r')(\s*)(\()', bygroups(Name.Function, Text, Punctuation)),
(r'[+-]?'+base_re+r'#[0-9a-zA-Z]+', Number.Integer),
(r'[+-]?\d+', Number.Integer),
            (r'[+-]?\d+\.\d+', Number.Float),
(r'[][:_@\".{}()|;,]', Punctuation),
(variable_re, Name.Variable),
(atom_re, Name),
(r'\?'+macro_re, Name.Constant),
(r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
            (r'#'+atom_re+r'(?:\.'+atom_re+r')?', Name.Label),
],
'string': [
(escape_re, String.Escape),
(r'"', String, '#pop'),
(r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol),
(r'[^"\\~]+', String),
(r'~', String),
],
'directive': [
(r'(define)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
(r'(record)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
(atom_re, Name.Entity, '#pop'),
],
}
class ErlangShellLexer(Lexer):
"""
Shell sessions in erl (for Erlang code).
*New in Pygments 1.1.*
"""
name = 'Erlang erl session'
aliases = ['erl']
filenames = ['*.erl-sh']
mimetypes = ['text/x-erl-shellsession']
_prompt_re = re.compile(r'\d+>(?=\s|\Z)')
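    # Lines starting with an erl prompt such as "1>" are collected as code and
    # re-lexed with ErlangLexer; all other lines are emitted as shell output,
    # or as a traceback when they start with '*'.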
def get_tokens_unprocessed(self, text):
erlexer = ErlangLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('*'):
yield match.start(), Generic.Traceback, line
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
|
Dhivyap/ansible
|
refs/heads/devel
|
test/units/modules/network/fortios/test_fortios_system_autoupdate_push_update.py
|
21
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_autoupdate_push_update
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_autoupdate_push_update.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_system_autoupdate_push_update_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_autoupdate_push_update': {
'address': 'test_value_3',
'override': 'enable',
'port': '5',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_autoupdate_push_update.fortios_system_autoupdate(input_data, fos_instance)
expected_data = {
'address': 'test_value_3',
'override': 'enable',
'port': '5',
'status': 'enable'
}
set_method_mock.assert_called_with('system.autoupdate', 'push-update', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_autoupdate_push_update_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_autoupdate_push_update': {
'address': 'test_value_3',
'override': 'enable',
'port': '5',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_autoupdate_push_update.fortios_system_autoupdate(input_data, fos_instance)
expected_data = {
'address': 'test_value_3',
'override': 'enable',
'port': '5',
'status': 'enable'
}
set_method_mock.assert_called_with('system.autoupdate', 'push-update', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_autoupdate_push_update_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_autoupdate_push_update': {
'address': 'test_value_3',
'override': 'enable',
'port': '5',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_autoupdate_push_update.fortios_system_autoupdate(input_data, fos_instance)
expected_data = {
'address': 'test_value_3',
'override': 'enable',
'port': '5',
'status': 'enable'
}
set_method_mock.assert_called_with('system.autoupdate', 'push-update', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_system_autoupdate_push_update_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_autoupdate_push_update': {
'random_attribute_not_valid': 'tag',
'address': 'test_value_3',
'override': 'enable',
'port': '5',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_autoupdate_push_update.fortios_system_autoupdate(input_data, fos_instance)
expected_data = {
'address': 'test_value_3',
'override': 'enable',
'port': '5',
'status': 'enable'
}
set_method_mock.assert_called_with('system.autoupdate', 'push-update', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
dataculture/mca
|
refs/heads/master
|
tests/__init__.py
|
3
|
# -*- coding: utf-8 -*-
from . import test_mca
|
supersven/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/localflavor/fr/fr_department.py
|
314
|
# -*- coding: utf-8 -*-
DEPARTMENT_ASCII_CHOICES = (
('01', '01 - Ain'),
('02', '02 - Aisne'),
('03', '03 - Allier'),
('04', '04 - Alpes-de-Haute-Provence'),
('05', '05 - Hautes-Alpes'),
('06', '06 - Alpes-Maritimes'),
('07', '07 - Ardeche'),
('08', '08 - Ardennes'),
('09', '09 - Ariege'),
('10', '10 - Aube'),
('11', '11 - Aude'),
('12', '12 - Aveyron'),
('13', '13 - Bouches-du-Rhone'),
('14', '14 - Calvados'),
('15', '15 - Cantal'),
('16', '16 - Charente'),
('17', '17 - Charente-Maritime'),
('18', '18 - Cher'),
('19', '19 - Correze'),
('21', '21 - Cote-d\'Or'),
('22', '22 - Cotes-d\'Armor'),
('23', '23 - Creuse'),
('24', '24 - Dordogne'),
('25', '25 - Doubs'),
('26', '26 - Drome'),
('27', '27 - Eure'),
    ('28', '28 - Eure-et-Loir'),
('29', '29 - Finistere'),
('2A', '2A - Corse-du-Sud'),
('2B', '2B - Haute-Corse'),
('30', '30 - Gard'),
('31', '31 - Haute-Garonne'),
('32', '32 - Gers'),
('33', '33 - Gironde'),
('34', '34 - Herault'),
('35', '35 - Ille-et-Vilaine'),
('36', '36 - Indre'),
('37', '37 - Indre-et-Loire'),
('38', '38 - Isere'),
('39', '39 - Jura'),
('40', '40 - Landes'),
('41', '41 - Loir-et-Cher'),
('42', '42 - Loire'),
('43', '43 - Haute-Loire'),
('44', '44 - Loire-Atlantique'),
('45', '45 - Loiret'),
('46', '46 - Lot'),
('47', '47 - Lot-et-Garonne'),
('48', '48 - Lozere'),
('49', '49 - Maine-et-Loire'),
('50', '50 - Manche'),
('51', '51 - Marne'),
('52', '52 - Haute-Marne'),
('53', '53 - Mayenne'),
('54', '54 - Meurthe-et-Moselle'),
('55', '55 - Meuse'),
('56', '56 - Morbihan'),
('57', '57 - Moselle'),
('58', '58 - Nievre'),
('59', '59 - Nord'),
('60', '60 - Oise'),
('61', '61 - Orne'),
('62', '62 - Pas-de-Calais'),
('63', '63 - Puy-de-Dome'),
('64', '64 - Pyrenees-Atlantiques'),
('65', '65 - Hautes-Pyrenees'),
('66', '66 - Pyrenees-Orientales'),
('67', '67 - Bas-Rhin'),
('68', '68 - Haut-Rhin'),
('69', '69 - Rhone'),
('70', '70 - Haute-Saone'),
('71', '71 - Saone-et-Loire'),
('72', '72 - Sarthe'),
('73', '73 - Savoie'),
('74', '74 - Haute-Savoie'),
('75', '75 - Paris'),
('76', '76 - Seine-Maritime'),
('77', '77 - Seine-et-Marne'),
('78', '78 - Yvelines'),
('79', '79 - Deux-Sevres'),
('80', '80 - Somme'),
('81', '81 - Tarn'),
('82', '82 - Tarn-et-Garonne'),
('83', '83 - Var'),
('84', '84 - Vaucluse'),
('85', '85 - Vendee'),
('86', '86 - Vienne'),
('87', '87 - Haute-Vienne'),
('88', '88 - Vosges'),
('89', '89 - Yonne'),
('90', '90 - Territoire de Belfort'),
('91', '91 - Essonne'),
('92', '92 - Hauts-de-Seine'),
('93', '93 - Seine-Saint-Denis'),
('94', '94 - Val-de-Marne'),
('95', '95 - Val-d\'Oise'),
('971', '971 - Guadeloupe'),
('972', '972 - Martinique'),
('973', '973 - Guyane'),
('974', '974 - La Reunion'),
('975', '975 - Saint-Pierre-et-Miquelon'),
('976', '976 - Mayotte'),
('984', '984 - Terres Australes et Antarctiques'),
('986', '986 - Wallis et Futuna'),
('987', '987 - Polynesie Francaise'),
('988', '988 - Nouvelle-Caledonie'),
)
|
kovacsbalu/ansible-modules-extras
|
refs/heads/devel
|
cloud/centurylink/clc_group.py
|
60
|
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
DOCUMENTATION = '''
module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
  - Create or delete Server Groups at Centurylink Cloud
version_added: "2.0"
options:
name:
description:
- The name of the Server Group
required: True
description:
description:
- A description of the Server Group
required: False
parent:
description:
- The parent group of the server group. If parent is not provided, it creates the group at top level.
required: False
location:
description:
- Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
associated with the account
required: False
state:
description:
- Whether to create or delete the group
default: present
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Create a Server Group
---
- name: Create Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify a Server Group at CenturyLink Cloud
clc_group:
name: 'My Cool Server Group'
parent: 'Default Group'
state: present
register: clc
- name: debug
debug: var=clc
# Delete a Server Group
---
- name: Delete Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete / Verify Absent a Server Group at CenturyLink Cloud
clc_group:
name: 'My Cool Server Group'
parent: 'Default Group'
state: absent
register: clc
- name: debug
debug: var=clc
'''
__version__ = '${version}'
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
class ClcGroup(object):
clc = None
root_group = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
location = self.module.params.get('location')
group_name = self.module.params.get('name')
parent_name = self.module.params.get('parent')
group_description = self.module.params.get('description')
state = self.module.params.get('state')
self._set_clc_credentials_from_env()
self.group_dict = self._get_group_tree_for_datacenter(
datacenter=location)
if state == "absent":
changed, group, requests = self._ensure_group_is_absent(
group_name=group_name, parent_name=parent_name)
else:
changed, group, requests = self._ensure_group_is_present(
group_name=group_name, parent_name=parent_name, group_description=group_description)
if requests:
self._wait_for_requests_to_complete(requests)
self.module.exit_json(changed=changed, group=group_name)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(default=None),
parent=dict(default=None),
location=dict(default=None),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=True))
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_group_is_absent(self, group_name, parent_name):
"""
Ensure that group_name is absent by deleting it if necessary
:param group_name: string - the name of the clc server group to delete
:param parent_name: string - the name of the parent group for group_name
        :return: (changed, group_name, requests)
"""
changed = False
requests = []
if self._group_exists(group_name=group_name, parent_name=parent_name):
if not self.module.check_mode:
request = self._delete_group(group_name)
requests.append(request)
changed = True
return changed, group_name, requests
def _delete_group(self, group_name):
"""
Delete the provided server group
:param group_name: string - the server group to delete
        :return: the response returned by the CLC API delete call
"""
response = None
group, parent = self.group_dict.get(group_name)
try:
response = group.Delete()
except CLCException, ex:
self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
group_name, ex.response_text
))
return response
def _ensure_group_is_present(
self,
group_name,
parent_name,
group_description):
"""
Checks to see if a server group exists, creates it if it doesn't.
:param group_name: the name of the group to validate/create
:param parent_name: the name of the parent group for group_name
:param group_description: a short description of the server group (used when creating)
        :return: (changed, group_name, requests) -
            changed: Boolean - whether a change was made
            group_name: string - the name of the group
            requests: None (returned for symmetry with _ensure_group_is_absent)
"""
assert self.root_group, "Implementation Error: Root Group not set"
parent = parent_name if parent_name is not None else self.root_group.name
description = group_description
changed = False
parent_exists = self._group_exists(group_name=parent, parent_name=None)
child_exists = self._group_exists(
group_name=group_name,
parent_name=parent)
if parent_exists and child_exists:
group, parent = self.group_dict[group_name]
changed = False
elif parent_exists and not child_exists:
if not self.module.check_mode:
self._create_group(
group=group_name,
parent=parent,
description=description)
changed = True
else:
self.module.fail_json(
msg="parent group: " +
parent +
" does not exist")
return changed, group_name, None
def _create_group(self, group, parent, description):
"""
Create the provided server group
:param group: clc_sdk.Group - the group to create
:param parent: clc_sdk.Parent - the parent group for {group}
:param description: string - a text description of the group
:return: clc_sdk.Group - the created group
"""
response = None
(parent, grandparent) = self.group_dict[parent]
try:
response = parent.Create(name=group, description=description)
except CLCException, ex:
self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
group, ex.response_text
))
return response
def _group_exists(self, group_name, parent_name):
"""
Check to see if a group exists
:param group_name: string - the group to check
:param parent_name: string - the parent of group_name
:return: boolean - whether the group exists
"""
result = False
if group_name in self.group_dict:
(group, parent) = self.group_dict[group_name]
if parent_name is None or parent_name == parent.name:
result = True
return result
def _get_group_tree_for_datacenter(self, datacenter=None):
"""
Walk the tree of groups for a datacenter
:param datacenter: string - the datacenter to walk (ex: 'UC1')
:return: a dictionary of groups and parents
"""
self.root_group = self.clc.v2.Datacenter(
location=datacenter).RootGroup()
return self._walk_groups_recursive(
parent_group=None,
child_group=self.root_group)
def _walk_groups_recursive(self, parent_group, child_group):
"""
Walk a parent-child tree of groups, starting with the provided child group
:param parent_group: clc_sdk.Group - the parent group to start the walk
:param child_group: clc_sdk.Group - the child group to start the walk
:return: a dictionary of groups and parents
"""
result = {str(child_group): (child_group, parent_group)}
groups = child_group.Subgroups().groups
if len(groups) > 0:
for group in groups:
if group.type != 'default':
continue
result.update(self._walk_groups_recursive(child_group, group))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process group request')
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcGroup._define_module_argument_spec(),
supports_check_mode=True)
clc_group = ClcGroup(module)
clc_group.process_request()
from ansible.module_utils.basic import * # pylint: disable=W0614
if __name__ == '__main__':
main()
|
cantino/newspaper
|
refs/heads/master
|
newspaper/packages/jieba/__init__.py
|
2
|
from __future__ import with_statement
__version__ = '0.31'
__license__ = 'MIT'
import re
import os
import sys
import finalseg
import time
import tempfile
import marshal
from math import log
import random
import threading
from functools import wraps
DICTIONARY = "dict.txt"
DICT_LOCK = threading.RLock()
trie = None # to be initialized
FREQ = {}
min_freq = 0.0
total =0.0
user_word_tag_tab={}
initialized = False
def gen_trie(f_name):
lfreq = {}
trie = {}
ltotal = 0.0
with open(f_name, 'rb') as f:
lineno = 0
for line in f.read().rstrip().decode('utf-8').split('\n'):
lineno += 1
try:
word,freq,_ = line.split(' ')
freq = float(freq)
lfreq[word] = freq
ltotal+=freq
p = trie
for c in word:
if not c in p:
p[c] ={}
p = p[c]
p['']='' #ending flag
except ValueError, e:
print >> sys.stderr, f_name, ' at line', lineno, line
raise e
return trie, lfreq,ltotal
def initialize(*args):
global trie, FREQ, total, min_freq, initialized
if len(args)==0:
dictionary = DICTIONARY
else:
dictionary = args[0]
with DICT_LOCK:
if initialized:
return
if trie:
del trie
trie = None
_curpath=os.path.normpath( os.path.join( os.getcwd(), os.path.dirname(__file__) ) )
abs_path = os.path.join(_curpath,dictionary)
print >> sys.stderr, "Building Trie..., from " + abs_path
t1 = time.time()
if abs_path == os.path.join(_curpath,"dict.txt"): #defautl dictionary
cache_file = os.path.join(tempfile.gettempdir(),"jieba.cache")
        else: # custom (user-provided) dictionary
cache_file = os.path.join(tempfile.gettempdir(),"jieba.user."+str(hash(abs_path))+".cache")
load_from_cache_fail = True
if os.path.exists(cache_file) and os.path.getmtime(cache_file)>os.path.getmtime(abs_path):
print >> sys.stderr, "loading model from cache " + cache_file
try:
trie,FREQ,total,min_freq = marshal.load(open(cache_file,'rb'))
load_from_cache_fail = False
except:
load_from_cache_fail = True
if load_from_cache_fail:
trie,FREQ,total = gen_trie(abs_path)
FREQ = dict([(k,log(float(v)/total)) for k,v in FREQ.iteritems()]) #normalize
min_freq = min(FREQ.itervalues())
print >> sys.stderr, "dumping model to file cache " + cache_file
try:
tmp_suffix = "."+str(random.random())
with open(cache_file+tmp_suffix,'wb') as temp_cache_file:
marshal.dump((trie,FREQ,total,min_freq),temp_cache_file)
if os.name=='nt':
import shutil
replace_file = shutil.move
else:
replace_file = os.rename
replace_file(cache_file+tmp_suffix,cache_file)
except:
print >> sys.stderr, "dump cache file failed."
import traceback
print >> sys.stderr, traceback.format_exc()
initialized = True
print >> sys.stderr, "loading model cost ", time.time() - t1, "seconds."
print >> sys.stderr, "Trie has been built succesfully."
def require_initialized(fn):
global initialized,DICTIONARY
@wraps(fn)
def wrapped(*args, **kwargs):
if initialized:
return fn(*args, **kwargs)
else:
initialize(DICTIONARY)
return fn(*args, **kwargs)
return wrapped
def __cut_all(sentence):
dag = get_DAG(sentence)
old_j = -1
for k,L in dag.iteritems():
if len(L)==1 and k>old_j:
yield sentence[k:L[0]+1]
old_j = L[0]
else:
for j in L:
if j>k:
yield sentence[k:j+1]
old_j = j
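# calc() fills `route` from the end of the sentence backwards: route[i] holds
# the best (log-probability, word-end-index) pair for the suffix starting at i,
# using min_freq as the fallback score for words not present in FREQ.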
def calc(sentence,DAG,idx,route):
N = len(sentence)
route[N] = (0.0,'')
for idx in xrange(N-1,-1,-1):
candidates = [ ( FREQ.get(sentence[idx:x+1],min_freq) + route[x+1][0],x ) for x in DAG[idx] ]
route[idx] = max(candidates)
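# get_DAG() walks the prefix trie character by character and records, for each
# start position i, every end position j where sentence[i:j+1] is a dictionary
# word; positions with no match fall back to the single character itself.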
@require_initialized
def get_DAG(sentence):
N = len(sentence)
i,j=0,0
p = trie
DAG = {}
while i<N:
c = sentence[j]
if c in p:
p = p[c]
if '' in p:
if not i in DAG:
DAG[i]=[]
DAG[i].append(j)
j+=1
if j>=N:
i+=1
j=i
p=trie
else:
p = trie
i+=1
j=i
for i in xrange(len(sentence)):
if not i in DAG:
DAG[i] =[i]
return DAG
def __cut_DAG(sentence):
DAG = get_DAG(sentence)
route ={}
calc(sentence,DAG,0,route=route)
x = 0
buf =u''
N = len(sentence)
while x<N:
y = route[x][1]+1
l_word = sentence[x:y]
if y-x==1:
buf+= l_word
else:
if len(buf)>0:
if len(buf)==1:
yield buf
buf=u''
else:
if not (buf in FREQ):
                        recognized = finalseg.cut(buf)
                        for t in recognized:
yield t
else:
for elem in buf:
yield elem
buf=u''
yield l_word
x =y
if len(buf)>0:
if len(buf)==1:
yield buf
else:
if not (buf in FREQ):
                recognized = finalseg.cut(buf)
                for t in recognized:
yield t
else:
for elem in buf:
yield elem
def cut(sentence,cut_all=False):
if not isinstance(sentence, unicode):
try:
sentence = sentence.decode('utf-8')
except UnicodeDecodeError:
sentence = sentence.decode('gbk','ignore')
re_han, re_skip = re.compile(ur"([\u4E00-\u9FA5a-zA-Z0-9+#&\._]+)", re.U), re.compile(ur"(\r\n|\s)", re.U)
if cut_all:
re_han, re_skip = re.compile(ur"([\u4E00-\u9FA5]+)", re.U), re.compile(ur"[^a-zA-Z0-9+#\n]", re.U)
blocks = re_han.split(sentence)
cut_block = __cut_DAG
if cut_all:
cut_block = __cut_all
for blk in blocks:
if re_han.match(blk):
#pprint.pprint(__cut_DAG(blk))
for word in cut_block(blk):
yield word
else:
tmp = re_skip.split(blk)
for x in tmp:
if re_skip.match(x):
yield x
elif not cut_all:
for xx in x:
yield xx
else:
yield x
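# Illustrative usage (example only, not part of the original source):
#     words = list(cut(u"some text", cut_all=False))
# cut() is a generator of unicode tokens: blocks matched by re_han are
# segmented with __cut_DAG (or __cut_all when cut_all=True), everything else
# is passed through according to re_skip.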
def cut_for_search(sentence):
words = cut(sentence)
for w in words:
if len(w)>2:
for i in xrange(len(w)-1):
gram2 = w[i:i+2]
if gram2 in FREQ:
yield gram2
if len(w)>3:
for i in xrange(len(w)-2):
gram3 = w[i:i+3]
if gram3 in FREQ:
yield gram3
yield w
@require_initialized
def load_userdict(f):
global trie,total,FREQ
if isinstance(f, (str, unicode)):
f = open(f, 'rb')
content = f.read().decode('utf-8')
line_no = 0
for line in content.split("\n"):
line_no+=1
if line.rstrip()=='': continue
tup =line.split(" ")
word,freq = tup[0],tup[1]
if line_no==1:
word = word.replace(u'\ufeff',u"") #remove bom flag if it exists
if len(tup)==3:
add_word(word, freq, tup[2])
else:
add_word(word, freq)
def add_word(word, freq, tag=None):
global FREQ, trie, total, user_word_tag_tab
freq = float(freq)
FREQ[word] = log(freq / total)
if tag is not None:
user_word_tag_tab[word] = tag.strip()
p = trie
for c in word:
if not c in p:
p[c] = {}
p = p[c]
p[''] = '' # ending flag
__ref_cut = cut
__ref_cut_for_search = cut_for_search
def __lcut(sentence):
return list(__ref_cut(sentence,False))
def __lcut_all(sentence):
return list(__ref_cut(sentence,True))
def __lcut_for_search(sentence):
return list(__ref_cut_for_search(sentence))
@require_initialized
def enable_parallel(processnum=None):
global pool,cut,cut_for_search
if os.name=='nt':
raise Exception("jieba: parallel mode only supports posix system")
if sys.version_info[0]==2 and sys.version_info[1]<6:
raise Exception("jieba: the parallel feature needs Python version>2.5 ")
from multiprocessing import Pool,cpu_count
if processnum==None:
processnum = cpu_count()
pool = Pool(processnum)
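    # The parallel variants split the text on newlines, map the list-returning
    # __lcut helpers across the process pool, then flatten the results back
    # into a single generator.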
def pcut(sentence,cut_all=False):
parts = re.compile('([\r\n]+)').split(sentence)
if cut_all:
result = pool.map(__lcut_all,parts)
else:
result = pool.map(__lcut,parts)
for r in result:
for w in r:
yield w
def pcut_for_search(sentence):
parts = re.compile('([\r\n]+)').split(sentence)
result = pool.map(__lcut_for_search,parts)
for r in result:
for w in r:
yield w
cut = pcut
cut_for_search = pcut_for_search
def disable_parallel():
global pool,cut,cut_for_search
if 'pool' in globals():
pool.close()
pool = None
cut = __ref_cut
cut_for_search = __ref_cut_for_search
def set_dictionary(dictionary_path):
global initialized, DICTIONARY
with DICT_LOCK:
abs_path = os.path.normpath( os.path.join( os.getcwd(), dictionary_path ) )
if not os.path.exists(abs_path):
raise Exception("jieba: path does not exists:" + abs_path)
DICTIONARY = abs_path
initialized = False
def get_abs_path_dict():
_curpath=os.path.normpath( os.path.join( os.getcwd(), os.path.dirname(__file__) ) )
abs_path = os.path.join(_curpath,DICTIONARY)
return abs_path
def tokenize(unicode_sentence,mode="default"):
#mode ("default" or "search")
if not isinstance(unicode_sentence, unicode):
raise Exception("jieba: the input parameter should unicode.")
start = 0
if mode=='default':
for w in cut(unicode_sentence):
width = len(w)
yield (w,start,start+width)
start+=width
else:
for w in cut(unicode_sentence):
width = len(w)
if len(w)>2:
for i in xrange(len(w)-1):
gram2 = w[i:i+2]
if gram2 in FREQ:
yield (gram2,start+i,start+i+2)
if len(w)>3:
for i in xrange(len(w)-2):
gram3 = w[i:i+3]
if gram3 in FREQ:
yield (gram3,start+i,start+i+3)
yield (w,start,start+width)
start+=width
|
leki75/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/ovs/openvswitch_port.py
|
42
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=C0111
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
#
# Portions copyright @ 2015 VMware, Inc.
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openvswitch_port
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch ports
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch ports
options:
bridge:
required: true
description:
- Name of bridge to manage
port:
required: true
description:
- Name of port to manage on the bridge
tag:
version_added: 2.2
required: false
description:
- VLAN tag for this port. Must be a value between
0 and 4095.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the port should exist
timeout:
required: false
default: 5
description:
- How long to wait for ovs-vswitchd to respond
external_ids:
version_added: 2.0
required: false
default: {}
description:
- Dictionary of external_ids applied to a port.
set:
version_added: 2.0
required: false
default: None
description:
- Set a single property on a port.
'''
EXAMPLES = '''
# Creates port eth2 on bridge br-ex
- openvswitch_port:
bridge: br-ex
port: eth2
state: present
# Creates port eth6
- openvswitch_port:
bridge: bridge-loop
port: eth6
state: present
set: Interface eth6
# Creates port vlan10 with tag 10 on bridge br-ex
- openvswitch_port:
bridge: br-ex
port: vlan10
tag: 10
state: present
set: Interface vlan10
# Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23
# to port vifeth6 and setup port to be managed by a controller.
- openvswitch_port:
bridge: br-int
port: vifeth6
state: present
args:
external_ids:
iface-id: '{{ inventory_hostname }}-vifeth6'
attached-mac: '00:00:5E:00:53:23'
vm-id: '{{ inventory_hostname }}'
iface-status: active
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.pycompat24 import get_exception
def _external_ids_to_dict(text):
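    # Parses ovs-vsctl map output of the form '{k1=v1, k2=v2}' into a dict;
    # an empty map '{}' is reported as None.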
text = text.strip()
if text == '{}':
return None
else:
d = {}
for kv in text[1:-1].split(','):
kv = kv.strip()
k, v = kv.split('=')
d[k] = v
return d
def _tag_to_str(text):
text = text.strip()
if text == '[]':
return None
else:
return text
def map_obj_to_commands(want, have, module):
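    # Builds the list of ovs-vsctl command strings needed to move the current
    # port state (have) to the desired state (want); an empty list means no
    # change is required.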
commands = list()
if module.params['state'] == 'absent':
if have:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s del-port"
" %(bridge)s %(port)s")
command = templatized_command % module.params
commands.append(command)
else:
if have:
if want['tag'] != have['tag']:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set port %(port)s tag=%(tag)s")
command = templatized_command % module.params
commands.append(command)
if want['external_ids'] != have['external_ids']:
for k, v in iteritems(want['external_ids']):
if (not have['external_ids']
or k not in have['external_ids']
or want['external_ids'][k] != have['external_ids'][k]):
if v is None:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" remove port %(port)s"
" external_ids " + k)
command = templatized_command % module.params
commands.append(command)
else:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set port %(port)s"
" external_ids:")
command = templatized_command % module.params
command += k + "=" + v
commands.append(command)
else:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s add-port"
" %(bridge)s %(port)s")
command = templatized_command % module.params
if want['tag']:
templatized_command = " tag=%(tag)s"
command += templatized_command % module.params
if want['set']:
templatized_command = " -- set %(set)s"
command += templatized_command % module.params
commands.append(command)
if want['external_ids']:
for k, v in iteritems(want['external_ids']):
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set port %(port)s external_ids:")
command = templatized_command % module.params
command += k + "=" + v
commands.append(command)
return commands
def map_config_to_obj(module):
templatized_command = "%(ovs-vsctl)s -t %(timeout)s list-ports %(bridge)s"
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
if rc != 0:
module.fail_json(msg=err)
obj = {}
if module.params['port'] in out.splitlines():
obj['bridge'] = module.params['bridge']
obj['port'] = module.params['port']
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s get"
" Port %(port)s tag")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['tag'] = _tag_to_str(out)
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s get"
" Port %(port)s external_ids")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['external_ids'] = _external_ids_to_dict(out)
return obj
def map_params_to_obj(module):
obj = {
'bridge': module.params['bridge'],
'port': module.params['port'],
'tag': module.params['tag'],
'external_ids': module.params['external_ids'],
'set': module.params['set']
}
return obj
# pylint: disable=E0602
def main():
""" Entry point. """
argument_spec={
'bridge': {'required': True},
'port': {'required': True},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'timeout': {'default': 5, 'type': 'int'},
'external_ids': {'default': None, 'type': 'dict'},
'tag': {'default': None},
'set': {'required': False, 'default': None}
}
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
# We add ovs-vsctl to module_params to later build up templatized commands
module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
for c in commands:
module.run_command(c, check_rc=True)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
asrob-uc3m/rpc_rpi
|
refs/heads/master
|
src/python/opencv_python_tutorials/Official_Tutorial_Python_Codes/3_imgproc/smoothing.py
|
1
|
''' file name : smoothing.py
Description : This sample shows how to smooth image using various filters
This is Python version of this tutorial : http://opencv.itseez.com/doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.html#smoothing
Level : Beginner
Benefits : Learn to use 1) Blur, 2) GaussianBlur, 3) MedianBlur, 4) BilateralFilter and differences between them
Usage : python smoothing.py
Written by : Abid K. (abidrahman2@gmail.com) , Visit opencvpython.blogspot.com for more tutorials '''
import cv2
import numpy as np
DELAY_CAPTION = 1500;
DELAY_BLUR = 500;
img = cv2.imread('lena.jpg')
for i in xrange(1,31,2):
blur = cv2.blur(img,(i,i))
string = 'blur : kernel size - '+str(i)
cv2.putText(blur,string,(20,20),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(255,255,255))
cv2.imshow('Blur',blur)
cv2.waitKey(DELAY_BLUR)
for i in xrange(1,31,2):
gaussian_blur = cv2.GaussianBlur(img,(i,i),0)
    string = 'gaussian_blur : kernel size - '+str(i)
cv2.putText(gaussian_blur,string,(20,20),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(255,255,255))
cv2.imshow('Blur',gaussian_blur)
cv2.waitKey(DELAY_BLUR)
cv2.waitKey(DELAY_CAPTION)
for i in xrange(1,31,2):
median_blur = cv2.medianBlur(img,i)
string = 'median_blur : kernel size - '+str(i)
cv2.putText(median_blur,string,(20,20),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(255,255,255))
cv2.imshow('Blur',median_blur)
cv2.waitKey(DELAY_BLUR)
cv2.waitKey(DELAY_CAPTION)
for i in xrange(1,31,2): # Remember, bilateral filtering is slow, so as the value goes higher it takes a long time
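    # cv2.bilateralFilter(src, d, sigmaColor, sigmaSpace): here the kernel
    # diameter, color sigma and space sigma all scale with i.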
bilateral_blur = cv2.bilateralFilter(img,i, i*2,i/2)
string = 'bilateral_blur : kernel size - '+str(i)
cv2.putText(bilateral_blur,string,(20,20),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(255,255,255))
cv2.imshow('Blur',bilateral_blur)
cv2.waitKey(DELAY_BLUR)
cv2.waitKey(DELAY_CAPTION)
cv2.destroyAllWindows()
## For more info about this , visit: http://opencvpython.blogspot.com/2012/06/smoothing-techniques-in-opencv.html
|
stefanhenneking/mxnet
|
refs/heads/master
|
tests/nightly/test_tutorial.py
|
8
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#pylint: disable=no-member, too-many-locals, too-many-branches, no-self-use, broad-except, lost-exception, too-many-nested-blocks, too-few-public-methods, invalid-name
"""
This script converts all python tutorials into python script
and tests whether there is any warning or error.
After running python script, it will also convert markdown files
to notebooks to make sure notebook execution has no error.
"""
import os
import warnings
import imp
import traceback
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
fail_dict = {}
def test_tutorial(file_path):
"""Run tutorial python script and save any error or warning.
If no error or warning occurs, run notebook.
Parameters
----------
file_path : str
path of tutorial markdown file
"""
with warnings.catch_warnings(record=True) as w:
tutorial_name = os.path.basename(file_path)
print file_path + '.py'
try:
imp.load_source('tutorial', file_path + '.py')
if len(w) > 0:
err_msg = "%s.py has %d warnings.\n" % (tutorial_name, len(w))
fail_dict[tutorial_name] = err_msg
else:
test_tutorial_nb(file_path)
except Exception:
err_msg = "%s.py has error:\n%s" % (tutorial_name, traceback.format_exc())
fail_dict[tutorial_name] = err_msg
def test_tutorial_nb(file_path):
"""Run tutorial jupyter notebook to catch any execution error.
Parameters
----------
file_path : str
path of tutorial markdown file
"""
tutorial_name = os.path.basename(file_path)
notebook = nbformat.read(file_path + '.ipynb', as_version=4)
eprocessor = ExecutePreprocessor(timeout=1800)
try:
eprocessor.preprocess(notebook, {'metadata': {}})
except Exception as err:
err_msg = str(err)
fail_dict[tutorial_name] = err_msg
finally:
output_nb = open("output.txt", mode='w')
nbformat.write(notebook, output_nb)
output_nb.close()
output_nb = open("output.txt", mode='r')
for line in output_nb:
if "Warning:" in line:
fail_dict[tutorial_name] = "%s has warning." % (tutorial_name)
return
if __name__ == "__main__":
tutorial_dir = '../../docs/_build/html/tutorials/'
with open('test_tutorial_config.txt') as config_file:
tutorial_list = []
for line in config_file:
tutorial_list.append(line.lstrip().rstrip())
file_dir = tutorial_dir + line.lstrip().rstrip()
test_tutorial_nb(file_dir)
fail_num = len(fail_dict)
success_num = len(tutorial_list) - fail_num
print "Test Summary Start"
print "%d tutorials tested:" % (len(tutorial_list))
for tutorial in tutorial_list:
print tutorial
print "\n%d tests failed:" % (fail_num)
for tutorial, msg in fail_dict.items():
print tutorial + ":"
print msg
print "Test Summary End"
print "Stats start"
print "[Passed: %d of %d]" % (success_num, len(tutorial_list))
print "Stats end"
|
fernandopinhati/oppia
|
refs/heads/master
|
core/controllers/editor_test.py
|
7
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the exploration editor page."""
__author__ = 'Sean Lip'
import os
import StringIO
import zipfile
from core.controllers import editor
from core.domain import config_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import rights_manager
from core.tests import test_utils
import feconf
class BaseEditorControllerTest(test_utils.GenericTestBase):
CAN_EDIT_STR = 'GLOBALS.can_edit = JSON.parse(\'true\');'
CANNOT_EDIT_STR = 'GLOBALS.can_edit = JSON.parse(\'false\');'
def setUp(self):
"""Completes the sign-up process for self.EDITOR_EMAIL."""
super(BaseEditorControllerTest, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.set_admins([self.ADMIN_EMAIL])
def assert_can_edit(self, response_body):
"""Returns True if the response body indicates that the exploration is
editable."""
self.assertIn(self.CAN_EDIT_STR, response_body)
self.assertNotIn(self.CANNOT_EDIT_STR, response_body)
def assert_cannot_edit(self, response_body):
"""Returns True if the response body indicates that the exploration is
not editable."""
self.assertIn(self.CANNOT_EDIT_STR, response_body)
self.assertNotIn(self.CAN_EDIT_STR, response_body)
class EditorTest(BaseEditorControllerTest):
def test_editor_page(self):
"""Test access to editor pages for the sample exploration."""
exp_services.delete_demo('0')
exp_services.load_demo('0')
# Check that non-editors can access, but not edit, the editor page.
response = self.testapp.get('/create/0')
self.assertEqual(response.status_int, 200)
self.assertIn('Welcome to Oppia!', response.body)
self.assert_cannot_edit(response.body)
# Log in as an editor.
self.login(self.EDITOR_EMAIL)
# Check that it is now possible to access and edit the editor page.
response = self.testapp.get('/create/0')
self.assertIn('Welcome to Oppia!', response.body)
self.assertEqual(response.status_int, 200)
self.assert_can_edit(response.body)
self.assertIn('Stats', response.body)
self.assertIn('History', response.body)
# Test that the value generator JS is included.
self.assertIn('RandomSelector', response.body)
self.logout()
def test_new_state_template(self):
"""Test the validity of the NEW_STATE_TEMPLATE."""
exp_services.load_demo('0')
exploration = exp_services.get_exploration_by_id('0')
exploration.add_states([feconf.DEFAULT_INIT_STATE_NAME])
new_state_dict = exploration.states[
feconf.DEFAULT_INIT_STATE_NAME].to_dict()
new_state_dict['unresolved_answers'] = {}
self.assertEqual(new_state_dict, editor.NEW_STATE_TEMPLATE)
def test_add_new_state_error_cases(self):
"""Test the error cases for adding a new state to an exploration."""
exp_services.delete_demo('0')
exp_services.load_demo('0')
CURRENT_VERSION = 1
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/0')
csrf_token = self.get_csrf_token_from_response(response)
def _get_payload(new_state_name, version=None):
result = {
'change_list': [{
'cmd': 'add_state',
'state_name': new_state_name
}],
'commit_message': 'Add new state',
}
if version is not None:
result['version'] = version
return result
def _put_and_expect_400_error(payload):
return self.put_json(
'/createhandler/data/0', payload, csrf_token,
expect_errors=True, expected_status_int=400)
# A request with no version number is invalid.
response_dict = _put_and_expect_400_error(_get_payload('New state'))
self.assertIn('a version must be specified', response_dict['error'])
# A request with the wrong version number is invalid.
response_dict = _put_and_expect_400_error(
_get_payload('New state', 123))
self.assertIn('which is too old', response_dict['error'])
# A request with an empty state name is invalid.
response_dict = _put_and_expect_400_error(
_get_payload('', CURRENT_VERSION))
self.assertIn('should be between 1 and 50', response_dict['error'])
# A request with a really long state name is invalid.
response_dict = _put_and_expect_400_error(
_get_payload('a' * 100, CURRENT_VERSION))
self.assertIn('should be between 1 and 50', response_dict['error'])
# A request with a state name containing invalid characters is
# invalid.
response_dict = _put_and_expect_400_error(
_get_payload('[Bad State Name]', CURRENT_VERSION))
self.assertIn('Invalid character [', response_dict['error'])
# A request with a state name of feconf.END_DEST is invalid.
response_dict = _put_and_expect_400_error(
_get_payload(feconf.END_DEST, CURRENT_VERSION))
self.assertIn('Invalid state name', response_dict['error'])
# Even if feconf.END_DEST is mixed case, it is still invalid.
response_dict = _put_and_expect_400_error(
_get_payload('eNd', CURRENT_VERSION))
self.assertEqual('eNd'.lower(), feconf.END_DEST.lower())
self.assertIn('Invalid state name', response_dict['error'])
# A name cannot have spaces at the front or back.
response_dict = _put_and_expect_400_error(
_get_payload(' aa', CURRENT_VERSION))
self.assertIn('start or end with whitespace', response_dict['error'])
response_dict = _put_and_expect_400_error(
_get_payload('aa\t', CURRENT_VERSION))
self.assertIn('end with whitespace', response_dict['error'])
response_dict = _put_and_expect_400_error(
_get_payload('\n', CURRENT_VERSION))
self.assertIn('end with whitespace', response_dict['error'])
# A name cannot have consecutive whitespace.
response_dict = _put_and_expect_400_error(
_get_payload('The B', CURRENT_VERSION))
self.assertIn('Adjacent whitespace', response_dict['error'])
response_dict = _put_and_expect_400_error(
_get_payload('The\t\tB', CURRENT_VERSION))
self.assertIn('Adjacent whitespace', response_dict['error'])
self.logout()
def test_resolved_answers_handler(self):
exp_services.delete_demo('0')
exp_services.load_demo('0')
# In the reader perspective, submit the first multiple-choice answer,
# then submit 'blah' once, 'blah2' twice and 'blah3' three times.
# TODO(sll): Use the ExplorationPlayer in reader_test for this.
exploration_dict = self.get_json(
'%s/0' % feconf.EXPLORATION_INIT_URL_PREFIX)
self.assertEqual(
exploration_dict['exploration']['title'], 'Welcome to Oppia!')
state_name = exploration_dict['exploration']['init_state_name']
exploration_dict = self.submit_answer('0', state_name, '0')
state_name = exploration_dict['state_name']
self.submit_answer('0', state_name, 'blah')
for _ in range(2):
self.submit_answer('0', state_name, 'blah2')
for _ in range(3):
self.submit_answer('0', state_name, 'blah3')
# Log in as an editor.
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/0')
csrf_token = self.get_csrf_token_from_response(response)
url = str('/createhandler/resolved_answers/0/%s' % state_name)
def _get_unresolved_answers():
return stats_domain.StateRuleAnswerLog.get(
'0', state_name, feconf.SUBMIT_HANDLER_NAME,
exp_domain.DEFAULT_RULESPEC_STR
).answers
self.assertEqual(
_get_unresolved_answers(), {'blah': 1, 'blah2': 2, 'blah3': 3})
# An empty request should result in an error.
response_dict = self.put_json(
url, {'something_else': []}, csrf_token,
expect_errors=True, expected_status_int=400)
self.assertIn('Expected a list', response_dict['error'])
# A request of the wrong type should result in an error.
response_dict = self.put_json(
url, {'resolved_answers': 'this_is_a_string'}, csrf_token,
expect_errors=True, expected_status_int=400)
self.assertIn('Expected a list', response_dict['error'])
# Trying to remove an answer that wasn't submitted has no effect.
response_dict = self.put_json(
url, {'resolved_answers': ['not_submitted_answer']}, csrf_token)
self.assertEqual(
_get_unresolved_answers(), {'blah': 1, 'blah2': 2, 'blah3': 3})
# A successful request should remove the answer in question.
response_dict = self.put_json(
url, {'resolved_answers': ['blah']}, csrf_token)
self.assertEqual(
_get_unresolved_answers(), {'blah2': 2, 'blah3': 3})
# It is possible to remove more than one answer at a time.
response_dict = self.put_json(
url, {'resolved_answers': ['blah2', 'blah3']}, csrf_token)
self.assertEqual(_get_unresolved_answers(), {})
self.logout()
class DownloadIntegrationTest(BaseEditorControllerTest):
"""Test handler for exploration and state download."""
SAMPLE_JSON_CONTENT = {
"State A": ("""content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: State A
feedback: []
param_changes: []
id: TextInput
param_changes: []
"""),
"State B": ("""content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: State B
feedback: []
param_changes: []
id: TextInput
param_changes: []
"""),
feconf.DEFAULT_INIT_STATE_NAME: ("""content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: %s
feedback: []
param_changes: []
id: TextInput
param_changes: []
""") % feconf.DEFAULT_INIT_STATE_NAME
}
SAMPLE_STATE_STRING = (
"""content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: State A
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
def test_exploration_download_handler_for_default_exploration(self):
self.login(self.EDITOR_EMAIL)
self.OWNER_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
# Create a simple exploration
EXP_ID = 'eid'
self.save_new_valid_exploration(
EXP_ID, self.OWNER_ID,
title='The title for ZIP download handler test!',
category='This is just a test category',
objective='')
exploration = exp_services.get_exploration_by_id(EXP_ID)
exploration.states[exploration.init_state_name].interaction.handlers[
0].rule_specs[0].dest = exploration.init_state_name
exploration.add_states(['State A', 'State 2', 'State 3'])
exploration.states['State A'].update_interaction_id('TextInput')
exploration.states['State 2'].update_interaction_id('TextInput')
exploration.states['State 3'].update_interaction_id('TextInput')
exploration.rename_state('State 2', 'State B')
exploration.delete_state('State 3')
exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
response = self.testapp.get('/create/%s' % EXP_ID)
# Check download to zip file
# Download to zip file using download handler
EXPLORATION_DOWNLOAD_URL = '/createhandler/download/%s' % EXP_ID
response = self.testapp.get(EXPLORATION_DOWNLOAD_URL)
# Check downloaded zip file
self.assertEqual(response.headers['Content-Type'], 'text/plain')
filename = 'oppia-ThetitleforZIPdownloadhandlertest!-v2.zip'
self.assertEqual(response.headers['Content-Disposition'],
'attachment; filename=%s' % str(filename))
zf_saved = zipfile.ZipFile(StringIO.StringIO(response.body))
self.assertEqual(
zf_saved.namelist(),
['The title for ZIP download handler test!.yaml'])
# Load golden zip file
with open(os.path.join(
feconf.TESTS_DATA_DIR,
'oppia-ThetitleforZIPdownloadhandlertest!-v2-gold.zip'),
'rb') as f:
golden_zipfile = f.read()
zf_gold = zipfile.ZipFile(StringIO.StringIO(golden_zipfile))
# Compare saved with golden file
self.assertEqual(
zf_saved.open(
'The title for ZIP download handler test!.yaml'
).read(),
zf_gold.open(
'The title for ZIP download handler test!.yaml'
).read())
# Check download to JSON
exploration.update_objective('Test JSON download')
exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
# Download to JSON string using download handler
self.maxDiff = None
EXPLORATION_DOWNLOAD_URL = (
'/createhandler/download/%s?output_format=%s&width=50' %
(EXP_ID, feconf.OUTPUT_FORMAT_JSON))
response = self.get_json(EXPLORATION_DOWNLOAD_URL)
# Check downloaded dict
self.assertEqual(self.SAMPLE_JSON_CONTENT, response)
self.logout()
def test_state_download_handler_for_default_exploration(self):
self.login(self.EDITOR_EMAIL)
self.OWNER_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
# Create a simple exploration
EXP_ID = 'eid'
self.save_new_valid_exploration(
EXP_ID, self.OWNER_ID,
title='The title for states download handler test!',
category='This is just a test category')
exploration = exp_services.get_exploration_by_id(EXP_ID)
exploration.add_states(['State A', 'State 2', 'State 3'])
exploration.states['State A'].update_interaction_id('TextInput')
exploration.states['State 2'].update_interaction_id('TextInput')
exploration.states['State 3'].update_interaction_id('TextInput')
exploration.rename_state('State 2', 'State B')
exploration.delete_state('State 3')
exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
response = self.testapp.get('/create/%s' % EXP_ID)
# Check download state as YAML string
self.maxDiff = None
state_name = 'State%20A'
EXPLORATION_DOWNLOAD_URL = (
'/createhandler/download_state/%s?state=%s&width=50' %
(EXP_ID, state_name))
response = self.testapp.get(EXPLORATION_DOWNLOAD_URL)
self.assertEqual(self.SAMPLE_STATE_STRING, response.body)
self.logout()
class ExplorationDeletionRightsTest(BaseEditorControllerTest):
def test_deletion_rights_for_unpublished_exploration(self):
"""Test rights management for deletion of unpublished explorations."""
UNPUBLISHED_EXP_ID = 'unpublished_eid'
exploration = exp_domain.Exploration.create_default_exploration(
UNPUBLISHED_EXP_ID, 'A title', 'A category')
exp_services.save_new_exploration(self.owner_id, exploration)
rights_manager.assign_role(
self.owner_id, UNPUBLISHED_EXP_ID, self.editor_id,
rights_manager.ROLE_EDITOR)
self.login(self.EDITOR_EMAIL)
response = self.testapp.delete(
'/createhandler/data/%s' % UNPUBLISHED_EXP_ID, expect_errors=True)
self.assertEqual(response.status_int, 401)
self.logout()
self.login(self.VIEWER_EMAIL)
response = self.testapp.delete(
'/createhandler/data/%s' % UNPUBLISHED_EXP_ID, expect_errors=True)
self.assertEqual(response.status_int, 401)
self.logout()
self.login(self.OWNER_EMAIL)
response = self.testapp.delete(
'/createhandler/data/%s' % UNPUBLISHED_EXP_ID)
self.assertEqual(response.status_int, 200)
self.logout()
def test_deletion_rights_for_published_exploration(self):
"""Test rights management for deletion of published explorations."""
PUBLISHED_EXP_ID = 'published_eid'
exploration = exp_domain.Exploration.create_default_exploration(
PUBLISHED_EXP_ID, 'A title', 'A category')
exp_services.save_new_exploration(self.owner_id, exploration)
rights_manager.assign_role(
self.owner_id, PUBLISHED_EXP_ID, self.editor_id,
rights_manager.ROLE_EDITOR)
rights_manager.publish_exploration(self.owner_id, PUBLISHED_EXP_ID)
self.login(self.EDITOR_EMAIL)
response = self.testapp.delete(
'/createhandler/data/%s' % PUBLISHED_EXP_ID, expect_errors=True)
self.assertEqual(response.status_int, 401)
self.logout()
self.login(self.VIEWER_EMAIL)
response = self.testapp.delete(
'/createhandler/data/%s' % PUBLISHED_EXP_ID, expect_errors=True)
self.assertEqual(response.status_int, 401)
self.logout()
self.login(self.OWNER_EMAIL)
response = self.testapp.delete(
'/createhandler/data/%s' % PUBLISHED_EXP_ID, expect_errors=True)
self.assertEqual(response.status_int, 401)
self.logout()
self.login(self.ADMIN_EMAIL)
response = self.testapp.delete(
'/createhandler/data/%s' % PUBLISHED_EXP_ID)
self.assertEqual(response.status_int, 200)
self.logout()
class VersioningIntegrationTest(BaseEditorControllerTest):
"""Test retrieval of and reverting to old exploration versions."""
def setUp(self):
"""Create exploration with two versions"""
super(VersioningIntegrationTest, self).setUp()
self.EXP_ID = '0'
exp_services.delete_demo(self.EXP_ID)
exp_services.load_demo(self.EXP_ID)
self.login(self.EDITOR_EMAIL)
# In version 2, change the objective and the initial state content.
exploration = exp_services.get_exploration_by_id(self.EXP_ID)
exp_services.update_exploration(
self.EDITOR_EMAIL, self.EXP_ID, [{
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective',
}, {
'cmd': 'edit_state_property',
'property_name': 'content',
'state_name': exploration.init_state_name,
'new_value': [{'type': 'text', 'value': 'ABC'}],
}], 'Change objective and init state content')
def test_reverting_to_old_exploration(self):
"""Test reverting to old exploration versions."""
# Open editor page
response = self.testapp.get(
'%s/%s' % (feconf.EDITOR_URL_PREFIX, self.EXP_ID))
csrf_token = self.get_csrf_token_from_response(response)
# May not revert to any version that's not 1
for rev_version in (-1, 0, 2, 3, 4, '1', ()):
response_dict = self.post_json(
'/createhandler/revert/%s' % self.EXP_ID, {
'current_version': 2,
'revert_to_version': rev_version
}, csrf_token, expect_errors=True, expected_status_int=400)
# Check error message
if not isinstance(rev_version, int):
self.assertIn('Expected an integer', response_dict['error'])
else:
self.assertIn('Cannot revert to version',
response_dict['error'])
# Check that exploration is really not reverted to old version
reader_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
init_state_name = reader_dict['exploration']['init_state_name']
init_state_data = (
reader_dict['exploration']['states'][init_state_name])
init_content = init_state_data['content'][0]['value']
self.assertIn('ABC', init_content)
self.assertNotIn('Hi, welcome to Oppia!', init_content)
# Revert to version 1
rev_version = 1
response_dict = self.post_json(
'/createhandler/revert/%s' % self.EXP_ID, {
'current_version': 2,
'revert_to_version': rev_version
}, csrf_token)
# Check that exploration is really reverted to version 1
reader_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
init_state_name = reader_dict['exploration']['init_state_name']
init_state_data = (
reader_dict['exploration']['states'][init_state_name])
init_content = init_state_data['content'][0]['value']
self.assertNotIn('ABC', init_content)
self.assertIn('Hi, welcome to Oppia!', init_content)
def test_versioning_for_default_exploration(self):
"""Test retrieval of old exploration versions."""
# The latest version contains 'ABC'.
reader_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
init_state_name = reader_dict['exploration']['init_state_name']
init_state_data = (
reader_dict['exploration']['states'][init_state_name])
init_content = init_state_data['content'][0]['value']
self.assertIn('ABC', init_content)
self.assertNotIn('Hi, welcome to Oppia!', init_content)
# v1 contains 'Hi, welcome to Oppia!'.
reader_dict = self.get_json(
'%s/%s?v=1' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
init_state_name = reader_dict['exploration']['init_state_name']
init_state_data = (
reader_dict['exploration']['states'][init_state_name])
init_content = init_state_data['content'][0]['value']
self.assertIn('Hi, welcome to Oppia!', init_content)
self.assertNotIn('ABC', init_content)
# v2 contains 'ABC'.
reader_dict = self.get_json(
'%s/%s?v=2' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
init_state_name = reader_dict['exploration']['init_state_name']
init_state_data = (
reader_dict['exploration']['states'][init_state_name])
init_content = init_state_data['content'][0]['value']
self.assertIn('ABC', init_content)
self.assertNotIn('Hi, welcome to Oppia!', init_content)
# v3 does not exist.
response = self.testapp.get(
'%s/%s?v=3' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID),
expect_errors=True)
self.assertEqual(response.status_int, 404)
class ExplorationEditRightsTest(BaseEditorControllerTest):
"""Test the handling of edit rights for explorations."""
def test_user_banning(self):
"""Test that banned users are banned."""
EXP_ID = '0'
exp_services.delete_demo(EXP_ID)
exp_services.load_demo(EXP_ID)
# Sign-up new editors Joe and Sandra.
self.signup('joe@example.com', 'joe')
self.signup('sandra@example.com', 'sandra')
# Joe logs in.
self.login('joe@example.com')
response = self.testapp.get(feconf.GALLERY_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.get('/create/%s' % EXP_ID)
self.assertEqual(response.status_int, 200)
self.assert_can_edit(response.body)
# Ban joe.
config_services.set_property(
feconf.ADMIN_COMMITTER_ID, 'banned_usernames', ['joe'])
# Test that Joe is banned. (He can still access the gallery.)
response = self.testapp.get(feconf.GALLERY_URL, expect_errors=True)
self.assertEqual(response.status_int, 200)
response = self.testapp.get('/create/%s' % EXP_ID, expect_errors=True)
self.assertEqual(response.status_int, 200)
self.assert_cannot_edit(response.body)
# Joe logs out.
self.logout()
# Sandra logs in and is unaffected.
self.login('sandra@example.com')
response = self.testapp.get('/create/%s' % EXP_ID)
self.assertEqual(response.status_int, 200)
self.assert_can_edit(response.body)
self.logout()
class ExplorationRightsIntegrationTest(BaseEditorControllerTest):
"""Test the handler for managing exploration editing rights."""
COLLABORATOR_EMAIL = 'collaborator@example.com'
COLLABORATOR_USERNAME = 'collab'
COLLABORATOR2_EMAIL = 'collaborator2@example.com'
COLLABORATOR2_USERNAME = 'collab2'
COLLABORATOR3_EMAIL = 'collaborator3@example.com'
COLLABORATOR3_USERNAME = 'collab3'
VIEWER2_EMAIL = 'viewer2@example.com'
def test_exploration_rights_handler(self):
"""Test exploration rights handler."""
# Create several users
self.signup(
self.COLLABORATOR_EMAIL, username=self.COLLABORATOR_USERNAME)
self.signup(
self.COLLABORATOR2_EMAIL, username=self.COLLABORATOR2_USERNAME)
self.signup(
self.COLLABORATOR3_EMAIL, username=self.COLLABORATOR3_USERNAME)
self.collaborator_id = self.get_user_id_from_email(
self.COLLABORATOR_EMAIL)
# Owner creates exploration
self.login(self.OWNER_EMAIL)
EXP_ID = 'eid'
self.save_new_valid_exploration(
EXP_ID, self.owner_id, title='Title for rights handler test!',
category='My category')
exploration = exp_services.get_exploration_by_id(EXP_ID)
exploration.add_states(['State A', 'State 2', 'State 3'])
exploration.states['State A'].update_interaction_id('TextInput')
exploration.states['State 2'].update_interaction_id('TextInput')
exploration.states['State 3'].update_interaction_id('TextInput')
response = self.testapp.get(
'%s/%s' % (feconf.EDITOR_URL_PREFIX, EXP_ID))
csrf_token = self.get_csrf_token_from_response(response)
# Owner adds rights for other users
rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, EXP_ID)
self.put_json(
rights_url, {
'version': exploration.version,
'new_member_username': self.VIEWER_USERNAME,
'new_member_role': rights_manager.ROLE_VIEWER
}, csrf_token)
self.put_json(
rights_url, {
'version': exploration.version,
'new_member_username': self.COLLABORATOR_USERNAME,
'new_member_role': rights_manager.ROLE_EDITOR
}, csrf_token)
self.put_json(
rights_url, {
'version': exploration.version,
'new_member_username': self.COLLABORATOR2_USERNAME,
'new_member_role': rights_manager.ROLE_EDITOR
}, csrf_token)
self.logout()
# Check that viewer can access editor page but cannot edit.
self.login(self.VIEWER_EMAIL)
response = self.testapp.get('/create/%s' % EXP_ID, expect_errors=True)
self.assertEqual(response.status_int, 200)
self.assert_cannot_edit(response.body)
self.logout()
# Check that collaborator can access editor page and can edit.
self.login(self.COLLABORATOR_EMAIL)
response = self.testapp.get('/create/%s' % EXP_ID)
self.assertEqual(response.status_int, 200)
self.assert_can_edit(response.body)
csrf_token = self.get_csrf_token_from_response(response)
# Check that collaborator can add a new state called 'State 4'
add_url = '%s/%s' % (feconf.EXPLORATION_DATA_PREFIX, EXP_ID)
response_dict = self.put_json(
add_url,
{
'version': exploration.version,
'commit_message': 'Added State 4',
'change_list': [{
'cmd': 'add_state',
'state_name': 'State 4'
}, {
'cmd': 'edit_state_property',
'state_name': 'State 4',
'property_name': 'widget_id',
'new_value': 'TextInput',
}]
},
csrf_token=csrf_token,
expected_status_int=200
)
self.assertIn('State 4', response_dict['states'])
# Check that collaborator cannot add new members
exploration = exp_services.get_exploration_by_id(EXP_ID)
rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, EXP_ID)
response_dict = self.put_json(
rights_url, {
'version': exploration.version,
'new_member_username': self.COLLABORATOR3_USERNAME,
'new_member_role': rights_manager.ROLE_EDITOR,
}, csrf_token, expect_errors=True, expected_status_int=401)
self.assertEqual(response_dict['code'], 401)
self.logout()
# Check that collaborator2 can access editor page and can edit.
self.login(self.COLLABORATOR2_EMAIL)
response = self.testapp.get('/create/%s' % EXP_ID)
self.assertEqual(response.status_int, 200)
self.assert_can_edit(response.body)
csrf_token = self.get_csrf_token_from_response(response)
# Check that collaborator2 can add a new state called 'State 5'
add_url = '%s/%s' % (feconf.EXPLORATION_DATA_PREFIX, EXP_ID)
response_dict = self.put_json(
add_url,
{
'version': exploration.version,
'commit_message': 'Added State 5',
'change_list': [{
'cmd': 'add_state',
'state_name': 'State 5'
}, {
'cmd': 'edit_state_property',
'state_name': 'State 5',
'property_name': 'widget_id',
'new_value': 'TextInput',
}]
},
csrf_token=csrf_token,
expected_status_int=200
)
self.assertIn('State 5', response_dict['states'])
# Check that collaborator2 cannot add new members
exploration = exp_services.get_exploration_by_id(EXP_ID)
rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, EXP_ID)
response_dict = self.put_json(
rights_url, {
'version': exploration.version,
'new_member_username': self.COLLABORATOR3_USERNAME,
'new_member_role': rights_manager.ROLE_EDITOR,
}, csrf_token, expect_errors=True, expected_status_int=401)
self.assertEqual(response_dict['code'], 401)
self.logout()
|
tdyas/pants
|
refs/heads/master
|
contrib/awslambda/python/src/python/pants/__init__.py
|
321
|
__import__("pkg_resources").declare_namespace(__name__)
|
vishwaprakashmishra/xmatrix
|
refs/heads/master
|
vumi/transports/smpp/processors/sixdee.py
|
3
|
# -*- test-case-name: vumi.transports.smpp.tests.test_sixdee -*-
from vumi.config import ConfigInt
from vumi.components.session import SessionManager
from vumi.message import TransportUserMessage
from vumi.transports.smpp.processors import default
from vumi import log
from twisted.internet.defer import inlineCallbacks, returnValue
def make_vumi_session_identifier(msisdn, sixdee_session_identifier):
return '%s+%s' % (msisdn, sixdee_session_identifier)
class DeliverShortMessageProcessorConfig(
default.DeliverShortMessageProcessorConfig):
max_session_length = ConfigInt(
        'Maximum time, in seconds, that USSD session data is kept for.',
default=60 * 3, static=True)
class DeliverShortMessageProcessor(default.DeliverShortMessageProcessor):
CONFIG_CLASS = DeliverShortMessageProcessorConfig
    # NOTE: these keys are hexadecimal because of python-smpp encoding
    # quirkiness.
ussd_service_op_map = {
'01': 'new',
'12': 'continue',
'81': 'close', # user abort
}
def __init__(self, transport, config):
super(DeliverShortMessageProcessor, self).__init__(transport, config)
self.transport = transport
self.redis = transport.redis
self.config = self.CONFIG_CLASS(config, static=True)
self.session_manager = SessionManager(
self.redis, max_session_length=self.config.max_session_length)
@inlineCallbacks
def handle_deliver_sm_ussd(self, pdu, pdu_params, pdu_opts):
service_op = pdu_opts['ussd_service_op']
# 6D uses its_session_info as follows:
#
        # * First 15 bits: dialog id (i.e. session id)
# * Last bit: end session (1 to end, 0 to continue)
its_session_number = int(pdu_opts['its_session_info'], 16)
end_session = bool(its_session_number % 2)
sixdee_session_identifier = "%04x" % (its_session_number & 0xfffe)
vumi_session_identifier = make_vumi_session_identifier(
pdu_params['source_addr'], sixdee_session_identifier)
if end_session:
session_event = 'close'
else:
session_event = self.ussd_service_op_map.get(service_op)
if session_event == 'new':
# PSSR request. Let's assume it means a new session.
ussd_code = pdu_params['short_message']
content = None
yield self.session_manager.create_session(
vumi_session_identifier, ussd_code=ussd_code)
elif session_event == 'close':
session = yield self.session_manager.load_session(
vumi_session_identifier)
ussd_code = session['ussd_code']
content = None
yield self.session_manager.clear_session(vumi_session_identifier)
else:
if session_event != 'continue':
log.warning(('Received unknown %r ussd_service_op, '
'assuming continue.') % (service_op,))
session_event = 'continue'
session = yield self.session_manager.load_session(
vumi_session_identifier)
ussd_code = session['ussd_code']
content = pdu_params['short_message']
# This is stashed on the message and available when replying
# with a `submit_sm`
session_info = {
'ussd_service_op': service_op,
'session_identifier': sixdee_session_identifier,
}
decoded_msg = self.dcs_decode(content,
pdu_params['data_coding'])
result = yield self.handle_short_message_content(
source_addr=pdu_params['source_addr'],
destination_addr=ussd_code,
short_message=decoded_msg,
message_type='ussd',
session_event=session_event,
session_info=session_info)
returnValue(result)
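# --- Editorial sketch (not part of the original module) ---------------------
# Round trip of the 16-bit its_session_info layout described above: the top
# 15 bits carry 6D's dialog id and the lowest bit flags end-of-session. The
# hex value used here is made up purely for illustration.
def _example_its_session_info_roundtrip():
    raw = '1a2d'                                    # as received in pdu_opts
    value = int(raw, 16)
    end_session = bool(value % 2)                   # lowest bit set -> close
    session_identifier = "%04x" % (value & 0xfffe)  # dialog id, flag cleared
    # Re-encoding for an outbound reply that closes the session
    # (continue_session=False), mirroring handle_outbound_message below:
    continue_session = False
    reencoded = "%04x" % (
        int(session_identifier, 16) | int(not continue_session))
    assert (end_session, session_identifier, reencoded) == (True, '1a2c', '1a2d')
    return session_identifier, end_session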
class SubmitShortMessageProcessorConfig(
default.SubmitShortMessageProcessorConfig):
max_session_length = ConfigInt(
        'Maximum time, in seconds, that USSD session data is kept for.',
default=60 * 3, static=True)
class SubmitShortMessageProcessor(default.SubmitShortMessageProcessor):
CONFIG_CLASS = SubmitShortMessageProcessorConfig
    # NOTE: these values are hexadecimal because of python-smpp encoding
    # quirkiness.
ussd_service_op_map = {
'continue': '02',
'close': '17', # end
}
def __init__(self, transport, config):
super(SubmitShortMessageProcessor, self).__init__(transport, config)
self.transport = transport
self.redis = transport.redis
self.config = self.CONFIG_CLASS(config, static=True)
self.session_manager = SessionManager(
self.redis, max_session_length=self.config.max_session_length)
@inlineCallbacks
def handle_outbound_message(self, message, protocol):
to_addr = message['to_addr']
from_addr = message['from_addr']
text = message['content']
if text is None:
text = u""
vumi_message_id = message['message_id']
session_event = message['session_event']
transport_type = message['transport_type']
optional_parameters = {}
if transport_type == 'ussd':
continue_session = (
session_event != TransportUserMessage.SESSION_CLOSE)
session_info = message['transport_metadata'].get(
'session_info', {})
sixdee_session_identifier = session_info.get(
'session_identifier', '')
vumi_session_identifier = make_vumi_session_identifier(
to_addr, sixdee_session_identifier)
its_session_info = (
int(sixdee_session_identifier, 16) |
int(not continue_session))
service_op = self.ussd_service_op_map[('continue'
if continue_session
else 'close')]
optional_parameters.update({
'ussd_service_op': service_op,
'its_session_info': "%04x" % (its_session_info,)
})
if not continue_session:
yield self.session_manager.clear_session(
vumi_session_identifier)
if self.config.send_long_messages:
resp = yield protocol.submit_sm_long(
vumi_message_id,
to_addr.encode('ascii'),
long_message=text.encode(self.config.submit_sm_encoding),
data_coding=self.config.submit_sm_data_coding,
source_addr=from_addr.encode('ascii'),
optional_parameters=optional_parameters,
)
elif self.config.send_multipart_sar:
resp = yield protocol.submit_csm_sar(
vumi_message_id,
to_addr.encode('ascii'),
short_message=text.encode(self.config.submit_sm_encoding),
data_coding=self.config.submit_sm_data_coding,
source_addr=from_addr.encode('ascii'),
optional_parameters=optional_parameters,
)
elif self.config.send_multipart_udh:
resp = yield protocol.submit_csm_udh(
vumi_message_id,
to_addr.encode('ascii'),
short_message=text.encode(self.config.submit_sm_encoding),
data_coding=self.config.submit_sm_data_coding,
source_addr=from_addr.encode('ascii'),
optional_parameters=optional_parameters,
)
else:
resp = yield protocol.submit_sm(
vumi_message_id,
to_addr.encode('ascii'),
short_message=text.encode(self.config.submit_sm_encoding),
data_coding=self.config.submit_sm_data_coding,
source_addr=from_addr.encode('ascii'),
optional_parameters=optional_parameters,
)
returnValue(resp)
|
winklerand/pandas
|
refs/heads/master
|
doc/make.py
|
8
|
#!/usr/bin/env python
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for pandas
installed. See the installation instructions for a list of these.
Note (2014-01-30): LaTeX builds still have some issues with table formats that
are not supported in the LaTeX generation, but the 'latex_forced' target works
ok for 0.13.0-400 or so.
Usage
-----
python make.py clean
python make.py html
"""
from __future__ import print_function
import io
import glob # noqa
import os
import shutil
import sys
from contextlib import contextmanager
import sphinx # noqa
import argparse
import jinja2 # noqa
os.environ['PYTHONPATH'] = '..'
SPHINX_BUILD = 'sphinxbuild'
def _process_user(user):
if user is None or user is False:
user = ''
else:
user = user + '@'
return user
def upload_dev(user=None):
'push a copy to the pydata dev directory'
user = _process_user(user)
if os.system('cd build/html; rsync -avz . {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/dev/ -essh'.format(user)):
raise SystemExit('Upload to Pydata Dev failed')
def upload_dev_pdf(user=None):
'push a copy to the pydata dev directory'
user = _process_user(user)
if os.system('cd build/latex; scp pandas.pdf {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/dev/'.format(user)):
raise SystemExit('PDF upload to Pydata Dev failed')
def upload_stable(user=None):
'push a copy to the pydata stable directory'
user = _process_user(user)
if os.system('cd build/html; rsync -avz . {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/stable/ -essh'.format(user)):
raise SystemExit('Upload to stable failed')
def upload_stable_pdf(user=None):
    'push a copy to the pydata stable directory'
user = _process_user(user)
if os.system('cd build/latex; scp pandas.pdf {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/stable/'.format(user)):
raise SystemExit('PDF upload to stable failed')
def upload_prev(ver, doc_root='./', user=None):
    'push a copy of an older release to the appropriate version directory'
user = _process_user(user)
local_dir = doc_root + 'build/html'
remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver
cmd = 'cd %s; rsync -avz . %spandas.pydata.org:%s -essh'
cmd = cmd % (local_dir, user, remote_dir)
print(cmd)
if os.system(cmd):
raise SystemExit(
'Upload to %s from %s failed' % (remote_dir, local_dir))
local_dir = doc_root + 'build/latex'
pdf_cmd = 'cd %s; scp pandas.pdf %spandas.pydata.org:%s'
pdf_cmd = pdf_cmd % (local_dir, user, remote_dir)
if os.system(pdf_cmd):
raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root))
def build_pandas():
os.chdir('..')
os.system('python setup.py clean')
os.system('python setup.py build_ext --inplace')
os.chdir('doc')
def build_prev(ver, user=None):  # 'user' accepted to match the call in main()
    if os.system('git checkout v%s' % ver) == 0:
os.chdir('..')
os.system('python setup.py clean')
os.system('python setup.py build_ext --inplace')
os.chdir('doc')
os.system('python make.py clean')
os.system('python make.py html')
os.system('python make.py latex')
os.system('git checkout master')
def clean():
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('source/generated'):
shutil.rmtree('source/generated')
@contextmanager
def maybe_exclude_notebooks():
"""
Skip building the notebooks if pandoc is not installed.
This assumes that nbsphinx is installed.
"""
base = os.path.dirname(__file__)
notebooks = [os.path.join(base, 'source', nb)
for nb in ['style.ipynb']]
contents = {}
def _remove_notebooks():
for nb in notebooks:
with open(nb, 'rt') as f:
contents[nb] = f.read()
os.remove(nb)
# Skip notebook conversion if
# 1. nbconvert isn't installed, or
# 2. nbconvert is installed, but pandoc isn't
try:
import nbconvert
except ImportError:
print("Warning: nbconvert not installed. Skipping notebooks.")
_remove_notebooks()
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
print("Warning: Pandoc is not installed. Skipping notebooks.")
_remove_notebooks()
yield
for nb, content in contents.items():
with open(nb, 'wt') as f:
f.write(content)
def html():
check_build()
with maybe_exclude_notebooks():
if os.system('sphinx-build -P -b html -d build/doctrees '
'source build/html'):
raise SystemExit("Building HTML failed.")
try:
# remove stale file
os.remove('build/html/pandas.zip')
    except OSError:
pass
def zip_html():
try:
print("\nZipping up HTML docs...")
# just in case the wonky build box doesn't have zip
# don't fail this.
os.system('cd build; rm -f html/pandas.zip; zip html/pandas.zip -r -q html/* ')
print("\n")
except:
pass
def latex():
check_build()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Call the makefile produced by sphinx...
if os.system('make'):
print("Rendering LaTeX failed.")
print("You may still be able to get a usable PDF file by going into 'build/latex'")
print("and executing 'pdflatex pandas.tex' for the requisite number of passes.")
print("Or using the 'latex_forced' target")
raise SystemExit
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def latex_forced():
check_build()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Manually call pdflatex, 3 passes should ensure latex fixes up
# all the required cross-references and such.
os.system('pdflatex -interaction=nonstopmode pandas.tex')
os.system('pdflatex -interaction=nonstopmode pandas.tex')
os.system('pdflatex -interaction=nonstopmode pandas.tex')
raise SystemExit("You should check the file 'build/latex/pandas.pdf' for problems.")
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def check_build():
build_dirs = [
'build', 'build/doctrees', 'build/html',
'build/latex', 'build/plots', 'build/_static',
'build/_templates']
for d in build_dirs:
try:
os.mkdir(d)
except OSError:
pass
def all():
# clean()
html()
def auto_dev_build(debug=False):
msg = ''
try:
step = 'clean'
clean()
step = 'html'
html()
step = 'upload dev'
upload_dev()
if not debug:
sendmail(step)
step = 'latex'
latex()
step = 'upload pdf'
upload_dev_pdf()
if not debug:
sendmail(step)
except (Exception, SystemExit) as inst:
msg = str(inst) + '\n'
sendmail(step, '[ERROR] ' + msg)
def sendmail(step=None, err_msg=None):
from_name, to_name = _get_config()
if step is None:
step = ''
if err_msg is None or '[ERROR]' not in err_msg:
msgstr = 'Daily docs %s completed successfully' % step
subject = "DOC: %s successful" % step
else:
msgstr = err_msg
subject = "DOC: %s failed" % step
import smtplib
from email.MIMEText import MIMEText
msg = MIMEText(msgstr)
msg['Subject'] = subject
msg['From'] = from_name
msg['To'] = to_name
server_str, port, login, pwd = _get_credentials()
server = smtplib.SMTP(server_str, port)
server.ehlo()
server.starttls()
server.ehlo()
server.login(login, pwd)
try:
server.sendmail(from_name, to_name, msg.as_string())
finally:
server.close()
def _get_dir(subdir=None):
import getpass
USERNAME = getpass.getuser()
if sys.platform == 'darwin':
HOME = '/Users/%s' % USERNAME
else:
HOME = '/home/%s' % USERNAME
if subdir is None:
subdir = '/code/scripts/config'
conf_dir = '%s/%s' % (HOME, subdir)
return conf_dir
def _get_credentials():
tmp_dir = _get_dir()
cred = '%s/credentials' % tmp_dir
with open(cred, 'r') as fh:
server, port, un, domain = fh.read().split(',')
port = int(port)
login = un + '@' + domain + '.com'
import base64
with open('%s/cron_email_pwd' % tmp_dir, 'r') as fh:
pwd = base64.b64decode(fh.read())
return server, port, login, pwd
def _get_config():
tmp_dir = _get_dir()
with open('%s/addresses' % tmp_dir, 'r') as fh:
from_name, to_name = fh.read().split(',')
return from_name, to_name
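# Editorial note: the two helpers above read small comma-separated config
# files from ~/code/scripts/config. Hypothetical examples of their layout
# (all values made up for illustration):
#   credentials:    smtp.example.com,587,docsbuilder,example
#   cron_email_pwd: <base64-encoded SMTP password>
#   addresses:      builder@example.com,pandas-dev@example.com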
funcd = {
'html': html,
'zip_html': zip_html,
'upload_dev': upload_dev,
'upload_stable': upload_stable,
'upload_dev_pdf': upload_dev_pdf,
'upload_stable_pdf': upload_stable_pdf,
'latex': latex,
'latex_forced': latex_forced,
'clean': clean,
'auto_dev': auto_dev_build,
'auto_debug': lambda: auto_dev_build(True),
'build_pandas': build_pandas,
'all': all,
}
small_docs = False
# current_dir = os.getcwd()
# os.chdir(os.path.dirname(os.path.join(current_dir, __file__)))
import argparse
argparser = argparse.ArgumentParser(description="""
pandas documentation builder
""".strip())
# argparser.add_argument('-arg_name', '--arg_name',
# metavar='label for arg help',
# type=str|etc,
# nargs='N|*|?|+|argparse.REMAINDER',
# required=False,
# #choices='abc',
# help='help string',
# action='store|store_true')
# args = argparser.parse_args()
#print args.accumulate(args.integers)
def generate_index(api=True, single=False, **kwds):
from jinja2 import Template
with open("source/index.rst.template") as f:
t = Template(f.read())
with open("source/index.rst","w") as f:
f.write(t.render(api=api,single=single,**kwds))
import argparse
argparser = argparse.ArgumentParser(description="pandas documentation builder",
epilog="Targets : %s" % funcd.keys())
argparser.add_argument('--no-api',
default=False,
                       help='Omit api and autosummary',
action='store_true')
argparser.add_argument('--single',
metavar='FILENAME',
type=str,
default=False,
help='filename of section to compile, e.g. "indexing"')
argparser.add_argument('--user',
type=str,
default=False,
help='Username to connect to the pydata server')
def main():
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
if args.single:
args.single = os.path.basename(args.single).split(".rst")[0]
if 'clean' in unknown:
            args.single = False
generate_index(api=not args.no_api and not args.single, single=args.single)
if len(sys.argv) > 2:
ftype = sys.argv[1]
ver = sys.argv[2]
if ftype == 'build_previous':
build_prev(ver, user=args.user)
if ftype == 'upload_previous':
upload_prev(ver, user=args.user)
elif len(sys.argv) == 2:
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
raise SystemExit('Do not know how to handle %s; valid args are %s' % (
arg, list(funcd.keys())))
if args.user:
func(user=args.user)
else:
func()
else:
small_docs = False
all()
# os.chdir(current_dir)
if __name__ == '__main__':
import sys
sys.exit(main())
|
chokribr/invenioold
|
refs/heads/master
|
modules/bibauthorid/lib/bibauthorid_cluster_set_unit_tests.py
|
11
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the search engine."""
__revision__ = \
"$Id$"
from itertools import chain
from invenio.testutils import InvenioTestCase, make_test_suite, run_test_suite
from invenio.bibauthorid_cluster_set import ClusterSet
class TestCluster(InvenioTestCase):
def setUp(self):
self.clusters = [ClusterSet.Cluster(range(i*10,i*10+10)) for i in range(3)]
def test_quarrel_hate(self):
c1 = self.clusters[0]
c2 = self.clusters[1]
self.assertFalse(c1.hates(c2))
c1.quarrel(c2)
self.assertTrue(c1.hates(c2))
self.assertTrue(c2.hates(c1))
class TestClusterSet(InvenioTestCase):
def setUp(self):
self.clusters = [ClusterSet.Cluster(range(i*10,i*10+5)) for i in range(10)]
    def test_update_all_bibs(self):
c = ClusterSet()
c.clusters = self.clusters
c.update_bibs()
self.assertTrue(c.num_all_bibs == 50)
self.assertTrue( sorted(list((c.all_bibs()))) ==
list(chain.from_iterable(range(i*10,i*10+5) for i in range(10))))
TEST_SUITE = make_test_suite(TestCluster, TestClusterSet)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
|
thinkopensolutions/geraldo
|
refs/heads/master
|
site/newsite/site-geraldo/django/contrib/localflavor/uk/forms.py
|
32
|
"""
UK-specific Form helpers
"""
import re
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class UKPostcodeField(CharField):
"""
A form field that validates its input is a UK postcode.
The regular expression used is sourced from the schema for British Standard
BS7666 address types: http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd
The value is uppercased and a space added in the correct place, if required.
"""
default_error_messages = {
'invalid': _(u'Enter a valid postcode.'),
}
outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
space_regex = re.compile(r' *(%s)$' % incode_pattern)
def clean(self, value):
value = super(UKPostcodeField, self).clean(value)
if value == u'':
return value
postcode = value.upper().strip()
# Put a single space before the incode (second part).
postcode = self.space_regex.sub(r' \1', postcode)
if not self.postcode_regex.search(postcode):
raise ValidationError(self.default_error_messages['invalid'])
return postcode
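# --- Editorial sketch (not part of the original module) ---------------------
# Expected normalisation behaviour of UKPostcodeField.clean(); assumes a
# configured Django environment so the field's validation machinery can run.
def _example_postcode_cleaning():
    field = UKPostcodeField()
    assert field.clean('sw1a1aa') == u'SW1A 1AA'    # uppercased, space added
    assert field.clean('BT32 4PX') == u'BT32 4PX'   # already normalised
    try:
        field.clean('NOT A POSTCODE')               # fails the BS7666 regex
        raise AssertionError('expected ValidationError')
    except ValidationError:
        pass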
class UKCountySelect(Select):
"""
A Select widget that uses a list of UK Counties/Regions as its choices.
"""
def __init__(self, attrs=None):
from uk_regions import UK_REGION_CHOICES
super(UKCountySelect, self).__init__(attrs, choices=UK_REGION_CHOICES)
class UKNationSelect(Select):
"""
A Select widget that uses a list of UK Nations as its choices.
"""
def __init__(self, attrs=None):
from uk_regions import UK_NATIONS_CHOICES
super(UKNationSelect, self).__init__(attrs, choices=UK_NATIONS_CHOICES)
|
MycChiu/tensorflow
|
refs/heads/master
|
tensorflow/python/platform/flags.py
|
85
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the flags interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse as _argparse
from tensorflow.python.util.all_util import remove_undocumented
_global_parser = _argparse.ArgumentParser()
# pylint: disable=invalid-name
class _FlagValues(object):
"""Global container and accessor for flags and their values."""
def __init__(self):
self.__dict__['__flags'] = {}
self.__dict__['__parsed'] = False
def _parse_flags(self, args=None):
result, unparsed = _global_parser.parse_known_args(args=args)
for flag_name, val in vars(result).items():
self.__dict__['__flags'][flag_name] = val
self.__dict__['__parsed'] = True
return unparsed
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
if name not in self.__dict__['__flags']:
raise AttributeError(name)
return self.__dict__['__flags'][name]
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
self.__dict__['__flags'][name] = value
def _define_helper(flag_name, default_value, docstring, flagtype):
"""Registers 'flag_name' with 'default_value' and 'docstring'."""
_global_parser.add_argument('--' + flag_name,
default=default_value,
help=docstring,
type=flagtype)
# Provides the global object that can be used to access flags.
FLAGS = _FlagValues()
def DEFINE_string(flag_name, default_value, docstring):
"""Defines a flag of type 'string'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a string.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, str)
def DEFINE_integer(flag_name, default_value, docstring):
"""Defines a flag of type 'int'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as an int.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, int)
def DEFINE_boolean(flag_name, default_value, docstring):
"""Defines a flag of type 'boolean'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a boolean.
docstring: A helpful message explaining the use of the flag.
"""
# Register a custom function for 'bool' so --flag=True works.
def str2bool(v):
return v.lower() in ('true', 't', '1')
_global_parser.add_argument('--' + flag_name,
nargs='?',
const=True,
help=docstring,
default=default_value,
type=str2bool)
# Add negated version, stay consistent with argparse with regard to
# dashes in flag names.
_global_parser.add_argument('--no' + flag_name,
action='store_false',
dest=flag_name.replace('-', '_'))
# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name
def DEFINE_float(flag_name, default_value, docstring):
"""Defines a flag of type 'float'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a float.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, float)
_allowed_symbols = [
# We rely on gflags documentation.
'DEFINE_bool',
'DEFINE_boolean',
'DEFINE_float',
'DEFINE_integer',
'DEFINE_string',
'FLAGS',
]
remove_undocumented(__name__, _allowed_symbols)
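# --- Editorial sketch (not part of the original module) ----------------------
# Typical use of this argparse wrapper; the flag names below are made up for
# the example. Parsing is lazy (triggered by the first FLAGS attribute access)
# but can also be invoked explicitly, as done here.
def _example_flag_usage():
  DEFINE_string('data_dir', '/tmp/data', 'Directory with input data.')
  DEFINE_boolean('use_gpu', True, 'Whether to place ops on the GPU.')
  # DEFINE_boolean also registers the negated '--nouse_gpu' form.
  FLAGS._parse_flags(args=['--data_dir=/data', '--nouse_gpu'])
  assert FLAGS.data_dir == '/data'
  assert FLAGS.use_gpu is False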
|
ajdawson/iris
|
refs/heads/master
|
lib/iris/tests/test_analysis.py
|
5
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import cartopy.crs as ccrs
import cf_units
import numpy as np
import numpy.ma as ma
import iris
import iris.analysis.cartography
import iris.analysis.maths
import iris.coord_systems
import iris.coords
import iris.cube
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib
import matplotlib.pyplot as plt
class TestAnalysisCubeCoordComparison(tests.IrisTest):
def assertComparisonDict(self, comparison_dict, reference_filename):
string = ''
for key in sorted(comparison_dict):
coord_groups = comparison_dict[key]
string += ('%40s ' % key)
names = [[coord.name() if coord is not None else 'None'
for coord in coords]
for coords in coord_groups]
string += str(sorted(names))
string += '\n'
self.assertString(string, reference_filename)
def test_coord_comparison(self):
cube1 = iris.cube.Cube(np.zeros((41, 41)))
lonlat_cs = iris.coord_systems.GeogCS(6371229)
lon_points1 = -180 + 4.5 * np.arange(41, dtype=np.float32)
lat_points = -90 + 4.5 * np.arange(41, dtype=np.float32)
cube1.add_dim_coord(iris.coords.DimCoord(lon_points1, 'longitude', units='degrees', coord_system=lonlat_cs), 0)
cube1.add_dim_coord(iris.coords.DimCoord(lat_points, 'latitude', units='degrees', coord_system=lonlat_cs), 1)
cube1.add_aux_coord(iris.coords.AuxCoord(0, long_name='z'))
cube1.add_aux_coord(iris.coords.AuxCoord(['foobar'], long_name='f', units='no_unit'))
cube2 = iris.cube.Cube(np.zeros((41, 41, 5)))
lonlat_cs = iris.coord_systems.GeogCS(6371229)
lon_points2 = -160 + 4.5 * np.arange(41, dtype=np.float32)
cube2.add_dim_coord(iris.coords.DimCoord(lon_points2, 'longitude', units='degrees', coord_system=lonlat_cs), 0)
cube2.add_dim_coord(iris.coords.DimCoord(lat_points, 'latitude', units='degrees', coord_system=lonlat_cs), 1)
cube2.add_dim_coord(iris.coords.DimCoord([5, 7, 9, 11, 13], long_name='z'), 2)
cube3 = cube1.copy()
lon = cube3.coord("longitude")
lat = cube3.coord("latitude")
cube3.remove_coord(lon)
cube3.remove_coord(lat)
cube3.add_dim_coord(lon, 1)
cube3.add_dim_coord(lat, 0)
cube3.coord('z').points = [20]
cube4 = cube2.copy()
lon = cube4.coord("longitude")
lat = cube4.coord("latitude")
cube4.remove_coord(lon)
cube4.remove_coord(lat)
cube4.add_dim_coord(lon, 1)
cube4.add_dim_coord(lat, 0)
coord_comparison = iris.analysis.coord_comparison
self.assertComparisonDict(coord_comparison(cube1, cube1), ('analysis', 'coord_comparison', 'cube1_cube1.txt'))
self.assertComparisonDict(coord_comparison(cube1, cube2), ('analysis', 'coord_comparison', 'cube1_cube2.txt'))
self.assertComparisonDict(coord_comparison(cube1, cube3), ('analysis', 'coord_comparison', 'cube1_cube3.txt'))
self.assertComparisonDict(coord_comparison(cube1, cube4), ('analysis', 'coord_comparison', 'cube1_cube4.txt'))
self.assertComparisonDict(coord_comparison(cube2, cube3), ('analysis', 'coord_comparison', 'cube2_cube3.txt'))
self.assertComparisonDict(coord_comparison(cube2, cube4), ('analysis', 'coord_comparison', 'cube2_cube4.txt'))
self.assertComparisonDict(coord_comparison(cube3, cube4), ('analysis', 'coord_comparison', 'cube3_cube4.txt'))
self.assertComparisonDict(coord_comparison(cube1, cube1, cube1), ('analysis', 'coord_comparison', 'cube1_cube1_cube1.txt'))
self.assertComparisonDict(coord_comparison(cube1, cube2, cube1), ('analysis', 'coord_comparison', 'cube1_cube2_cube1.txt'))
# get a coord comparison result and check that we are getting back what was expected
coord_group = coord_comparison(cube1, cube2)['grouped_coords'][0]
self.assertIsInstance(coord_group, iris.analysis._CoordGroup)
self.assertIsInstance(list(coord_group)[0], iris.coords.Coord)
class TestAnalysisWeights(tests.IrisTest):
def test_weighted_mean_little(self):
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
weights = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=np.float32)
cube = iris.cube.Cube(data, long_name="test_data", units="1")
hcs = iris.coord_systems.GeogCS(6371229)
lat_coord = iris.coords.DimCoord(np.array([1, 2, 3], dtype=np.float32), long_name="lat", units="1", coord_system=hcs)
lon_coord = iris.coords.DimCoord(np.array([1, 2, 3], dtype=np.float32), long_name="lon", units="1", coord_system=hcs)
cube.add_dim_coord(lat_coord, 0)
cube.add_dim_coord(lon_coord, 1)
cube.add_aux_coord(iris.coords.AuxCoord(np.arange(3, dtype=np.float32), long_name="dummy", units=1), 1)
self.assertCML(cube, ('analysis', 'weighted_mean_source.cml'))
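        # Editorial note: e.g. the first element of the 'lat'-collapsed result
        # below is the weighted mean of the first data column,
        # (1*9 + 4*6 + 7*3) / (9 + 6 + 3) = 54 / 18 = 3.0.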
a = cube.collapsed('lat', iris.analysis.MEAN, weights=weights)
self.assertCMLApproxData(a, ('analysis', 'weighted_mean_lat.cml'))
b = cube.collapsed(lon_coord, iris.analysis.MEAN, weights=weights)
b.data = np.asarray(b.data)
self.assertCMLApproxData(b, ('analysis', 'weighted_mean_lon.cml'))
self.assertEqual(b.coord('dummy').shape, (1, ))
# test collapsing multiple coordinates (and the fact that one of the coordinates isn't the same coordinate instance as on the cube)
c = cube.collapsed([lat_coord[:], lon_coord], iris.analysis.MEAN, weights=weights)
self.assertCMLApproxData(c, ('analysis', 'weighted_mean_latlon.cml'))
self.assertEqual(c.coord('dummy').shape, (1, ))
# Check new coord bounds - made from points
self.assertArrayEqual(c.coord('lat').bounds, [[1, 3]])
# Check new coord bounds - made from bounds
cube.coord('lat').bounds = [[0.5, 1.5], [1.5, 2.5], [2.5, 3.5]]
c = cube.collapsed(['lat', 'lon'], iris.analysis.MEAN, weights=weights)
self.assertArrayEqual(c.coord('lat').bounds, [[0.5, 3.5]])
cube.coord('lat').bounds = None
# Check there was no residual change
self.assertCML(cube, ('analysis', 'weighted_mean_source.cml'))
@tests.skip_data
def test_weighted_mean(self):
### compare with pp_area_avg - which collapses both lat and lon
#
# pp = ppa('/data/local/dataZoo/PP/simple_pp/global.pp', 0)
# print, pp_area(pp, /box)
# print, pp_area_avg(pp, /box) #287.927
# ;gives an answer of 287.927
#
###
e = iris.tests.stock.simple_pp()
self.assertCML(e, ('analysis', 'weighted_mean_original.cml'))
e.coord('latitude').guess_bounds()
e.coord('longitude').guess_bounds()
area_weights = iris.analysis.cartography.area_weights(e)
e.coord('latitude').bounds = None
e.coord('longitude').bounds = None
f, collapsed_area_weights = e.collapsed('latitude', iris.analysis.MEAN, weights=area_weights, returned=True)
g = f.collapsed('longitude', iris.analysis.MEAN, weights=collapsed_area_weights)
# check it's a 0d, scalar cube
self.assertEqual(g.shape, ())
# check the value - pp_area_avg's result of 287.927 differs by factor of 1.00002959
np.testing.assert_approx_equal(g.data, 287.935, significant=5)
        # Check we get summed weights even if we don't give any.
        h, summed_weights = e.collapsed('latitude', iris.analysis.MEAN, returned=True)
        self.assertIsNotNone(summed_weights)
# Check there was no residual change
e.coord('latitude').bounds = None
e.coord('longitude').bounds = None
self.assertCML(e, ('analysis', 'weighted_mean_original.cml'))
# Test collapsing of missing coord
self.assertRaises(iris.exceptions.CoordinateNotFoundError, e.collapsed, 'platitude', iris.analysis.MEAN)
        # Test collapsing of a non-data coord
self.assertRaises(iris.exceptions.CoordinateCollapseError, e.collapsed, 'pressure', iris.analysis.MEAN)
@tests.skip_data
class TestAnalysisBasic(tests.IrisTest):
def setUp(self):
file = tests.get_data_path(('PP', 'aPProt1', 'rotatedMHtimecube.pp'))
cubes = iris.load(file)
self.cube = cubes[0]
self.assertCML(self.cube, ('analysis', 'original.cml'))
def _common(self, name, aggregate, original_name='original_common.cml', *args, **kwargs):
self.cube.data = self.cube.data.astype(np.float64)
self.assertCML(self.cube, ('analysis', original_name))
a = self.cube.collapsed('grid_latitude', aggregate)
self.assertCMLApproxData(a, ('analysis', '%s_latitude.cml' % name), *args, **kwargs)
b = a.collapsed('grid_longitude', aggregate)
self.assertCMLApproxData(b, ('analysis', '%s_latitude_longitude.cml' % name), *args, **kwargs)
c = self.cube.collapsed(['grid_latitude', 'grid_longitude'], aggregate)
self.assertCMLApproxData(c, ('analysis', '%s_latitude_longitude_1call.cml' % name), *args, **kwargs)
# Check there was no residual change
self.assertCML(self.cube, ('analysis', original_name))
def test_mean(self):
self._common('mean', iris.analysis.MEAN, decimal=1)
def test_std_dev(self):
# as the numbers are so high, trim off some trailing digits & compare to 0dp
self._common('std_dev', iris.analysis.STD_DEV, decimal=1)
def test_hmean(self):
# harmonic mean requires data > 0
self.cube.data *= self.cube.data
self._common('hmean', iris.analysis.HMEAN, 'original_hmean.cml', decimal=1)
def test_gmean(self):
self._common('gmean', iris.analysis.GMEAN, decimal=1)
def test_variance(self):
# as the numbers are so high, trim off some trailing digits & compare to 0dp
self._common('variance', iris.analysis.VARIANCE, decimal=1)
def test_median(self):
self._common('median', iris.analysis.MEDIAN)
def test_sum(self):
# as the numbers are so high, trim off some trailing digits & compare to 0dp
self._common('sum', iris.analysis.SUM, decimal=1)
def test_max(self):
self._common('max', iris.analysis.MAX)
def test_min(self):
self._common('min', iris.analysis.MIN)
def test_rms(self):
self._common('rms', iris.analysis.RMS)
def test_duplicate_coords(self):
self.assertRaises(ValueError, tests.stock.track_1d, duplicate_x=True)
class TestMissingData(tests.IrisTest):
def setUp(self):
self.cube_with_nan = tests.stock.simple_2d()
data = self.cube_with_nan.data.astype(np.float32)
self.cube_with_nan.data = data.copy()
self.cube_with_nan.data[1, 0] = np.nan
self.cube_with_nan.data[2, 2] = np.nan
self.cube_with_nan.data[2, 3] = np.nan
self.cube_with_mask = tests.stock.simple_2d()
self.cube_with_mask.data = ma.array(self.cube_with_nan.data,
mask=np.isnan(self.cube_with_nan.data))
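    # Editorial note: the paired assertions below illustrate that NaNs
    # propagate through MAX/MIN/SUM (yielding NaN results), whereas masked
    # points are simply excluded from the aggregation.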
def test_max(self):
cube = self.cube_with_nan.collapsed('foo', iris.analysis.MAX)
np.testing.assert_array_equal(cube.data, np.array([3, np.nan, np.nan]))
cube = self.cube_with_mask.collapsed('foo', iris.analysis.MAX)
np.testing.assert_array_equal(cube.data, np.array([3, 7, 9]))
def test_min(self):
cube = self.cube_with_nan.collapsed('foo', iris.analysis.MIN)
np.testing.assert_array_equal(cube.data, np.array([0, np.nan, np.nan]))
cube = self.cube_with_mask.collapsed('foo', iris.analysis.MIN)
np.testing.assert_array_equal(cube.data, np.array([0, 5, 8]))
def test_sum(self):
cube = self.cube_with_nan.collapsed('foo', iris.analysis.SUM)
np.testing.assert_array_equal(cube.data, np.array([6, np.nan, np.nan]))
cube = self.cube_with_mask.collapsed('foo', iris.analysis.SUM)
np.testing.assert_array_equal(cube.data, np.array([6, 18, 17]))
class TestAggregator_mdtol_keyword(tests.IrisTest):
def setUp(self):
data = ma.array([[1, 2], [4, 5]], dtype=np.float32,
mask=[[False, True], [False, True]])
cube = iris.cube.Cube(data, long_name="test_data", units="1")
lat_coord = iris.coords.DimCoord(np.array([1, 2], dtype=np.float32),
long_name="lat", units="1")
lon_coord = iris.coords.DimCoord(np.array([3, 4], dtype=np.float32),
long_name="lon", units="1")
cube.add_dim_coord(lat_coord, 0)
cube.add_dim_coord(lon_coord, 1)
self.cube = cube
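    # Editorial note: mdtol is the tolerated fraction of missing (masked)
    # input per output element; an element is masked once that fraction
    # exceeds mdtol. With the mask set in the tests below, the second 'lat'
    # column is half masked, so mdtol=0.5 keeps it while mdtol=0.4 masks it.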
def test_single_coord_no_mdtol(self):
collapsed = self.cube.collapsed(
self.cube.coord('lat'), iris.analysis.MEAN)
t = ma.array([2.5, 5.], mask=[False, True])
self.assertMaskedArrayEqual(collapsed.data, t)
def test_single_coord_mdtol(self):
self.cube.data.mask = np.array([[False, True], [False, False]])
collapsed = self.cube.collapsed(
self.cube.coord('lat'), iris.analysis.MEAN, mdtol=0.5)
t = ma.array([2.5, 5], mask=[False, False])
self.assertMaskedArrayEqual(collapsed.data, t)
def test_single_coord_mdtol_alt(self):
self.cube.data.mask = np.array([[False, True], [False, False]])
collapsed = self.cube.collapsed(
self.cube.coord('lat'), iris.analysis.MEAN, mdtol=0.4)
t = ma.array([2.5, 5], mask=[False, True])
self.assertMaskedArrayEqual(collapsed.data, t)
def test_multi_coord_no_mdtol(self):
collapsed = self.cube.collapsed(
[self.cube.coord('lat'), self.cube.coord('lon')],
iris.analysis.MEAN)
t = np.array(2.5)
self.assertArrayEqual(collapsed.data, t)
def test_multi_coord_mdtol(self):
collapsed = self.cube.collapsed(
[self.cube.coord('lat'), self.cube.coord('lon')],
iris.analysis.MEAN, mdtol=0.4)
t = ma.array(2.5, mask=True)
self.assertMaskedArrayEqual(collapsed.data, t)
class TestAggregators(tests.IrisTest):
def test_percentile_1d(self):
cube = tests.stock.simple_1d()
first_quartile = cube.collapsed('foo', iris.analysis.PERCENTILE,
percent=25)
np.testing.assert_array_almost_equal(first_quartile.data,
np.array([2.5], dtype=np.float32))
self.assertCML(first_quartile, ('analysis',
'first_quartile_foo_1d.cml'),
checksum=False)
third_quartile = cube.collapsed('foo', iris.analysis.PERCENTILE,
percent=75)
np.testing.assert_array_almost_equal(third_quartile.data,
np.array([7.5],
dtype=np.float32))
self.assertCML(third_quartile,
('analysis', 'third_quartile_foo_1d.cml'),
checksum=False)
def test_percentile_2d(self):
cube = tests.stock.simple_2d()
first_quartile = cube.collapsed('foo', iris.analysis.PERCENTILE,
percent=25)
np.testing.assert_array_almost_equal(first_quartile.data,
np.array([0.75, 4.75, 8.75],
dtype=np.float32))
self.assertCML(first_quartile, ('analysis',
'first_quartile_foo_2d.cml'),
checksum=False)
first_quartile = cube.collapsed(('foo', 'bar'),
iris.analysis.PERCENTILE, percent=25)
np.testing.assert_array_almost_equal(first_quartile.data,
np.array([2.75],
dtype=np.float32))
self.assertCML(first_quartile, ('analysis',
'first_quartile_foo_bar_2d.cml'),
checksum=False)
def test_percentile_3d(self):
array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
last_quartile = iris.analysis._percentile(array_3d, 0, 50)
np.testing.assert_array_almost_equal(last_quartile,
np.array([[6., 7., 8., 9.],
[10., 11., 12., 13.],
[14., 15., 16., 17.]],
dtype=np.float32))
def test_percentile_3d_axis_one(self):
array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
last_quartile = iris.analysis._percentile(array_3d, 1, 50)
np.testing.assert_array_almost_equal(last_quartile,
np.array([[4., 5., 6., 7.],
[16., 17., 18., 19.]],
dtype=np.float32))
def test_percentile_3d_axis_two(self):
array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
last_quartile = iris.analysis._percentile(array_3d, 2, 50)
np.testing.assert_array_almost_equal(last_quartile,
np.array([[1.5, 5.5, 9.5],
[13.5, 17.5, 21.5]],
dtype=np.float32))
def test_percentile_3d_masked(self):
cube = tests.stock.simple_3d_mask()
last_quartile = cube.collapsed('wibble',
iris.analysis.PERCENTILE, percent=75)
np.testing.assert_array_almost_equal(last_quartile.data,
np.array([[12., 13., 14., 15.],
[16., 17., 18., 19.],
[20., 18., 19., 20.]],
dtype=np.float32))
self.assertCML(last_quartile, ('analysis',
'last_quartile_foo_3d_masked.cml'),
checksum=False)
def test_percentile_3d_notmasked(self):
cube = tests.stock.simple_3d()
last_quartile = cube.collapsed('wibble',
iris.analysis.PERCENTILE, percent=75)
np.testing.assert_array_almost_equal(last_quartile.data,
np.array([[9., 10., 11., 12.],
[13., 14., 15., 16.],
[17., 18., 19., 20.]],
dtype=np.float32))
self.assertCML(last_quartile, ('analysis',
'last_quartile_foo_3d_notmasked.cml'),
checksum=False)
def test_proportion(self):
cube = tests.stock.simple_1d()
r = cube.data >= 5
gt5 = cube.collapsed('foo', iris.analysis.PROPORTION, function=lambda val: val >= 5)
np.testing.assert_array_almost_equal(gt5.data, np.array([6 / 11.]))
self.assertCML(gt5, ('analysis', 'proportion_foo_1d.cml'), checksum=False)
def test_proportion_2d(self):
cube = tests.stock.simple_2d()
gt6 = cube.collapsed('foo', iris.analysis.PROPORTION, function=lambda val: val >= 6)
np.testing.assert_array_almost_equal(gt6.data, np.array([0, 0.5, 1], dtype=np.float32))
self.assertCML(gt6, ('analysis', 'proportion_foo_2d.cml'), checksum=False)
gt6 = cube.collapsed('bar', iris.analysis.PROPORTION, function=lambda val: val >= 6)
np.testing.assert_array_almost_equal(gt6.data, np.array([1 / 3, 1 / 3, 2 / 3, 2 / 3], dtype=np.float32))
self.assertCML(gt6, ('analysis', 'proportion_bar_2d.cml'), checksum=False)
gt6 = cube.collapsed(('foo', 'bar'), iris.analysis.PROPORTION, function=lambda val: val >= 6)
np.testing.assert_array_almost_equal(gt6.data, np.array([0.5], dtype=np.float32))
self.assertCML(gt6, ('analysis', 'proportion_foo_bar_2d.cml'), checksum=False)
# mask the data
cube.data = ma.array(cube.data, mask=cube.data % 2)
cube.data.mask[1, 2] = True
gt6_masked = cube.collapsed('bar', iris.analysis.PROPORTION, function=lambda val: val >= 6)
np.testing.assert_array_almost_equal(gt6_masked.data, ma.array([1 / 3, None, 1 / 2, None],
mask=[False, True, False, True],
dtype=np.float32))
self.assertCML(gt6_masked, ('analysis', 'proportion_foo_2d_masked.cml'), checksum=False)
def test_count(self):
cube = tests.stock.simple_1d()
gt5 = cube.collapsed('foo', iris.analysis.COUNT, function=lambda val: val >= 5)
np.testing.assert_array_almost_equal(gt5.data, np.array([6]))
gt5.data = gt5.data.astype('i8')
self.assertCML(gt5, ('analysis', 'count_foo_1d.cml'), checksum=False)
def test_count_2d(self):
cube = tests.stock.simple_2d()
gt6 = cube.collapsed('foo', iris.analysis.COUNT, function=lambda val: val >= 6)
np.testing.assert_array_almost_equal(gt6.data, np.array([0, 2, 4], dtype=np.float32))
gt6.data = gt6.data.astype('i8')
self.assertCML(gt6, ('analysis', 'count_foo_2d.cml'), checksum=False)
gt6 = cube.collapsed('bar', iris.analysis.COUNT, function=lambda val: val >= 6)
np.testing.assert_array_almost_equal(gt6.data, np.array([1, 1, 2, 2], dtype=np.float32))
gt6.data = gt6.data.astype('i8')
self.assertCML(gt6, ('analysis', 'count_bar_2d.cml'), checksum=False)
gt6 = cube.collapsed(('foo', 'bar'), iris.analysis.COUNT, function=lambda val: val >= 6)
np.testing.assert_array_almost_equal(gt6.data, np.array([6], dtype=np.float32))
gt6.data = gt6.data.astype('i8')
self.assertCML(gt6, ('analysis', 'count_foo_bar_2d.cml'), checksum=False)
def test_weighted_sum_consistency(self):
# weighted sum with unit weights should be the same as a sum
cube = tests.stock.simple_1d()
normal_sum = cube.collapsed('foo', iris.analysis.SUM)
weights = np.ones_like(cube.data)
weighted_sum = cube.collapsed('foo', iris.analysis.SUM, weights=weights)
self.assertArrayAlmostEqual(normal_sum.data, weighted_sum.data)
def test_weighted_sum_1d(self):
# verify 1d weighted sum is correct
cube = tests.stock.simple_1d()
weights = np.array([.05, .05, .1, .1, .2, .3, .2, .1, .1, .05, .05])
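        # Editorial note (not in the original test): assuming the stock
        # simple_1d data is the sequence 0..10 (as the proportion test above
        # implies), the expected weighted sum works out as
        #   0.05*1 + 0.1*2 + 0.1*3 + 0.2*4 + 0.3*5 + 0.2*6 + 0.1*7 + 0.1*8
        #     + 0.05*9 + 0.05*10 = 6.5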
result = cube.collapsed('foo', iris.analysis.SUM, weights=weights)
self.assertAlmostEqual(result.data, 6.5)
self.assertCML(result, ('analysis', 'sum_weighted_1d.cml'),
checksum=False)
def test_weighted_sum_2d(self):
# verify 2d weighted sum is correct
cube = tests.stock.simple_2d()
weights = np.array([.3, .4, .3])
weights = iris.util.broadcast_to_shape(weights, cube.shape, [0])
result = cube.collapsed('bar', iris.analysis.SUM, weights=weights)
self.assertArrayAlmostEqual(result.data, np.array([4., 5., 6., 7.]))
self.assertCML(result, ('analysis', 'sum_weighted_2d.cml'),
checksum=False)
def test_weighted_rms(self):
cube = tests.stock.simple_2d()
# modify cube data so that the results are nice numbers
cube.data = np.array([[4, 7, 10, 8],
[21, 30, 12, 24],
[14, 16, 20, 8]],
dtype=np.float64)
weights = np.array([[1, 4, 3, 2],
[6, 4.5, 1.5, 3],
[2, 1, 1.5, 0.5]],
dtype=np.float64)
expected_result = np.array([8.0, 24.0, 16.0])
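        # Worked check (editorial addition, not in the original test): for the
        # first row the weighted mean of squares is
        #   (1*4**2 + 4*7**2 + 3*10**2 + 2*8**2) / (1 + 4 + 3 + 2) = 640 / 10 = 64,
        # so the weighted RMS is sqrt(64) = 8.0, the first expected value.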
result = cube.collapsed('foo', iris.analysis.RMS, weights=weights)
self.assertArrayAlmostEqual(result.data, expected_result)
self.assertCML(result, ('analysis', 'rms_weighted_2d.cml'),
checksum=False)
@tests.skip_data
class TestRotatedPole(tests.GraphicsTest):
@tests.skip_plot
def _check_both_conversions(self, cube):
rlons, rlats = iris.analysis.cartography.get_xy_grids(cube)
rcs = cube.coord_system('RotatedGeogCS')
x, y = iris.analysis.cartography.unrotate_pole(
rlons, rlats, rcs.grid_north_pole_longitude,
rcs.grid_north_pole_latitude)
plt.scatter(x, y)
self.check_graphic()
plt.scatter(rlons, rlats)
self.check_graphic()
def test_all(self):
path = tests.get_data_path(('PP', 'ukVorog', 'ukv_orog_refonly.pp'))
master_cube = iris.load_cube(path)
# Check overall behaviour.
cube = master_cube[::10, ::10]
self._check_both_conversions(cube)
# Check numerical stability.
cube = master_cube[210:238, 424:450]
self._check_both_conversions(cube)
def test_unrotate_nd(self):
rlons = np.array([[350., 352.], [350., 352.]])
rlats = np.array([[-5., -0.], [-4., -1.]])
resx, resy = iris.analysis.cartography.unrotate_pole(rlons, rlats,
178.0, 38.0)
# Solutions derived by proj4 direct.
solx = np.array([[-16.42176094, -14.85892262],
[-16.71055023, -14.58434624]])
soly = np.array([[ 46.00724251, 51.29188893],
[ 46.98728486, 50.30706042]])
self.assertArrayAlmostEqual(resx, solx)
self.assertArrayAlmostEqual(resy, soly)
def test_unrotate_1d(self):
rlons = np.array([350., 352., 354., 356.])
rlats = np.array([-5., -0., 5., 10.])
resx, resy = iris.analysis.cartography.unrotate_pole(
rlons.flatten(), rlats.flatten(), 178.0, 38.0)
# Solutions derived by proj4 direct.
solx = np.array([-16.42176094, -14.85892262,
-12.88946157, -10.35078336])
soly = np.array([46.00724251, 51.29188893,
56.55031485, 61.77015703])
self.assertArrayAlmostEqual(resx, solx)
self.assertArrayAlmostEqual(resy, soly)
def test_rotate_nd(self):
rlons = np.array([[350., 351.], [352., 353.]])
rlats = np.array([[10., 15.], [20., 25.]])
resx, resy = iris.analysis.cartography.rotate_pole(rlons, rlats,
20., 80.)
# Solutions derived by proj4 direct.
solx = np.array([[148.69672569, 149.24727087],
[149.79067025, 150.31754368]])
soly = np.array([[18.60905789, 23.67749384],
[28.74419024, 33.8087963 ]])
self.assertArrayAlmostEqual(resx, solx)
self.assertArrayAlmostEqual(resy, soly)
def test_rotate_1d(self):
rlons = np.array([350., 351., 352., 353.])
rlats = np.array([10., 15., 20., 25.])
resx, resy = iris.analysis.cartography.rotate_pole(rlons.flatten(),
rlats.flatten(), 20., 80.)
# Solutions derived by proj4 direct.
solx = np.array([148.69672569, 149.24727087,
149.79067025, 150.31754368])
soly = np.array([18.60905789, 23.67749384,
28.74419024, 33.8087963 ])
self.assertArrayAlmostEqual(resx, solx)
self.assertArrayAlmostEqual(resy, soly)
@tests.skip_data
class TestAreaWeights(tests.IrisTest):
def test_area_weights(self):
small_cube = iris.tests.stock.simple_pp()
# Get offset, subsampled region: small enough to test against literals
small_cube = small_cube[10:, 35:]
small_cube = small_cube[::8, ::8]
small_cube = small_cube[:5, :4]
# pre-check non-data properties
self.assertCML(small_cube, ('analysis', 'areaweights_original.cml'),
checksum=False)
# check area-weights values
small_cube.coord('latitude').guess_bounds()
small_cube.coord('longitude').guess_bounds()
area_weights = iris.analysis.cartography.area_weights(small_cube)
expected_results = np.array(
[[3.11955916e+12, 3.11956058e+12, 3.11955916e+12, 3.11956058e+12],
[5.21950793e+12, 5.21951031e+12, 5.21950793e+12, 5.21951031e+12],
[6.68991432e+12, 6.68991737e+12, 6.68991432e+12, 6.68991737e+12],
[7.35341320e+12, 7.35341655e+12, 7.35341320e+12, 7.35341655e+12],
[7.12998265e+12, 7.12998589e+12, 7.12998265e+12, 7.12998589e+12]],
dtype=np.float64)
self.assertArrayAllClose(area_weights, expected_results, rtol=1e-8)
# Check there was no residual change
small_cube.coord('latitude').bounds = None
small_cube.coord('longitude').bounds = None
self.assertCML(small_cube, ('analysis', 'areaweights_original.cml'),
checksum=False)
class TestAreaWeightGeneration(tests.IrisTest):
def setUp(self):
self.cube = iris.tests.stock.realistic_4d()
def test_area_weights_std(self):
# weights for stock 4d data
weights = iris.analysis.cartography.area_weights(self.cube)
self.assertEqual(weights.shape, self.cube.shape)
def test_area_weights_order(self):
# weights for data with dimensions in a different order
order = [3, 2, 1, 0] # (lon, lat, level, time)
self.cube.transpose(order)
weights = iris.analysis.cartography.area_weights(self.cube)
self.assertEqual(weights.shape, self.cube.shape)
def test_area_weights_non_adjacent(self):
# weights for cube with non-adjacent latitude/longitude dimensions
order = [0, 3, 1, 2] # (time, lon, level, lat)
self.cube.transpose(order)
weights = iris.analysis.cartography.area_weights(self.cube)
self.assertEqual(weights.shape, self.cube.shape)
def test_area_weights_scalar_latitude(self):
# weights for cube with a scalar latitude dimension
cube = self.cube[:, :, 0, :]
weights = iris.analysis.cartography.area_weights(cube)
self.assertEqual(weights.shape, cube.shape)
def test_area_weights_scalar_longitude(self):
# weights for cube with a scalar longitude dimension
cube = self.cube[:, :, :, 0]
weights = iris.analysis.cartography.area_weights(cube)
self.assertEqual(weights.shape, cube.shape)
def test_area_weights_scalar(self):
# weights for cube with scalar latitude and longitude dimensions
cube = self.cube[:, :, 0, 0]
weights = iris.analysis.cartography.area_weights(cube)
self.assertEqual(weights.shape, cube.shape)
def test_area_weights_singleton_latitude(self):
# singleton (1-point) latitude dimension
cube = self.cube[:, :, 0:1, :]
weights = iris.analysis.cartography.area_weights(cube)
self.assertEqual(weights.shape, cube.shape)
def test_area_weights_singleton_longitude(self):
# singleton (1-point) longitude dimension
cube = self.cube[:, :, :, 0:1]
weights = iris.analysis.cartography.area_weights(cube)
self.assertEqual(weights.shape, cube.shape)
def test_area_weights_singletons(self):
# singleton (1-point) latitude and longitude dimensions
cube = self.cube[:, :, 0:1, 0:1]
weights = iris.analysis.cartography.area_weights(cube)
self.assertEqual(weights.shape, cube.shape)
def test_area_weights_normalized(self):
# normalized area weights must sum to one over lat/lon dimensions.
weights = iris.analysis.cartography.area_weights(self.cube,
normalize=True)
sumweights = weights.sum(axis=3).sum(axis=2) # sum over lon and lat
self.assertArrayAlmostEqual(sumweights, 1)
def test_area_weights_non_contiguous(self):
# Slice the cube so that we have non-contiguous longitude
# bounds.
ind = (0, 1, 2, -3, -2, -1)
cube = self.cube[..., ind]
weights = iris.analysis.cartography.area_weights(cube)
expected = iris.analysis.cartography.area_weights(self.cube)[..., ind]
self.assertArrayEqual(weights, expected)
def test_area_weights_no_lon_bounds(self):
self.cube.coord('grid_longitude').bounds = None
with self.assertRaises(ValueError):
iris.analysis.cartography.area_weights(self.cube)
def test_area_weights_no_lat_bounds(self):
self.cube.coord('grid_latitude').bounds = None
with self.assertRaises(ValueError):
iris.analysis.cartography.area_weights(self.cube)
@tests.skip_data
class TestLatitudeWeightGeneration(tests.IrisTest):
def setUp(self):
path = iris.tests.get_data_path(['NetCDF', 'rotated', 'xyt',
'small_rotPole_precipitation.nc'])
self.cube = iris.load_cube(path)
self.cube_dim_lat = self.cube.copy()
self.cube_dim_lat.remove_coord('latitude')
self.cube_dim_lat.remove_coord('longitude')
        # The 2d cubes are unrealistic: you would not want to weight by
# anything other than grid latitude in real-world scenarios. However,
# the technical details are suitable for testing purposes, providing
# a nice analog for a 2d latitude coordinate from a curvilinear grid.
self.cube_aux_lat = self.cube.copy()
self.cube_aux_lat.remove_coord('grid_latitude')
self.cube_aux_lat.remove_coord('grid_longitude')
self.lat1d = self.cube.coord('grid_latitude').points
self.lat2d = self.cube.coord('latitude').points
def test_cosine_latitude_weights_range(self):
        # check the range of returned values; this needs a cube that spans the
        # full latitude range
lat_coord = iris.coords.DimCoord(np.linspace(-90, 90, 73),
standard_name='latitude',
units=cf_units.Unit('degrees_north'))
cube = iris.cube.Cube(np.ones([73], dtype=np.float64),
long_name='test_cube', units='1')
cube.add_dim_coord(lat_coord, 0)
weights = iris.analysis.cartography.cosine_latitude_weights(cube)
self.assertTrue(weights.max() <= 1)
self.assertTrue(weights.min() >= 0)
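        # Editorial note (not in the original test): the weights are
        # cos(latitude), as the 0d/1d/2d tests below assert, so over the full
        # [-90, 90] degree range they must lie within [0, 1], which is exactly
        # what the two assertions above check.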
def test_cosine_latitude_weights_0d(self):
# 0d latitude dimension (scalar coordinate)
weights = iris.analysis.cartography.cosine_latitude_weights(
self.cube_dim_lat[:, 0, :])
self.assertEqual(weights.shape, self.cube_dim_lat[:, 0, :].shape)
self.assertAlmostEqual(weights[0, 0],
np.cos(np.deg2rad(self.lat1d[0])))
def test_cosine_latitude_weights_1d_singleton(self):
# singleton (1-point) 1d latitude coordinate (time, lat, lon)
cube = self.cube_dim_lat[:, 0:1, :]
weights = iris.analysis.cartography.cosine_latitude_weights(cube)
self.assertEqual(weights.shape, cube.shape)
self.assertAlmostEqual(weights[0, 0, 0],
np.cos(np.deg2rad(self.lat1d[0])))
def test_cosine_latitude_weights_1d(self):
# 1d latitude coordinate (time, lat, lon)
weights = iris.analysis.cartography.cosine_latitude_weights(
self.cube_dim_lat)
self.assertEqual(weights.shape, self.cube.shape)
self.assertArrayAlmostEqual(weights[0, :, 0],
np.cos(np.deg2rad(self.lat1d)))
def test_cosine_latitude_weights_1d_latitude_first(self):
# 1d latitude coordinate with latitude first (lat, time, lon)
order = [1, 0, 2] # (lat, time, lon)
self.cube_dim_lat.transpose(order)
weights = iris.analysis.cartography.cosine_latitude_weights(
self.cube_dim_lat)
self.assertEqual(weights.shape, self.cube_dim_lat.shape)
self.assertArrayAlmostEqual(weights[:, 0, 0],
np.cos(np.deg2rad(self.lat1d)))
def test_cosine_latitude_weights_1d_latitude_last(self):
# 1d latitude coordinate with latitude last (time, lon, lat)
order = [0, 2, 1] # (time, lon, lat)
self.cube_dim_lat.transpose(order)
weights = iris.analysis.cartography.cosine_latitude_weights(
self.cube_dim_lat)
self.assertEqual(weights.shape, self.cube_dim_lat.shape)
self.assertArrayAlmostEqual(weights[0, 0, :],
np.cos(np.deg2rad(self.lat1d)))
def test_cosine_latitude_weights_2d_singleton1(self):
# 2d latitude coordinate with first dimension singleton
cube = self.cube_aux_lat[:, 0:1, :]
weights = iris.analysis.cartography.cosine_latitude_weights(cube)
self.assertEqual(weights.shape, cube.shape)
self.assertArrayAlmostEqual(weights[0, :, :],
np.cos(np.deg2rad(self.lat2d[0:1, :])))
def test_cosine_latitude_weights_2d_singleton2(self):
# 2d latitude coordinate with second dimension singleton
cube = self.cube_aux_lat[:, :, 0:1]
weights = iris.analysis.cartography.cosine_latitude_weights(cube)
self.assertEqual(weights.shape, cube.shape)
self.assertArrayAlmostEqual(weights[0, :, :],
np.cos(np.deg2rad(self.lat2d[:, 0:1])))
def test_cosine_latitude_weights_2d_singleton3(self):
# 2d latitude coordinate with both dimensions singleton
cube = self.cube_aux_lat[:, 0:1, 0:1]
weights = iris.analysis.cartography.cosine_latitude_weights(cube)
self.assertEqual(weights.shape, cube.shape)
self.assertArrayAlmostEqual(weights[0, :, :],
np.cos(np.deg2rad(self.lat2d[0:1, 0:1])))
def test_cosine_latitude_weights_2d(self):
# 2d latitude coordinate (time, lat, lon)
weights = iris.analysis.cartography.cosine_latitude_weights(
self.cube_aux_lat)
self.assertEqual(weights.shape, self.cube_aux_lat.shape)
self.assertArrayAlmostEqual(weights[0, :, :],
np.cos(np.deg2rad(self.lat2d)))
def test_cosine_latitude_weights_2d_latitude_first(self):
# 2d latitude coordinate with latitude first (lat, time, lon)
order = [1, 0, 2] # (lat, time, lon)
self.cube_aux_lat.transpose(order)
weights = iris.analysis.cartography.cosine_latitude_weights(
self.cube_aux_lat)
self.assertEqual(weights.shape, self.cube_aux_lat.shape)
self.assertArrayAlmostEqual(weights[:, 0, :],
np.cos(np.deg2rad(self.lat2d)))
def test_cosine_latitude_weights_2d_latitude_last(self):
# 2d latitude coordinate with latitude last (time, lon, lat)
order = [0, 2, 1] # (time, lon, lat)
self.cube_aux_lat.transpose(order)
weights = iris.analysis.cartography.cosine_latitude_weights(
self.cube_aux_lat)
self.assertEqual(weights.shape, self.cube_aux_lat.shape)
self.assertArrayAlmostEqual(weights[0, :, :],
np.cos(np.deg2rad(self.lat2d.T)))
def test_cosine_latitude_weights_no_latitude(self):
# no coordinate identified as latitude
self.cube_dim_lat.remove_coord('grid_latitude')
with self.assertRaises(ValueError):
weights = iris.analysis.cartography.cosine_latitude_weights(
self.cube_dim_lat)
def test_cosine_latitude_weights_multiple_latitude(self):
# two coordinates identified as latitude
with self.assertRaises(ValueError):
weights = iris.analysis.cartography.cosine_latitude_weights(
self.cube)
class TestRollingWindow(tests.IrisTest):
def setUp(self):
# XXX Comes from test_aggregated_by
cube = iris.cube.Cube(np.array([[6, 10, 12, 18], [8, 12, 14, 20], [18, 12, 10, 6]]), long_name='temperature', units='kelvin')
cube.add_dim_coord(iris.coords.DimCoord(np.array([0, 5, 10], dtype=np.float64), 'latitude', units='degrees'), 0)
cube.add_dim_coord(iris.coords.DimCoord(np.array([0, 2, 4, 6], dtype=np.float64), 'longitude', units='degrees'), 1)
self.cube = cube
def test_non_mean_operator(self):
res_cube = self.cube.rolling_window('longitude', iris.analysis.MAX, window=2)
expected_result = np.array([[10, 12, 18],
[12, 14, 20],
[18, 12, 10]], dtype=np.float64)
self.assertArrayEqual(expected_result, res_cube.data)
def test_longitude_simple(self):
res_cube = self.cube.rolling_window('longitude', iris.analysis.MEAN, window=2)
expected_result = np.array([[ 8., 11., 15.],
[ 10., 13., 17.],
[ 15., 11., 8.]], dtype=np.float64)
self.assertArrayEqual(expected_result, res_cube.data)
self.assertCML(res_cube, ('analysis', 'rolling_window', 'simple_longitude.cml'))
self.assertRaises(ValueError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=0)
def test_longitude_masked(self):
self.cube.data = ma.array(self.cube.data,
mask=[[True, True, True, True],
[True, False, True, True],
[False, False, False, False]])
res_cube = self.cube.rolling_window('longitude',
iris.analysis.MEAN,
window=2)
expected_result = np.ma.array([[-99., -99., -99.],
[12., 12., -99.],
[15., 11., 8.]],
mask=[[True, True, True],
[False, False, True],
[False, False, False]],
dtype=np.float64)
self.assertMaskedArrayEqual(expected_result, res_cube.data)
def test_longitude_circular(self):
cube = self.cube
cube.coord('longitude').circular = True
self.assertRaises(iris.exceptions.NotYetImplementedError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=0)
def test_different_length_windows(self):
res_cube = self.cube.rolling_window('longitude', iris.analysis.MEAN, window=4)
expected_result = np.array([[ 11.5],
[ 13.5],
[ 11.5]], dtype=np.float64)
self.assertArrayEqual(expected_result, res_cube.data)
self.assertCML(res_cube, ('analysis', 'rolling_window', 'size_4_longitude.cml'))
# Window too long:
self.assertRaises(ValueError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=6)
# Window too small:
self.assertRaises(ValueError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=0)
def test_bad_coordinate(self):
self.assertRaises(KeyError, self.cube.rolling_window, 'wibble', iris.analysis.MEAN, window=0)
def test_latitude_simple(self):
res_cube = self.cube.rolling_window('latitude', iris.analysis.MEAN, window=2)
expected_result = np.array([[ 7., 11., 13., 19.],
[ 13., 12., 12., 13.]], dtype=np.float64)
self.assertArrayEqual(expected_result, res_cube.data)
self.assertCML(res_cube, ('analysis', 'rolling_window', 'simple_latitude.cml'))
def test_mean_with_weights_consistency(self):
        # a mean with equal weights should be the same as an unweighted mean
wts = np.array([0.5, 0.5], dtype=np.float64)
res_cube = self.cube.rolling_window('longitude',
iris.analysis.MEAN,
window=2,
weights=wts)
expected_result = self.cube.rolling_window('longitude',
iris.analysis.MEAN,
window=2)
self.assertArrayEqual(expected_result.data, res_cube.data)
def test_mean_with_weights(self):
# rolling window mean with weights
wts = np.array([0.1, 0.6, 0.3], dtype=np.float64)
res_cube = self.cube.rolling_window('longitude',
iris.analysis.MEAN,
window=3,
weights=wts)
expected_result = np.array([[10.2, 13.6],
[12.2, 15.6],
[12.0, 9.0]], dtype=np.float64)
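        # Worked check (editorial addition, not in the original test): the
        # first window of the first row is [6, 10, 12], so its weighted mean is
        #   0.1*6 + 0.6*10 + 0.3*12 = 0.6 + 6.0 + 3.6 = 10.2,
        # the first expected value above.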
# use almost equal to compare floats
self.assertArrayAlmostEqual(expected_result, res_cube.data)
if __name__ == "__main__":
tests.main()
|
e-kolpakov/study-model
|
refs/heads/master
|
tests/agents/student/behaviors/test_resource_choice.py
|
1
|
from unittest import mock
from unittest.mock import PropertyMock
import pytest
from model.agents.resource import Resource
from model.agents.student.behaviors.resource_choice import RandomResourceChoiceBehavior, RationalResourceChoiceBehavior
from model.knowledge_representation import Fact, Curriculum
from model.knowledge_representation.lesson_type import Lecture
__author__ = 'e.kolpakov'
@pytest.fixture
def student():
return mock.Mock(spec=student)
@pytest.fixture
def curriculum():
return mock.Mock(spec=Curriculum)
def _make_lecture(code, fact_codes):
return Lecture(code, facts=(Fact(code) for code in fact_codes))
class TestRandomResourceChoiceBehavior:
@pytest.fixture
def behavior(self):
return RandomResourceChoiceBehavior()
def test_choose_from_given_resources(self, student, curriculum, behavior):
resources = (Resource('r1', [], None), Resource('r2', [], None))
chosen = behavior.choose_resource(student, curriculum, resources)
assert chosen in resources
class TestRationalResourceChoiceBehavior:
@pytest.fixture
def behavior(self):
return RationalResourceChoiceBehavior()
@pytest.mark.parametrize("lesson1, lesson2, exp_res_id", [
(['A', 'B', 'C'], ['A', 'B'], 'r1'),
(['A', 'C'], ['A', 'B', 'C'], 'r2'),
(['A', 'D'], ['B'], 'r1'),
(['C'], ['A', 'B'], 'r2'),
])
def test_student_zero_knowledge(self, student, env_mock, curriculum, behavior, lesson1, lesson2, exp_res_id):
resources = (
Resource('r1', [_make_lecture("l1", lesson1)], agent_id='r1'),
Resource('r2', [_make_lecture("l2", lesson2)], agent_id='r2')
)
type(env_mock).now = PropertyMock(return_value=0)
for resource in resources:
resource.env = env_mock
student.knowledge = set()
chosen = behavior.choose_resource(student, curriculum, resources)
assert chosen.name == exp_res_id
@pytest.mark.parametrize("student_know, lesson1, lesson2, exp_res_id", [
(['A'], ['A', 'B'], ['A', 'B', 'C'], 'r2'),
(['A', 'B'], ['A', 'C'], ['B'], 'r1'),
(['A', 'C'], ['A', 'C'], ['B'], 'r2'),
])
def test_nonzero_student_knowledge(
self, student, env_mock, curriculum, behavior, student_know, lesson1, lesson2, exp_res_id
):
resources = (
Resource('r1', [_make_lecture("l1", lesson1)], agent_id='r1'),
Resource('r2', [_make_lecture("l2", lesson2)], agent_id='r2')
)
type(env_mock).now = PropertyMock(return_value=0)
for resource in resources:
resource.env = env_mock
student.knowledge = set([Fact(comp) for comp in student_know])
chosen = behavior.choose_resource(student, curriculum, resources)
assert chosen.name == exp_res_id
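# Editorial note (not part of the original tests): the parametrisations above
# assume RationalResourceChoiceBehavior picks the resource whose lectures cover
# the most facts not already in student.knowledge, whereas
# RandomResourceChoiceBehavior is only required to return a member of the
# given resources tuple.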
|
danithaca/mxnet
|
refs/heads/master
|
python/mxnet/profiler.py
|
15
|
# coding: utf-8
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""Profiler setting methods."""
from __future__ import absolute_import
import ctypes
from .base import _LIB, check_call, c_str
def profiler_set_config(mode='symbolic', filename='profile.json'):
"""Set up the configure of profiler.
Parameters
----------
mode : string, optional
Indicates whether to enable the profiler, can
be 'symbolic', or 'all'. Defaults to `symbolic`.
filename : string, optional
The name of output trace file. Defaults to 'profile.json'.
"""
mode2int = {'symbolic': 0, 'all': 1}
check_call(_LIB.MXSetProfilerConfig(
ctypes.c_int(mode2int[mode]),
c_str(filename)))
def profiler_set_state(state='stop'):
"""Set up the profiler state to record operator.
Parameters
----------
state : string, optional
Indicates whether to run the profiler, can
be 'stop' or 'run'. Default is `stop`.
"""
state2int = {'stop': 0, 'run': 1}
check_call(_LIB.MXSetProfilerState(ctypes.c_int(state2int[state])))
def dump_profile():
"""Dump profile and stop profiler. Use this to save profile
in advance in case your program cannot exit normally."""
check_call(_LIB.MXDumpProfile())
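# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original
# module).  From user code the three calls above are typically combined as
# configure -> run -> workload -> stop -> dump; the matrix-multiply workload
# below is an assumption chosen purely for demonstration.
#
#     import mxnet as mx
#     from mxnet import profiler
#
#     profiler.profiler_set_config(mode='all', filename='profile_matmul.json')
#     profiler.profiler_set_state('run')    # start recording operator events
#     a = mx.nd.ones((1024, 1024))
#     b = mx.nd.ones((1024, 1024))
#     mx.nd.dot(a, b).wait_to_read()        # force execution before stopping
#     profiler.profiler_set_state('stop')   # stop recording
#     profiler.dump_profile()               # write profile_matmul.json to disk
# ---------------------------------------------------------------------------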
|
pyblish/pyblish-win
|
refs/heads/master
|
lib/Python27/Lib/lib2to3/__main__.py
|
466
|
import sys
from .main import main
sys.exit(main("lib2to3.fixes"))
|
360youlun/django-cms
|
refs/heads/develop
|
cms/migrations/0018_site_permissions.py
|
525
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
dbrgn/pygments-mirror
|
refs/heads/master
|
tests/test_basic_api.py
|
7
|
# -*- coding: utf-8 -*-
"""
Pygments basic API tests
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import random
import unittest
from pygments import lexers, formatters, filters, format
from pygments.token import _TokenType, Text
from pygments.lexer import RegexLexer
from pygments.formatters.img import FontNotFound
from pygments.util import BytesIO, StringIO, bytes, b
import support
TESTFILE, TESTDIR = support.location(__file__)
test_content = [chr(i) for i in xrange(33, 128)] * 5
random.shuffle(test_content)
test_content = ''.join(test_content) + '\n'
def test_lexer_import_all():
# instantiate every lexer, to see if the token type defs are correct
for x in lexers.LEXERS.keys():
c = getattr(lexers, x)()
def test_lexer_classes():
# test that every lexer class has the correct public API
def verify(cls):
assert type(cls.name) is str
for attr in 'aliases', 'filenames', 'alias_filenames', 'mimetypes':
assert hasattr(cls, attr)
assert type(getattr(cls, attr)) is list, \
"%s: %s attribute wrong" % (cls, attr)
result = cls.analyse_text("abc")
assert isinstance(result, float) and 0.0 <= result <= 1.0
result = cls.analyse_text(".abc")
assert isinstance(result, float) and 0.0 <= result <= 1.0
inst = cls(opt1="val1", opt2="val2")
if issubclass(cls, RegexLexer):
if not hasattr(cls, '_tokens'):
# if there's no "_tokens", the lexer has to be one with
# multiple tokendef variants
assert cls.token_variants
for variant in cls.tokens:
assert 'root' in cls.tokens[variant]
else:
assert 'root' in cls._tokens, \
'%s has no root state' % cls
if cls.name in ['XQuery', 'Opa']: # XXX temporary
return
tokens = list(inst.get_tokens(test_content))
txt = ""
for token in tokens:
assert isinstance(token, tuple)
assert isinstance(token[0], _TokenType)
if isinstance(token[1], str):
print repr(token[1])
assert isinstance(token[1], unicode)
txt += token[1]
assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
(cls.name, test_content, txt)
for lexer in lexers._iter_lexerclasses():
yield verify, lexer
def test_lexer_options():
# test that the basic options work
def ensure(tokens, output):
concatenated = ''.join(token[1] for token in tokens)
assert concatenated == output, \
'%s: %r != %r' % (lexer, concatenated, output)
def verify(cls):
inst = cls(stripnl=False)
ensure(inst.get_tokens('a\nb'), 'a\nb\n')
ensure(inst.get_tokens('\n\n\n'), '\n\n\n')
inst = cls(stripall=True)
ensure(inst.get_tokens(' \n b\n\n\n'), 'b\n')
# some lexers require full lines in input
if cls.__name__ not in (
'PythonConsoleLexer', 'RConsoleLexer', 'RubyConsoleLexer',
'SqliteConsoleLexer', 'MatlabSessionLexer', 'ErlangShellLexer',
'BashSessionLexer', 'LiterateHaskellLexer', 'LiterateAgdaLexer',
'PostgresConsoleLexer', 'ElixirConsoleLexer', 'JuliaConsoleLexer',
'RobotFrameworkLexer', 'DylanConsoleLexer', 'ShellSessionLexer'):
inst = cls(ensurenl=False)
ensure(inst.get_tokens('a\nb'), 'a\nb')
inst = cls(ensurenl=False, stripall=True)
ensure(inst.get_tokens('a\nb\n\n'), 'a\nb')
for lexer in lexers._iter_lexerclasses():
if lexer.__name__ == 'RawTokenLexer':
# this one is special
continue
yield verify, lexer
def test_get_lexers():
# test that the lexers functions work
def verify(func, args):
x = func(opt='val', *args)
assert isinstance(x, lexers.PythonLexer)
assert x.options["opt"] == "val"
for func, args in [(lexers.get_lexer_by_name, ("python",)),
(lexers.get_lexer_for_filename, ("test.py",)),
(lexers.get_lexer_for_mimetype, ("text/x-python",)),
(lexers.guess_lexer, ("#!/usr/bin/python -O\nprint",)),
(lexers.guess_lexer_for_filename, ("a.py", "<%= @foo %>"))
]:
yield verify, func, args
for cls, (_, lname, aliases, _, mimetypes) in lexers.LEXERS.iteritems():
assert cls == lexers.find_lexer_class(lname).__name__
for alias in aliases:
assert cls == lexers.get_lexer_by_name(alias).__class__.__name__
for mimetype in mimetypes:
assert cls == lexers.get_lexer_for_mimetype(mimetype).__class__.__name__
def test_formatter_public_api():
ts = list(lexers.PythonLexer().get_tokens("def f(): pass"))
out = StringIO()
# test that every formatter class has the correct public API
def verify(formatter, info):
assert len(info) == 4
assert info[0], "missing formatter name"
assert info[1], "missing formatter aliases"
assert info[3], "missing formatter docstring"
if formatter.name == 'Raw tokens':
# will not work with Unicode output file
return
try:
inst = formatter(opt1="val1")
except (ImportError, FontNotFound):
return
try:
inst.get_style_defs()
except NotImplementedError:
# may be raised by formatters for which it doesn't make sense
pass
inst.format(ts, out)
for formatter, info in formatters.FORMATTERS.iteritems():
yield verify, formatter, info
def test_formatter_encodings():
from pygments.formatters import HtmlFormatter
# unicode output
fmt = HtmlFormatter()
tokens = [(Text, u"ä")]
out = format(tokens, fmt)
assert type(out) is unicode
assert u"ä" in out
# encoding option
fmt = HtmlFormatter(encoding="latin1")
tokens = [(Text, u"ä")]
assert u"ä".encode("latin1") in format(tokens, fmt)
# encoding and outencoding option
fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
tokens = [(Text, u"ä")]
assert u"ä".encode("utf8") in format(tokens, fmt)
def test_formatter_unicode_handling():
# test that the formatter supports encoding and Unicode
tokens = list(lexers.PythonLexer(encoding='utf-8').
get_tokens("def f(): 'ä'"))
def verify(formatter):
try:
inst = formatter(encoding=None)
except (ImportError, FontNotFound):
# some dependency or font not installed
return
if formatter.name != 'Raw tokens':
out = format(tokens, inst)
if formatter.unicodeoutput:
assert type(out) is unicode
inst = formatter(encoding='utf-8')
out = format(tokens, inst)
assert type(out) is bytes, '%s: %r' % (formatter, out)
# Cannot test for encoding, since formatters may have to escape
# non-ASCII characters.
else:
inst = formatter()
out = format(tokens, inst)
assert type(out) is bytes, '%s: %r' % (formatter, out)
for formatter, info in formatters.FORMATTERS.iteritems():
yield verify, formatter
def test_get_formatters():
# test that the formatters functions work
x = formatters.get_formatter_by_name("html", opt="val")
assert isinstance(x, formatters.HtmlFormatter)
assert x.options["opt"] == "val"
x = formatters.get_formatter_for_filename("a.html", opt="val")
assert isinstance(x, formatters.HtmlFormatter)
assert x.options["opt"] == "val"
def test_styles():
# minimal style test
from pygments.formatters import HtmlFormatter
fmt = HtmlFormatter(style="pastie")
class FiltersTest(unittest.TestCase):
def test_basic(self):
filter_args = {
'whitespace': {'spaces': True, 'tabs': True, 'newlines': True},
'highlight': {'names': ['isinstance', 'lexers', 'x']},
}
for x in filters.FILTERS.keys():
lx = lexers.PythonLexer()
lx.add_filter(x, **filter_args.get(x, {}))
fp = open(TESTFILE, 'rb')
try:
text = fp.read().decode('utf-8')
finally:
fp.close()
tokens = list(lx.get_tokens(text))
roundtext = ''.join([t[1] for t in tokens])
if x not in ('whitespace', 'keywordcase'):
# these filters change the text
self.assertEqual(roundtext, text,
"lexer roundtrip with %s filter failed" % x)
def test_raiseonerror(self):
lx = lexers.PythonLexer()
lx.add_filter('raiseonerror', excclass=RuntimeError)
self.assertRaises(RuntimeError, list, lx.get_tokens('$'))
def test_whitespace(self):
lx = lexers.PythonLexer()
lx.add_filter('whitespace', spaces='%')
fp = open(TESTFILE, 'rb')
try:
text = fp.read().decode('utf-8')
finally:
fp.close()
lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
self.assertFalse(' ' in lxtext)
def test_keywordcase(self):
lx = lexers.PythonLexer()
lx.add_filter('keywordcase', case='capitalize')
fp = open(TESTFILE, 'rb')
try:
text = fp.read().decode('utf-8')
finally:
fp.close()
lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
self.assertTrue('Def' in lxtext and 'Class' in lxtext)
def test_codetag(self):
lx = lexers.PythonLexer()
lx.add_filter('codetagify')
text = u'# BUG: text'
tokens = list(lx.get_tokens(text))
self.assertEqual('# ', tokens[0][1])
self.assertEqual('BUG', tokens[1][1])
def test_codetag_boundary(self):
# ticket #368
lx = lexers.PythonLexer()
lx.add_filter('codetagify')
text = u'# DEBUG: text'
tokens = list(lx.get_tokens(text))
self.assertEqual('# DEBUG: text', tokens[0][1])
|
zaffra/Inquire
|
refs/heads/master
|
GAE/django/db/backends/creation.py
|
8
|
import sys
import time
from django.conf import settings
from django.core.management import call_command
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
def __init__(self, connection):
self.connection = connection
def _digest(self, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
return '%x' % (abs(hash(args)) % 4294967296L) # 2**32
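    # Editorial note (not part of the original source): _digest() is used
    # further down to keep generated constraint and index names short, e.g.
    #   r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
    # which yields identifiers ending in a short hexadecimal digest of the two
    # table names, later clipped to max_name_length() via truncate_name().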
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
from django.db import models
opts = model._meta
if not opts.managed or opts.proxy:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
if not f.null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
if pending:
pr = pending_references.setdefault(f.rel.to, []).append((model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"Return the SQL snippet defining the foreign key reference for a field"
qn = self.connection.ops.quote_name
if field.rel.to in known_models:
output = [style.SQL_KEYWORD('REFERENCES') + ' ' + \
style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' + \
style.SQL_FIELD(qn(field.rel.to._meta.get_field(field.rel.field_name).column)) + ')' +
self.connection.ops.deferrable_sql()
]
pending = False
else:
# We haven't yet created the table to which this field
# is related, so save it for later.
output = []
pending = True
return output, pending
def sql_for_pending_references(self, model, style, pending_references):
"Returns any ALTER TABLE statements to add constraints after the fact."
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
qn = self.connection.ops.quote_name
final_output = []
opts = model._meta
if model in pending_references:
for rel_class, f in pending_references[model]:
rel_opts = rel_class._meta
r_table = rel_opts.db_table
r_col = f.column
table = opts.db_table
col = opts.get_field(f.rel.field_name).column
# For MySQL, r_name must be unique in the first 64 characters.
# So we are careful with character usage here.
r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
final_output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % \
(qn(r_table), qn(truncate_name(r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
del pending_references[model]
return final_output
def sql_for_many_to_many(self, model, style):
"Return the CREATE TABLE statments for all the many-to-many tables defined on a model"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
output = []
for f in model._meta.local_many_to_many:
if model._meta.managed or f.rel.to._meta.managed:
output.extend(self.sql_for_many_to_many_field(model, f, style))
return output
def sql_for_many_to_many_field(self, model, f, style):
"Return the CREATE TABLE statements for a single m2m field"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
from django.db import models
from django.db.backends.util import truncate_name
output = []
if f.auto_created:
opts = model._meta
qn = self.connection.ops.quote_name
tablespace = f.db_tablespace or opts.db_tablespace
if tablespace:
sql = self.connection.ops.tablespace_sql(tablespace, inline=True)
if sql:
tablespace_sql = ' ' + sql
else:
tablespace_sql = ''
else:
tablespace_sql = ''
table_output = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + \
style.SQL_TABLE(qn(f.m2m_db_table())) + ' (']
table_output.append(' %s %s %s%s,' %
(style.SQL_FIELD(qn('id')),
style.SQL_COLTYPE(models.AutoField(primary_key=True).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL PRIMARY KEY'),
tablespace_sql))
deferred = []
inline_output, deferred = self.sql_for_inline_many_to_many_references(model, f, style)
table_output.extend(inline_output)
table_output.append(' %s (%s, %s)%s' %
(style.SQL_KEYWORD('UNIQUE'),
style.SQL_FIELD(qn(f.m2m_column_name())),
style.SQL_FIELD(qn(f.m2m_reverse_name())),
tablespace_sql))
table_output.append(')')
if opts.db_tablespace:
# f.db_tablespace is only for indices, so ignore its value here.
table_output.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
table_output.append(';')
output.append('\n'.join(table_output))
for r_table, r_col, table, col in deferred:
r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
(qn(r_table),
qn(truncate_name(r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
# Add any extra SQL needed to support auto-incrementing PKs
autoinc_sql = self.connection.ops.autoinc_sql(f.m2m_db_table(), 'id')
if autoinc_sql:
for stmt in autoinc_sql:
output.append(stmt)
return output
def sql_for_inline_many_to_many_references(self, model, field, style):
"Create the references to other tables required by a many-to-many table"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s %s (%s)%s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL REFERENCES'),
style.SQL_TABLE(qn(opts.db_table)),
style.SQL_FIELD(qn(opts.pk.column)),
self.connection.ops.deferrable_sql()),
' %s %s %s %s (%s)%s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL REFERENCES'),
style.SQL_TABLE(qn(field.rel.to._meta.db_table)),
style.SQL_FIELD(qn(field.rel.to._meta.pk.column)),
self.connection.ops.deferrable_sql())
]
deferred = []
return table_output, deferred
def sql_indexes_for_model(self, model, style):
"Returns the CREATE INDEX SQL statements for a single model"
if not model._meta.managed or model._meta.proxy:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_indexes_for_field(model, f, style))
return output
def sql_indexes_for_field(self, model, f, style):
"Return the CREATE INDEX SQL statements for a single model field"
from django.db.backends.util import truncate_name
if f.db_index and not f.unique:
qn = self.connection.ops.quote_name
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
sql = self.connection.ops.tablespace_sql(tablespace)
if sql:
tablespace_sql = ' ' + sql
else:
tablespace_sql = ''
else:
tablespace_sql = ''
i_name = '%s_%s' % (model._meta.db_table, self._digest(f.column))
output = [style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(i_name, self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(model._meta.db_table)) + ' ' +
"(%s)" % style.SQL_FIELD(qn(f.column)) +
"%s;" % tablespace_sql]
else:
output = []
return output
def sql_destroy_model(self, model, references_to_delete, style):
"Return the DROP TABLE and restraint dropping statements for a single model"
if not model._meta.managed or model._meta.proxy:
return []
# Drop the table now
qn = self.connection.ops.quote_name
output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(model._meta.db_table)))]
if model in references_to_delete:
output.extend(self.sql_remove_table_constraints(model, references_to_delete, style))
if model._meta.has_auto_field:
ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
if ds:
output.append(ds)
return output
def sql_remove_table_constraints(self, model, references_to_delete, style):
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
output = []
qn = self.connection.ops.quote_name
for rel_class, f in references_to_delete[model]:
table = rel_class._meta.db_table
col = f.column
r_table = model._meta.db_table
r_col = model._meta.get_field(f.rel.field_name).column
r_name = '%s_refs_%s_%s' % (col, r_col, self._digest(table, r_table))
output.append('%s %s %s %s;' % \
(style.SQL_KEYWORD('ALTER TABLE'),
style.SQL_TABLE(qn(table)),
style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
style.SQL_FIELD(qn(truncate_name(r_name, self.connection.ops.max_name_length())))))
del references_to_delete[model]
return output
def sql_destroy_many_to_many(self, model, f, style):
"Returns the DROP TABLE statements for a single m2m field"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
qn = self.connection.ops.quote_name
output = []
if f.auto_created:
output.append("%s %s;" % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(f.m2m_db_table()))))
ds = self.connection.ops.drop_sequence_sql("%s_%s" % (model._meta.db_table, f.column))
if ds:
output.append(ds)
return output
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
if verbosity >= 1:
print "Creating test database '%s'..." % self.connection.alias
test_database_name = self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
# Confirm the feature set of the test database
self.connection.features.confirm()
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb', verbosity=max(verbosity - 1, 0), interactive=False, database=self.connection.alias)
if settings.CACHE_BACKEND.startswith('db://'):
from django.core.cache import parse_backend_uri, cache
from django.db import router
if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
_, cache_name, _ = parse_backend_uri(settings.CACHE_BACKEND)
call_command('createcachetable', cache_name, database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = self.connection.cursor()
return test_database_name
def _create_test_db(self, verbosity, autoclobber):
"Internal implementation - creates the test db tables."
suffix = self.sql_table_creation_suffix()
if self.connection.settings_dict['TEST_NAME']:
test_database_name = self.connection.settings_dict['TEST_NAME']
else:
test_database_name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
qn = self.connection.ops.quote_name
# Create the test database and connect to it. We need to autocommit
# if the database supports it because PostgreSQL doesn't allow
# CREATE/DROP DATABASE statements within transactions.
cursor = self.connection.cursor()
self.set_autocommit()
try:
cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception, e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database '%s'..." % self.connection.alias
cursor.execute("DROP DATABASE %s" % qn(test_database_name))
cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception, e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
return test_database_name
def destroy_test_db(self, old_database_name, verbosity=1):
"""
Destroy a test database, restoring the connection's settings to the
original database name that the test database replaced.
"""
if verbosity >= 1:
print "Destroying test database '%s'..." % self.connection.alias
self.connection.close()
test_database_name = self.connection.settings_dict['NAME']
self.connection.settings_dict['NAME'] = old_database_name
self._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
"Internal implementation - remove the test db tables."
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
cursor = self.connection.cursor()
self.set_autocommit()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
cursor.execute("DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name))
self.connection.close()
def set_autocommit(self):
"Make sure a connection is in autocommit mode."
if hasattr(self.connection.connection, "autocommit"):
if callable(self.connection.connection.autocommit):
self.connection.connection.autocommit(True)
else:
self.connection.connection.autocommit = True
elif hasattr(self.connection.connection, "set_isolation_level"):
self.connection.connection.set_isolation_level(0)
def sql_table_creation_suffix(self):
"SQL to append to the end of the test table creation statements"
return ''
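# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# A rough outline of the test-database lifecycle driven by the creation class above,
# assuming `connection` is a configured Django DatabaseWrapper whose `creation`
# attribute is an instance of this class.
def _example_test_db_lifecycle(connection, verbosity=1):
    creation = connection.creation
    old_name = connection.settings_dict['NAME']
    # create_test_db() swaps settings_dict['NAME'] to the test database and runs
    # syncdb against it (see create_test_db above).
    test_name = creation.create_test_db(verbosity=verbosity, autoclobber=False)
    try:
        pass  # run the test suite against `test_name` here
    finally:
        # destroy_test_db() drops the test database and restores the old name.
        creation.destroy_test_db(old_name, verbosity=verbosity)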
|
fbradyirl/home-assistant
|
refs/heads/dev
|
homeassistant/components/microsoft_face_detect/__init__.py
|
36
|
"""The microsoft_face_detect component."""
|
mandeepdhami/neutron
|
refs/heads/master
|
neutron/tests/unit/agent/l3/test_dvr_local_router.py
|
3
|
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from oslo_log import log
from oslo_utils import uuidutils
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import config as l3_config
from neutron.agent.l3 import dvr_local_router as dvr_router
from neutron.agent.l3 import ha
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import router_info
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.tests import base
from neutron.tests.common import l3_test_common
_uuid = uuidutils.generate_uuid
FIP_PRI = 32768
HOSTNAME = 'myhost'
class TestDvrRouterOperations(base.BaseTestCase):
def setUp(self):
super(TestDvrRouterOperations, self).setUp()
mock.patch('eventlet.spawn').start()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
log.register_options(self.conf)
self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
self.conf.register_opts(l3_config.OPTS)
self.conf.register_opts(ha.OPTS)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_use_namespaces_opts_helper(self.conf)
agent_config.register_process_monitor_opts(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.register_opts(external_process.OPTS)
self.conf.set_override('router_id', 'fake_id')
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.set_override('send_arp_for_ha', 1)
self.conf.set_override('state_path', '')
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.ensure_dir = mock.patch('neutron.agent.linux.utils'
'.ensure_dir').start()
mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
'.get_full_config_file_path').start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.utils_replace_file_p = mock.patch(
'neutron.agent.linux.utils.replace_file')
self.utils_replace_file = self.utils_replace_file_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.process_monitor = mock.patch(
'neutron.agent.linux.external_process.ProcessMonitor').start()
self.send_adv_notif_p = mock.patch(
'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
self.send_adv_notif = self.send_adv_notif_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start()
self.mock_rule = mock.MagicMock()
ip_rule.return_value = self.mock_rule
ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
self.mock_ip_dev = mock.MagicMock()
ip_dev.return_value = self.mock_ip_dev
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3.agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.MagicMock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
subnet_id_1 = _uuid()
subnet_id_2 = _uuid()
self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
'gateway_ip': '152.2.0.1',
'id': subnet_id_1}],
'network_id': _uuid(),
'device_owner': 'network:router_centralized_snat',
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_1,
'ip_address': '152.2.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()},
{'subnets': [{'cidr': '152.10.0.0/16',
'gateway_ip': '152.10.0.1',
'id': subnet_id_2}],
'network_id': _uuid(),
'device_owner': 'network:router_centralized_snat',
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_2,
'ip_address': '152.10.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()}]
self.ri_kwargs = {'agent_conf': self.conf,
'interface_driver': self.mock_driver}
def _create_router(self, router=None, **kwargs):
agent_conf = mock.Mock()
self.router_id = _uuid()
if not router:
router = mock.MagicMock()
return dvr_router.DvrLocalRouter(mock.sentinel.agent,
mock.sentinel.myhost,
self.router_id,
router,
agent_conf,
mock.sentinel.interface_driver,
**kwargs)
def test_get_floating_ips_dvr(self):
router = mock.MagicMock()
router.get.return_value = [{'host': mock.sentinel.myhost},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
fips = ri.get_floating_ips()
self.assertEqual([{'host': mock.sentinel.myhost}], fips)
@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'IPRule')
def test_floating_ip_added_dist(self, mIPRule, mIPDevice, mock_adv_notif):
router = mock.MagicMock()
ri = self._create_router(router)
ext_net_id = _uuid()
subnet_id = _uuid()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': ext_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
fip = {'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': ext_net_id,
'port_id': _uuid()}
ri.fip_ns = mock.Mock()
ri.fip_ns.agent_gateway_port = agent_gw_port
ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI
ri.rtr_fip_subnet = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.dist_fip_count = 0
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
ri.floating_ip_added_dist(fip, ip_cidr)
mIPRule().rule.add.assert_called_with(ip='192.168.0.1',
table=16,
priority=FIP_PRI)
self.assertEqual(1, ri.dist_fip_count)
# TODO(mrsmith): add more asserts
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'IPRule')
def test_floating_ip_removed_dist(self, mIPRule, mIPDevice, mIPWrapper):
router = mock.MagicMock()
ri = self._create_router(router)
subnet_id = _uuid()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
fip_cidr = '11.22.33.44/24'
ri.dist_fip_count = 2
ri.fip_ns = mock.Mock()
ri.fip_ns.get_name.return_value = 'fip_ns_name'
ri.floating_ips_dict['11.22.33.44'] = FIP_PRI
ri.fip_2_rtr = '11.22.33.42'
ri.rtr_2_fip = '11.22.33.40'
ri.fip_ns.agent_gateway_port = agent_gw_port
s = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.rtr_fip_subnet = s
ri.floating_ip_removed_dist(fip_cidr)
mIPRule().rule.delete.assert_called_with(
ip=str(netaddr.IPNetwork(fip_cidr).ip), table=16, priority=FIP_PRI)
mIPDevice().route.delete_route.assert_called_with(fip_cidr, str(s.ip))
self.assertFalse(ri.fip_ns.unsubscribe.called)
ri.dist_fip_count = 1
ri.rtr_fip_subnet = lla.LinkLocalAddressPair('15.1.2.3/32')
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
fip_ns = ri.fip_ns
ri.floating_ip_removed_dist(fip_cidr)
self.assertTrue(fip_ns.destroyed)
mIPWrapper().del_veth.assert_called_once_with(
fip_ns.get_int_device_name(router['id']))
mIPDevice().route.delete_gateway.assert_called_once_with(
str(fip_to_rtr.ip), table=16)
fip_ns.unsubscribe.assert_called_once_with(ri.router_id)
def _test_add_floating_ip(self, ri, fip, is_failure):
ri._add_fip_addr_to_device = mock.Mock(return_value=is_failure)
ri.floating_ip_added_dist = mock.Mock()
result = ri.add_floating_ip(fip,
mock.sentinel.interface_name,
mock.sentinel.device)
ri._add_fip_addr_to_device.assert_called_once_with(
fip, mock.sentinel.device)
return result
def test_add_floating_ip(self):
ri = self._create_router(mock.MagicMock())
ip = '15.1.2.3'
fip = {'floating_ip_address': ip}
result = self._test_add_floating_ip(ri, fip, True)
ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32')
self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result)
def test_add_floating_ip_error(self):
ri = self._create_router(mock.MagicMock())
result = self._test_add_floating_ip(
ri, {'floating_ip_address': '15.1.2.3'}, False)
self.assertFalse(ri.floating_ip_added_dist.called)
self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, result)
@mock.patch.object(router_info.RouterInfo, 'remove_floating_ip')
def test_remove_floating_ip(self, super_remove_floating_ip):
ri = self._create_router(mock.MagicMock())
ri.floating_ip_removed_dist = mock.Mock()
ri.remove_floating_ip(mock.sentinel.device, mock.sentinel.ip_cidr)
super_remove_floating_ip.assert_called_once_with(
mock.sentinel.device, mock.sentinel.ip_cidr)
ri.floating_ip_removed_dist.assert_called_once_with(
mock.sentinel.ip_cidr)
def test__get_internal_port(self):
ri = self._create_router()
port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
router_ports = [port]
ri.router.get.return_value = router_ports
self.assertEqual(port, ri._get_internal_port(mock.sentinel.subnet_id))
def test__get_internal_port_not_found(self):
ri = self._create_router()
port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
router_ports = [port]
ri.router.get.return_value = router_ports
self.assertEqual(None, ri._get_internal_port(mock.sentinel.subnet_id2))
def test__get_snat_idx_ipv4(self):
ip_cidr = '101.12.13.00/24'
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x650C0D00 is the numerical value of 101.12.13.00
self.assertEqual(0x650C0D00, snat_idx)
def test__get_snat_idx_ipv6(self):
ip_cidr = '2620:0:a03:e100::/64'
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x3D345705 is the 30-bit XOR-folded crc32 of the ip_cidr
self.assertEqual(0x3D345705, snat_idx)
def test__get_snat_idx_ipv6_below_32768(self):
ip_cidr = 'd488::/30'
# crc32 of this ip_cidr is 0x1BD7
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x1BD7 + 0x3FFFFFFF = 0x40001BD6
self.assertEqual(0x40001BD6, snat_idx)
def test__set_subnet_arp_info(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
ri = dvr_router.DvrLocalRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
subnet_id = l3_test_common.get_subnet_id(ports[0])
test_ports = [{'mac_address': '00:11:22:33:44:55',
'device_owner': 'network:dhcp',
'fixed_ips': [{'ip_address': '1.2.3.4',
'prefixlen': 24,
'subnet_id': subnet_id}]}]
self.plugin_api.get_ports_by_subnet.return_value = test_ports
# Test basic case
ports[0]['subnets'] = [{'id': subnet_id,
'cidr': '1.2.3.0/24'}]
ri._set_subnet_arp_info(subnet_id)
self.mock_ip_dev.neigh.add.assert_called_once_with(
'1.2.3.4', '00:11:22:33:44:55')
# Test negative case
router['distributed'] = False
ri._set_subnet_arp_info(subnet_id)
self.mock_ip_dev.neigh.add.never_called()
def test_add_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
subnet_id = l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
agent.add_arp_entry(None, payload)
agent.router_deleted(None, router['id'])
self.mock_ip_dev.neigh.add.assert_called_once_with(
'1.7.23.11', '00:11:22:33:44:55')
def test_add_arp_entry_no_routerinfo(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
subnet_id = l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent.add_arp_entry(None, payload)
def test__update_arp_entry_with_no_subnet(self):
ri = dvr_router.DvrLocalRouter(
mock.sentinel.agent,
HOSTNAME,
'foo_router_id',
{'distributed': True, 'gw_port_host': HOSTNAME},
**self.ri_kwargs)
with mock.patch.object(l3_agent.ip_lib, 'IPDevice') as f:
ri._update_arp_entry(mock.ANY, mock.ANY, 'foo_subnet_id', 'add')
self.assertFalse(f.call_count)
def test_del_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
subnet_id = l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.5.25.15',
'mac_address': '00:44:33:22:11:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
# first add the entry
agent.add_arp_entry(None, payload)
# now delete it
agent.del_arp_entry(None, payload)
self.mock_ip_dev.neigh.delete.assert_called_once_with(
'1.5.25.15', '00:44:33:22:11:55')
agent.router_deleted(None, router['id'])
def test_get_floating_agent_gw_interfaces(self):
fake_network_id = _uuid()
subnet_id = _uuid()
agent_gateway_port = (
[{'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'binding:host_id': 'myhost',
'device_owner': 'network:floatingip_agent_gateway',
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrLocalRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
self.assertEqual(
agent_gateway_port[0],
ri.get_floating_agent_gw_interface(fake_network_id))
def test_process_router_dist_floating_ip_add(self):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': mock.sentinel.ext_net_id,
'port_id': _uuid()},
{'id': _uuid(),
'host': 'some-other-host',
'floating_ip_address': '15.1.2.4',
'fixed_ip_address': '192.168.0.10',
'floating_network_id': mock.sentinel.ext_net_id,
'port_id': _uuid()}]}
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrLocalRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ri.dist_fip_count = 0
fip_ns = agent.get_fip_ns(mock.sentinel.ext_net_id)
subnet_id = _uuid()
fip_ns.agent_gateway_port = (
{'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
)
def _test_ext_gw_updated_dvr_agent_mode(self, host,
agent_mode, expected_call_count):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrLocalRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._external_gateway_added = mock.Mock()
# test agent mode = dvr (compute node)
router['gw_port_host'] = host
agent.conf.agent_mode = agent_mode
ri.external_gateway_updated(ex_gw_port, interface_name)
# no gateway should be added on dvr node
self.assertEqual(expected_call_count,
ri._external_gateway_added.call_count)
def test_ext_gw_updated_dvr_agent_mode(self):
# no gateway should be added on dvr node
self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0)
def test_ext_gw_updated_dvr_agent_mode_host(self):
# no gateway should be added on dvr node
self._test_ext_gw_updated_dvr_agent_mode(HOSTNAME,
'dvr', 0)
def test_external_gateway_removed_ext_gw_port_and_fip(self):
self.conf.set_override('state_path', '/tmp')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = 'dvr'
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
external_net_id = router['gw_port']['network_id']
ri = dvr_router.DvrLocalRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ri.remove_floating_ip = mock.Mock()
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
ri.ex_gw_port = ri.router['gw_port']
del ri.router['gw_port']
ri.fip_ns = None
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag = mock.Mock()
nat.add_rule = mock.Mock()
ri.fip_ns = agent.get_fip_ns(external_net_id)
subnet_id = _uuid()
ri.fip_ns.agent_gateway_port = {
'fixed_ips': [{
'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id
}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': external_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
vm_floating_ip = '19.4.4.2'
ri.floating_ips_dict[vm_floating_ip] = FIP_PRI
ri.dist_fip_count = 1
ri.rtr_fip_subnet = ri.fip_ns.local_subnets.allocate(ri.router_id)
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
self.mock_ip.get_devices.return_value = [
l3_test_common.FakeDev(ri.fip_ns.get_ext_device_name(_uuid()))]
self.mock_ip_dev.addr.list.return_value = [
{'cidr': vm_floating_ip + '/32'},
{'cidr': '19.4.4.1/24'}]
self.device_exists.return_value = True
ri.external_gateway_removed(
ri.ex_gw_port,
ri.get_external_device_name(ri.ex_gw_port['id']))
ri.remove_floating_ip.assert_called_once_with(self.mock_ip_dev,
'19.4.4.2/32')
|
nmrugg/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/infoq.py
|
92
|
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urlparse,
)
class InfoQIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
'info_dict': {
'id': '12-jan-pythonthings',
'ext': 'mp4',
'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
'title': 'A Few of My Favorite [Python] Things',
},
}, {
'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
video_description = self._html_search_meta('description', webpage, 'description')
# The server URL is hardcoded
video_url = 'rtmpe://video.infoq.com/cfx/st/'
# Extract video URL
encoded_id = self._search_regex(
r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id')
real_id = compat_urllib_parse_unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
playpath = 'mp4:' + real_id
video_filename = playpath.split('/')[-1]
video_id, extension = video_filename.split('.')
http_base = self._search_regex(
r'EXPRESSINSTALL_SWF\s*=\s*[^"]*"((?:https?:)?//[^/"]+/)', webpage,
'HTTP base URL')
formats = [{
'format_id': 'rtmp',
'url': video_url,
'ext': extension,
'play_path': playpath,
}, {
'format_id': 'http',
'url': compat_urlparse.urljoin(url, http_base) + real_id,
}]
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'description': video_description,
'formats': formats,
}
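# --- Illustrative usage sketch (added for clarity, not part of the original extractor) ---
# youtube-dl normally drives extractors itself; a rough manual invocation, assuming
# the youtube_dl package is importable, might look like this.
def _example_extract_infoq(url='http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things'):
    import youtube_dl
    ydl = youtube_dl.YoutubeDL({'skip_download': True, 'quiet': True})
    # extract_info() ends up calling InfoQIE._real_extract() above and returns the
    # info dict with 'id', 'title', 'description' and 'formats'.
    return ydl.extract_info(url, download=False)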
|
davidvon/pipa-pay-server
|
refs/heads/master
|
site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py
|
1729
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
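# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Walking a pulldom event stream with the TreeWalker above, assuming html5lib is
# importable as a package; the input must be well-formed XML for pulldom.
def _example_walk_pulldom(markup='<p>Hello <b>world</b></p>'):
    from xml.dom import pulldom
    import html5lib
    walker_cls = html5lib.getTreeWalker('pulldom')
    stream = pulldom.parseString(markup)
    # Iterating the walker yields html5lib token dicts (StartTag, Characters,
    # EndTag, ...) produced by tokens() above.
    return list(walker_cls(stream))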
|
lxshopping/p2pool
|
refs/heads/master
|
p2pool/bitcoin/getwork.py
|
267
|
'''
Representation of a getwork request/reply
'''
from __future__ import division
from . import data as bitcoin_data
from . import sha256
from p2pool.util import pack
def _swap4(s):
if len(s) % 4:
raise ValueError()
return ''.join(s[x:x+4][::-1] for x in xrange(0, len(s), 4))
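# --- Illustrative example (added for clarity, not part of the original module) ---
def _example_swap4():
    # Each 4-byte word is reversed; getwork's 'data'/'midstate' fields are
    # word-swapped relative to the raw little-endian header bytes.
    assert _swap4('abcdefgh') == 'dcbahgfe'
    assert _swap4(_swap4('abcdefgh')) == 'abcdefgh'  # applying it twice is a no-op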
class BlockAttempt(object):
def __init__(self, version, previous_block, merkle_root, timestamp, bits, share_target):
self.version, self.previous_block, self.merkle_root, self.timestamp, self.bits, self.share_target = version, previous_block, merkle_root, timestamp, bits, share_target
def __hash__(self):
return hash((self.version, self.previous_block, self.merkle_root, self.timestamp, self.bits, self.share_target))
def __eq__(self, other):
if not isinstance(other, BlockAttempt):
raise ValueError('comparisons only valid with other BlockAttempts')
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return 'BlockAttempt(%s)' % (', '.join('%s=%r' % (k, v) for k, v in self.__dict__.iteritems()),)
def getwork(self, **extra):
if 'data' in extra or 'hash1' in extra or 'target' in extra or 'midstate' in extra:
raise ValueError()
block_data = bitcoin_data.block_header_type.pack(dict(
version=self.version,
previous_block=self.previous_block,
merkle_root=self.merkle_root,
timestamp=self.timestamp,
bits=self.bits,
nonce=0,
))
getwork = {
'data': _swap4(block_data).encode('hex') + '000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
'hash1': '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'target': pack.IntType(256).pack(self.share_target).encode('hex'),
'midstate': _swap4(sha256.process(sha256.initial_state, block_data[:64])).encode('hex'),
}
getwork = dict(getwork)
getwork.update(extra)
return getwork
@classmethod
def from_getwork(cls, getwork):
attrs = decode_data(getwork['data'])
return cls(
version=attrs['version'],
previous_block=attrs['previous_block'],
merkle_root=attrs['merkle_root'],
timestamp=attrs['timestamp'],
bits=attrs['bits'],
share_target=pack.IntType(256).unpack(getwork['target'].decode('hex')),
)
def update(self, **kwargs):
d = self.__dict__.copy()
d.update(kwargs)
return self.__class__(**d)
def decode_data(data):
return bitcoin_data.block_header_type.unpack(_swap4(data.decode('hex'))[:80])
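# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Given a getwork reply `gw` (a dict with hex-encoded 'data' and 'target' entries,
# as produced by BlockAttempt.getwork above), the helpers recover the header fields
# and the full BlockAttempt.
def _example_parse_getwork(gw):
    header_fields = decode_data(gw['data'])   # dict of block header fields
    attempt = BlockAttempt.from_getwork(gw)   # BlockAttempt including share_target
    return header_fields, attempt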
|
unnikrishnankgs/va
|
refs/heads/master
|
venv/lib/python3.5/site-packages/django/db/models/fields/related_lookups.py
|
11
|
from django.db.models.lookups import (
Exact, GreaterThan, GreaterThanOrEqual, In, IsNull, LessThan,
LessThanOrEqual,
)
class MultiColSource(object):
contains_aggregate = False
def __init__(self, alias, targets, sources, field):
self.targets, self.sources, self.field, self.alias = targets, sources, field, alias
self.output_field = self.field
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.field)
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias),
self.targets, self.sources, self.field)
def get_normalized_value(value, lhs):
from django.db.models import Model
if isinstance(value, Model):
value_list = []
sources = lhs.output_field.get_path_info()[-1].target_fields
for source in sources:
while not isinstance(value, source.model) and source.remote_field:
source = source.remote_field.model._meta.get_field(source.remote_field.field_name)
try:
value_list.append(getattr(value, source.attname))
except AttributeError:
# A case like Restaurant.objects.filter(place=restaurant_instance),
# where place is a OneToOneField and the primary key of Restaurant.
return (value.pk,)
return tuple(value_list)
if not isinstance(value, tuple):
return (value,)
return value
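# --- Illustrative example (added for clarity, not part of the original module) ---
# get_normalized_value() always returns a tuple, so direct values, tuples and model
# instances can be handled uniformly by the lookups below.
def _example_get_normalized_value():
    assert get_normalized_value(42, lhs=None) == (42,)
    assert get_normalized_value((1, 2), lhs=None) == (1, 2)
    # For a Model instance, the tuple of target attribute values (usually the
    # primary key) is returned instead.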
class RelatedIn(In):
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
# If we get here, we are dealing with single-column relations.
self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs]
# We need to run the related field's get_prep_value(). Consider case
# ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
# doesn't have validation for non-integers, so we must run validation
# using the target field.
if hasattr(self.lhs.output_field, 'get_path_info'):
# Run the target field's get_prep_value. We can safely assume there is
# only one as we don't get to the direct value branch otherwise.
target_field = self.lhs.output_field.get_path_info()[-1].target_fields[-1]
self.rhs = [target_field.get_prep_value(v) for v in self.rhs]
return super(RelatedIn, self).get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
# For multicolumn lookups we need to build a multicolumn where clause.
# This clause is either a SubqueryConstraint (for values that need to be compiled to
# SQL) or an OR-combined list of (col1 = val1 AND col2 = val2 AND ...) clauses.
from django.db.models.sql.where import WhereNode, SubqueryConstraint, AND, OR
root_constraint = WhereNode(connector=OR)
if self.rhs_is_direct_value():
values = [get_normalized_value(value, self.lhs) for value in self.rhs]
for value in values:
value_constraint = WhereNode()
for source, target, val in zip(self.lhs.sources, self.lhs.targets, value):
lookup_class = target.get_lookup('exact')
lookup = lookup_class(target.get_col(self.lhs.alias, source), val)
value_constraint.add(lookup, AND)
root_constraint.add(value_constraint, OR)
else:
root_constraint.add(
SubqueryConstraint(
self.lhs.alias, [target.column for target in self.lhs.targets],
[source.name for source in self.lhs.sources], self.rhs),
AND)
return root_constraint.as_sql(compiler, connection)
else:
return super(RelatedIn, self).as_sql(compiler, connection)
class RelatedLookupMixin(object):
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
# If we get here, we are dealing with single-column relations.
self.rhs = get_normalized_value(self.rhs, self.lhs)[0]
# We need to run the related field's get_prep_value(). Consider case
# ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
# doesn't have validation for non-integers, so we must run validation
# using the target field.
if hasattr(self.lhs.output_field, 'get_path_info'):
# Get the target field. We can safely assume there is only one
# as we don't get to the direct value branch otherwise.
target_field = self.lhs.output_field.get_path_info()[-1].target_fields[-1]
self.rhs = target_field.get_prep_value(self.rhs)
return super(RelatedLookupMixin, self).get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
assert self.rhs_is_direct_value()
self.rhs = get_normalized_value(self.rhs, self.lhs)
from django.db.models.sql.where import WhereNode, AND
root_constraint = WhereNode()
for target, source, val in zip(self.lhs.targets, self.lhs.sources, self.rhs):
lookup_class = target.get_lookup(self.lookup_name)
root_constraint.add(
lookup_class(target.get_col(self.lhs.alias, source), val), AND)
return root_constraint.as_sql(compiler, connection)
return super(RelatedLookupMixin, self).as_sql(compiler, connection)
class RelatedExact(RelatedLookupMixin, Exact):
pass
class RelatedLessThan(RelatedLookupMixin, LessThan):
pass
class RelatedGreaterThan(RelatedLookupMixin, GreaterThan):
pass
class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual):
pass
class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual):
pass
class RelatedIsNull(RelatedLookupMixin, IsNull):
pass
|
titasakgm/brc-stock
|
refs/heads/master
|
openerp/addons/report_geraldo/lib/geraldo/site/newsite/site-geraldo/django/db/backends/sqlite3/introspection.py
|
15
|
from django.db.backends import BaseDatabaseIntrospection
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
import re
m = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', key)
if m:
return ('CharField', {'max_length': int(m.group(1))})
raise KeyError
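# --- Illustrative example (added for clarity, not part of the original module) ---
# The flexible lookup resolves both plain SQLite type names and parametrized ones
# such as "varchar(30)".
def _example_flexible_lookup():
    lookup = FlexibleFieldLookupDict()
    assert lookup['integer'] == 'IntegerField'
    assert lookup['VARCHAR(30)'] == ('CharField', {'max_length': 30})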
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name FROM sqlite_master
WHERE type='table' AND NOT name='sqlite_sequence'
ORDER BY name""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [(info['name'], info['type'], None, None, None, None,
info['null_ok']) for info in self._table_info(cursor, table_name)]
def get_relations(self, cursor, table_name):
raise NotImplementedError
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
indexes = {}
for info in self._table_info(cursor, table_name):
indexes[info['name']] = {'primary_key': info['pk'] != 0,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
if not unique:
continue
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name]['unique'] = True
return indexes
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, dflt_value, pk
return [{'name': field[1],
'type': field[2],
'null_ok': not field[3],
'pk': field[5] # undocumented
} for field in cursor.fetchall()]
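# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# With a Django sqlite3 `connection`, this introspection class is normally reached
# through connection.introspection.
def _example_introspect(connection, table_name):
    cursor = connection.cursor()
    intro = connection.introspection
    return {
        'tables': intro.get_table_list(cursor),
        'columns': intro.get_table_description(cursor, table_name),
        'indexes': intro.get_indexes(cursor, table_name),
    }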
|
b0ttl3z/SickRage
|
refs/heads/master
|
lib/hachoir_metadata/register.py
|
67
|
from hachoir_core.i18n import _
from hachoir_core.tools import (
humanDuration, humanBitRate,
humanFrequency, humanBitSize, humanFilesize,
humanDatetime)
from hachoir_core.language import Language
from hachoir_metadata.filter import Filter, NumberFilter, DATETIME_FILTER
from datetime import date, datetime, timedelta
from hachoir_metadata.formatter import (
humanAudioChannel, humanFrameRate, humanComprRate, humanAltitude,
humanPixelSize, humanDPI)
from hachoir_metadata.setter import (
setDatetime, setTrackNumber, setTrackTotal, setLanguage)
from hachoir_metadata.metadata_item import Data
MIN_SAMPLE_RATE = 1000 # 1 kHz
MAX_SAMPLE_RATE = 192000 # 192 kHz
MAX_NB_CHANNEL = 8 # 8 channels
MAX_WIDTH = 20000 # 20 000 pixels
MAX_BIT_RATE = 500 * 1024 * 1024 # 500 Mbit/s
MAX_HEIGHT = MAX_WIDTH
MAX_DPI_WIDTH = 10000
MAX_DPI_HEIGHT = MAX_DPI_WIDTH
MAX_NB_COLOR = 2 ** 24 # 16 million colors
MAX_BITS_PER_PIXEL = 256 # 256 bits/pixel
MAX_FRAME_RATE = 150 # 150 frame/sec
MAX_NB_PAGE = 20000
MAX_COMPR_RATE = 1000.0
MIN_COMPR_RATE = 0.001
MAX_TRACK = 999
DURATION_FILTER = Filter(timedelta,
timedelta(milliseconds=1),
timedelta(days=365))
def registerAllItems(meta):
meta.register(Data("title", 100, _("Title"), type=unicode))
meta.register(Data("artist", 101, _("Artist"), type=unicode))
meta.register(Data("author", 102, _("Author"), type=unicode))
meta.register(Data("music_composer", 103, _("Music composer"), type=unicode))
meta.register(Data("album", 200, _("Album"), type=unicode))
meta.register(Data("duration", 201, _("Duration"), # integer in milliseconde
type=timedelta, text_handler=humanDuration, filter=DURATION_FILTER))
meta.register(Data("nb_page", 202, _("Nb page"), filter=NumberFilter(1, MAX_NB_PAGE)))
meta.register(Data("music_genre", 203, _("Music genre"), type=unicode))
meta.register(Data("language", 204, _("Language"), conversion=setLanguage, type=Language))
meta.register(Data("track_number", 205, _("Track number"), conversion=setTrackNumber,
filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
meta.register(Data("track_total", 206, _("Track total"), conversion=setTrackTotal,
filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
meta.register(Data("organization", 210, _("Organization"), type=unicode))
meta.register(Data("version", 220, _("Version")))
meta.register(Data("width", 301, _("Image width"), filter=NumberFilter(1, MAX_WIDTH), type=(int, long), text_handler=humanPixelSize))
meta.register(Data("height", 302, _("Image height"), filter=NumberFilter(1, MAX_HEIGHT), type=(int, long), text_handler=humanPixelSize))
meta.register(Data("nb_channel", 303, _("Channel"), text_handler=humanAudioChannel, filter=NumberFilter(1, MAX_NB_CHANNEL), type=(int, long)))
meta.register(Data("sample_rate", 304, _("Sample rate"), text_handler=humanFrequency, filter=NumberFilter(MIN_SAMPLE_RATE, MAX_SAMPLE_RATE), type=(int, long, float)))
meta.register(Data("bits_per_sample", 305, _("Bits/sample"), text_handler=humanBitSize, filter=NumberFilter(1, 64), type=(int, long)))
meta.register(Data("image_orientation", 306, _("Image orientation")))
meta.register(Data("nb_colors", 307, _("Number of colors"), filter=NumberFilter(1, MAX_NB_COLOR), type=(int, long)))
meta.register(Data("bits_per_pixel", 308, _("Bits/pixel"), filter=NumberFilter(1, MAX_BITS_PER_PIXEL), type=(int, long)))
meta.register(Data("filename", 309, _("File name"), type=unicode))
meta.register(Data("file_size", 310, _("File size"), text_handler=humanFilesize, type=(int, long)))
meta.register(Data("pixel_format", 311, _("Pixel format")))
meta.register(Data("compr_size", 312, _("Compressed file size"), text_handler=humanFilesize, type=(int, long)))
meta.register(Data("compr_rate", 313, _("Compression rate"), text_handler=humanComprRate, filter=NumberFilter(MIN_COMPR_RATE, MAX_COMPR_RATE), type=(int, long, float)))
meta.register(Data("width_dpi", 320, _("Image DPI width"), filter=NumberFilter(1, MAX_DPI_WIDTH), type=(int, long), text_handler=humanDPI))
meta.register(Data("height_dpi", 321, _("Image DPI height"), filter=NumberFilter(1, MAX_DPI_HEIGHT), type=(int, long), text_handler=humanDPI))
meta.register(Data("file_attr", 400, _("File attributes")))
meta.register(Data("file_type", 401, _("File type")))
meta.register(Data("subtitle_author", 402, _("Subtitle author"), type=unicode))
meta.register(Data("creation_date", 500, _("Creation date"), text_handler=humanDatetime,
filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
meta.register(Data("last_modification", 501, _("Last modification"), text_handler=humanDatetime,
filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
meta.register(Data("latitude", 510, _("Latitude"), type=float))
meta.register(Data("longitude", 511, _("Longitude"), type=float))
meta.register(Data("altitude", 511, _("Altitude"), type=float, text_handler=humanAltitude))
meta.register(Data("location", 530, _("Location"), type=unicode))
meta.register(Data("city", 531, _("City"), type=unicode))
meta.register(Data("country", 532, _("Country"), type=unicode))
meta.register(Data("charset", 540, _("Charset"), type=unicode))
meta.register(Data("font_weight", 550, _("Font weight")))
meta.register(Data("camera_aperture", 520, _("Camera aperture")))
meta.register(Data("camera_focal", 521, _("Camera focal")))
meta.register(Data("camera_exposure", 522, _("Camera exposure")))
meta.register(Data("camera_brightness", 530, _("Camera brightness")))
meta.register(Data("camera_model", 531, _("Camera model"), type=unicode))
meta.register(Data("camera_manufacturer", 532, _("Camera manufacturer"), type=unicode))
meta.register(Data("compression", 600, _("Compression")))
meta.register(Data("copyright", 601, _("Copyright"), type=unicode))
meta.register(Data("url", 602, _("URL"), type=unicode))
meta.register(Data("frame_rate", 603, _("Frame rate"), text_handler=humanFrameRate,
filter=NumberFilter(1, MAX_FRAME_RATE), type=(int, long, float)))
meta.register(Data("bit_rate", 604, _("Bit rate"), text_handler=humanBitRate,
filter=NumberFilter(1, MAX_BIT_RATE), type=(int, long, float)))
meta.register(Data("aspect_ratio", 604, _("Aspect ratio"), type=(int, long, float)))
meta.register(Data("os", 900, _("OS"), type=unicode))
meta.register(Data("producer", 901, _("Producer"), type=unicode))
meta.register(Data("comment", 902, _("Comment"), type=unicode))
meta.register(Data("format_version", 950, _("Format version"), type=unicode))
meta.register(Data("mime_type", 951, _("MIME type"), type=unicode))
meta.register(Data("endian", 952, _("Endianness"), type=unicode))
|
cartersgenes/namebench
|
refs/heads/master
|
nb_third_party/jinja2/environment.py
|
199
|
# -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode, _encode_filename
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
unnamed and inaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instanciated environments.
"""
result = {}
for extension in extensions:
if isinstance(extension, basestring):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
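# --- Illustrative examples (added for clarity, not part of the original module) ---
# create_cache() maps the Environment(cache_size=...) argument onto a concrete cache
# object: 0 disables caching, a negative size gives an unbounded dict, and a positive
# size gives an LRUCache bounded to that many templates.
def _example_cache_for_size():
    assert create_cache(0) is None
    assert create_cache(-1) == {}
    assert isinstance(create_cache(50), LRUCache)
# A minimal Environment construction mirroring the parameters documented in the
# Environment docstring below; DictLoader comes from jinja2.loaders in this package.
def _example_environment():
    from jinja2.loaders import DictLoader
    env = Environment(loader=DictLoader({'hello.html': 'Hello {{ name }}!'}),
                      trim_blocks=True, cache_size=50)
    return env.get_template('hello.html').render(name='World')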
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here are the possible initialization parameters:
`block_start_string`
The string marking the beginning of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the beginning of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the beginning of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line-based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line-based
comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on whether autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. By default this is ``50``, which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (ie: file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.newline_sequence = newline_sequence
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.bytecode_cache = None
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in attributes.iteritems():
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except of cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in args.iteritems():
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in self.extensions.iteritems():
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, basestring):
try:
attr = str(argument)
except:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, _encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = unicode(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), unicode(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
        The `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
        `defer_init` is used internally to aid the module code generator. This
        causes the generated code to be importable without the global
        environment variable being set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, basestring):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = generate(source, self, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = _encode_filename(filename)
return compile(source, filename, 'exec')
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Compiles all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
import imp, struct, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0755 << 16L
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError, e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = compile(code, _encode_filename(filename), 'exec')
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = filter(filter_func, x)
return x
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
raise exc_type, exc_value, tb
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
        method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, basestring):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly using the constructor. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
Every template object has a few methods and members that are guaranteed
to exist. However it's important that a template object should be
considered immutable. Modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
newline_sequence, frozenset(extensions), optimized, undefined,
finalize, autoescape, None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec code in namespace
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
        This will return the rendered template as a unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
        is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(map(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally
    converting it into a unicode string or bytestring renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return unicode(self).encode('utf-8')
# unicode goes after __str__ because we configured 2to3 to rename
# __unicode__ to __str__. because the 2to3 tree is not designed to
# remove nodes from it, we leave the above __str__ around and let
# it override at runtime.
def __unicode__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one unicode string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
        Per default unicode strings are written; if you want to encode
        before writing, specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, basestring):
fp = file(fp, 'w')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = self._gen.next
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = generator(self._gen.next).next
def __iter__(self):
return self
def next(self):
return self._next()
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
|
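As a quick orientation for the jinja2 environment module reproduced above, the following is a minimal usage sketch (not part of the file) exercising its public Environment API: from_string, compile_expression and overlay. Template strings and variable names are illustrative only.

# Sketch only; assumes Jinja2 2.x on Python 2, matching the module above.
from jinja2 import Environment

env = Environment(trim_blocks=True)

# from_string() parses and compiles the source and wraps it in a Template.
template = env.from_string(u'Hello {{ name }}!')
print template.render(name=u'World')             # Hello World!

# compile_expression() returns a callable evaluating a single expression.
is_answer = env.compile_expression('foo == 42')
print is_answer(foo=42)                          # True

# overlay() shares everything with `env` except the overridden attributes.
html_env = env.overlay(autoescape=True)
print html_env.from_string(u'{{ payload }}').render(payload=u'<b>hi</b>')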
VitalPet/addons-onestein
|
refs/heads/10.0
|
hr_absenteeism/models/hr_absenteeism_dates.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2016 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class hr_absenteeism_dates(models.Model):
_name = "hr.absenteeism.dates"
_description = "Absenteeism Notification Dates"
name = fields.Char("Notification Name")
absent_notify_date = fields.Datetime("Absent Notification Date")
holiday_id = fields.Many2one(
"hr.holidays",
string="Related Holiday",
ondelete="cascade"
)
notification_id = fields.Many2one(
"hr.absenteeism.notifications",
string="Related notification"
)
|
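The hr.absenteeism.dates model above is typically populated from server-side code. Below is a hedged sketch in the Odoo 10 API; the inheriting model, the helper name and the use of employee_id are assumptions for illustration, and only fields declared on the model above are written.

# Hypothetical helper, not part of the module above.
from odoo import api, fields, models

class HrHolidays(models.Model):
    _inherit = 'hr.holidays'

    @api.multi
    def _log_absence_notification(self):
        # Create one notification-date record per leave in the recordset.
        for holiday in self:
            self.env['hr.absenteeism.dates'].create({
                'name': u'Absence notification for %s' % holiday.employee_id.name,
                'absent_notify_date': fields.Datetime.now(),
                'holiday_id': holiday.id,
            })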
cliffe/SecGen
|
refs/heads/master
|
modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/distutils/tests/test_bdist.py
|
127
|
"""Tests for distutils.command.bdist."""
import os
import unittest
from test.test_support import run_unittest
from distutils.command.bdist import bdist
from distutils.tests import support
class BuildTestCase(support.TempdirManager,
unittest.TestCase):
def test_formats(self):
# let's create a command and make sure
# we can set the format
dist = self.create_dist()[1]
cmd = bdist(dist)
cmd.formats = ['msi']
cmd.ensure_finalized()
self.assertEqual(cmd.formats, ['msi'])
# what formats does bdist offer?
formats = ['bztar', 'gztar', 'msi', 'rpm', 'tar',
'wininst', 'zip', 'ztar']
found = sorted(cmd.format_command)
self.assertEqual(found, formats)
def test_skip_build(self):
# bug #10946: bdist --skip-build should trickle down to subcommands
dist = self.create_dist()[1]
cmd = bdist(dist)
cmd.skip_build = 1
cmd.ensure_finalized()
dist.command_obj['bdist'] = cmd
names = ['bdist_dumb', 'bdist_wininst']
# bdist_rpm does not support --skip-build
if os.name == 'nt':
names.append('bdist_msi')
for name in names:
subcmd = cmd.get_finalized_command(name)
self.assertTrue(subcmd.skip_build,
'%s should take --skip-build from bdist' % name)
def test_suite():
return unittest.makeSuite(BuildTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
|
mozilla/stoneridge
|
refs/heads/master
|
python/src/Mac/Modules/evt/evtscan.py
|
34
|
# Scan an Apple header file, generating a Python file of generator calls.
import sys
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner
LONG = "Events"
SHORT = "evt"
OBJECT = "NOTUSED"
def main():
input = LONG + ".h"
output = SHORT + "gen.py"
defsoutput = TOOLBOXDIR + LONG + ".py"
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Testing definitions output code ==="
execfile(defsoutput, {}, {})
print "=== Done scanning and generating, now importing the generated code... ==="
exec "import " + SHORT + "support"
print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
def destination(self, type, name, arglist):
classname = "Function"
listname = "functions"
if arglist:
t, n, m = arglist[0]
# This is non-functional today
if t == OBJECT and m == "InMode":
classname = "Method"
listname = "methods"
return classname, listname
def makeblacklistnames(self):
return [
"KeyTranslate",
"GetEventMask", # I cannot seem to find this routine...
"WaitNextEvent", # Manually generated because of optional region
# Constants with funny definitions
"osEvtMessageMask",
# OS8 calls
'SystemEvent',
'SystemTask',
'SystemClick',
'GetOSEvent',
'OSEventAvail',
]
def makeblacklisttypes(self):
return [
"EvQElPtr", "QHdrPtr"
]
def makerepairinstructions(self):
return [
([("void_ptr", "*", "InMode"), ("long", "*", "InMode")],
[("InBuffer", "*", "*")]),
([("void", "*", "OutMode"), ("long", "*", "InMode"),
("long", "*", "OutMode")],
[("VarVarOutBuffer", "*", "InOutMode")]),
([("void", "wStorage", "OutMode")],
[("NullStorage", "*", "InMode")]),
# GetKeys
([('KeyMap', 'theKeys', 'InMode')],
[('*', '*', 'OutMode')]),
# GetTicker
([('unsigned long', '*', '*')],
[('unsigned_long', '*', '*')]),
]
if __name__ == "__main__":
main()
|
madjelan/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/factor_analysis.py
|
206
|
"""Factor Analysis.
A latent linear variable model.
FactorAnalysis is similar to probabilistic PCA implemented by PCA.score.
While PCA assumes Gaussian noise with the same variance for each
feature, the FactorAnalysis model assumes different variances for
each of them.
This implementation is based on David Barber's Book,
Bayesian Reasoning and Machine Learning,
http://www.cs.ucl.ac.uk/staff/d.barber/brml,
Algorithm 21.1
"""
# Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Licence: BSD3
import warnings
from math import sqrt, log
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array, check_random_state
from ..utils.extmath import fast_logdet, fast_dot, randomized_svd, squared_norm
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
class FactorAnalysis(BaseEstimator, TransformerMixin):
"""Factor Analysis (FA)
A simple linear generative model with Gaussian latent variables.
The observations are assumed to be caused by a linear transformation of
lower dimensional latent factors and added Gaussian noise.
Without loss of generality the factors are distributed according to a
Gaussian with zero mean and unit covariance. The noise is also zero mean
and has an arbitrary diagonal covariance matrix.
    If we were to restrict the model further, by assuming that the Gaussian
    noise is even isotropic (all diagonal entries are the same), we would
    obtain :class:`PPCA`.
FactorAnalysis performs a maximum likelihood estimate of the so-called
`loading` matrix, the transformation of the latent variables to the
observed ones, using expectation-maximization (EM).
Read more in the :ref:`User Guide <FA>`.
Parameters
----------
n_components : int | None
Dimensionality of latent space, the number of components
of ``X`` that are obtained after ``transform``.
If None, n_components is set to the number of features.
tol : float
Stopping tolerance for EM algorithm.
copy : bool
Whether to make a copy of X. If ``False``, the input X gets overwritten
during fitting.
max_iter : int
Maximum number of iterations.
noise_variance_init : None | array, shape=(n_features,)
The initial guess of the noise variance for each feature.
If None, it defaults to np.ones(n_features)
svd_method : {'lapack', 'randomized'}
Which SVD method to use. If 'lapack' use standard SVD from
scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
Defaults to 'randomized'. For most applications 'randomized' will
be sufficiently precise while providing significant speed gains.
Accuracy can also be improved by setting higher values for
`iterated_power`. If this is not sufficient, for maximum precision
you should choose 'lapack'.
iterated_power : int, optional
Number of iterations for the power method. 3 by default. Only used
if ``svd_method`` equals 'randomized'
random_state : int or RandomState
Pseudo number generator state used for random sampling. Only used
if ``svd_method`` equals 'randomized'
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
loglike_ : list, [n_iterations]
The log likelihood at each iteration.
noise_variance_ : array, shape=(n_features,)
The estimated noise variance for each feature.
n_iter_ : int
Number of iterations run.
References
----------
.. David Barber, Bayesian Reasoning and Machine Learning,
Algorithm 21.1
.. Christopher M. Bishop: Pattern Recognition and Machine Learning,
Chapter 12.2.4
See also
--------
PCA: Principal component analysis is also a latent linear variable model
which however assumes equal noise variance for each feature.
This extra assumption makes probabilistic PCA faster as it can be
computed in closed form.
FastICA: Independent component analysis, a latent variable model with
non-Gaussian latent variables.
"""
def __init__(self, n_components=None, tol=1e-2, copy=True, max_iter=1000,
noise_variance_init=None, svd_method='randomized',
iterated_power=3, random_state=0):
self.n_components = n_components
self.copy = copy
self.tol = tol
self.max_iter = max_iter
if svd_method not in ['lapack', 'randomized']:
raise ValueError('SVD method %s is not supported. Please consider'
' the documentation' % svd_method)
self.svd_method = svd_method
self.noise_variance_init = noise_variance_init
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the FactorAnalysis model to X using EM
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self
"""
X = check_array(X, copy=self.copy, dtype=np.float)
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
# some constant terms
nsqrt = sqrt(n_samples)
llconst = n_features * log(2. * np.pi) + n_components
var = np.var(X, axis=0)
if self.noise_variance_init is None:
psi = np.ones(n_features, dtype=X.dtype)
else:
if len(self.noise_variance_init) != n_features:
raise ValueError("noise_variance_init dimension does not "
"with number of features : %d != %d" %
(len(self.noise_variance_init), n_features))
psi = np.array(self.noise_variance_init)
loglike = []
old_ll = -np.inf
SMALL = 1e-12
# we'll modify svd outputs to return unexplained variance
# to allow for unified computation of loglikelihood
if self.svd_method == 'lapack':
def my_svd(X):
_, s, V = linalg.svd(X, full_matrices=False)
return (s[:n_components], V[:n_components],
squared_norm(s[n_components:]))
elif self.svd_method == 'randomized':
random_state = check_random_state(self.random_state)
def my_svd(X):
_, s, V = randomized_svd(X, n_components,
random_state=random_state,
n_iter=self.iterated_power)
return s, V, squared_norm(X) - squared_norm(s)
else:
raise ValueError('SVD method %s is not supported. Please consider'
' the documentation' % self.svd_method)
for i in xrange(self.max_iter):
# SMALL helps numerics
sqrt_psi = np.sqrt(psi) + SMALL
s, V, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
s **= 2
# Use 'maximum' here to avoid sqrt problems.
W = np.sqrt(np.maximum(s - 1., 0.))[:, np.newaxis] * V
del V
W *= sqrt_psi
# loglikelihood
ll = llconst + np.sum(np.log(s))
ll += unexp_var + np.sum(np.log(psi))
ll *= -n_samples / 2.
loglike.append(ll)
if (ll - old_ll) < self.tol:
break
old_ll = ll
psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
else:
warnings.warn('FactorAnalysis did not converge.' +
' You might want' +
' to increase the number of iterations.',
ConvergenceWarning)
self.components_ = W
self.noise_variance_ = psi
self.loglike_ = loglike
self.n_iter_ = i + 1
return self
def transform(self, X):
"""Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
The latent variables of X.
"""
check_is_fitted(self, 'components_')
X = check_array(X)
Ih = np.eye(len(self.components_))
X_transformed = X - self.mean_
Wpsi = self.components_ / self.noise_variance_
cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
tmp = fast_dot(X_transformed, Wpsi.T)
X_transformed = fast_dot(tmp, cov_z)
return X_transformed
def get_covariance(self):
"""Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
-------
cov : array, shape (n_features, n_features)
Estimated covariance of data.
"""
check_is_fitted(self, 'components_')
cov = np.dot(self.components_.T, self.components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the FactorAnalysis model.
Returns
-------
precision : array, shape (n_features, n_features)
Estimated precision of data.
"""
check_is_fitted(self, 'components_')
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components == 0:
return np.diag(1. / self.noise_variance_)
if self.n_components == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
precision = np.dot(components_ / self.noise_variance_, components_.T)
precision.flat[::len(precision) + 1] += 1.
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= self.noise_variance_[:, np.newaxis]
precision /= -self.noise_variance_[np.newaxis, :]
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def score_samples(self, X):
"""Compute the log-likelihood of each sample
Parameters
----------
X: array, shape (n_samples, n_features)
The data
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'components_')
Xr = X - self.mean_
precision = self.get_precision()
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Compute the average log-likelihood of the samples
Parameters
----------
X: array, shape (n_samples, n_features)
The data
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
|
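A small sketch (not part of scikit-learn) showing the FactorAnalysis estimator above on synthetic low-rank data with per-feature noise, which is exactly the setting the model assumes; sizes and seeds are arbitrary.

import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 10, 3

W = rng.randn(n_components, n_features)               # true loading matrix
latent = rng.randn(n_samples, n_components)           # latent factors
noise = rng.randn(n_samples, n_features) * rng.uniform(0.5, 2.0, n_features)
X = np.dot(latent, W) + noise                         # heteroscedastic noise

fa = FactorAnalysis(n_components=n_components, random_state=0)
Z = fa.fit_transform(X)           # expected latent means, shape (500, 3)

print Z.shape                     # (500, 3)
print fa.components_.shape        # (3, 10), estimated loadings
print fa.noise_variance_.shape    # (10,), one variance per feature
print fa.score(X)                 # average per-sample log-likelihood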
rockyzhang/zhangyanhit-python-for-android-mips
|
refs/heads/master
|
python-build/python-libs/gdata/samples/contacts/contacts_example.py
|
89
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import sys
import getopt
import getpass
import atom
import gdata.contacts
import gdata.contacts.service
class ContactsSample(object):
"""ContactsSample object demonstrates operations with the Contacts feed."""
def __init__(self, email, password):
"""Constructor for the ContactsSample object.
Takes an email and password corresponding to a gmail account to
demonstrate the functionality of the Contacts feed.
Args:
email: [string] The e-mail address of the account to use for the sample.
password: [string] The password corresponding to the account specified by
the email parameter.
Yields:
A ContactsSample object used to run the sample demonstrating the
functionality of the Contacts feed.
"""
self.gd_client = gdata.contacts.service.ContactsService()
self.gd_client.email = email
self.gd_client.password = password
self.gd_client.source = 'GoogleInc-ContactsPythonSample-1'
self.gd_client.ProgrammaticLogin()
def PrintFeed(self, feed, ctr=0):
"""Prints out the contents of a feed to the console.
Args:
feed: A gdata.contacts.ContactsFeed instance.
ctr: [int] The number of entries in this feed previously printed. This
allows continuous entry numbers when paging through a feed.
Returns:
The number of entries printed, including those previously printed as
specified in ctr. This is for passing as an argument to ctr on
successive calls to this method.
"""
if not feed.entry:
print '\nNo entries in feed.\n'
return 0
for i, entry in enumerate(feed.entry):
print '\n%s %s' % (ctr+i+1, entry.title.text)
if entry.content:
print ' %s' % (entry.content.text)
for email in entry.email:
if email.primary and email.primary == 'true':
print ' %s' % (email.address)
# Show the contact groups that this contact is a member of.
for group in entry.group_membership_info:
print ' Member of group: %s' % (group.href)
# Display extended properties.
for extended_property in entry.extended_property:
if extended_property.value:
value = extended_property.value
else:
value = extended_property.GetXmlBlobString()
print ' Extended Property %s: %s' % (extended_property.name, value)
return len(feed.entry) + ctr
def PrintPaginatedFeed(self, feed, print_method):
""" Print all pages of a paginated feed.
This will iterate through a paginated feed, requesting each page and
printing the entries contained therein.
Args:
feed: A gdata.contacts.ContactsFeed instance.
print_method: The method which will be used to print each page of the
feed. Must accept these two named arguments:
feed: A gdata.contacts.ContactsFeed instance.
ctr: [int] The number of entries in this feed previously
printed. This allows continuous entry numbers when paging
through a feed.
"""
ctr = 0
while feed:
# Print contents of current feed
ctr = print_method(feed=feed, ctr=ctr)
# Prepare for next feed iteration
next = feed.GetNextLink()
feed = None
if next:
if self.PromptOperationShouldContinue():
# Another feed is available, and the user has given us permission
# to fetch it
feed = self.gd_client.GetContactsFeed(next.href)
else:
# User has asked us to terminate
feed = None
def PromptOperationShouldContinue(self):
""" Display a "Continue" prompt.
    This is used to give users a chance to break out of a loop, just in
case they have too many contacts/groups.
Returns:
A boolean value, True if the current operation should continue, False if
the current operation should terminate.
"""
while True:
input = raw_input("Continue [Y/n]? ")
      if input == 'N' or input == 'n':
        return False
      elif input == 'Y' or input == 'y' or input == '':
return True
def ListAllContacts(self):
"""Retrieves a list of contacts and displays name and primary email."""
feed = self.gd_client.GetContactsFeed()
    self.PrintPaginatedFeed(feed, self.PrintFeed)
def PrintGroupsFeed(self, feed, ctr):
if not feed.entry:
print '\nNo groups in feed.\n'
return 0
for i, entry in enumerate(feed.entry):
print '\n%s %s' % (ctr+i+1, entry.title.text)
if entry.content:
print ' %s' % (entry.content.text)
# Display the group id which can be used to query the contacts feed.
print ' Group ID: %s' % entry.id.text
# Display extended properties.
for extended_property in entry.extended_property:
if extended_property.value:
value = extended_property.value
else:
value = extended_property.GetXmlBlobString()
print ' Extended Property %s: %s' % (extended_property.name, value)
return len(feed.entry) + ctr
def ListAllGroups(self):
feed = self.gd_client.GetGroupsFeed()
self.PrintPaginatedFeed(feed, self.PrintGroupsFeed)
def CreateMenu(self):
"""Prompts that enable a user to create a contact."""
name = raw_input('Enter contact\'s name: ')
notes = raw_input('Enter notes for contact: ')
primary_email = raw_input('Enter primary email address: ')
new_contact = gdata.contacts.ContactEntry(title=atom.Title(text=name))
new_contact.content = atom.Content(text=notes)
# Create a work email address for the contact and use as primary.
new_contact.email.append(gdata.contacts.Email(address=primary_email,
primary='true', rel=gdata.contacts.REL_WORK))
entry = self.gd_client.CreateContact(new_contact)
if entry:
print 'Creation successful!'
print 'ID for the new contact:', entry.id.text
else:
print 'Upload error.'
def QueryMenu(self):
"""Prompts for updated-min query parameters and displays results."""
updated_min = raw_input(
'Enter updated min (example: 2007-03-16T00:00:00): ')
query = gdata.contacts.service.ContactsQuery()
query.updated_min = updated_min
feed = self.gd_client.GetContactsFeed(query.ToUri())
self.PrintFeed(feed)
def QueryGroupsMenu(self):
"""Prompts for updated-min query parameters and displays results."""
updated_min = raw_input(
'Enter updated min (example: 2007-03-16T00:00:00): ')
query = gdata.service.Query(feed='/m8/feeds/groups/default/full')
query.updated_min = updated_min
feed = self.gd_client.GetGroupsFeed(query.ToUri())
    self.PrintGroupsFeed(feed, 0)
def _SelectContact(self):
feed = self.gd_client.GetContactsFeed()
self.PrintFeed(feed)
selection = 5000
while selection > len(feed.entry)+1 or selection < 1:
selection = int(raw_input(
'Enter the number for the contact you would like to modify: '))
return feed.entry[selection-1]
def UpdateContactMenu(self):
selected_entry = self._SelectContact()
new_name = raw_input('Enter a new name for the contact: ')
if not selected_entry.title:
selected_entry.title = atom.Title()
selected_entry.title.text = new_name
self.gd_client.UpdateContact(selected_entry.GetEditLink().href, selected_entry)
def DeleteContactMenu(self):
selected_entry = self._SelectContact()
self.gd_client.DeleteContact(selected_entry.GetEditLink().href)
def PrintMenu(self):
"""Displays a menu of options for the user to choose from."""
print ('\nContacts Sample\n'
'1) List all of your contacts.\n'
'2) Create a contact.\n'
'3) Query contacts on updated time.\n'
'4) Modify a contact.\n'
'5) Delete a contact.\n'
'6) List all of your contact groups.\n'
'7) Query your groups on updated time.\n'
'8) Exit.\n')
def GetMenuChoice(self, max):
"""Retrieves the menu selection from the user.
Args:
max: [int] The maximum number of allowed choices (inclusive)
Returns:
The integer of the menu item chosen by the user.
"""
while True:
input = raw_input('> ')
try:
num = int(input)
except ValueError:
print 'Invalid choice. Please choose a value between 1 and', max
continue
if num > max or num < 1:
print 'Invalid choice. Please choose a value between 1 and', max
else:
return num
def Run(self):
"""Prompts the user to choose funtionality to be demonstrated."""
try:
while True:
self.PrintMenu()
choice = self.GetMenuChoice(8)
if choice == 1:
self.ListAllContacts()
elif choice == 2:
self.CreateMenu()
elif choice == 3:
self.QueryMenu()
elif choice == 4:
self.UpdateContactMenu()
elif choice == 5:
self.DeleteContactMenu()
elif choice == 6:
self.ListAllGroups()
elif choice == 7:
self.QueryGroupsMenu()
elif choice == 8:
return
except KeyboardInterrupt:
print '\nGoodbye.'
return
def main():
"""Demonstrates use of the Contacts extension using the ContactsSample object."""
# Parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'pw='])
except getopt.error, msg:
print 'python contacts_example.py --user [username] --pw [password]'
sys.exit(2)
user = ''
pw = ''
# Process options
for option, arg in opts:
if option == '--user':
user = arg
elif option == '--pw':
pw = arg
while not user:
print 'NOTE: Please run these tests only with a test account.'
user = raw_input('Please enter your username: ')
while not pw:
pw = getpass.getpass()
if not pw:
print 'Password cannot be blank.'
try:
sample = ContactsSample(user, pw)
except gdata.service.BadAuthentication:
print 'Invalid user credentials given.'
return
sample.Run()
if __name__ == '__main__':
main()
|
tamsky/ansible-upstream
|
refs/heads/devel
|
lib/ansible/executor/task_result.py
|
42
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.dataloader import DataLoader
class TaskResult:
'''
    This class is responsible for interpreting the resulting data
from an executed task, and provides helper methods for determining
the result of a given task.
'''
def __init__(self, host, task, return_data):
self._host = host
self._task = task
if isinstance(return_data, dict):
self._result = return_data.copy()
else:
self._result = DataLoader().load(return_data)
def is_changed(self):
return self._check_key('changed')
def is_skipped(self):
if 'results' in self._result and self._task.loop:
flag = True
for res in self._result.get('results', []):
if isinstance(res, dict):
flag &= res.get('skipped', False)
return flag
else:
return self._result.get('skipped', False)
def is_failed(self):
if 'failed_when_result' in self._result or \
'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
return self._check_key('failed_when_result')
else:
return self._check_key('failed') or self._result.get('rc', 0) != 0
def is_unreachable(self):
return self._check_key('unreachable')
def _check_key(self, key):
if 'results' in self._result and self._task.loop:
flag = False
for res in self._result.get('results', []):
if isinstance(res, dict):
flag |= res.get(key, False)
return flag
else:
return self._result.get(key, False)
|
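A hedged sketch of how the TaskResult helpers above behave for a plain (non-loop) module result. The host and task values are stand-ins: for a result dict without a 'results' key the class never dereferences task.loop, so None is enough for this illustration.

from ansible.executor.task_result import TaskResult

ok = TaskResult(host='web01', task=None,
                return_data={'changed': True, 'rc': 0})
print ok.is_changed(), ok.is_skipped(), ok.is_failed()    # True False False

failed = TaskResult(host='web01', task=None,
                    return_data={'failed': True, 'rc': 2})
print failed.is_failed()                                   # True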
tchernomax/ansible
|
refs/heads/devel
|
contrib/inventory/rudder.py
|
27
|
#!/usr/bin/env python
# Copyright (c) 2015, Normation SAS
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
'''
Rudder external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
a Rudder server. This script is compatible with Rudder 2.10 or later.
The output JSON includes all your Rudder groups, containing the hostnames of
their nodes. Groups and nodes have a variable called rudder_group_id and
rudder_node_id, which is the Rudder internal id of the item, allowing them to be
identified uniquely. Host variables also include your node properties, which are
key => value properties set by the API and specific to each node.
This script assumes there is a rudder.ini file alongside it. To specify a
different path to rudder.ini, define the RUDDER_INI_PATH environment variable:
export RUDDER_INI_PATH=/path/to/my_rudder.ini
You have to configure your Rudder server information, either in rudder.ini or
by overriding it with environment variables:
export RUDDER_API_VERSION='latest'
export RUDDER_API_TOKEN='my_token'
export RUDDER_API_URI='https://rudder.local/rudder/api'
'''
import sys
import os
import re
import argparse
import six
import httplib2 as http
from time import time
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six.moves.urllib.parse import urlparse
import json
class RudderInventory(object):
def __init__(self):
''' Main execution path '''
# Empty inventory by default
self.inventory = {}
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Create connection
self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation)
# Cache
if self.args.refresh_cache:
self.update_cache()
elif not self.is_cache_valid():
self.update_cache()
else:
self.load_cache()
data_to_print = {}
if self.args.host:
data_to_print = self.get_host_info(self.args.host)
elif self.args.list:
data_to_print = self.get_list_info()
print(self.json_format_dict(data_to_print, True))
def read_settings(self):
''' Reads the settings from the rudder.ini file '''
if six.PY2:
config = configparser.SafeConfigParser()
else:
config = configparser.ConfigParser()
rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini')
rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path)))
config.read(rudder_ini_path)
self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token'))
self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 'version'))
self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri'))
self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation')
self.group_name = config.get('rudder', 'group_name')
self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision')
self.cache_path = config.get('rudder', 'cache_path')
self.cache_max_age = config.getint('rudder', 'cache_max_age')
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)')
self.args = parser.parse_args()
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path):
mod_time = os.path.getmtime(self.cache_path)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
return True
return False
def load_cache(self):
''' Reads the cache from the cache file sets self.cache '''
cache = open(self.cache_path, 'r')
json_cache = cache.read()
try:
self.inventory = json.loads(json_cache)
except ValueError as e:
self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache')
def write_cache(self):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(self.inventory, True)
cache = open(self.cache_path, 'w')
cache.write(json_data)
cache.close()
def get_nodes(self):
''' Gets the nodes list from Rudder '''
path = '/nodes?select=nodeAndPolicyServer'
result = self.api_call(path)
nodes = {}
for node in result['data']['nodes']:
nodes[node['id']] = {}
nodes[node['id']]['hostname'] = node['hostname']
if 'properties' in node:
nodes[node['id']]['properties'] = node['properties']
else:
nodes[node['id']]['properties'] = []
return nodes
def get_groups(self):
''' Gets the groups list from Rudder '''
path = '/groups'
result = self.api_call(path)
groups = {}
for group in result['data']['groups']:
groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])}
return groups
def update_cache(self):
''' Fetches the inventory information from Rudder and creates the inventory '''
nodes = self.get_nodes()
groups = self.get_groups()
inventory = {}
for group in groups:
# Check for name collision
if self.fail_if_name_collision:
if groups[group]['name'] in inventory:
self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups')
# Add group to inventory
inventory[groups[group]['name']] = {}
inventory[groups[group]['name']]['hosts'] = []
inventory[groups[group]['name']]['vars'] = {}
inventory[groups[group]['name']]['vars']['rudder_group_id'] = group
for node in groups[group]['hosts']:
# Add node to group
inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname'])
properties = {}
for node in nodes:
# Check for name collision
if self.fail_if_name_collision:
if nodes[node]['hostname'] in properties:
self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts')
# Add node properties to inventory
properties[nodes[node]['hostname']] = {}
properties[nodes[node]['hostname']]['rudder_node_id'] = node
for node_property in nodes[node]['properties']:
properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value']
inventory['_meta'] = {}
inventory['_meta']['hostvars'] = properties
self.inventory = inventory
if self.cache_max_age > 0:
self.write_cache()
def get_list_info(self):
''' Gets inventory information from local cache '''
return self.inventory
def get_host_info(self, hostname):
''' Gets information about a specific host from local cache '''
if hostname in self.inventory['_meta']['hostvars']:
return self.inventory['_meta']['hostvars'][hostname]
else:
return {}
def api_call(self, path):
''' Performs an API request '''
headers = {
'X-API-Token': self.token,
'X-API-Version': self.version,
'Content-Type': 'application/json;charset=utf-8'
}
target = urlparse(self.uri + path)
method = 'GET'
body = ''
try:
response, content = self.conn.request(target.geturl(), method, body, headers)
except:
self.fail_with_error('Error connecting to Rudder server')
try:
data = json.loads(content)
except ValueError as e:
self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response')
return data
def fail_with_error(self, err_msg, err_operation=None):
''' Logs an error to std err for ansible-playbook to consume and exit '''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be
used as Ansible variable names '''
return re.sub(r'[^A-Za-z0-9\_]', '_', word)
# Run the script
RudderInventory()
|
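Since read_settings() in the Rudder inventory script above reads every key of the [rudder] section (only token, version and uri fall back to environment variables), a complete rudder.ini looks roughly like the sketch below. All values are placeholders to adapt; in particular, group_name names the attribute of a Rudder group used as the Ansible group name, and displayName is only an assumption.

# rudder.ini sketch, placed alongside rudder.py (or pointed to by RUDDER_INI_PATH)
[rudder]
token = my_api_token
version = latest
uri = https://rudder.local/rudder/api
disable_ssl_certificate_validation = true
group_name = displayName
fail_if_name_collision = true
cache_path = /tmp/ansible-rudder.cache
cache_max_age = 300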
jclc/discus-inferno
|
refs/heads/master
|
flaskenv/lib/python2.7/site-packages/werkzeug/testsuite/test.py
|
66
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.test
~~~~~~~~~~~~~~~~~~~~~~~
Tests the testing tools.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
import unittest
from cStringIO import StringIO, OutputType
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import Request, Response, BaseResponse
from werkzeug.test import Client, EnvironBuilder, create_environ, \
ClientRedirectError, stream_encode_multipart, run_wsgi_app
from werkzeug.utils import redirect
from werkzeug.formparser import parse_form_data
from werkzeug.datastructures import MultiDict
def cookie_app(environ, start_response):
"""A WSGI application which sets a cookie, and returns as a ersponse any
cookie which exists.
"""
response = Response(environ.get('HTTP_COOKIE', 'No Cookie'),
mimetype='text/plain')
response.set_cookie('test', 'test')
return response(environ, start_response)
def redirect_loop_app(environ, start_response):
response = redirect('http://localhost/some/redirect/')
return response(environ, start_response)
def redirect_with_get_app(environ, start_response):
req = Request(environ)
if req.url not in ('http://localhost/',
'http://localhost/first/request',
'http://localhost/some/redirect/'):
        assert False, 'redirect_with_get_app() did not expect URL "%s"' % req.url
if '/some/redirect' not in req.url:
response = redirect('http://localhost/some/redirect/')
else:
response = Response('current url: %s' % req.url)
return response(environ, start_response)
def redirect_with_post_app(environ, start_response):
req = Request(environ)
if req.url == 'http://localhost/some/redirect/':
assert req.method == 'GET', 'request should be GET'
assert not req.form, 'request should not have data'
response = Response('current url: %s' % req.url)
else:
response = redirect('http://localhost/some/redirect/')
return response(environ, start_response)
def external_redirect_demo_app(environ, start_response):
response = redirect('http://example.com/')
return response(environ, start_response)
def external_subdomain_redirect_demo_app(environ, start_response):
if 'test.example.com' in environ['HTTP_HOST']:
response = Response('redirected successfully to subdomain')
else:
response = redirect('http://test.example.com/login')
return response(environ, start_response)
def multi_value_post_app(environ, start_response):
req = Request(environ)
assert req.form['field'] == 'val1', req.form['field']
assert req.form.getlist('field') == ['val1', 'val2'], req.form.getlist('field')
response = Response('ok')
return response(environ, start_response)
class TestTestCase(WerkzeugTestCase):
def test_cookie_forging(self):
c = Client(cookie_app)
c.set_cookie('localhost', 'foo', 'bar')
appiter, code, headers = c.open()
assert list(appiter) == ['foo=bar']
def test_set_cookie_app(self):
c = Client(cookie_app)
appiter, code, headers = c.open()
assert 'Set-Cookie' in dict(headers)
def test_cookiejar_stores_cookie(self):
c = Client(cookie_app)
appiter, code, headers = c.open()
assert 'test' in c.cookie_jar._cookies['localhost.local']['/']
def test_no_initial_cookie(self):
c = Client(cookie_app)
appiter, code, headers = c.open()
assert ''.join(appiter) == 'No Cookie'
def test_resent_cookie(self):
c = Client(cookie_app)
c.open()
appiter, code, headers = c.open()
assert ''.join(appiter) == 'test=test'
def test_disable_cookies(self):
c = Client(cookie_app, use_cookies=False)
c.open()
appiter, code, headers = c.open()
assert ''.join(appiter) == 'No Cookie'
def test_cookie_for_different_path(self):
c = Client(cookie_app)
c.open('/path1')
appiter, code, headers = c.open('/path2')
assert ''.join(appiter) == 'test=test'
def test_environ_builder_basics(self):
b = EnvironBuilder()
assert b.content_type is None
b.method = 'POST'
assert b.content_type == 'application/x-www-form-urlencoded'
b.files.add_file('test', StringIO('test contents'), 'test.txt')
assert b.files['test'].content_type == 'text/plain'
assert b.content_type == 'multipart/form-data'
b.form['test'] = 'normal value'
req = b.get_request()
b.close()
assert req.url == 'http://localhost/'
assert req.method == 'POST'
assert req.form['test'] == 'normal value'
assert req.files['test'].content_type == 'text/plain'
assert req.files['test'].filename == 'test.txt'
assert req.files['test'].read() == 'test contents'
def test_environ_builder_headers(self):
b = EnvironBuilder(environ_base={'HTTP_USER_AGENT': 'Foo/0.1'},
environ_overrides={'wsgi.version': (1, 1)})
b.headers['X-Suck-My-Dick'] = 'very well sir'
env = b.get_environ()
assert env['HTTP_USER_AGENT'] == 'Foo/0.1'
assert env['HTTP_X_SUCK_MY_DICK'] == 'very well sir'
assert env['wsgi.version'] == (1, 1)
b.headers['User-Agent'] = 'Bar/1.0'
env = b.get_environ()
assert env['HTTP_USER_AGENT'] == 'Bar/1.0'
def test_environ_builder_paths(self):
b = EnvironBuilder(path='/foo', base_url='http://example.com/')
assert b.base_url == 'http://example.com/'
assert b.path == '/foo'
assert b.script_root == ''
assert b.host == 'example.com'
b = EnvironBuilder(path='/foo', base_url='http://example.com/bar')
assert b.base_url == 'http://example.com/bar/'
assert b.path == '/foo'
assert b.script_root == '/bar'
assert b.host == 'example.com'
b.host = 'localhost'
assert b.base_url == 'http://localhost/bar/'
b.base_url = 'http://localhost:8080/'
assert b.host == 'localhost:8080'
assert b.server_name == 'localhost'
assert b.server_port == 8080
b.host = 'foo.invalid'
b.url_scheme = 'https'
b.script_root = '/test'
env = b.get_environ()
assert env['SERVER_NAME'] == 'foo.invalid'
assert env['SERVER_PORT'] == '443'
assert env['SCRIPT_NAME'] == '/test'
assert env['PATH_INFO'] == '/foo'
assert env['HTTP_HOST'] == 'foo.invalid'
assert env['wsgi.url_scheme'] == 'https'
assert b.base_url == 'https://foo.invalid/test/'
def test_environ_builder_content_type(self):
builder = EnvironBuilder()
assert builder.content_type is None
builder.method = 'POST'
assert builder.content_type == 'application/x-www-form-urlencoded'
builder.form['foo'] = 'bar'
assert builder.content_type == 'application/x-www-form-urlencoded'
builder.files.add_file('blafasel', StringIO('foo'), 'test.txt')
assert builder.content_type == 'multipart/form-data'
req = builder.get_request()
assert req.form['foo'] == 'bar'
assert req.files['blafasel'].read() == 'foo'
def test_environ_builder_stream_switch(self):
d = MultiDict(dict(foo=u'bar', blub=u'blah', hu=u'hum'))
for use_tempfile in False, True:
stream, length, boundary = stream_encode_multipart(
d, use_tempfile, threshold=150)
assert isinstance(stream, OutputType) != use_tempfile
form = parse_form_data({'wsgi.input': stream, 'CONTENT_LENGTH': str(length),
'CONTENT_TYPE': 'multipart/form-data; boundary="%s"' %
boundary})[1]
assert form == d
def test_create_environ(self):
env = create_environ('/foo?bar=baz', 'http://example.org/')
expected = {
'wsgi.multiprocess': False,
'wsgi.version': (1, 0),
'wsgi.run_once': False,
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.url_scheme': 'http',
'SCRIPT_NAME': '',
'CONTENT_TYPE': '',
'CONTENT_LENGTH': '0',
'SERVER_NAME': 'example.org',
'REQUEST_METHOD': 'GET',
'HTTP_HOST': 'example.org',
'PATH_INFO': '/foo',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'QUERY_STRING': 'bar=baz'
}
for key, value in expected.iteritems():
assert env[key] == value
assert env['wsgi.input'].read(0) == ''
assert create_environ('/foo', 'http://example.com/')['SCRIPT_NAME'] == ''
def test_file_closing(self):
closed = []
class SpecialInput(object):
def read(self):
return ''
def close(self):
closed.append(self)
env = create_environ(data={'foo': SpecialInput()})
assert len(closed) == 1
builder = EnvironBuilder()
builder.files.add_file('blah', SpecialInput())
builder.close()
assert len(closed) == 2
def test_follow_redirect(self):
env = create_environ('/', base_url='http://localhost')
c = Client(redirect_with_get_app)
appiter, code, headers = c.open(environ_overrides=env, follow_redirects=True)
assert code == '200 OK'
assert ''.join(appiter) == 'current url: http://localhost/some/redirect/'
# Test that the :cls:`Client` is aware of user defined response wrappers
c = Client(redirect_with_get_app, response_wrapper=BaseResponse)
resp = c.get('/', follow_redirects=True)
assert resp.status_code == 200
assert resp.data == 'current url: http://localhost/some/redirect/'
# test with URL other than '/' to make sure redirected URL's are correct
c = Client(redirect_with_get_app, response_wrapper=BaseResponse)
resp = c.get('/first/request', follow_redirects=True)
assert resp.status_code == 200
assert resp.data == 'current url: http://localhost/some/redirect/'
def test_follow_external_redirect(self):
env = create_environ('/', base_url='http://localhost')
c = Client(external_redirect_demo_app)
self.assert_raises(RuntimeError, lambda:
c.get(environ_overrides=env, follow_redirects=True))
def test_follow_external_redirect_on_same_subdomain(self):
env = create_environ('/', base_url='http://example.com')
c = Client(external_subdomain_redirect_demo_app, allow_subdomain_redirects=True)
c.get(environ_overrides=env, follow_redirects=True)
# check that this does not work for real external domains
env = create_environ('/', base_url='http://localhost')
self.assert_raises(RuntimeError, lambda:
c.get(environ_overrides=env, follow_redirects=True))
        # check that subdomain redirects fail when `allow_subdomain_redirects` is not enabled
c = Client(external_subdomain_redirect_demo_app)
self.assert_raises(RuntimeError, lambda:
c.get(environ_overrides=env, follow_redirects=True))
def test_follow_redirect_loop(self):
c = Client(redirect_loop_app, response_wrapper=BaseResponse)
with self.assert_raises(ClientRedirectError):
resp = c.get('/', follow_redirects=True)
def test_follow_redirect_with_post(self):
c = Client(redirect_with_post_app, response_wrapper=BaseResponse)
resp = c.post('/', follow_redirects=True, data='foo=blub+hehe&blah=42')
assert resp.status_code == 200
assert resp.data == 'current url: http://localhost/some/redirect/'
def test_path_info_script_name_unquoting(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return [environ['PATH_INFO'] + '\n' + environ['SCRIPT_NAME']]
c = Client(test_app, response_wrapper=BaseResponse)
resp = c.get('/foo%40bar')
assert resp.data == '/foo@bar\n'
c = Client(test_app, response_wrapper=BaseResponse)
resp = c.get('/foo%40bar', 'http://localhost/bar%40baz')
assert resp.data == '/foo@bar\n/bar@baz'
def test_multi_value_submit(self):
c = Client(multi_value_post_app, response_wrapper=BaseResponse)
data = {
'field': ['val1','val2']
}
resp = c.post('/', data=data)
assert resp.status_code == 200
c = Client(multi_value_post_app, response_wrapper=BaseResponse)
data = MultiDict({
'field': ['val1','val2']
})
resp = c.post('/', data=data)
assert resp.status_code == 200
def test_iri_support(self):
b = EnvironBuilder(u'/föö-bar', base_url=u'http://☃.net/')
assert b.path == '/f%C3%B6%C3%B6-bar'
assert b.base_url == 'http://xn--n3h.net/'
def test_run_wsgi_apps(self):
def simple_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
return ['Hello World!']
app_iter, status, headers = run_wsgi_app(simple_app, {})
assert status == '200 OK'
assert headers == [('Content-Type', 'text/html')]
assert app_iter == ['Hello World!']
def yielding_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
yield 'Hello '
yield 'World!'
app_iter, status, headers = run_wsgi_app(yielding_app, {})
assert status == '200 OK'
assert headers == [('Content-Type', 'text/html')]
assert list(app_iter) == ['Hello ', 'World!']
def test_multiple_cookies(self):
@Request.application
def test_app(request):
response = Response(repr(sorted(request.cookies.items())))
response.set_cookie('test1', 'foo')
response.set_cookie('test2', 'bar')
return response
client = Client(test_app, Response)
resp = client.get('/')
assert resp.data == '[]'
resp = client.get('/')
assert resp.data == "[('test1', u'foo'), ('test2', u'bar')]"
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestTestCase))
return suite
|
isislovecruft/arm
|
refs/heads/fix/setup.py-install-issues
|
arm/connections/entries.py
|
1
|
"""
Interface for entries in the connection panel. These consist of two parts: the
entry itself (ie, Tor connection, client circuit, etc) and the lines it
consists of in the listing.
"""
from stem.util import enum
# attributes we can list entries by
ListingType = enum.Enum(("IP_ADDRESS", "IP Address"), "HOSTNAME", "FINGERPRINT", "NICKNAME")
SortAttr = enum.Enum("CATEGORY", "UPTIME", "LISTING", "IP_ADDRESS", "PORT", "HOSTNAME", "FINGERPRINT", "NICKNAME", "COUNTRY")
SORT_COLORS = {
SortAttr.CATEGORY: "red",
SortAttr.UPTIME: "yellow",
SortAttr.LISTING: "green",
SortAttr.IP_ADDRESS: "blue",
SortAttr.PORT: "blue",
SortAttr.HOSTNAME: "magenta",
SortAttr.FINGERPRINT: "cyan",
SortAttr.NICKNAME: "cyan",
SortAttr.COUNTRY: "blue",
}
# maximum number of ports a system can have
PORT_COUNT = 65536
class ConnectionPanelEntry:
"""
Common parent for connection panel entries. This consists of a list of lines
in the panel listing. This caches results until the display indicates that
they should be flushed.
"""
def __init__(self):
self.lines = []
self.flush_cache = True
def getLines(self):
"""
Provides the individual lines in the connection listing.
"""
if self.flush_cache:
self.lines = self._get_lines(self.lines)
self.flush_cache = False
return self.lines
def _get_lines(self, old_results):
# implementation of getLines
for line in old_results:
line.reset_display()
return old_results
def get_sort_values(self, sort_attrs, listing_type):
"""
Provides the value used in comparisons to sort based on the given
attribute.
Arguments:
sort_attrs - list of SortAttr values for the field being sorted on
listing_type - ListingType enumeration for the attribute we're listing
entries by
"""
return [self.get_sort_value(attr, listing_type) for attr in sort_attrs]
def get_sort_value(self, attr, listing_type):
"""
Provides the value of a single attribute used for sorting purposes.
Arguments:
      attr - SortAttr value for the field being sorted on
listing_type - ListingType enumeration for the attribute we're listing
entries by
"""
if attr == SortAttr.LISTING:
if listing_type == ListingType.IP_ADDRESS:
# uses the IP address as the primary value, and port as secondary
sort_value = self.get_sort_value(SortAttr.IP_ADDRESS, listing_type) * PORT_COUNT
sort_value += self.get_sort_value(SortAttr.PORT, listing_type)
return sort_value
elif listing_type == ListingType.HOSTNAME:
return self.get_sort_value(SortAttr.HOSTNAME, listing_type)
elif listing_type == ListingType.FINGERPRINT:
return self.get_sort_value(SortAttr.FINGERPRINT, listing_type)
elif listing_type == ListingType.NICKNAME:
return self.get_sort_value(SortAttr.NICKNAME, listing_type)
return ""
def reset_display(self):
"""
Flushes cached display results.
"""
self.flush_cache = True
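# Illustrative note (not part of the original module): when listing by
# ListingType.IP_ADDRESS, the SortAttr.LISTING value above packs the address
# and port into a single integer. Assuming the IP_ADDRESS sort value is the
# address expressed as an integer, a connection at 10.0.0.1:443 would sort as
# 167772161 * PORT_COUNT + 443.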
class ConnectionPanelLine:
"""
Individual line in the connection panel listing.
"""
def __init__(self):
# cache for displayed information
self._listing_cache = None
self._listing_cache_args = (None, None)
self._details_cache = None
self._details_cache_args = None
self._descriptor_cache = None
self._descriptor_cache_args = None
def get_listing_prefix(self):
"""
    Provides a list of characters to be prepended to the listing entry.
"""
return ()
def get_listing_entry(self, width, current_time, listing_type):
"""
Provides a [(msg, attr)...] tuple list for contents to be displayed in the
connection panel listing.
Arguments:
width - available space to display in
current_time - unix timestamp for what the results should consider to be
the current time (this may be ignored due to caching)
listing_type - ListingType enumeration for the highest priority content
to be displayed
"""
if self._listing_cache_args != (width, listing_type):
self._listing_cache = self._get_listing_entry(width, current_time, listing_type)
self._listing_cache_args = (width, listing_type)
return self._listing_cache
def _get_listing_entry(self, width, current_time, listing_type):
# implementation of get_listing_entry
return None
def get_details(self, width):
"""
Provides a list of [(msg, attr)...] tuple listings with detailed
information for this connection.
Arguments:
width - available space to display in
"""
if self._details_cache_args != width:
self._details_cache = self._get_details(width)
self._details_cache_args = width
return self._details_cache
def _get_details(self, width):
# implementation of get_details
return []
def reset_display(self):
"""
Flushes cached display results.
"""
self._listing_cache_args = (None, None)
self._details_cache_args = None
|
RevelSystems/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_erroneous/1_auto.py
|
1155
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
willingc/oh-mainline
|
refs/heads/master
|
vendor/packages/sphinx/tests/test_doctest.py
|
22
|
# -*- coding: utf-8 -*-
"""
test_doctest
~~~~~~~~~~~~
Test the doctest extension.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import StringIO
from util import with_app
status = StringIO.StringIO()
cleanup_called = 0
@with_app(buildername='doctest', status=status)
def test_build(app):
global cleanup_called
cleanup_called = 0
app.builder.build_all()
if app.statuscode != 0:
print >>sys.stderr, status.getvalue()
assert False, 'failures in doctests'
# in doctest.txt, there are two named groups and the default group,
# so the cleanup function must be called three times
assert cleanup_called == 3, 'testcleanup did not get executed enough times'
def cleanup_call():
global cleanup_called
cleanup_called += 1
|
rspavel/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/hpx5/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hpx5(AutotoolsPackage):
"""The HPX-5 Runtime System. HPX-5 (High Performance ParalleX) is an
open source, portable, performance-oriented runtime developed at
CREST (Indiana University). HPX-5 provides a distributed
programming model allowing programs to run unmodified on systems
from a single SMP to large clusters and supercomputers with
thousands of nodes. HPX-5 supports a wide variety of Intel and ARM
platforms. It is being used by a broad range of scientific
applications enabling scientists to write code that performs and
scales better than contemporary runtimes."""
homepage = "http://hpx.crest.iu.edu"
url = "https://github.com/adk9/hpx5/archive/v3.1.0.tar.gz"
version('4.1.0', sha256='3f01009f5e517c8dfca266dabb49894d688db5adce09608fb1c877263605a9f8')
version('4.0.0', sha256='e35b1161566a65ffbd875c1413ea97a84be0c7b528a3dee99f5e250b2aecbd19')
version('3.1.0', sha256='359d457a26b87abb415605911d791ce0ff6edbb064bc40b0f830960f8f612b84')
version('3.0.0', sha256='10f14ba198a32787cee05962e346bafb922f74a5135fb09a1ba8c32a1e942800')
version('2.2.0', sha256='e34c7513a287d517e67cce5aa3011474c48718e7860c3860ba1290c702be28a8')
version('2.1.0', sha256='675826f669eeb3eab40947715af8c8495e2b3d299223372431dc01c1f7d5d616')
version('2.0.0', sha256='0278728557b6684aeb86228f44d548ac809302f05a0b9c8b433cdd157629e384')
# Don't second-guess what compiler we are using on Cray
patch("configure.patch", when='@4.0.0')
variant('cuda', default=False, description='Enable CUDA support')
variant('cxx11', default=False, description='Enable C++11 hpx++ interface')
variant('debug', default=False, description='Build debug version of HPX-5')
variant('instrumentation', default=False, description='Enable instrumentation (may affect performance)')
variant('metis', default=False, description='Enable METIS support')
variant('mpi', default=False, description='Enable MPI support')
variant('opencl', default=False, description='Enable OpenCL support')
variant('photon', default=False, description='Enable Photon support')
variant('pic', default=True, description='Produce position-independent code')
depends_on("autoconf", type='build')
depends_on("automake", type='build')
depends_on("hwloc")
depends_on("hwloc +cuda", when='+cuda')
# Note: We could disable CUDA support via "hwloc ~cuda"
depends_on("jemalloc")
# depends_on("libffi")
depends_on("libtool", type='build')
# depends_on("lz4") # hpx5 always builds its own lz4
depends_on("m4", type='build')
depends_on("metis", when='+metis')
depends_on("mpi", when='+mpi')
depends_on("mpi", when='+photon')
depends_on("opencl", when='+opencl')
# depends_on("papi")
depends_on("pkgconfig", type='build')
configure_directory = "hpx"
build_directory = "spack-build"
def configure_args(self):
spec = self.spec
args = [
'--enable-agas', # make this a variant?
'--enable-jemalloc', # make this a variant?
'--enable-percolation', # make this a variant?
# '--enable-rebalancing', # this seems broken
'--with-hwloc=hwloc',
'--with-jemalloc=jemalloc',
# Spack's libffi installs its headers strangely,
# leading to problems
'--with-libffi=contrib',
# '--with-papi=papi', # currently disabled in HPX
]
if '+cxx11' in spec:
args += ['--enable-hpx++']
if '+debug' in spec:
args += ['--enable-debug']
if '+instrumentation' in spec:
args += ['--enable-instrumentation']
if '+mpi' in spec or '+photon' in spec:
# photon requires mpi
args += ['--enable-mpi']
# Choose pkg-config name for MPI library
if '^openmpi' in spec:
args += ['--with-mpi=ompi-cxx']
elif '^mpich' in spec:
args += ['--with-mpi=mpich']
elif '^mvapich2' in spec:
args += ['--with-mpi=mvapich2-cxx']
else:
args += ['--with-mpi=system']
# METIS does not support pkg-config; HPX will pick it up automatically
# if '+metis' in spec:
# args += ['--with-metis=???']
if '+opencl' in spec:
args += ['--enable-opencl']
if '^pocl' in spec:
args += ['--with-opencl=pocl']
else:
args += ['--with-opencl=system']
if '+photon' in spec:
args += ['--enable-photon']
if '+pic' in spec:
args += ['--with-pic']
return args
|
openhatch/oh-mainline
|
refs/heads/master
|
vendor/packages/sphinx/sphinx/config.py
|
15
|
# -*- coding: utf-8 -*-
"""
sphinx.config
~~~~~~~~~~~~~
Build configuration file handling.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
from os import path
from sphinx.errors import ConfigError
from sphinx.locale import l_
from sphinx.util.osutil import make_filename
from sphinx.util.pycompat import bytes, b, execfile_
nonascii_re = re.compile(b(r'[\x80-\xff]'))
CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
if sys.version_info >= (3, 0):
CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?"
class Config(object):
"""
Configuration file abstraction.
"""
# the values are: (default, what needs to be rebuilt if changed)
# If you add a value here, don't forget to include it in the
# quickstart.py file template as well as in the docs!
config_values = dict(
# general options
project = ('Python', 'env'),
copyright = ('', 'html'),
version = ('', 'env'),
release = ('', 'env'),
today = ('', 'env'),
today_fmt = (None, 'env'), # the real default is locale-dependent
language = (None, 'env'),
locale_dirs = ([], 'env'),
master_doc = ('contents', 'env'),
source_suffix = ('.rst', 'env'),
source_encoding = ('utf-8-sig', 'env'),
exclude_patterns = ([], 'env'),
# the next three are all deprecated now
unused_docs = ([], 'env'),
exclude_trees = ([], 'env'),
exclude_dirnames = ([], 'env'),
default_role = (None, 'env'),
add_function_parentheses = (True, 'env'),
add_module_names = (True, 'env'),
trim_footnote_reference_space = (False, 'env'),
show_authors = (False, 'env'),
pygments_style = (None, 'html'),
highlight_language = ('python', 'env'),
templates_path = ([], 'html'),
template_bridge = (None, 'html'),
keep_warnings = (False, 'env'),
modindex_common_prefix = ([], 'html'),
rst_epilog = (None, 'env'),
rst_prolog = (None, 'env'),
trim_doctest_flags = (True, 'env'),
primary_domain = ('py', 'env'),
needs_sphinx = (None, None),
nitpicky = (False, 'env'),
nitpick_ignore = ([], 'env'),
# HTML options
html_theme = ('default', 'html'),
html_theme_path = ([], 'html'),
html_theme_options = ({}, 'html'),
html_title = (lambda self: l_('%s %s documentation') %
(self.project, self.release),
'html'),
html_short_title = (lambda self: self.html_title, 'html'),
html_style = (None, 'html'),
html_logo = (None, 'html'),
html_favicon = (None, 'html'),
html_static_path = ([], 'html'),
html_extra_path = ([], 'html'),
# the real default is locale-dependent
html_last_updated_fmt = (None, 'html'),
html_use_smartypants = (True, 'html'),
html_translator_class = (None, 'html'),
html_sidebars = ({}, 'html'),
html_additional_pages = ({}, 'html'),
html_use_modindex = (True, 'html'), # deprecated
html_domain_indices = (True, 'html'),
html_add_permalinks = (u'\u00B6', 'html'),
html_use_index = (True, 'html'),
html_split_index = (False, 'html'),
html_copy_source = (True, 'html'),
html_show_sourcelink = (True, 'html'),
html_use_opensearch = ('', 'html'),
html_file_suffix = (None, 'html'),
html_link_suffix = (None, 'html'),
html_show_copyright = (True, 'html'),
html_show_sphinx = (True, 'html'),
html_context = ({}, 'html'),
html_output_encoding = ('utf-8', 'html'),
html_compact_lists = (True, 'html'),
html_secnumber_suffix = ('. ', 'html'),
html_search_language = (None, 'html'),
html_search_options = ({}, 'html'),
html_search_scorer = ('', None),
# HTML help only options
htmlhelp_basename = (lambda self: make_filename(self.project), None),
# Qt help only options
qthelp_basename = (lambda self: make_filename(self.project), None),
# Devhelp only options
devhelp_basename = (lambda self: make_filename(self.project), None),
# Epub options
epub_basename = (lambda self: make_filename(self.project), None),
epub_theme = ('epub', 'html'),
epub_theme_options = ({}, 'html'),
epub_title = (lambda self: self.html_title, 'html'),
epub_author = ('unknown', 'html'),
epub_language = (lambda self: self.language or 'en', 'html'),
epub_publisher = ('unknown', 'html'),
epub_copyright = (lambda self: self.copyright, 'html'),
epub_identifier = ('unknown', 'html'),
epub_scheme = ('unknown', 'html'),
epub_uid = ('unknown', 'env'),
epub_cover = ((), 'env'),
epub_guide = ((), 'env'),
epub_pre_files = ([], 'env'),
epub_post_files = ([], 'env'),
epub_exclude_files = ([], 'env'),
epub_tocdepth = (3, 'env'),
epub_tocdup = (True, 'env'),
epub_tocscope = ('default', 'env'),
epub_fix_images = (False, 'env'),
epub_max_image_width = (0, 'env'),
epub_show_urls = ('inline', 'html'),
epub_use_index = (lambda self: self.html_use_index, 'html'),
# LaTeX options
latex_documents = (lambda self: [(self.master_doc,
make_filename(self.project) + '.tex',
self.project,
'', 'manual')],
None),
latex_logo = (None, None),
latex_appendices = ([], None),
latex_use_parts = (False, None),
latex_use_modindex = (True, None), # deprecated
latex_domain_indices = (True, None),
latex_show_urls = ('no', None),
latex_show_pagerefs = (False, None),
# paper_size and font_size are still separate values
# so that you can give them easily on the command line
latex_paper_size = ('letter', None),
latex_font_size = ('10pt', None),
latex_elements = ({}, None),
latex_additional_files = ([], None),
latex_docclass = ({}, None),
# now deprecated - use latex_elements
latex_preamble = ('', None),
# text options
text_sectionchars = ('*=-~"+`', 'env'),
text_newlines = ('unix', 'env'),
# manpage options
man_pages = (lambda self: [(self.master_doc,
make_filename(self.project).lower(),
'%s %s' % (self.project, self.release),
[], 1)],
None),
man_show_urls = (False, None),
# Texinfo options
texinfo_documents = (lambda self: [(self.master_doc,
make_filename(self.project).lower(),
self.project, '',
make_filename(self.project),
'The %s reference manual.' %
make_filename(self.project),
'Python')],
None),
texinfo_appendices = ([], None),
texinfo_elements = ({}, None),
texinfo_domain_indices = (True, None),
texinfo_show_urls = ('footnote', None),
texinfo_no_detailmenu = (False, None),
# linkcheck options
linkcheck_ignore = ([], None),
linkcheck_timeout = (None, None),
linkcheck_workers = (5, None),
linkcheck_anchors = (True, None),
# gettext options
gettext_compact = (True, 'gettext'),
# XML options
xml_pretty = (True, 'env'),
)
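    # Note (illustrative, not part of the original source): callable defaults
    # such as html_title above are resolved lazily by __getattr__ below, so a
    # default can be computed from other config values (e.g. project/release).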
def __init__(self, dirname, filename, overrides, tags):
self.overrides = overrides
self.values = Config.config_values.copy()
config = {}
if "extensions" in overrides:
config["extensions"] = overrides["extensions"]
if dirname is not None:
config_file = path.join(dirname, filename)
config['__file__'] = config_file
config['tags'] = tags
olddir = os.getcwd()
try:
# we promise to have the config dir as current dir while the
# config file is executed
os.chdir(dirname)
try:
execfile_(filename, config)
except SyntaxError, err:
raise ConfigError(CONFIG_SYNTAX_ERROR % err)
finally:
os.chdir(olddir)
self._raw_config = config
# these two must be preinitialized because extensions can add their
# own config values
self.setup = config.get('setup', None)
self.extensions = config.get('extensions', [])
def check_unicode(self, warn):
# check all string values for non-ASCII characters in bytestrings,
# since that can result in UnicodeErrors all over the place
for name, value in self._raw_config.iteritems():
if isinstance(value, bytes) and nonascii_re.search(value):
warn('the config value %r is set to a string with non-ASCII '
'characters; this can lead to Unicode errors occurring. '
'Please use Unicode strings, e.g. %r.' % (name, u'Content')
)
def init_values(self):
config = self._raw_config
for valname, value in self.overrides.iteritems():
if '.' in valname:
realvalname, key = valname.split('.', 1)
config.setdefault(realvalname, {})[key] = value
else:
config[valname] = value
for name in config:
if name in self.values:
self.__dict__[name] = config[name]
del self._raw_config
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError(name)
if name not in self.values:
raise AttributeError('No such config value: %s' % name)
default = self.values[name][0]
if hasattr(default, '__call__'):
return default(self)
return default
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def __contains__(self, name):
return name in self.values
|
cesargtz/YecoraOdoo
|
refs/heads/master
|
addons/l10n_fr_hr_payroll/l10n_fr_hr_payroll.py
|
340
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'plafond_secu': fields.float('Plafond de la Securite Sociale', digits_compute=dp.get_precision('Payroll')),
'nombre_employes': fields.integer('Nombre d\'employes'),
'cotisation_prevoyance': fields.float('Cotisation Patronale Prevoyance', digits_compute=dp.get_precision('Payroll')),
'org_ss': fields.char('Organisme de securite sociale'),
'conv_coll': fields.char('Convention collective'),
}
class hr_contract(osv.osv):
_inherit = 'hr.contract'
_columns = {
'qualif': fields.char('Qualification'),
'niveau': fields.char('Niveau'),
'coef': fields.char('Coefficient'),
}
class hr_payslip(osv.osv):
_inherit = 'hr.payslip'
_columns = {
'payment_mode': fields.char('Mode de paiement'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
prdatur/PyRapidPush
|
refs/heads/master
|
example.py
|
1
|
#!/usr/bin/python
from pyrapidpush import PyRapidPush
import sys
def main(api_key):
p = PyRapidPush(api_key)
# Retrieve group test.
groups = p.get_groups()
print (groups)
if groups.has_key('code'):
if groups['code'] == 200:
for item in groups['data']:
print('Got group: ' + item['group'])
else:
for apikey in groups:
print('Groups for api key: ' + apikey)
if groups[apikey]['code'] == 200:
for item in groups[apikey]['data']:
print('Got group: ' + item['group'])
# Direct notification test.
print(p.notify('python test', 'Test message'))
# Scheduled test, change the given GMT time to a time in future.
print(p.notify('Python test', 'Test message scheduled', 2, 'default', '', '2013-01-26 14:33:00'))
# Broadcast notification test.
print(p.broadcast('python test', 'Test message', 'MY-CHANNEL'))
if __name__ == "__main__":
if len(sys.argv) < 2:
print('You have to provide your API-Key with the first parameter. Example: python test.py YOUR-API-KEY')
else:
main(sys.argv[1])
|
kawamon/hue
|
refs/heads/master
|
desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/Hash/Poly1305.py
|
2
|
# -*- coding: utf-8 -*-
#
# Hash/Poly1305.py - Implements the Poly1305 MAC
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from binascii import unhexlify
from Cryptodome.Util.py3compat import bord, tobytes, _copy_bytes
from Cryptodome.Hash import BLAKE2s
from Cryptodome.Random import get_random_bytes
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
c_uint8_ptr)
_raw_poly1305 = load_pycryptodome_raw_lib("Cryptodome.Hash._poly1305",
"""
int poly1305_init(void **state,
const uint8_t *r,
size_t r_len,
const uint8_t *s,
size_t s_len);
int poly1305_destroy(void *state);
int poly1305_update(void *state,
const uint8_t *in,
size_t len);
int poly1305_digest(const void *state,
uint8_t *digest,
size_t len);
""")
class Poly1305_MAC(object):
"""An Poly1305 MAC object.
Do not instantiate directly. Use the :func:`new` function.
:ivar digest_size: the size in bytes of the resulting MAC tag
:vartype digest_size: integer
"""
digest_size = 16
def __init__(self, r, s, data):
if len(r) != 16:
raise ValueError("Parameter r is not 16 bytes long")
if len(s) != 16:
raise ValueError("Parameter s is not 16 bytes long")
self._mac_tag = None
state = VoidPointer()
result = _raw_poly1305.poly1305_init(state.address_of(),
c_uint8_ptr(r),
c_size_t(len(r)),
c_uint8_ptr(s),
c_size_t(len(s))
)
if result:
raise ValueError("Error %d while instantiating Poly1305" % result)
self._state = SmartPointer(state.get(),
_raw_poly1305.poly1305_destroy)
if data:
self.update(data)
def update(self, data):
"""Authenticate the next chunk of message.
Args:
data (byte string/byte array/memoryview): The next chunk of data
"""
if self._mac_tag:
raise TypeError("You can only call 'digest' or 'hexdigest' on this object")
result = _raw_poly1305.poly1305_update(self._state.get(),
c_uint8_ptr(data),
c_size_t(len(data)))
if result:
raise ValueError("Error %d while hashing Poly1305 data" % result)
return self
def copy(self):
raise NotImplementedError()
def digest(self):
"""Return the **binary** (non-printable) MAC tag of the message
authenticated so far.
:return: The MAC tag digest, computed over the data processed so far.
Binary form.
:rtype: byte string
"""
if self._mac_tag:
return self._mac_tag
bfr = create_string_buffer(16)
result = _raw_poly1305.poly1305_digest(self._state.get(),
bfr,
c_size_t(len(bfr)))
if result:
raise ValueError("Error %d while creating Poly1305 digest" % result)
self._mac_tag = get_raw_buffer(bfr)
return self._mac_tag
def hexdigest(self):
"""Return the **printable** MAC tag of the message authenticated so far.
:return: The MAC tag, computed over the data processed so far.
Hexadecimal encoded.
:rtype: string
"""
return "".join(["%02x" % bord(x)
for x in tuple(self.digest())])
def verify(self, mac_tag):
"""Verify that a given **binary** MAC (computed by another party)
is valid.
Args:
          mac_tag (byte string/byte array/memoryview): the expected MAC of the message.
Raises:
ValueError: if the MAC does not match. It means that the message
has been tampered with or that the MAC key is incorrect.
"""
secret = get_random_bytes(16)
mac1 = BLAKE2s.new(digest_bits=160, key=secret, data=mac_tag)
mac2 = BLAKE2s.new(digest_bits=160, key=secret, data=self.digest())
if mac1.digest() != mac2.digest():
raise ValueError("MAC check failed")
def hexverify(self, hex_mac_tag):
"""Verify that a given **printable** MAC (computed by another party)
is valid.
Args:
hex_mac_tag (string): the expected MAC of the message,
as a hexadecimal string.
Raises:
ValueError: if the MAC does not match. It means that the message
has been tampered with or that the MAC key is incorrect.
"""
self.verify(unhexlify(tobytes(hex_mac_tag)))
def new(**kwargs):
"""Create a new Poly1305 MAC object.
Args:
key (bytes/bytearray/memoryview):
The 32-byte key for the Poly1305 object.
cipher (module from ``Cryptodome.Cipher``):
The cipher algorithm to use for deriving the Poly1305
key pair *(r, s)*.
It can only be ``Cryptodome.Cipher.AES`` or ``Cryptodome.Cipher.ChaCha20``.
nonce (bytes/bytearray/memoryview):
Optional. The non-repeatable value to use for the MAC of this message.
It must be 16 bytes long for ``AES`` and 8 or 12 bytes for ``ChaCha20``.
If not passed, a random nonce is created; you will find it in the
``nonce`` attribute of the new object.
data (bytes/bytearray/memoryview):
Optional. The very first chunk of the message to authenticate.
It is equivalent to an early call to ``update()``.
Returns:
A :class:`Poly1305_MAC` object
"""
cipher = kwargs.pop("cipher", None)
if not hasattr(cipher, '_derive_Poly1305_key_pair'):
raise ValueError("Parameter 'cipher' must be AES or ChaCha20")
cipher_key = kwargs.pop("key", None)
if cipher_key is None:
raise TypeError("You must pass a parameter 'key'")
nonce = kwargs.pop("nonce", None)
data = kwargs.pop("data", None)
if kwargs:
raise TypeError("Unknown parameters: " + str(kwargs))
r, s, nonce = cipher._derive_Poly1305_key_pair(cipher_key, nonce)
new_mac = Poly1305_MAC(r, s, data)
new_mac.nonce = _copy_bytes(None, None, nonce) # nonce may still be just a memoryview
return new_mac
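# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of driving new() defined above; the key and the
# message below are hypothetical values chosen only for demonstration.
if __name__ == '__main__':
    from Cryptodome.Cipher import AES

    demo_key = get_random_bytes(32)   # Poly1305 requires a 32-byte key
    mac = new(key=demo_key, cipher=AES, data=b'example message')
    # No nonce was passed, so a random one was generated for this message.
    print(mac.nonce)
    print(mac.hexdigest())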
|
akhilaananthram/nupic
|
refs/heads/master
|
nupic/regions/AnomalyRegion.py
|
25
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Region for computing the anomaly score."""
import numpy
from nupic.algorithms import anomaly
from nupic.regions.PyRegion import PyRegion
class AnomalyRegion(PyRegion):
"""Region for computing the anomaly score."""
@classmethod
def getSpec(cls):
return {
"description": ("Region that computes anomaly scores from temporal "
"memory."),
"singleNodeOnly": True,
"inputs": {
"activeColumns": {
"description": "The currently active columns.",
"regionLevel": True,
"dataType": "Real32",
"count": 0,
"required": True,
"isDefaultInput": False,
"requireSplitterMap": False,
},
"predictedColumns": {
"description": "The currently predicted columns.",
"regionLevel": True,
"dataType": "Real32",
"count": 0,
"required": True,
"isDefaultInput": False,
"requireSplitterMap": False,
},
},
"outputs": {
"rawAnomalyScore": {
"description": "The raw anomaly score.",
"dataType": "Real32",
"count": 1,
"regionLevel": True,
"isDefaultOutput": True,
},
},
"parameters": {
},
"commands": {
},
}
def __init__(self, *args, **kwargs):
self.prevPredictedColumns = numpy.zeros([], dtype="float32")
def __eq__(self, other):
for k, v1 in self.__dict__.iteritems():
if not k in other.__dict__:
return False
v2 = getattr(other, k)
if isinstance(v1, numpy.ndarray):
if v1.dtype != v2.dtype:
return False
if not numpy.isclose(v1, v2).all():
return False
else:
if type(v1) != type(v2):
return False
if v1 != v2:
return False
return True
def __ne__(self, other):
return not self == other
@classmethod
def read(cls, proto):
anomalyRegion = object.__new__(cls)
anomalyRegion.prevPredictedColumns = numpy.array(proto.prevPredictedColumns)
return anomalyRegion
def write(self, proto):
proto.prevPredictedColumns = self.prevPredictedColumns.tolist()
def initialize(self, inputs, outputs):
pass
def compute(self, inputs, outputs):
activeColumns = inputs["activeColumns"].nonzero()[0]
rawAnomalyScore = anomaly.computeRawAnomalyScore(
activeColumns, self.prevPredictedColumns)
self.prevPredictedColumns = inputs["predictedColumns"].nonzero()[0]
outputs["rawAnomalyScore"][0] = rawAnomalyScore
|
pquentin/libcloud
|
refs/heads/trunk
|
libcloud/test/common/test_google.py
|
11
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for Google Connection classes.
"""
import datetime
import mock
import os
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.google import (GoogleAuthError,
GoogleAuthType,
GoogleBaseAuthConnection,
GoogleInstalledAppAuthConnection,
GoogleServiceAcctAuthConnection,
GoogleGCEServiceAcctAuthConnection,
GoogleOAuth2Credential,
GoogleBaseConnection,
_utcnow,
_utc_timestamp)
from libcloud.test import MockHttp, LibcloudTestCase
from libcloud.utils.py3 import httplib
# Skip some tests if PyCrypto is unavailable
try:
from Crypto.Hash import SHA256
except ImportError:
SHA256 = None
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
PEM_KEY = os.path.join(SCRIPT_PATH, 'fixtures', 'google', 'pkey.pem')
JSON_KEY = os.path.join(SCRIPT_PATH, 'fixtures', 'google', 'pkey.json')
with open(JSON_KEY, 'r') as f:
KEY_STR = json.loads(f.read())['private_key']
GCE_PARAMS = ('email@developer.gserviceaccount.com', 'key')
GCE_PARAMS_PEM_KEY = ('email@developer.gserviceaccount.com', PEM_KEY)
GCE_PARAMS_JSON_KEY = ('email@developer.gserviceaccount.com', JSON_KEY)
GCE_PARAMS_KEY = ('email@developer.gserviceaccount.com', KEY_STR)
GCE_PARAMS_IA = ('client_id', 'client_secret')
GCE_PARAMS_GCE = ('foo', 'bar')
GCS_S3_PARAMS = ('GOOG0123456789ABCXYZ', # GOOG + 16 alphanumeric chars
'0102030405060708091011121314151617181920') # 40 base64 chars
STUB_UTCNOW = _utcnow()
STUB_TOKEN = {
'access_token': 'tokentoken',
'token_type': 'Bearer',
'expires_in': 3600
}
STUB_IA_TOKEN = {
'access_token': 'installedapp',
'token_type': 'Bearer',
'expires_in': 3600,
'refresh_token': 'refreshrefresh'
}
STUB_REFRESH_TOKEN = {
'access_token': 'refreshrefresh',
'token_type': 'Bearer',
'expires_in': 3600
}
STUB_TOKEN_FROM_FILE = {
'access_token': 'token_from_file',
'token_type': 'Bearer',
'expire_time': _utc_timestamp(STUB_UTCNOW +
datetime.timedelta(seconds=3600)),
'expires_in': 3600
}
class MockJsonResponse(object):
def __init__(self, body):
self.object = body
class GoogleTestCase(LibcloudTestCase):
"""
Assists in making Google tests hermetic and deterministic.
Add anything that needs to be mocked here. Create a patcher with the
suffix '_patcher'.
e.g.
_foo_patcher = mock.patch('module.submodule.class.foo', ...)
Patchers are started at setUpClass and stopped at tearDownClass.
Ideally, you should make a note in the thing being mocked, for clarity.
"""
PATCHER_SUFFIX = '_patcher'
_utcnow_patcher = mock.patch(
'libcloud.common.google._utcnow', return_value=STUB_UTCNOW)
_authtype_is_gce_patcher = mock.patch(
'libcloud.common.google.GoogleAuthType._is_gce', return_value=False)
_read_token_file_patcher = mock.patch(
'libcloud.common.google.GoogleOAuth2Credential._get_token_from_file',
return_value=STUB_TOKEN_FROM_FILE
)
_write_token_file_patcher = mock.patch(
'libcloud.common.google.GoogleOAuth2Credential._write_token_to_file')
_ia_get_code_patcher = mock.patch(
'libcloud.common.google.GoogleInstalledAppAuthConnection.get_code',
return_value=1234
)
@classmethod
def setUpClass(cls):
super(GoogleTestCase, cls).setUpClass()
for patcher in [a for a in dir(cls) if a.endswith(cls.PATCHER_SUFFIX)]:
getattr(cls, patcher).start()
@classmethod
def tearDownClass(cls):
super(GoogleTestCase, cls).tearDownClass()
for patcher in [a for a in dir(cls) if a.endswith(cls.PATCHER_SUFFIX)]:
getattr(cls, patcher).stop()
class GoogleBaseAuthConnectionTest(GoogleTestCase):
"""
Tests for GoogleBaseAuthConnection
"""
def setUp(self):
GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp
self.mock_scopes = ['foo', 'bar']
kwargs = {'scopes': self.mock_scopes}
self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS,
**kwargs)
def test_scopes(self):
self.assertEqual(self.conn.scopes, 'foo bar')
def test_add_default_headers(self):
old_headers = {}
expected_headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'accounts.google.com'}
new_headers = self.conn.add_default_headers(old_headers)
self.assertEqual(new_headers, expected_headers)
def test_token_request(self):
request_body = {'code': 'asdf', 'client_id': self.conn.user_id,
'client_secret': self.conn.key,
'redirect_uri': self.conn.redirect_uri,
'grant_type': 'authorization_code'}
new_token = self.conn._token_request(request_body)
self.assertEqual(new_token['access_token'],
STUB_IA_TOKEN['access_token'])
exp = STUB_UTCNOW + datetime.timedelta(
seconds=STUB_IA_TOKEN['expires_in'])
self.assertEqual(new_token['expire_time'], _utc_timestamp(exp))
class GoogleInstalledAppAuthConnectionTest(GoogleTestCase):
"""
Tests for GoogleInstalledAppAuthConnection
"""
def setUp(self):
GoogleInstalledAppAuthConnection.conn_class = GoogleAuthMockHttp
self.mock_scopes = ['https://www.googleapis.com/auth/foo']
kwargs = {'scopes': self.mock_scopes}
self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS,
**kwargs)
def test_refresh_token(self):
# This token info doesn't have a refresh token, so a new token will be
# requested
token_info1 = {'access_token': 'tokentoken', 'token_type': 'Bearer',
'expires_in': 3600}
new_token1 = self.conn.refresh_token(token_info1)
self.assertEqual(new_token1['access_token'],
STUB_IA_TOKEN['access_token'])
# This token info has a refresh token, so it will be able to be
# refreshed.
token_info2 = {'access_token': 'tokentoken', 'token_type': 'Bearer',
'expires_in': 3600, 'refresh_token': 'refreshrefresh'}
new_token2 = self.conn.refresh_token(token_info2)
self.assertEqual(new_token2['access_token'],
STUB_REFRESH_TOKEN['access_token'])
# Both sets should have refresh info
self.assertTrue('refresh_token' in new_token1)
self.assertTrue('refresh_token' in new_token2)
class GoogleAuthTypeTest(GoogleTestCase):
def test_guess(self):
self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS_IA[0]),
GoogleAuthType.IA)
with mock.patch.object(GoogleAuthType, '_is_gce', return_value=True):
# Since _is_gce currently depends on the environment, not on
# parameters, other auths should override GCE. It does not make
# sense for IA auth to happen on GCE, which is why it's left out.
self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS[0]),
GoogleAuthType.SA)
self.assertEqual(
GoogleAuthType.guess_type(GCS_S3_PARAMS[0]),
GoogleAuthType.GCS_S3)
self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS_GCE[0]),
GoogleAuthType.GCE)
class GoogleOAuth2CredentialTest(GoogleTestCase):
def test_init_oauth2(self):
kwargs = {'auth_type': GoogleAuthType.IA}
cred = GoogleOAuth2Credential(*GCE_PARAMS, **kwargs)
# If there is a viable token file, this gets used first
self.assertEqual(cred.token, STUB_TOKEN_FROM_FILE)
# No token file, get a new token. Check that it gets written to file.
with mock.patch.object(GoogleOAuth2Credential, '_get_token_from_file',
return_value=None):
cred = GoogleOAuth2Credential(*GCE_PARAMS, **kwargs)
expected = STUB_IA_TOKEN
expected['expire_time'] = cred.token['expire_time']
self.assertEqual(cred.token, expected)
cred._write_token_to_file.assert_called_once_with()
def test_refresh(self):
args = list(GCE_PARAMS) + [GoogleAuthType.GCE]
cred = GoogleOAuth2Credential(*args)
cred._refresh_token = mock.Mock()
# Test getting an unexpired access token.
tomorrow = datetime.datetime.now() + datetime.timedelta(days=1)
cred.token = {'access_token': 'Access Token!',
'expire_time': _utc_timestamp(tomorrow)}
cred.access_token
self.assertFalse(cred._refresh_token.called)
# Test getting an expired access token.
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
cred.token = {'access_token': 'Access Token!',
'expire_time': _utc_timestamp(yesterday)}
cred.access_token
self.assertTrue(cred._refresh_token.called)
def test_auth_connection(self):
# Test a bogus auth type
self.assertRaises(GoogleAuthError, GoogleOAuth2Credential, *GCE_PARAMS,
**{'auth_type': 'XX'})
# Try to create an OAuth2 credential when dealing with a GCS S3
# interoperability auth type.
self.assertRaises(GoogleAuthError, GoogleOAuth2Credential, *GCE_PARAMS,
**{'auth_type': GoogleAuthType.GCS_S3})
kwargs = {}
if SHA256:
kwargs['auth_type'] = GoogleAuthType.SA
cred1 = GoogleOAuth2Credential(*GCE_PARAMS_PEM_KEY, **kwargs)
self.assertTrue(isinstance(cred1.oauth2_conn,
GoogleServiceAcctAuthConnection))
cred1 = GoogleOAuth2Credential(*GCE_PARAMS_JSON_KEY, **kwargs)
self.assertTrue(isinstance(cred1.oauth2_conn,
GoogleServiceAcctAuthConnection))
cred1 = GoogleOAuth2Credential(*GCE_PARAMS_KEY, **kwargs)
self.assertTrue(isinstance(cred1.oauth2_conn,
GoogleServiceAcctAuthConnection))
kwargs['auth_type'] = GoogleAuthType.IA
cred2 = GoogleOAuth2Credential(*GCE_PARAMS_IA, **kwargs)
self.assertTrue(isinstance(cred2.oauth2_conn,
GoogleInstalledAppAuthConnection))
kwargs['auth_type'] = GoogleAuthType.GCE
cred3 = GoogleOAuth2Credential(*GCE_PARAMS_GCE, **kwargs)
self.assertTrue(isinstance(cred3.oauth2_conn,
GoogleGCEServiceAcctAuthConnection))
class GoogleBaseConnectionTest(GoogleTestCase):
"""
Tests for GoogleBaseConnection
"""
def setUp(self):
GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp
self.mock_scopes = ['https://www.googleapis.com/auth/foo']
kwargs = {'scopes': self.mock_scopes,
'auth_type': GoogleAuthType.IA}
self.conn = GoogleBaseConnection(*GCE_PARAMS, **kwargs)
def test_add_default_headers(self):
old_headers = {}
new_expected_headers = {'Content-Type': 'application/json',
'Host': 'www.googleapis.com'}
new_headers = self.conn.add_default_headers(old_headers)
self.assertEqual(new_headers, new_expected_headers)
def test_pre_connect_hook(self):
old_params = {}
old_headers = {}
auth_str = '%s %s' % (STUB_TOKEN_FROM_FILE['token_type'],
STUB_TOKEN_FROM_FILE['access_token'])
new_expected_params = {}
new_expected_headers = {'Authorization': auth_str}
new_params, new_headers = self.conn.pre_connect_hook(old_params,
old_headers)
self.assertEqual(new_params, new_expected_params)
self.assertEqual(new_headers, new_expected_headers)
def test_encode_data(self):
data = {'key': 'value'}
json_data = '{"key": "value"}'
encoded_data = self.conn.encode_data(data)
self.assertEqual(encoded_data, json_data)
def test_has_completed(self):
body1 = {"endTime": "2013-06-26T10:05:07.630-07:00",
"id": "3681664092089171723",
"kind": "compute#operation",
"status": "DONE",
"targetId": "16211908079305042870"}
body2 = {"endTime": "2013-06-26T10:05:07.630-07:00",
"id": "3681664092089171723",
"kind": "compute#operation",
"status": "RUNNING",
"targetId": "16211908079305042870"}
response1 = MockJsonResponse(body1)
response2 = MockJsonResponse(body2)
self.assertTrue(self.conn.has_completed(response1))
self.assertFalse(self.conn.has_completed(response2))
def test_get_poll_request_kwargs(self):
body = {"endTime": "2013-06-26T10:05:07.630-07:00",
"id": "3681664092089171723",
"kind": "compute#operation",
"selfLink": "https://www.googleapis.com/operations-test"}
response = MockJsonResponse(body)
expected_kwargs = {'action':
'https://www.googleapis.com/operations-test'}
kwargs = self.conn.get_poll_request_kwargs(response, None, {})
self.assertEqual(kwargs, expected_kwargs)
def test_morph_action_hook(self):
self.conn.request_path = '/compute/apiver/project/project-name'
action1 = ('https://www.googleapis.com/compute/apiver/project'
'/project-name/instances')
action2 = '/instances'
expected_request = '/compute/apiver/project/project-name/instances'
request1 = self.conn.morph_action_hook(action1)
request2 = self.conn.morph_action_hook(action2)
self.assertEqual(request1, expected_request)
self.assertEqual(request2, expected_request)
class GoogleAuthMockHttp(MockHttp):
"""
Mock HTTP Class for Google Auth Connections.
"""
json_hdr = {'content-type': 'application/json; charset=UTF-8'}
def _o_oauth2_token(self, method, url, body, headers):
if 'code' in body:
body = json.dumps(STUB_IA_TOKEN)
elif 'refresh_token' in body:
body = json.dumps(STUB_REFRESH_TOKEN)
else:
body = json.dumps(STUB_TOKEN)
return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
tempbottle/restcommander
|
refs/heads/master
|
play-1.2.4/python/Lib/email/header.py
|
54
|
# Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
"""Header encoding and decoding functionality."""
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
import email.quoprimime
import email.base64mime
from email.errors import HeaderParseError
from email.charset import Charset
NL = '\n'
SPACE = ' '
USPACE = u' '
SPACE8 = ' ' * 8
UEMPTYSTRING = u''
MAXLINELEN = 76
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
(?=[ \t]|$) # whitespace or the end of the string
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Helpers
_max_append = email.quoprimime._max_append
def decode_header(header):
"""Decode a message header value without converting charset.
Returns a list of (decoded_string, charset) pairs containing each of the
decoded parts of the header. Charset is None for non-encoded parts of the
header, otherwise a lower-case string containing the name of the character
set specified in the encoded string.
An email.Errors.HeaderParseError may be raised when certain decoding
errors occur (e.g. a base64 decoding exception).
"""
# If no encoding, just return the header
header = str(header)
if not ecre.search(header):
return [(header, None)]
decoded = []
dec = ''
for line in header.splitlines():
# This line might not have an encoding in it
if not ecre.search(line):
decoded.append((line, None))
continue
parts = ecre.split(line)
while parts:
unenc = parts.pop(0).strip()
if unenc:
# Should we continue a long line?
if decoded and decoded[-1][1] is None:
decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
else:
decoded.append((unenc, None))
if parts:
charset, encoding = [s.lower() for s in parts[0:2]]
encoded = parts[2]
dec = None
if encoding == 'q':
dec = email.quoprimime.header_decode(encoded)
elif encoding == 'b':
try:
dec = email.base64mime.decode(encoded)
except binascii.Error:
# Turn this into a higher level exception. BAW: Right
# now we throw the lower level exception away but
# when/if we get exception chaining, we'll preserve it.
raise HeaderParseError
if dec is None:
dec = encoded
if decoded and decoded[-1][1] == charset:
decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
else:
decoded.append((dec, charset))
del parts[0:3]
return decoded
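# Illustrative doctest-style usage (values as in the library documentation):
#
#     >>> decode_header('Hello World')
#     [('Hello World', None)]
#     >>> decode_header('=?iso-8859-1?q?p=F6stal?=')
#     [('p\xf6stal', 'iso-8859-1')]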
def make_header(decoded_seq, maxlinelen=None, header_name=None,
continuation_ws=' '):
"""Create a Header from a sequence of pairs as returned by decode_header()
decode_header() takes a header value string and returns a sequence of
pairs of the format (decoded_string, charset) where charset is the string
name of the character set.
This function takes one of those sequence of pairs and returns a Header
instance. Optional maxlinelen, header_name, and continuation_ws are as in
the Header constructor.
"""
h = Header(maxlinelen=maxlinelen, header_name=header_name,
continuation_ws=continuation_ws)
for s, charset in decoded_seq:
# None means us-ascii but we can simply pass it on to h.append()
if charset is not None and not isinstance(charset, Charset):
charset = Charset(charset)
h.append(s, charset)
return h
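# For illustration, make_header() can rebuild a Header from decode_header()
# output; re-encoding it typically reproduces the original encoded form:
#
#     >>> str(make_header(decode_header('=?iso-8859-1?q?p=F6stal?=')))
#     '=?iso-8859-1?q?p=F6stal?='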
class Header:
def __init__(self, s=None, charset=None,
maxlinelen=None, header_name=None,
continuation_ws=' ', errors='strict'):
"""Create a MIME-compliant header that can contain many character sets.
Optional s is the initial header value. If None, the initial header
value is not set. You can later append to the header with .append()
method calls. s may be a byte string or a Unicode string, but see the
.append() documentation for semantics.
Optional charset serves two purposes: it has the same meaning as the
charset argument to the .append() method. It also sets the default
character set for all subsequent .append() calls that omit the charset
argument. If charset is not provided in the constructor, the us-ascii
charset is used both as s's initial charset and as the default for
subsequent .append() calls.
The maximum line length can be specified explicitly via maxlinelen. For
splitting the first line to a shorter value (to account for the field
header which isn't included in s, e.g. `Subject') pass in the name of
the field in header_name. The default maxlinelen is 76.
continuation_ws must be RFC 2822 compliant folding whitespace (usually
either a space or a hard tab) which will be prepended to continuation
lines.
errors is passed through to the .append() call.
"""
if charset is None:
charset = USASCII
if not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
self._continuation_ws = continuation_ws
cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
# BAW: I believe `chunks' and `maxlinelen' should be non-public.
self._chunks = []
if s is not None:
self.append(s, charset, errors)
if maxlinelen is None:
maxlinelen = MAXLINELEN
if header_name is None:
# We don't know anything about the field header so the first line
# is the same length as subsequent lines.
self._firstlinelen = maxlinelen
else:
# The first line should be shorter to take into account the field
# header. Also subtract off 2 extra for the colon and space.
self._firstlinelen = maxlinelen - len(header_name) - 2
# Second and subsequent lines should subtract off the length in
# columns of the continuation whitespace prefix.
self._maxlinelen = maxlinelen - cws_expanded_len
def __str__(self):
"""A synonym for self.encode()."""
return self.encode()
def __unicode__(self):
"""Helper for the built-in unicode function."""
uchunks = []
lastcs = None
for s, charset in self._chunks:
# We must preserve spaces between encoded and non-encoded word
# boundaries, which means for us we need to add a space when we go
# from a charset to None/us-ascii, or from None/us-ascii to a
# charset. Only do this for the second and subsequent chunks.
nextcs = charset
if uchunks:
if lastcs not in (None, 'us-ascii'):
if nextcs in (None, 'us-ascii'):
uchunks.append(USPACE)
nextcs = None
elif nextcs not in (None, 'us-ascii'):
uchunks.append(USPACE)
lastcs = nextcs
uchunks.append(unicode(s, str(charset)))
return UEMPTYSTRING.join(uchunks)
# Rich comparison operators for equality only. BAW: does it make sense to
# have or explicitly disable <, <=, >, >= operators?
def __eq__(self, other):
# other may be a Header or a string. Both are fine so coerce
# ourselves to a string, swap the args and do another comparison.
return other == self.encode()
def __ne__(self, other):
return not self == other
def append(self, s, charset=None, errors='strict'):
"""Append a string to the MIME header.
Optional charset, if given, should be a Charset instance or the name
of a character set (which will be converted to a Charset instance). A
value of None (the default) means that the charset given in the
constructor is used.
s may be a byte string or a Unicode string. If it is a byte string
(i.e. isinstance(s, str) is true), then charset is the encoding of
that byte string, and a UnicodeError will be raised if the string
cannot be decoded with that charset. If s is a Unicode string, then
charset is a hint specifying the character set of the characters in
the string. In this case, when producing an RFC 2822 compliant header
using RFC 2047 rules, the Unicode string will be encoded using the
following charsets in order: us-ascii, the charset hint, utf-8. The
first character set not to provoke a UnicodeError is used.
Optional `errors' is passed as the third argument to any unicode() or
ustr.encode() call.
"""
if charset is None:
charset = self._charset
elif not isinstance(charset, Charset):
charset = Charset(charset)
# If the charset is our faux 8bit charset, leave the string unchanged
if charset != '8bit':
# We need to test that the string can be converted to unicode and
# back to a byte string, given the input and output codecs of the
# charset.
if isinstance(s, str):
# Possibly raise UnicodeError if the byte string can't be
# converted to a unicode with the input codec of the charset.
incodec = charset.input_codec or 'us-ascii'
ustr = unicode(s, incodec, errors)
# Now make sure that the unicode could be converted back to a
# byte string with the output codec, which may be different
# than the input codec. Still, use the original byte string.
outcodec = charset.output_codec or 'us-ascii'
ustr.encode(outcodec, errors)
elif isinstance(s, unicode):
# Now we have to be sure the unicode string can be converted
# to a byte string with a reasonable output codec. We want to
# use the byte string in the chunk.
for charset in USASCII, charset, UTF8:
try:
outcodec = charset.output_codec or 'us-ascii'
s = s.encode(outcodec, errors)
break
except UnicodeError:
pass
else:
assert False, 'utf-8 conversion failed'
self._chunks.append((s, charset))
def _split(self, s, charset, maxlinelen, splitchars):
# Split up a header safely for use with encode_chunks.
splittable = charset.to_splittable(s)
encoded = charset.from_splittable(splittable, True)
elen = charset.encoded_header_len(encoded)
# If the line's encoded length fits, just return it
if elen <= maxlinelen:
return [(encoded, charset)]
# If we have undetermined raw 8bit characters sitting in a byte
# string, we really don't know what the right thing to do is. We
# can't really split it because it might be multibyte data which we
# could break if we split it between pairs. The least harm seems to
# be to not split the header at all, but that means they could go out
# longer than maxlinelen.
if charset == '8bit':
return [(s, charset)]
# BAW: I'm not sure what the right test here is. What we're trying to
# do is be faithful to RFC 2822's recommendation (section 2.2.3) that:
#
# "Note: Though structured field bodies are defined in such a way that
# folding can take place between many of the lexical tokens (and even
# within some of the lexical tokens), folding SHOULD be limited to
# placing the CRLF at higher-level syntactic breaks."
#
# For now, I can only imagine doing this when the charset is us-ascii,
# although it's possible that other charsets may also benefit from the
# higher-level syntactic breaks.
elif charset == 'us-ascii':
return self._split_ascii(s, charset, maxlinelen, splitchars)
# BAW: should we use encoded?
elif elen == len(s):
# We can split on _maxlinelen boundaries because we know that the
# encoding won't change the size of the string
splitpnt = maxlinelen
first = charset.from_splittable(splittable[:splitpnt], False)
last = charset.from_splittable(splittable[splitpnt:], False)
else:
# Binary search for split point
first, last = _binsplit(splittable, charset, maxlinelen)
# first is of the proper length so just wrap it in the appropriate
# chrome. last must be recursively split.
fsplittable = charset.to_splittable(first)
fencoded = charset.from_splittable(fsplittable, True)
chunk = [(fencoded, charset)]
return chunk + self._split(last, charset, self._maxlinelen, splitchars)
def _split_ascii(self, s, charset, firstlen, splitchars):
chunks = _split_ascii(s, firstlen, self._maxlinelen,
self._continuation_ws, splitchars)
return zip(chunks, [charset]*len(chunks))
def _encode_chunks(self, newchunks, maxlinelen):
# MIME-encode a header with many different charsets and/or encodings.
#
# Given a list of pairs (string, charset), return a MIME-encoded
# string suitable for use in a header field. Each pair may have
# different charsets and/or encodings, and the resulting header will
# accurately reflect each setting.
#
# Each encoding can be email.Utils.QP (quoted-printable, for
# ASCII-like character sets like iso-8859-1), email.Utils.BASE64
# (Base64, for non-ASCII like character sets like KOI8-R and
# iso-2022-jp), or None (no encoding).
#
# Each pair will be represented on a separate line; the resulting
# string will be in the format:
#
# =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
# =?charset2?b?SvxyZ2VuIEL2aW5n?="
chunks = []
for header, charset in newchunks:
if not header:
continue
if charset is None or charset.header_encoding is None:
s = header
else:
s = charset.header_encode(header)
# Don't add more folding whitespace than necessary
if chunks and chunks[-1].endswith(' '):
extra = ''
else:
extra = ' '
_max_append(chunks, s, maxlinelen, extra)
joiner = NL + self._continuation_ws
return joiner.join(chunks)
def encode(self, splitchars=';, '):
"""Encode a message header into an RFC-compliant format.
There are many issues involved in converting a given string for use in
an email header. Only certain character sets are readable in most
email clients, and as header strings can only contain a subset of
7-bit ASCII, care must be taken to properly convert and encode (with
Base64 or quoted-printable) header strings. In addition, there is a
75-character length limit on any given encoded header field, so
line-wrapping must be performed, even with double-byte character sets.
This method will do its best to convert the string to the correct
character set used in email, and encode and line wrap it safely with
the appropriate scheme for that character set.
If the given charset is not known or an error occurs during
conversion, this function will return the header untouched.
Optional splitchars is a string containing characters to split long
ASCII lines on, in rough support of RFC 2822's `highest level
syntactic breaks'. This doesn't affect RFC 2047 encoded lines.
"""
newchunks = []
maxlinelen = self._firstlinelen
lastlen = 0
for s, charset in self._chunks:
# The first bit of the next chunk should be just long enough to
# fill the next line. Don't forget the space separating the
# encoded words.
targetlen = maxlinelen - lastlen - 1
if targetlen < charset.encoded_header_len(''):
# Stick it on the next line
targetlen = maxlinelen
newchunks += self._split(s, charset, targetlen, splitchars)
lastchunk, lastcharset = newchunks[-1]
lastlen = lastcharset.encoded_header_len(lastchunk)
return self._encode_chunks(newchunks, maxlinelen)
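# For illustration, a header built with an explicit iso-8859-1 charset encodes
# to the =?charset?q?...?= form sketched in the ecre comment near the top of
# this module:
#
#     >>> Header('Hello World', charset='iso-8859-1').encode()
#     '=?iso-8859-1?q?Hello_World?='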
def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
lines = []
maxlen = firstlen
for line in s.splitlines():
# Ignore any leading whitespace (i.e. continuation whitespace) already
# on the line, since we'll be adding our own.
line = line.lstrip()
if len(line) < maxlen:
lines.append(line)
maxlen = restlen
continue
# Attempt to split the line at the highest-level syntactic break
# possible. Note that we don't have a lot of smarts about field
# syntax; we just try to break on semi-colons, then commas, then
# whitespace.
for ch in splitchars:
if ch in line:
break
else:
# There's nothing useful to split the line on, not even spaces, so
# just append this line unchanged
lines.append(line)
maxlen = restlen
continue
# Now split the line on the character plus trailing whitespace
cre = re.compile(r'%s\s*' % ch)
if ch in ';,':
eol = ch
else:
eol = ''
joiner = eol + ' '
joinlen = len(joiner)
wslen = len(continuation_ws.replace('\t', SPACE8))
this = []
linelen = 0
for part in cre.split(line):
curlen = linelen + max(0, len(this)-1) * joinlen
partlen = len(part)
onfirstline = not lines
# We don't want to split after the field name, if we're on the
# first line and the field name is present in the header string.
if ch == ' ' and onfirstline and \
len(this) == 1 and fcre.match(this[0]):
this.append(part)
linelen += partlen
elif curlen + partlen > maxlen:
if this:
lines.append(joiner.join(this) + eol)
# If this part is longer than maxlen and we aren't already
# splitting on whitespace, try to recursively split this line
# on whitespace.
if partlen > maxlen and ch != ' ':
subl = _split_ascii(part, maxlen, restlen,
continuation_ws, ' ')
lines.extend(subl[:-1])
this = [subl[-1]]
else:
this = [part]
linelen = wslen + len(this[-1])
maxlen = restlen
else:
this.append(part)
linelen += partlen
# Put any left over parts on a line by themselves
if this:
lines.append(joiner.join(this))
return lines
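# For illustration (addresses here are made up), _split_ascii() prefers the
# higher-level break (';') over whitespace when a line exceeds the width:
#
#     >>> _split_ascii('foo@example.com; bar@example.com', 20, 20, ' ', ';, ')
#     ['foo@example.com;', 'bar@example.com']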
def _binsplit(splittable, charset, maxlinelen):
i = 0
j = len(splittable)
while i < j:
# Invariants:
# 1. splittable[:k] fits for all k <= i (note that we *assume*,
# at the start, that splittable[:0] fits).
# 2. splittable[:k] does not fit for any k > j (at the start,
# this means we shouldn't look at any k > len(splittable)).
# 3. We don't know about splittable[:k] for k in i+1..j.
# 4. We want to set i to the largest k that fits, with i <= k <= j.
#
m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
chunk = charset.from_splittable(splittable[:m], True)
chunklen = charset.encoded_header_len(chunk)
if chunklen <= maxlinelen:
# m is acceptable, so is a new lower bound.
i = m
else:
# m is not acceptable, so final i must be < m.
j = m - 1
# i == j. Invariant #1 implies that splittable[:i] fits, and
# invariant #2 implies that splittable[:i+1] does not fit, so i
# is what we're looking for.
first = charset.from_splittable(splittable[:i], False)
last = charset.from_splittable(splittable[i:], False)
return first, last
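if __name__ == '__main__':
    # Minimal demonstration of the Header API, run only when this module is
    # executed directly; the subject text here is purely illustrative.
    demo = Header('Hello ', charset='us-ascii', header_name='Subject')
    demo.append(u'W\xf6rld', charset='iso-8859-1')
    print 'encoded:', demo.encode()
    print 'decoded:', decode_header(demo.encode())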
|
jazztpt/edx-platform
|
refs/heads/master
|
lms/djangoapps/django_comment_client/tests/group_id.py
|
41
|
import json
import re
from teams.tests.factories import CourseTeamFactory
class GroupIdAssertionMixin(object):
def _data_or_params_cs_request(self, mock_request):
"""
Returns the data or params dict that `mock_request` was called with.
"""
call = [call for call in mock_request.call_args_list if call[0][1].endswith(self.cs_endpoint)][0]
if call[0][0] == "get":
return call[1]["params"]
elif call[0][0] == "post":
return call[1]["data"]
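# For example (illustrative values): if the comments service mock was called
# as mock_request("get", "http://cs/threads", params={"group_id": 1}), this
# helper returns {"group_id": 1}.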
def _assert_comments_service_called_with_group_id(self, mock_request, group_id):
self.assertTrue(mock_request.called)
self.assertEqual(self._data_or_params_cs_request(mock_request)["group_id"], group_id)
def _assert_comments_service_called_without_group_id(self, mock_request):
self.assertTrue(mock_request.called)
self.assertNotIn("group_id", self._data_or_params_cs_request(mock_request))
def _assert_html_response_contains_group_info(self, response):
group_info = {"group_id": None, "group_name": None}
match = re.search(r'"group_id": ([\d]*)', response.content)
if match and match.group(1) != '':
group_info["group_id"] = int(match.group(1))
match = re.search(r'"group_name": "([^&]*)"', response.content)
if match:
group_info["group_name"] = match.group(1)
self._assert_thread_contains_group_info(group_info)
def _assert_json_response_contains_group_info(self, response, extract_thread=None):
"""
:param extract_thread: a function which accepts a dictionary (complete
json response payload) and returns another dictionary (first
occurrence of a thread model within that payload). if None is
passed, the identity function is assumed.
"""
payload = json.loads(response.content)
thread = extract_thread(payload) if extract_thread else payload
self._assert_thread_contains_group_info(thread)
def _assert_thread_contains_group_info(self, thread):
self.assertEqual(thread['group_id'], self.student_cohort.id)
self.assertEqual(thread['group_name'], self.student_cohort.name)
class CohortedTopicGroupIdTestMixin(GroupIdAssertionMixin):
"""
Provides test cases to verify that views pass the correct `group_id` to
the comments service when requesting content in cohorted discussions.
"""
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
"""
Call the view for the implementing test class, constructing a request
from the parameters.
"""
pass
def test_cohorted_topic_student_without_group_id(self, mock_request):
self.call_view(mock_request, "cohorted_topic", self.student, None, pass_group_id=False)
self._assert_comments_service_called_with_group_id(mock_request, self.student_cohort.id)
def test_cohorted_topic_student_none_group_id(self, mock_request):
self.call_view(mock_request, "cohorted_topic", self.student, "")
self._assert_comments_service_called_with_group_id(mock_request, self.student_cohort.id)
def test_cohorted_topic_student_with_own_group_id(self, mock_request):
self.call_view(mock_request, "cohorted_topic", self.student, self.student_cohort.id)
self._assert_comments_service_called_with_group_id(mock_request, self.student_cohort.id)
def test_cohorted_topic_student_with_other_group_id(self, mock_request):
self.call_view(mock_request, "cohorted_topic", self.student, self.moderator_cohort.id)
self._assert_comments_service_called_with_group_id(mock_request, self.student_cohort.id)
def test_cohorted_topic_moderator_without_group_id(self, mock_request):
self.call_view(mock_request, "cohorted_topic", self.moderator, None, pass_group_id=False)
self._assert_comments_service_called_without_group_id(mock_request)
def test_cohorted_topic_moderator_none_group_id(self, mock_request):
self.call_view(mock_request, "cohorted_topic", self.moderator, "")
self._assert_comments_service_called_without_group_id(mock_request)
def test_cohorted_topic_moderator_with_own_group_id(self, mock_request):
self.call_view(mock_request, "cohorted_topic", self.moderator, self.moderator_cohort.id)
self._assert_comments_service_called_with_group_id(mock_request, self.moderator_cohort.id)
def test_cohorted_topic_moderator_with_other_group_id(self, mock_request):
self.call_view(mock_request, "cohorted_topic", self.moderator, self.student_cohort.id)
self._assert_comments_service_called_with_group_id(mock_request, self.student_cohort.id)
def test_cohorted_topic_moderator_with_invalid_group_id(self, mock_request):
invalid_id = self.student_cohort.id + self.moderator_cohort.id
response = self.call_view(mock_request, "cohorted_topic", self.moderator, invalid_id)
self.assertEqual(response.status_code, 400)
class NonCohortedTopicGroupIdTestMixin(GroupIdAssertionMixin):
"""
Provides test cases to verify that views pass the correct `group_id` to
the comments service when requesting content in non-cohorted discussions.
"""
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
"""
Call the view for the implementing test class, constructing a request
from the parameters.
"""
pass
def test_non_cohorted_topic_student_without_group_id(self, mock_request):
self.call_view(mock_request, "non_cohorted_topic", self.student, None, pass_group_id=False)
self._assert_comments_service_called_without_group_id(mock_request)
def test_non_cohorted_topic_student_none_group_id(self, mock_request):
self.call_view(mock_request, "non_cohorted_topic", self.student, None)
self._assert_comments_service_called_without_group_id(mock_request)
def test_non_cohorted_topic_student_with_own_group_id(self, mock_request):
self.call_view(mock_request, "non_cohorted_topic", self.student, self.student_cohort.id)
self._assert_comments_service_called_without_group_id(mock_request)
def test_non_cohorted_topic_student_with_other_group_id(self, mock_request):
self.call_view(mock_request, "non_cohorted_topic", self.student, self.moderator_cohort.id)
self._assert_comments_service_called_without_group_id(mock_request)
def test_non_cohorted_topic_moderator_without_group_id(self, mock_request):
self.call_view(mock_request, "non_cohorted_topic", self.moderator, None, pass_group_id=False)
self._assert_comments_service_called_without_group_id(mock_request)
def test_non_cohorted_topic_moderator_none_group_id(self, mock_request):
self.call_view(mock_request, "non_cohorted_topic", self.moderator, None)
self._assert_comments_service_called_without_group_id(mock_request)
def test_non_cohorted_topic_moderator_with_own_group_id(self, mock_request):
self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.moderator_cohort.id)
self._assert_comments_service_called_without_group_id(mock_request)
def test_non_cohorted_topic_moderator_with_other_group_id(self, mock_request):
self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.student_cohort.id)
self._assert_comments_service_called_without_group_id(mock_request)
def test_non_cohorted_topic_moderator_with_invalid_group_id(self, mock_request):
invalid_id = self.student_cohort.id + self.moderator_cohort.id
self.call_view(mock_request, "non_cohorted_topic", self.moderator, invalid_id)
self._assert_comments_service_called_without_group_id(mock_request)
def test_team_discussion_id_not_cohorted(self, mock_request):
team = CourseTeamFactory(course_id=self.course.id)
team.add_user(self.student) # pylint: disable=no-member
self.call_view(mock_request, team.discussion_topic_id, self.student, None)
self._assert_comments_service_called_without_group_id(mock_request)
|
popazerty/enigma2-4.3
|
refs/heads/master
|
lib/python/Plugins/Extensions/DVDBurn/TitleList.py
|
9
|
import DVDProject, TitleList, TitleCutter, TitleProperties, ProjectSettings, DVDToolbox, Process
from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.InputBox import InputBox
from Screens.MessageBox import MessageBox
from Screens.HelpMenu import HelpableScreen
from Screens.TaskView import JobView
from Components.Task import job_manager
from Components.ActionMap import HelpableActionMap, ActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.MultiContent import MultiContentEntryText
from Components.Label import MultiColorLabel
from enigma import gFont, RT_HALIGN_LEFT, RT_HALIGN_RIGHT
from boxbranding import getMachineBrand, getMachineName
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
class TitleList(Screen, HelpableScreen):
skin = """
<screen name="TitleList" position="center,center" size="560,470" title="DVD Tool" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="title_label" render="Label" position="10,48" size="540,38" font="Regular;18" transparent="1" />
<widget source="error_label" render="Label" position="10,48" size="540,296" zPosition="3" font="Regular;20" transparent="1" />
<widget source="titles" render="Listbox" scrollbarMode="showOnDemand" position="10,86" size="546,296" zPosition="3" transparent="1" >
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (0, 0), size = (360, 20), font = 0, flags = RT_HALIGN_LEFT, text = 1), # index 1 Title,
MultiContentEntryText(pos = (0, 20), size = (360, 17), font = 1, flags = RT_HALIGN_LEFT, text = 2), # index 2 description,
MultiContentEntryText(pos = (366, 6), size = (152, 20), font = 1, flags = RT_HALIGN_RIGHT, text = 3), # index 3 channel,
MultiContentEntryText(pos = (366, 20), size = (102, 17), font = 1, flags = RT_HALIGN_RIGHT, text = 4), # index 4 begin time,
MultiContentEntryText(pos = (470, 20), size = (48, 20), font = 1, flags = RT_HALIGN_RIGHT, text = 5), # index 5 duration,
],
"fonts": [gFont("Regular", 20), gFont("Regular", 14)],
"itemHeight": 37
}
</convert>
</widget>
<ePixmap pixmap="div-h.png" position="0,390" zPosition="10" size="560,2" />
<ePixmap pixmap="buttons/key_menu.png" position="10,394" size="35,25" alphatest="on" />
<widget source="hint" render="Label" position="50,396" size="540,22" font="Regular;18" halign="left" />
<widget name="medium_label" position="10,420" size="540,22" font="Regular;18" halign="left" foregroundColors="#FFFFFF,#FFFF00,#FF0000" />
<widget source="space_bar_single" render="Progress" position="10,446" size="270,24" borderWidth="1" zPosition="2" backgroundColor="#254f7497" />
<widget source="space_label_single" render="Label" position="10,449" size="270,22" zPosition="3" font="Regular;18" halign="center" transparent="1" foregroundColor="#000000" />
<widget source="space_bar_dual" render="Progress" position="10,446" size="540,24" borderWidth="1" backgroundColor="#254f7497" />
<widget source="space_label_dual" render="Label" position="10,449" size="540,22" zPosition="2" font="Regular;18" halign="center" transparent="1" foregroundColor="#000000" />
</screen>"""
def __init__(self, session, project = None):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self["titleactions"] = HelpableActionMap(self, "DVDTitleList",
{
"addTitle": (self.addTitle, _("Add a new title"), _("Add title")),
"titleProperties": (self.titleProperties, _("Properties of current title"), _("Title properties")),
"removeCurrentTitle": (self.removeCurrentTitle, _("Remove currently selected title"), _("Remove title")),
"settings": (self.settings, _("Collection settings"), _("Settings")),
"burnProject": (self.askBurnProject, _("Burn DVD"), _("Burn DVD")),
})
self["MovieSelectionActions"] = HelpableActionMap(self, "MovieSelectionActions",
{
"contextMenu": (self.showMenu, _("menu")),
})
self["actions"] = ActionMap(["OkCancelActions"],
{
"cancel": self.leave
})
self["key_red"] = StaticText()
self["key_green"] = StaticText(_("Add title"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText(_("Settings"))
self["title_label"] = StaticText()
self["error_label"] = StaticText()
self["space_label_single"] = StaticText()
self["space_label_dual"] = StaticText()
self["hint"] = StaticText(_("Advanced options"))
self["medium_label"] = MultiColorLabel()
self["space_bar_single"] = Progress()
self["space_bar_dual"] = Progress()
self["titles"] = List([])
self.previous_size = 0
if project is not None:
self.project = project
else:
self.newProject()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("DVD titlelist"))
def checkBackgroundJobs(self):
for job in job_manager.getPendingJobs():
print "type(job):", type(job)
print "Process.DVDJob:", Process.DVDJob
if type(job) == Process.DVDJob:
self.backgroundJob = job
return
self.backgroundJob = None
def showMenu(self):
menu = []
self.checkBackgroundJobs()
if self.backgroundJob:
j = self.backgroundJob
menu.append(("%s: %s (%d%%)" % (j.getStatustext(), j.name, int(100*j.progress/float(j.end))), self.showBackgroundJob))
menu.append((_("DVD media toolbox"), self.toolbox))
if self.project.settings.output.value == "dvd":
if len(self["titles"].list):
menu.append((_("Burn DVD"), self.burnProject))
elif self.project.settings.output.value == "iso":
menu.append((_("Create DVD-ISO"), self.burnProject))
menu.append((_("Burn existing image to DVD"), self.selectImage))
if len(self["titles"].list):
menu.append((_("Preview menu"), self.previewMenu))
menu.append((_("Edit chapters of current title"), self.editTitle))
menu.append((_("Reset and renumerate title names"), self.resetTitles))
menu.append((_("Exit"), self.leave))
self.session.openWithCallback(self.menuCallback, ChoiceBox, title="", list=menu)
def menuCallback(self, choice):
if choice:
choice[1]()
def showBackgroundJob(self):
job_manager.in_background = False
self.session.openWithCallback(self.JobViewCB, JobView, self.backgroundJob)
self.backgroundJob = None
def titleProperties(self):
if self.getCurrentTitle():
self.session.openWithCallback(self.updateTitleList, TitleProperties.TitleProperties, self, self.project, self["titles"].getIndex())
def selectImage(self):
self.session.openWithCallback(self.burnISO, ProjectSettings.FileBrowser, "image", self.project.settings)
def newProject(self):
self.project = DVDProject.DVDProject()
if self.loadTemplate():
self.project.session = self.session
self.settingsCB()
def addTitle(self):
from Screens.MovieSelection import MovieSelection
from Components.ActionMap import HelpableActionMap
class DVDMovieSelection(MovieSelection):
skin = """<screen name="DVDMovieSelection" position="center,center" size="560,445" title="Select a movie">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="waitingtext" position="0,45" size="560,395" zPosition="4" font="Regular;22" halign="center" valign="center" />
<widget name="list" position="5,40" size="550,375" zPosition="2" scrollbarMode="showOnDemand" />
<widget name="DescriptionBorder" pixmap="border_eventinfo.png" position="0,316" zPosition="1" size="560,103" transparent="1" alphatest="on" />
<widget source="Service" render="Label" position="5,318" zPosition="1" size="480,35" font="Regular;17" foregroundColor="#cccccc">
<convert type="MovieInfo">ShortDescription</convert>
</widget>
<widget source="Service" render="Label" position="495,318" zPosition="1" size="60,22" font="Regular;17" halign="right">
<convert type="ServiceTime">Duration</convert>
<convert type="ClockToText">AsLength</convert>
</widget>
<widget source="Service" render="Label" position="380,337" zPosition="2" size="175,22" font="Regular;17" halign="right">
<convert type="MovieInfo">RecordServiceName</convert>
</widget>
<widget source="Service" render="Label" position="5,357" zPosition="1" size="550,58" font="Regular;19">
<convert type="EventName">ExtendedDescription</convert>
</widget>
<widget name="freeDiskSpace" position="10,425" size="540,20" font="Regular;19" valign="center" halign="right" />
</screen>"""
def __init__(self, session):
MovieSelection.__init__(self, session)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Add"))
self["key_yellow"] = StaticText(_("Edit title"))
self["ColorActions"] = HelpableActionMap(self, "ColorActions",
{
"red": (self.close, _("Close title selection")),
"green": (self.insertWithoutEdit, "insert without cutlist editor"),
"yellow": (self.movieSelected, _("Add a new title"))
})
def updateTags(self):
pass
def doContext(self):
print "context menu forbidden inside DVDBurn to prevent calling multiple instances"
def updateButtons(self):
# the original will hide red/green, and crash...
pass
def insertWithoutEdit(self):
current = self.getCurrent()
if current is not None:
current.edit = False
self.close(current)
def movieSelected(self):
current = self.getCurrent()
if current is not None:
current.edit = True
self.close(current)
self.session.openWithCallback(self.selectedSource, DVDMovieSelection)
def selectedSource(self, source = None):
if source is None:
return None
if not source.getPath().endswith(".ts"):
self.session.open(MessageBox,text = _("You can only burn %s %s recordings!") % (getMachineBrand(), getMachineName()), type = MessageBox.TYPE_ERROR)
return None
t = self.project.addService(source)
try:
editor = source.edit
except AttributeError:
editor = True
self.editTitle(t, editor)
def removeCurrentTitle(self):
title = self.getCurrentTitle()
self.removeTitle(title)
def removeTitle(self, title):
if title is not None:
self.project.titles.remove(title)
self.updateTitleList()
def toolbox(self):
self.session.open(DVDToolbox.DVDToolbox)
def settings(self):
self.session.openWithCallback(self.settingsCB, ProjectSettings.ProjectSettings, self.project)
def settingsCB(self, update=True):
if not update:
return
self.updateTitleList()
def loadTemplate(self):
filename = resolveFilename(SCOPE_PLUGINS)+"Extensions/DVDBurn/DreamboxDVD.ddvdp.xml"
if self.project.load(filename):
self["error_label"].setText("")
return True
else:
self["error_label"].setText(self.project.error)
return False
def askBurnProject(self):
if len(self["titles"].list):
self.session.openWithCallback(self.burnProject,MessageBox,text = _("Do you want to burn this collection to DVD medium?"), type = MessageBox.TYPE_YESNO)
def burnProject(self, answer=True):
if not answer:
return
if self.project.settings.authormode.value == "data_ts":
job = Process.DVDdataJob(self.project)
job_manager.AddJob(job)
job_manager.in_background = False
self.session.openWithCallback(self.JobViewCB, JobView, job)
else:
job = Process.DVDJob(self.project)
job_manager.AddJob(job)
job_manager.in_background = False
self.session.openWithCallback(self.JobViewCB, JobView, job)
def burnISO(self, path, scope, configRef):
if path:
job = Process.DVDisoJob(self.project, path)
job_manager.AddJob(job)
job_manager.in_background = False
self.session.openWithCallback(self.JobViewCB, JobView, job)
def JobViewCB(self, in_background):
job_manager.in_background = in_background
def previewMenu(self):
job = Process.DVDJob(self.project, menupreview=True)
job_manager.in_background = False
job_manager.AddJob(job)
def updateTitleList(self):
list = [ ]
for title in self.project.titles:
list.append((title, title.properties.menutitle.value, title.properties.menusubtitle.value, title.DVBchannel, title.formatDVDmenuText("$D.$M.$Y, $T", 0), title.formatDVDmenuText("$l", 0)))
self["titles"].list = list
self.updateSize()
if len(list):
self["key_red"].text = _("Remove title")
self["key_yellow"].text = _("Title properties")
self["title_label"].text = _("Table of contents for collection") + " \"" + self.project.settings.name.value + "\":"
else:
self["key_red"].text = ""
self["key_yellow"].text = ""
self["title_label"].text = _("Please add titles to the compilation.")
def updateSize(self):
size = self.project.size/(1024*1024)
MAX_DL = self.project.MAX_DL-100
MAX_SL = self.project.MAX_SL-100
print "updateSize:", size, "MAX_DL:", MAX_DL, "MAX_SL:", MAX_SL
if size > MAX_DL:
percent = 100 * size / float(MAX_DL)
self["space_label_dual"].text = "%d MB (%.2f%%)" % (size, percent)
self["space_bar_dual"].value = int(percent)
self["space_bar_single"].value = 100
self["space_label_single"].text = ""
self["medium_label"].setText(_("Exceeds dual layer medium!"))
self["medium_label"].setForegroundColorNum(2)
if self.previous_size < MAX_DL:
self.session.open(MessageBox,text = _("Exceeds dual layer medium!"), type = MessageBox.TYPE_ERROR)
elif size > MAX_SL:
percent = 100 * size / float(MAX_DL)
self["space_label_dual"].text = "%d MB (%.2f%%)" % (size, percent)
self["space_bar_dual"].value = int(percent)
self["space_bar_single"].value = 100
self["space_label_single"].text = ""
self["medium_label"].setText(_("Required medium type:") + " " + _("DUAL LAYER DVD") + ", %d MB " % (MAX_DL - size) + _("free"))
self["medium_label"].setForegroundColorNum(1)
if self.previous_size < MAX_SL:
self.session.open(MessageBox, text = _("Your collection exceeds the size of a single layer medium, you will need a blank dual layer DVD!"), timeout = 10, type = MessageBox.TYPE_INFO)
elif size < MAX_SL:
percent = 100 * size / float(MAX_SL)
self["space_label_single"].text = "%d MB (%.2f%%)" % (size, percent)
self["space_bar_single"].value = int(percent)
self["space_bar_dual"].value = 0
self["space_label_dual"].text = ""
self["medium_label"].setText(_("Required medium type:") + " " + _("SINGLE LAYER DVD") + ", %d MB " % (MAX_SL - size) + _("free"))
self["medium_label"].setForegroundColorNum(0)
self.previous_size = size
def getCurrentTitle(self):
t = self["titles"].getCurrent()
return t and t[0]
def editTitle(self, title = None, editor = True):
t = title or self.getCurrentTitle()
if t is not None:
self.current_edit_title = t
if editor:
self.session.openWithCallback(self.titleEditDone, TitleCutter.TitleCutter, t)
else:
self.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, t)
def titleEditDone(self, cutlist):
t = self.current_edit_title
t.titleEditDone(cutlist)
if t.VideoType != 0:
self.session.openWithCallback(self.DVDformatCB,MessageBox,text = _("The DVD standard doesn't support H.264 (HDTV) video streams. Do you want to create a %s %s format data DVD (which will not play in stand-alone DVD players) instead?") % (getMachineBrand(), getMachineName()), type = MessageBox.TYPE_YESNO)
else:
self.updateTitleList()
def resetTitles(self):
count = 0
for title in self.project.titles:
count += 1
title.initDVDmenuText(count)
self.updateTitleList()
def DVDformatCB(self, answer):
t = self.current_edit_title
if answer:
self.project.settings.authormode.setValue("data_ts")
self.updateTitleList()
else:
self.removeTitle(t)
def leave(self, close = False):
if not len(self["titles"].list) or close:
self.close()
else:
self.session.openWithCallback(self.exitCB, MessageBox,text = _("Your current collection will get lost!") + "\n" + _("Do you really want to exit?"), type = MessageBox.TYPE_YESNO)
def exitCB(self, answer):
print "exitCB", answer
if answer is not None and answer:
self.close()
|
Lordaeron12/padawan
|
refs/heads/master
|
padawan/dashboard/models.py
|
792
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
|
ytjiang/django
|
refs/heads/master
|
tests/model_fields/test_durationfield.py
|
36
|
import datetime
import json
from django import forms
from django.core import exceptions, serializers
from django.db import models
from django.test import TestCase
from .models import DurationModel, NullDurationModel
class TestSaveLoad(TestCase):
def test_simple_roundtrip(self):
duration = datetime.timedelta(days=123, seconds=123, microseconds=123)
DurationModel.objects.create(field=duration)
loaded = DurationModel.objects.get()
self.assertEqual(loaded.field, duration)
def test_create_empty(self):
NullDurationModel.objects.create()
loaded = NullDurationModel.objects.get()
self.assertEqual(loaded.field, None)
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
DurationModel.objects.create(field=datetime.timedelta(days=1)),
DurationModel.objects.create(field=datetime.timedelta(seconds=1)),
DurationModel.objects.create(field=datetime.timedelta(seconds=-1)),
]
def test_exact(self):
self.assertSequenceEqual(
DurationModel.objects.filter(field=datetime.timedelta(days=1)),
[self.objs[0]]
)
def test_gt(self):
self.assertSequenceEqual(
DurationModel.objects.filter(field__gt=datetime.timedelta(days=0)),
[self.objs[0], self.objs[1]]
)
class TestSerialization(TestCase):
test_data = '[{"fields": {"field": "1 01:00:00"}, "model": "model_fields.durationmodel", "pk": null}]'
def test_dumping(self):
instance = DurationModel(field=datetime.timedelta(days=1, hours=1))
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, datetime.timedelta(days=1, hours=1))
class TestValidation(TestCase):
def test_invalid_string(self):
field = models.DurationField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('not a datetime', None)
self.assertEqual(cm.exception.code, 'invalid')
self.assertEqual(
cm.exception.message % cm.exception.params,
"'not a datetime' value has an invalid format. "
"It must be in [DD] [HH:[MM:]]ss[.uuuuuu] format."
)
class TestFormField(TestCase):
# Tests for forms.DurationField are in the forms_tests app.
def test_formfield(self):
field = models.DurationField()
self.assertIsInstance(field.formfield(), forms.DurationField)
|
keedio/hue
|
refs/heads/master
|
desktop/core/ext-py/Paste-2.0.1/paste/debug/doctest_webapp.py
|
50
|
#!/usr/bin/env python2.4
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
These are functions for use when doctest-testing a document.
"""
import subprocess
import doctest
import os
import sys
import shutil
import re
import cgi
import rfc822
from cStringIO import StringIO
from paste.util import PySourceColor
here = os.path.abspath(__file__)
paste_parent = os.path.dirname(
os.path.dirname(os.path.dirname(here)))
def run(command):
data = run_raw(command)
if data:
print(data)
def run_raw(command):
"""
Runs the string command, returns any output.
"""
proc = subprocess.Popen(command, shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, env=_make_env())
data = proc.stdout.read()
proc.wait()
while data.endswith('\n') or data.endswith('\r'):
data = data[:-1]
if data:
data = '\n'.join(
[l for l in data.splitlines() if l])
return data
else:
return ''
def run_command(command, name, and_print=False):
output = run_raw(command)
data = '$ %s\n%s' % (command, output)
show_file('shell-command', name, description='shell transcript',
data=data)
if and_print and output:
print(output)
def _make_env():
env = os.environ.copy()
env['PATH'] = (env.get('PATH', '')
+ ':'
+ os.path.join(paste_parent, 'scripts')
+ ':'
+ os.path.join(paste_parent, 'paste', '3rd-party',
'sqlobject-files', 'scripts'))
env['PYTHONPATH'] = (env.get('PYTHONPATH', '')
+ ':'
+ paste_parent)
return env
def clear_dir(dir):
"""
Clears (deletes) the given directory
"""
shutil.rmtree(dir, True)
def ls(dir=None, recurse=False, indent=0):
"""
Show a directory listing
"""
dir = dir or os.getcwd()
fns = os.listdir(dir)
fns.sort()
for fn in fns:
full = os.path.join(dir, fn)
if os.path.isdir(full):
fn = fn + '/'
print(' '*indent + fn)
if os.path.isdir(full) and recurse:
ls(dir=full, recurse=True, indent=indent+2)
default_app = None
default_url = None
def set_default_app(app, url):
global default_app
global default_url
default_app = app
default_url = url
def resource_filename(fn):
"""
Returns the filename of the resource -- generally in the directory
resources/DocumentName/fn
"""
return os.path.join(
os.path.dirname(sys.testing_document_filename),
'resources',
os.path.splitext(os.path.basename(sys.testing_document_filename))[0],
fn)
def show(path_info, example_name):
fn = resource_filename(example_name + '.html')
out = StringIO()
assert default_app is not None, (
"No default_app set")
url = default_url + path_info
out.write('<span class="doctest-url"><a href="%s">%s</a></span><br>\n'
% (url, url))
out.write('<div class="doctest-example">\n')
proc = subprocess.Popen(
['paster', 'serve', '--server=console', '--no-verbose',
'--url=' + path_info],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=_make_env())
stdout, errors = proc.communicate()
stdout = StringIO(stdout)
headers = rfc822.Message(stdout)
content = stdout.read()
for header, value in headers.items():
if header.lower() == 'status' and int(value.split()[0]) == 200:
continue
if header.lower() in ('content-type', 'content-length'):
continue
if (header.lower() == 'set-cookie'
and value.startswith('_SID_')):
continue
out.write('<span class="doctest-header">%s: %s</span><br>\n'
% (header, value))
lines = [l for l in content.splitlines() if l.strip()]
for line in lines:
out.write(line + '\n')
if errors:
out.write('<pre class="doctest-errors">%s</pre>'
% errors)
out.write('</div>\n')
result = out.getvalue()
if not os.path.exists(fn):
f = open(fn, 'wb')
f.write(result)
f.close()
else:
f = open(fn, 'rb')
expected = f.read()
f.close()
if not html_matches(expected, result):
print('Pages did not match. Expected from %s:' % fn)
print('-'*60)
print(expected)
print('='*60)
print('Actual output:')
print('-'*60)
print(result)
def html_matches(pattern, text):
regex = re.escape(pattern)
regex = regex.replace(r'\.\.\.', '.*')
regex = re.sub(r'0x[0-9a-f]+', '.*', regex)
regex = '^%s$' % regex
return re.search(regex, text)
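# For illustration, the literal '...' in the expected text acts as a wildcard,
# so html_matches('<b>...</b>', '<b>anything at all</b>') returns a match,
# while html_matches('<b>x</b>', '<b>y</b>') returns None.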
def convert_docstring_string(data):
if data.startswith('\n'):
data = data[1:]
lines = data.splitlines()
new_lines = []
for line in lines:
if line.rstrip() == '.':
new_lines.append('')
else:
new_lines.append(line)
data = '\n'.join(new_lines) + '\n'
return data
def create_file(path, version, data):
data = convert_docstring_string(data)
write_data(path, data)
show_file(path, version)
def append_to_file(path, version, data):
data = convert_docstring_string(data)
f = open(path, 'a')
f.write(data)
f.close()
# I think these appends can happen so quickly (in less than a second)
# that the .pyc file doesn't appear to be expired, even though it
# is after we've made this change; so we have to get rid of the .pyc
# file:
if path.endswith('.py'):
pyc_file = path + 'c'
if os.path.exists(pyc_file):
os.unlink(pyc_file)
show_file(path, version, description='added to %s' % path,
data=data)
def show_file(path, version, description=None, data=None):
ext = os.path.splitext(path)[1]
if data is None:
f = open(path, 'rb')
data = f.read()
f.close()
if ext == '.py':
html = ('<div class="source-code">%s</div>'
% PySourceColor.str2html(data, PySourceColor.dark))
else:
html = '<pre class="source-code">%s</pre>' % cgi.escape(data, 1)
html = '<span class="source-filename">%s</span><br>%s' % (
description or path, html)
write_data(resource_filename('%s.%s.gen.html' % (path, version)),
html)
def call_source_highlight(input, format):
proc = subprocess.Popen(['source-highlight', '--out-format=html',
'--no-doc', '--css=none',
'--src-lang=%s' % format], shell=False,
stdout=subprocess.PIPE)
stdout, stderr = proc.communicate(input)
result = stdout
proc.wait()
return result
def write_data(path, data):
dir = os.path.dirname(os.path.abspath(path))
if not os.path.exists(dir):
os.makedirs(dir)
f = open(path, 'wb')
f.write(data)
f.close()
def change_file(path, changes):
f = open(os.path.abspath(path), 'rb')
lines = f.readlines()
f.close()
for change_type, line, text in changes:
if change_type == 'insert':
lines[line:line] = [text]
elif change_type == 'delete':
lines[line:text] = []
else:
assert 0, (
"Unknown change_type: %r" % change_type)
f = open(path, 'wb')
f.write(''.join(lines))
f.close()
class LongFormDocTestParser(doctest.DocTestParser):
"""
This parser recognizes some reST comments as commands, without
prompts or expected output, like:
.. run:
do_this(...
...)
"""
_EXAMPLE_RE = re.compile(r"""
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?: (?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*))
|
(?: # This is for longer commands that are prefixed with a reST
# comment like '.. run:' (two colons makes that a directive).
# These commands cannot have any output.
(?:^\.\.[ ]*(?P<run>run):[ ]*\n) # Leading comment/command
(?:[ ]*\n)? # Blank line following
(?P<runsource>
(?:(?P<runindent> [ ]+)[^ ].*$)
(?:\n [ ]+ .*)*)
)
|
(?: # This is for shell commands
(?P<shellsource>
(?:^(?P<shellindent> [ ]*) [$] .*) # Shell line
(?:\n [ ]* [>] .*)*) # Continuation
\n?
# Want consists of any non-blank lines that do not start with $
(?P<shellwant> (?:(?![ ]*$)
(?![ ]*[$]$)
.*$\n?
)*))
""", re.MULTILINE | re.VERBOSE)
def _parse_example(self, m, name, lineno):
r"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
>>> def parseit(s):
... p = LongFormDocTestParser()
... return p._parse_example(p._EXAMPLE_RE.search(s), '<string>', 1)
>>> parseit('>>> 1\n1')
('1', {}, '1', None)
>>> parseit('>>> (1\n... +1)\n2')
('(1\n+1)', {}, '2', None)
>>> parseit('.. run:\n\n test1\n test2\n')
('test1\ntest2', {}, '', None)
"""
# Get the example's indentation level.
runner = m.group('run') or ''
indent = len(m.group('%sindent' % runner))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('%ssource' % runner).split('\n')
if runner:
self._check_prefix(source_lines[1:], ' '*indent, name, lineno)
else:
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[2:], ' '*indent + '.', name, lineno)
if runner:
source = '\n'.join([sl[indent:] for sl in source_lines])
else:
source = '\n'.join([sl[indent+4:] for sl in source_lines])
if runner:
want = ''
exc_msg = None
else:
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
# @@: Erg, this is the only line I need to change...
output.append(doctest.Example(
source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent') or m.group('runindent')),
options=options))
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
if __name__ == '__main__':
if sys.argv[1:] and sys.argv[1] == 'doctest':
doctest.testmod()
sys.exit()
if not paste_parent in sys.path:
sys.path.append(paste_parent)
for fn in sys.argv[1:]:
fn = os.path.abspath(fn)
# @@: OK, ick; but this module gets loaded twice
sys.testing_document_filename = fn
doctest.testfile(
fn, module_relative=False,
optionflags=doctest.ELLIPSIS|doctest.REPORT_ONLY_FIRST_FAILURE,
parser=LongFormDocTestParser())
new = os.path.splitext(fn)[0] + '.html'
assert new != fn
os.system('rst2html.py %s > %s' % (fn, new))
|
collinjackson/mojo
|
refs/heads/master
|
third_party/cython/src/Cython/Plex/DFA.py
|
102
|
#=======================================================================
#
# Python Lexical Analyser
#
# Converting NFA to DFA
#
#=======================================================================
import Machines
from Machines import LOWEST_PRIORITY
from Transitions import TransitionMap
def nfa_to_dfa(old_machine, debug = None):
"""
Given a nondeterministic Machine, return a new equivalent
Machine which is deterministic.
"""
# We build a new machine whose states correspond to sets of states
# in the old machine. Initially we add a new state corresponding to
# the epsilon-closure of each initial old state. Then we give transitions
# to each new state which are the union of all transitions out of any
# of the corresponding old states. The new state reached on a given
# character is the one corresponding to the set of states reachable
# on that character from any of the old states. As new combinations of
# old states are created, new states are added as needed until closure
# is reached.
new_machine = Machines.FastMachine()
state_map = StateMap(new_machine)
# Seed the process using the initial states of the old machine.
# Make the corresponding new states into initial states of the new
# machine with the same names.
for (key, old_state) in old_machine.initial_states.iteritems():
new_state = state_map.old_to_new(epsilon_closure(old_state))
new_machine.make_initial_state(key, new_state)
# Tricky bit here: we add things to the end of this list while we're
# iterating over it. The iteration stops when closure is achieved.
for new_state in new_machine.states:
transitions = TransitionMap()
for old_state in state_map.new_to_old(new_state):
for event, old_target_states in old_state.transitions.iteritems():
if event and old_target_states:
transitions.add_set(event, set_epsilon_closure(old_target_states))
for event, old_states in transitions.iteritems():
new_machine.add_transitions(new_state, event, state_map.old_to_new(old_states))
if debug:
debug.write("\n===== State Mapping =====\n")
state_map.dump(debug)
return new_machine
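# Illustrative sketch (not part of the original Plex module): the same subset
# construction as nfa_to_dfa() above, but on a toy NFA held in plain dicts so
# it can be read or run without Machines/TransitionMap.  The helper name
# _toy_nfa_to_dfa and the data layout ({state: {symbol: set_of_states}}, with
# '' standing for an epsilon move) are assumptions made purely for illustration.
def _toy_nfa_to_dfa(nfa, start):
    def closure(states):
        # Epsilon-closure: follow '' transitions until no new state appears.
        result = set(states)
        stack = list(states)
        while stack:
            state = stack.pop()
            for target in nfa.get(state, {}).get('', ()):
                if target not in result:
                    result.add(target)
                    stack.append(target)
        return frozenset(result)
    start_set = closure({start})
    dfa = {}
    worklist = [start_set]
    while worklist:
        current = worklist.pop()
        if current in dfa:
            continue
        dfa[current] = {}
        symbols = set()
        for state in current:
            symbols.update(sym for sym in nfa.get(state, {}) if sym)
        for sym in symbols:
            # The new state reached on `sym` is the closure of everything
            # reachable on `sym` from any member of the current state set.
            targets = set()
            for state in current:
                targets.update(nfa.get(state, {}).get(sym, ()))
            dfa[current][sym] = closure(targets)
            worklist.append(dfa[current][sym])
    return start_set, dfa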
def set_epsilon_closure(state_set):
"""
Given a set of states, return the union of the epsilon
closures of its member states.
"""
result = {}
for state1 in state_set:
for state2 in epsilon_closure(state1):
result[state2] = 1
return result
def epsilon_closure(state):
"""
Return the set of states reachable from the given state
by epsilon moves.
"""
# Cache the result
result = state.epsilon_closure
if result is None:
result = {}
state.epsilon_closure = result
add_to_epsilon_closure(result, state)
return result
def add_to_epsilon_closure(state_set, state):
"""
Recursively add to |state_set| states reachable from the given state
by epsilon moves.
"""
if not state_set.get(state, 0):
state_set[state] = 1
state_set_2 = state.transitions.get_epsilon()
if state_set_2:
for state2 in state_set_2:
add_to_epsilon_closure(state_set, state2)
class StateMap(object):
"""
Helper class used by nfa_to_dfa() to map back and forth between
sets of states from the old machine and states of the new machine.
"""
new_machine = None # Machine
old_to_new_dict = None # {(old_state,...) : new_state}
new_to_old_dict = None # {id(new_state) : old_state_set}
def __init__(self, new_machine):
self.new_machine = new_machine
self.old_to_new_dict = {}
        self.new_to_old_dict = {}
def old_to_new(self, old_state_set):
"""
Return the state of the new machine corresponding to the
set of old machine states represented by |state_set|. A new
state will be created if necessary. If any of the old states
are accepting states, the new state will be an accepting state
with the highest priority action from the old states.
"""
key = self.make_key(old_state_set)
new_state = self.old_to_new_dict.get(key, None)
if not new_state:
action = self.highest_priority_action(old_state_set)
new_state = self.new_machine.new_state(action)
self.old_to_new_dict[key] = new_state
self.new_to_old_dict[id(new_state)] = old_state_set
#for old_state in old_state_set.keys():
#new_state.merge_actions(old_state)
return new_state
def highest_priority_action(self, state_set):
best_action = None
best_priority = LOWEST_PRIORITY
for state in state_set:
priority = state.action_priority
if priority > best_priority:
best_action = state.action
best_priority = priority
return best_action
# def old_to_new_set(self, old_state_set):
# """
# Return the new state corresponding to a set of old states as
# a singleton set.
# """
# return {self.old_to_new(old_state_set):1}
def new_to_old(self, new_state):
"""Given a new state, return a set of corresponding old states."""
return self.new_to_old_dict[id(new_state)]
def make_key(self, state_set):
"""
Convert a set of states into a uniquified
sorted tuple suitable for use as a dictionary key.
"""
lst = list(state_set)
lst.sort()
return tuple(lst)
def dump(self, file):
from Transitions import state_set_str
for new_state in self.new_machine.states:
old_state_set = self.new_to_old_dict[id(new_state)]
file.write(" State %s <-- %s\n" % (
new_state['number'], state_set_str(old_state_set)))
|
allotria/intellij-community
|
refs/heads/master
|
python/helpers/tests/generator3_tests/data/SkeletonGeneration/binary_declares_extra_module_that_fails/mod.py
|
12
|
import sys
import lib
class FakeModule(object):
def __getattribute__(self, item):
raise AttributeError
sys.modules['extra'] = FakeModule()
del FakeModule
del lib
del sys
|
SCOAP3/invenio
|
refs/heads/master
|
invenio/legacy/refextract/xml.py
|
15
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import absolute_import
import re
from xml.sax.saxutils import escape as encode_for_xml
from datetime import datetime
from invenio.legacy.refextract.regexs import re_num
from invenio.legacy.docextract.utils import write_message
from invenio.legacy.refextract.config import \
CFG_REFEXTRACT_TAG_ID_REFERENCE, \
CFG_REFEXTRACT_IND1_REFERENCE, \
CFG_REFEXTRACT_IND2_REFERENCE, \
CFG_REFEXTRACT_SUBFIELD_MARKER, \
CFG_REFEXTRACT_SUBFIELD_AUTH, \
CFG_REFEXTRACT_SUBFIELD_TITLE, \
CFG_REFEXTRACT_SUBFIELD_MISC, \
CGF_REFEXTRACT_SEMI_COLON_MISC_TEXT_SENSITIVITY, \
CFG_REFEXTRACT_SUBFIELD_REPORT_NUM, \
CFG_REFEXTRACT_XML_RECORD_OPEN, \
CFG_REFEXTRACT_CTRL_FIELD_RECID, \
CFG_REFEXTRACT_TAG_ID_EXTRACTION_STATS, \
CFG_REFEXTRACT_IND1_EXTRACTION_STATS, \
CFG_REFEXTRACT_IND2_EXTRACTION_STATS, \
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_STATS, \
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_TIME, \
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_VERSION, \
CFG_REFEXTRACT_VERSION, \
CFG_REFEXTRACT_XML_RECORD_CLOSE, \
CFG_REFEXTRACT_SUBFIELD_URL_DESCR, \
CFG_REFEXTRACT_SUBFIELD_URL, \
CFG_REFEXTRACT_SUBFIELD_DOI, \
CGF_REFEXTRACT_ADJACENT_AUTH_MISC_SEPARATION, \
CFG_REFEXTRACT_SUBFIELD_QUOTED, \
CFG_REFEXTRACT_SUBFIELD_ISBN, \
CFG_REFEXTRACT_SUBFIELD_PUBLISHER, \
CFG_REFEXTRACT_SUBFIELD_YEAR, \
CFG_REFEXTRACT_SUBFIELD_BOOK
from invenio import config
CFG_INSPIRE_SITE = getattr(config, 'CFG_INSPIRE_SITE', False)
def format_marker(line_marker):
if line_marker:
num_match = re_num.search(line_marker)
if num_match:
line_marker = num_match.group(0)
return line_marker
def create_xml_record(counts, recid, xml_lines, status_code=0):
"""Given a series of MARC XML-ized reference lines and a record-id, write a
MARC XML record to the stdout stream. Include in the record some stats
for the extraction job.
The printed MARC XML record will essentially take the following
structure:
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="999" ind1="C" ind2="5">
[...]
</datafield>
[...]
<datafield tag="999" ind1="C" ind2="6">
<subfield code="a">
Invenio/X.XX.X refextract/X.XX.X-timestamp-err-repnum-title-URL-misc
</subfield>
</datafield>
</record>
    Timestamp, error(code), reportnum, title, URL, and misc will of
course take the relevant values.
@param status_code: (integer)the status of reference-extraction for the
given record: was there an error or not? 0 = no error; 1 = error.
@param count_reportnum: (integer) - the number of institutional
report-number citations found in the document's reference lines.
@param count_title: (integer) - the number of journal title citations
found in the document's reference lines.
@param count_url: (integer) - the number of URL citations found in the
document's reference lines.
@param count_misc: (integer) - the number of sections of miscellaneous
text (i.e. 999C5$m) from the document's reference lines.
@param count_auth_group: (integer) - the total number of author groups
identified ($h)
@param recid: (string) - the record-id of the given document. (put into
001 field.)
@param xml_lines: (list) of strings. Each string in the list contains a
group of MARC XML 999C5 datafields, making up a single reference line.
These reference lines will make up the document body.
@return: The entire MARC XML textual output, plus recognition statistics.
"""
out = []
## Start with the opening record tag:
out += u"%(record-open)s\n" \
% {'record-open': CFG_REFEXTRACT_XML_RECORD_OPEN, }
## Display the record-id controlfield:
out += \
u""" <controlfield tag="%(cf-tag-recid)s">%(recid)d</controlfield>\n""" \
% {'cf-tag-recid' : CFG_REFEXTRACT_CTRL_FIELD_RECID,
'recid' : recid,
}
## Loop through all xml lines and add them to the output string:
out.extend(xml_lines)
## add the 999C6 status subfields:
out += u""" <datafield tag="%(df-tag-ref-stats)s" ind1="%(df-ind1-ref-stats)s" ind2="%(df-ind2-ref-stats)s">
<subfield code="%(sf-code-ref-stats)s">%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s</subfield>
<subfield code="%(sf-code-ref-time)s">%(timestamp)s</subfield>
<subfield code="%(sf-code-ref-version)s">%(version)s</subfield>
</datafield>\n""" \
% {'df-tag-ref-stats' : CFG_REFEXTRACT_TAG_ID_EXTRACTION_STATS,
'df-ind1-ref-stats' : CFG_REFEXTRACT_IND1_EXTRACTION_STATS,
'df-ind2-ref-stats' : CFG_REFEXTRACT_IND2_EXTRACTION_STATS,
'sf-code-ref-stats' : CFG_REFEXTRACT_SUBFIELD_EXTRACTION_STATS,
'sf-code-ref-time' : CFG_REFEXTRACT_SUBFIELD_EXTRACTION_TIME,
'sf-code-ref-version': CFG_REFEXTRACT_SUBFIELD_EXTRACTION_VERSION,
'version' : CFG_REFEXTRACT_VERSION,
'timestamp' : datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'status' : status_code,
'reportnum' : counts['reportnum'],
'title' : counts['title'],
'author' : counts['auth_group'],
'url' : counts['url'],
'doi' : counts['doi'],
'misc' : counts['misc'],
}
## Now add the closing tag to the record:
out += u"%(record-close)s\n" \
% {'record-close' : CFG_REFEXTRACT_XML_RECORD_CLOSE, }
## Be sure to call this BEFORE compress_subfields
out = filter_processed_references(''.join(out))
    ## Compress multiple 'm' subfields in a datafield
out = compress_subfields(out, CFG_REFEXTRACT_SUBFIELD_MISC)
## Compress multiple 'h' subfields in a datafield
out = compress_subfields(out, CFG_REFEXTRACT_SUBFIELD_AUTH)
return out
def build_xml_citations(splitted_citations, line_marker):
return [build_xml_citation(citation_elements, line_marker) \
for citation_elements in splitted_citations]
def build_xml_citation(citation_elements, line_marker, inspire_format=None):
""" Create the MARC-XML string of the found reference information which was taken
from a tagged reference line.
@param citation_elements: (list) an ordered list of dictionary elements,
with each element corresponding to a found piece of information from a reference line.
@param line_marker: (string) The line marker for this single reference line (e.g. [19])
@return xml_line: (string) The MARC-XML representation of the list of reference elements
"""
if inspire_format is None:
inspire_format = CFG_INSPIRE_SITE
## Begin the datafield element
xml_line = start_datafield_element(line_marker)
## This will hold the ordering of tags which have been appended to the xml line
    ## This list will be used to control the decisions involving the creation of new citation lines
## (in the event of a new set of authors being recognised, or strange title ordering...)
line_elements = []
## This is a list which will hold the current 'over-view' of a single reference line,
## as a list of lists, where each list corresponds to the contents of a datafield element
## in the xml mark-up
citation_structure = []
auth_for_ibid = None
for element in citation_elements:
## Before going onto checking 'what' the next element is, handle misc text and semi-colons
## Multiple misc text subfields will be compressed later
## This will also be the only part of the code that deals with MISC tag_typed elements
if element['misc_txt'].strip(".,:;- []"):
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_MISC,
element['misc_txt'].strip(".,:;- []"))
# Now handle the type dependent actions
# TITLE
if element['type'] == "JOURNAL":
# Select the journal title output format
if inspire_format:
# ADD to current datafield
xml_line += """
<subfield code="%(sf-code-ref-title)s">%(title)s,%(volume)s,%(page)s</subfield>""" \
% {'sf-code-ref-title': CFG_REFEXTRACT_SUBFIELD_TITLE,
'title' : encode_for_xml(element['title']),
'volume' : encode_for_xml(element['volume']),
'page' : encode_for_xml(element['page']),
}
else:
# ADD to current datafield
xml_line += """
<subfield code="%(sf-code-ref-title)s">%(title)s %(volume)s (%(year)s) %(page)s</subfield>""" \
% {'sf-code-ref-title': CFG_REFEXTRACT_SUBFIELD_TITLE,
'title' : encode_for_xml(element['title']),
'volume' : encode_for_xml(element['volume']),
'year' : encode_for_xml(element['year']),
'page' : encode_for_xml(element['page']),
}
# Now, if there are any extra (numeration based) IBID's after this title
if len(element['extra_ibids']) > 0:
# At least one IBID is present, these are to be outputted each into their own datafield
for ibid in element['extra_ibids']:
# %%%%% Set as NEW citation line %%%%%
(xml_line, auth_for_ibid) = append_datafield_element(line_marker,
citation_structure,
line_elements,
auth_for_ibid,
xml_line)
if inspire_format:
xml_line += """
<subfield code="%(sf-code-ref-title)s">%(title)s,%(volume)s,%(page)s</subfield>""" \
% {'sf-code-ref-title': CFG_REFEXTRACT_SUBFIELD_TITLE,
'title' : encode_for_xml(ibid['title']),
'volume' : encode_for_xml(ibid['volume']),
'page' : encode_for_xml(ibid['page']),
}
else:
xml_line += """
<subfield code="%(sf-code-ref-title)s">%(title)s %(volume)s (%(year)s) %(page)s</subfield>""" \
% {'sf-code-ref-title': CFG_REFEXTRACT_SUBFIELD_TITLE,
'title' : encode_for_xml(ibid['title']),
'volume' : encode_for_xml(ibid['volume']),
'year' : encode_for_xml(ibid['year']),
'page' : encode_for_xml(ibid['page']),
}
# Add a Title element to the past elements list, since we last found an IBID
line_elements.append(element)
# REPORT NUMBER
elif element['type'] == "REPORTNUMBER":
# ADD to current datafield
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_REPORT_NUM,
element['report_num'])
line_elements.append(element)
# URL
elif element['type'] == "URL":
if element['url_string'] == element['url_desc']:
# Build the datafield for the URL segment of the reference line:
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_URL,
element['url_string'])
# Else, in the case that the url string and the description differ in some way, include them both
else:
# Build the datafield for the URL segment of the reference line:
xml_line += """
<subfield code="%(sf-code-ref-url)s">%(url)s</subfield>
<subfield code="%(sf-code-ref-url-desc)s">%(url-desc)s</subfield>""" \
% {'sf-code-ref-url' : CFG_REFEXTRACT_SUBFIELD_URL,
'sf-code-ref-url-desc': CFG_REFEXTRACT_SUBFIELD_URL_DESCR,
'url' : encode_for_xml(element['url_string']),
'url-desc' : encode_for_xml(element['url_desc'])
}
line_elements.append(element)
# DOI
elif element['type'] == "DOI":
## Split on hitting another DOI in the same line
if is_in_line_elements("DOI", line_elements):
## %%%%% Set as NEW citation line %%%%%
xml_line, auth_for_ibid = append_datafield_element(line_marker,
citation_structure,
line_elements,
auth_for_ibid,
xml_line)
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_DOI,
element['doi_string'])
line_elements.append(element)
# AUTHOR
elif element['type'] == "AUTH":
value = element['auth_txt']
if element['auth_type'] == 'incl':
value = "(%s)" % value
if is_in_line_elements("AUTH", line_elements) and line_elements[-1]['type'] != "AUTH":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_MISC,
value)
else:
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_AUTH,
value)
line_elements.append(element)
elif element['type'] == "QUOTED":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_QUOTED,
element['title'])
line_elements.append(element)
elif element['type'] == "ISBN":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_ISBN,
element['ISBN'])
line_elements.append(element)
elif element['type'] == "BOOK":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_QUOTED,
element['title'])
xml_line += '\n <subfield code="%s" />' % \
CFG_REFEXTRACT_SUBFIELD_BOOK
line_elements.append(element)
elif element['type'] == "PUBLISHER":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_PUBLISHER,
element['publisher'])
line_elements.append(element)
elif element['type'] == "YEAR":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_YEAR,
element['year'])
line_elements.append(element)
# Append the author, if needed for an ibid, for the last element
# in the entire line. Don't bother setting the author to be used
# for ibids, since the line is finished
xml_line += check_author_for_ibid(line_elements, auth_for_ibid)[0]
# Close the ending datafield element
xml_line += "\n </datafield>\n"
return xml_line
def append_subfield_element(xml_line, subfield_code, value):
xml_element = '\n <subfield code="' \
'%(sf-code-ref-auth)s">%(value)s</subfield>' % {
'value' : encode_for_xml(value),
'sf-code-ref-auth' : subfield_code,
}
return xml_line + xml_element
def start_datafield_element(line_marker):
""" Start a brand new datafield element with a marker subfield.
@param line_marker: (string) The line marker which will be the sole
content of the newly created marker subfield. This will always be the
first subfield to be created for a new datafield element.
@return: (string) The string holding the relevant datafield and
subfield tags.
"""
marker_subfield = """
<subfield code="%(sf-code-ref-marker)s">%(marker-val)s</subfield>""" \
% {'sf-code-ref-marker': CFG_REFEXTRACT_SUBFIELD_MARKER,
'marker-val' : encode_for_xml(format_marker(line_marker))}
new_datafield = """ <datafield tag="%(df-tag-ref)s" ind1="%(df-ind1-ref)s" ind2="%(df-ind2-ref)s">%(marker-subfield)s""" \
% {'df-tag-ref' : CFG_REFEXTRACT_TAG_ID_REFERENCE,
'df-ind1-ref' : CFG_REFEXTRACT_IND1_REFERENCE,
'df-ind2-ref' : CFG_REFEXTRACT_IND2_REFERENCE,
'marker-subfield': marker_subfield}
return new_datafield
def dump_or_split_author(misc_txt, line_elements):
"""
Given the list of current elements, and misc text, try to decide how to use this
    author for splitting heuristics, and see if it is useful. Returning 'dump'
    means this author should be placed into misc text, since it has been
    identified as bad; 'split' means the line should be split and this author
    placed into the fresh datafield; the empty string means this author should
    be added as normal to the current xml datafield.
A line will be split using author information in two situations:
1. When there already exists a previous author group in the same line
2. If the only item in the current line is a title, with no misc text
In both situations, the newly found author element is placed into the newly created
datafield.
This method heavily assumes that the first author group found in a single citation is the
most reliable (In accordance with the IEEE standard, which states that authors should
be written at the beginning of a citation, in the overwhelming majority of cases).
@param misc_txt: (string) The misc text for this reference line
@param line_elements: (list) The list of elements found for this current line
@return: (string) The action to take to deal with this author.
"""
## If an author has already been found in this reference line
if is_in_line_elements("AUTH", line_elements):
## If this author group is directly after another author group,
## with minimal misc text between, then this author group is very likely to be wrong.
if line_elements[-1]['type'] == "AUTH" \
and len(misc_txt) < CGF_REFEXTRACT_ADJACENT_AUTH_MISC_SEPARATION:
return "dump"
## Else, trigger a new reference line
return "split"
    ## In cases where an author comes directly after a standalone title (ibid or normal, with no misc),
## Trigger a new reference line
if is_in_line_elements("JOURNAL", line_elements) and len(line_elements) == 1 \
and len(misc_txt) == 0:
return "split"
return ""
def is_in_line_elements(element_type, line_elements):
""" Checks the list of current elements in the line for the given element type """
for i, element in enumerate(line_elements):
if element['type'] == element_type:
return (True, line_elements[i])
return False
def split_on_semi_colon(misc_txt, line_elements, elements_processed, total_elements):
    """ Given some misc text, see if there are any semi-colons which may indicate that
    a reference line is in fact two separate citations.
@param misc_txt: (string) The misc_txt to look for semi-colons within.
@param line_elements: (list) The list of single upper-case chars which
represent an element of a reference which has been processed.
@param elements_processed: (integer) The number of elements which have been
*looked at* for this entire reference line, regardless of splits
@param total_elements: (integer) The total number of elements which
have been identified in the *entire* reference line
    @return: (string) Indicating where the semi-colon was found in relation to the
rest of the misc_txt. False if a semi-colon was not found, or one was found
relating to an escaped piece of text.
"""
## If there has already been meaningful information found in the reference
## and there are still elements to be processed beyond the element relating to
## this misc_txt
if (is_in_line_elements("JOURNAL", line_elements) \
or is_in_line_elements("REPORTNUMBER", line_elements) \
or len(misc_txt) >= CGF_REFEXTRACT_SEMI_COLON_MISC_TEXT_SENSITIVITY) \
and elements_processed < total_elements:
        if len(misc_txt) >= 4 and \
                (misc_txt[-5:] == '&amp;' or misc_txt[-4:] == '&lt;'):
## This is a semi-colon which does not indicate a new citation
return ""
else:
            ## If a semi-colon is at the end, make sure to append preceding misc_txt to
## the current datafield element
if misc_txt.strip(" .,")[-1] == ";":
return "after"
## Else, make sure to append the misc_txt to the *newly created datafield element*
elif misc_txt.strip(" .,")[0] == ";":
return "before"
return ""
def check_author_for_ibid(line_elements, author):
""" Given a list of elements for an *entire* reference line, and the current
author element to be used for ibids, check to see if that author element needs
to be inserted into this line, depending on the presence of ibids and whether
or not there is already an author paired with an ibid.
Also, if no ibids are present in the line, see if the author element needs
to be updated, depending on the presence of a normal title and a corresponding
author group.
@param line_elements: List of line elements for the entire processed reference
line
@param author: The current parent author element to be used with an ibid
@return: (tuple) - containing a possible new author subfield, and the parent
author element to be used for future ibids (if any)
"""
## Upon splitting, check for ibids in the previous line,
## If an appropriate author was found, pair it with this ibid.
## (i.e., an author has not been explicitly paired with this ibid already
## and an author exists with the parent title to which this ibid refers)
if is_in_line_elements("JOURNAL", line_elements):
## Get the title element for this line
title_element = is_in_line_elements("JOURNAL", line_elements)[1]
if author != None and not is_in_line_elements("AUTH", line_elements) \
and title_element['is_ibid']:
## Return the author subfield which needs to be appended for an ibid in the line
## No need to reset the author to be used for ibids, since this line holds an ibid
return """
<subfield code="%(sf-code-ref-auth)s">%(authors)s</subfield>""" \
% {'authors' : encode_for_xml(author['auth_txt'].strip('()')),
'sf-code-ref-auth' : CFG_REFEXTRACT_SUBFIELD_AUTH,
}, author
        ## Set the author to be used for ibids, when a standard title is present in this line,
## as well as an author
if not title_element['is_ibid'] and is_in_line_elements("AUTH", line_elements):
## Set the author to be used for ibids, in the event that a subsequent ibid is found
## this author element will be repeated.
## This author is only used when an ibid is in a line
## and there is no other author found in the line.
author = is_in_line_elements("AUTH", line_elements)[1]
## If there is no author associated with this head title, clear the author to be used for ibids
elif not title_element['is_ibid']:
author = None
## If an author does not need to be replicated for an ibid, append nothing to the xml line
return "", author
def append_datafield_element(line_marker,
citation_structure,
line_elements,
author,
xml_line):
""" Finish the current datafield element and start a new one, with a new
marker subfield.
@param line_marker: (string) The line marker which will be the sole
content of the newly created marker subfield. This will always be the
first subfield to be created for a new datafield element.
@return new_datafield: (string) The string holding the relevant
datafield and subfield tags.
"""
## Add an author, if one must be added for ibid's, before splitting this line
## Also, if a standard title and an author are both present, save the author for future use
new_datafield, author = check_author_for_ibid(line_elements, author)
xml_line += new_datafield
## Start the new datafield
xml_line += """
</datafield>
<datafield tag="%(df-tag-ref)s" ind1="%(df-ind1-ref)s" ind2="%(df-ind2-ref)s">
<subfield code="%(sf-code-ref-marker)s">%(marker-val)s</subfield>""" \
% {'df-tag-ref' : CFG_REFEXTRACT_TAG_ID_REFERENCE,
'df-ind1-ref' : CFG_REFEXTRACT_IND1_REFERENCE,
'df-ind2-ref' : CFG_REFEXTRACT_IND2_REFERENCE,
'sf-code-ref-marker' : CFG_REFEXTRACT_SUBFIELD_MARKER,
'marker-val' : encode_for_xml(format_marker(line_marker))
}
    ## add the elements of the now-finished previous citation to the citation_structure list
## (citation_structure is a reference to the initial citation_structure list found in the calling method)
citation_structure.append(line_elements)
## Clear the elements in the referenced list of elements
del line_elements[:]
return xml_line, author
def filter_processed_references(out):
""" apply filters to reference lines found - to remove junk"""
reference_lines = out.split('\n')
# Removes too long and too short m tags
m_restricted, ref_lines = restrict_m_subfields(reference_lines)
if m_restricted:
a_tag = re.compile('\<subfield code=\"a\"\>(.*?)\<\/subfield\>')
for i in range(len(ref_lines)):
# Checks to see that the datafield has the attribute ind2="6",
            # before looking to see if the subfield code attribute is 'a'
if ref_lines[i].find('<datafield tag="999" ind1="C" ind2="6">') != -1 \
and (len(ref_lines) - 1) > i:
# For each line in this datafield element, try to find the subfield whose code attribute is 'a'
while ref_lines[i].find('</datafield>') != -1 and (len(ref_lines) - 1) > i:
i += 1
# <subfield code="a">Invenio/X.XX.X
# refextract/X.XX.X-timestamp-err-repnum-title-URL-misc
                # remake the "a" tag for the new number of "m" tags
if a_tag.search(ref_lines[i]):
data = a_tag.search(ref_lines[i]).group(1)
words1 = data.split()
words2 = words1[-1].split('-')
old_m = int(words2[-1])
words2[-1] = str(old_m - m_restricted)
data1 = '-'.join(words2)
words1[-1] = data1
new_data = ' '.join(words1)
ref_lines[i] = ' <subfield code="a">' + new_data + '</subfield>'
break
    new_lines = [l for l in [rec.rstrip() for rec in ref_lines] if l]
    new_out = '\n'.join(new_lines)
    if len(reference_lines) != len(new_lines):
        write_message("  * filter results: unfiltered line count is %d and filtered line count is %d" \
                      % (len(reference_lines), len(new_lines)), verbose=2)
return new_out
def restrict_m_subfields(reference_lines):
"""Remove complete datafields which hold ONLY a single 'm' subfield,
AND where the misc content is too short or too long to be of use.
Min and max lengths derived by inspection of actual data. """
min_length = 4
max_length = 1024
m_tag = re.compile('\<subfield code=\"m\"\>(.*?)\<\/subfield\>')
filter_list = []
m_restricted = 0
for i in range(len(reference_lines)): # set up initial filter
filter_list.append(1)
for i in range(len(reference_lines)):
if m_tag.search(reference_lines[i]):
if (i - 2) >= 0 and (i + 1) < len(reference_lines):
if reference_lines[i + 1].find('</datafield>') != -1 and \
reference_lines[i - 1].find('<subfield code="o">') != -1 and \
reference_lines[i - 2].find('<datafield') != -1:
                    ## If all of these are true then it's a solitary "m" tag
mlength = len(m_tag.search(reference_lines[i]).group(1))
if mlength < min_length or mlength > max_length:
filter_list[i - 2] = filter_list[i - 1] = filter_list[i] = filter_list[i + 1] = 0
m_restricted += 1
new_reference_lines = []
for i in range(len(reference_lines)):
if filter_list[i]:
new_reference_lines.append(reference_lines[i])
return m_restricted, new_reference_lines
def get_subfield_content(line, subfield_code):
""" Given a line (subfield element) and a xml code attribute for a subfield element,
return the contents of the subfield element.
"""
content = line.split('<subfield code="' + subfield_code + '">')[1]
content = content.split('</subfield>')[0]
return content
def compress_subfields(out, subfield_code):
"""
For each datafield, compress multiple subfields of type 'subfield_code' into a single one
e.g. for MISC text, change xml format from:
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">1.</subfield>
<subfield code="m">J. Dukelsky, S. Pittel and G. Sierra</subfield>
<subfield code="s">Rev. Mod. Phys. 76 (2004) 643</subfield>
<subfield code="m">and this is some more misc text</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">2.</subfield>
<subfield code="m">J. von Delft and D.C. Ralph,</subfield>
<subfield code="s">Phys. Rep. 345 (2001) 61</subfield>
</datafield>
to:
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">1.</subfield>
<subfield code="m">J. Dukelsky, S. Pittel and G. Sierra and this is some more misc text</subfield>
<subfield code="s">Rev. Mod. Phys. 76 (2004) 643</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">2.</subfield>
<subfield code="m">J. von Delft and D.C. Ralph,</subfield>
<subfield code="s">Phys. Rep. 345 (2001) 61</subfield>
</datafield>
"""
in_lines = out.split('\n')
# hold the subfield compressed version of the xml, line by line
new_rec_lines = []
# Used to indicate when the selected subfield has already been reached
# inside a particular datafield
position = 0
# Where the concatenated misc text is held before appended at the end
content_text = ""
# Components of the misc subfield elements
subfield_start = " <subfield code=\"%s\">" % subfield_code
subfield_end = "</subfield>"
for line in in_lines:
## If reached the end of the datafield
if line.find('</datafield>') != -1:
if len(content_text) > 0:
# Insert the concatenated misc contents back where it was first
                # encountered (don't right-strip semi-colons, as these may be
                # needed for &amp; or &lt;)
if subfield_code == 'm':
content_text = content_text.strip(" ,.").lstrip(" ;")
new_rec_lines[position] = new_rec_lines[position] + \
content_text + subfield_end
content_text = ""
position = 0
new_rec_lines.append(line)
# Found subfield in question, concatenate subfield contents
# for this single datafield
elif line.find(subfield_start.strip()) != -1:
if position == 0:
## Save the position of this found subfield
## for later insertion into the same place
new_rec_lines.append(subfield_start)
position = len(new_rec_lines) - 1
new_text = get_subfield_content(line, subfield_code)
if content_text and new_text:
## Append spaces between merged text, if needed
if (content_text[-1] + new_text[0]).find(" ") == -1:
new_text = " " + new_text
content_text += new_text
else:
new_rec_lines.append(line)
## Create the readable file from the list of lines.
new_out = [l.rstrip() for l in new_rec_lines]
return '\n'.join(filter(None, new_out))
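# Illustrative sketch (not part of the original module): the same "compress
# repeated subfields" idea as compress_subfields() above, expressed with
# xml.etree.ElementTree on a single toy <datafield> instead of the
# line-by-line string splicing used by refextract.  The helper name and the
# assumption that the input is one well-formed datafield element are made up
# purely for illustration.
def _merge_subfields_sketch(datafield_xml, code):
    import xml.etree.ElementTree as ET
    datafield = ET.fromstring(datafield_xml)
    matches = [sf for sf in datafield.findall('subfield') if sf.get('code') == code]
    if len(matches) > 1:
        # Concatenate the text of every matching subfield into the first one
        # and drop the duplicates, mirroring the docstring's before/after example.
        matches[0].text = ' '.join((sf.text or '').strip() for sf in matches)
        for extra in matches[1:]:
            datafield.remove(extra)
    return ET.tostring(datafield)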
|
Bysmyyr/chromium-crosswalk
|
refs/heads/master
|
tools/perf/benchmarks/service_worker.py
|
16
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import page_sets
import re
from core import perf_benchmark
from telemetry import benchmark
from telemetry.core import util
from telemetry.page import action_runner
from telemetry.page import page_test
from telemetry.timeline import async_slice as async_slice_module
from telemetry.timeline import slice as slice_module
from telemetry.value import scalar
from measurements import timeline_controller
from metrics import speedindex
class _ServiceWorkerTimelineMetric(object):
def AddResultsOfCounters(self, process, counter_regex_string, results):
counter_filter = re.compile(counter_regex_string)
for counter_name, counter in process.counters.iteritems():
if not counter_filter.search(counter_name):
continue
total = sum(counter.totals)
# Results objects cannot contain the '.' character, so remove that here.
sanitized_counter_name = counter_name.replace('.', '_')
results.AddValue(scalar.ScalarValue(
results.current_page, sanitized_counter_name, 'count', total))
results.AddValue(scalar.ScalarValue(
results.current_page, sanitized_counter_name + '_avg', 'count',
total / float(len(counter.totals))))
def AddResultsOfEvents(
self, process, thread_regex_string, event_regex_string, results):
thread_filter = re.compile(thread_regex_string)
event_filter = re.compile(event_regex_string)
for thread in process.threads.itervalues():
thread_name = thread.name.replace('/', '_')
if not thread_filter.search(thread_name):
continue
filtered_events = []
for event in thread.IterAllEvents():
event_name = event.name.replace('.', '_')
if event_filter.search(event_name):
filtered_events.append(event)
async_events_by_name = collections.defaultdict(list)
sync_events_by_name = collections.defaultdict(list)
for event in filtered_events:
if isinstance(event, async_slice_module.AsyncSlice):
async_events_by_name[event.name].append(event)
elif isinstance(event, slice_module.Slice):
sync_events_by_name[event.name].append(event)
for event_name, event_group in async_events_by_name.iteritems():
times = [e.duration for e in event_group]
self._AddResultOfEvent(thread_name, event_name, times, results)
for event_name, event_group in sync_events_by_name.iteritems():
times = [e.self_time for e in event_group]
self._AddResultOfEvent(thread_name, event_name, times, results)
def _AddResultOfEvent(self, thread_name, event_name, times, results):
total = sum(times)
biggest_jank = max(times)
# Results objects cannot contain the '.' character, so remove that here.
sanitized_event_name = event_name.replace('.', '_')
full_name = thread_name + '|' + sanitized_event_name
results.AddValue(scalar.ScalarValue(
results.current_page, full_name, 'ms', total))
results.AddValue(scalar.ScalarValue(
results.current_page, full_name + '_max', 'ms', biggest_jank))
results.AddValue(scalar.ScalarValue(
results.current_page, full_name + '_avg', 'ms', total / len(times)))
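# Illustrative sketch (not part of the original benchmark): the total/max/avg
# aggregation reported by _AddResultOfEvent above, shown on plain
# (event_name, duration_ms) pairs so it can be read without a telemetry
# results object.  It reuses the module's `collections` import; the helper
# name and the input format are assumptions.
def _AggregateEventDurationsSketch(samples):
  grouped = collections.defaultdict(list)
  for name, duration_ms in samples:
    grouped[name].append(duration_ms)
  summary = {}
  for name, times in grouped.iteritems():
    summary[name] = {
        'total_ms': sum(times),
        'max_ms': max(times),
        'avg_ms': sum(times) / float(len(times)),
    }
  return summary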
class _ServiceWorkerMeasurement(page_test.PageTest):
"""Measure Speed Index and TRACE_EVENTs"""
def __init__(self):
super(_ServiceWorkerMeasurement, self).__init__()
self._timeline_controller = timeline_controller.TimelineController()
self._speed_index = speedindex.SpeedIndexMetric()
self._page_open_times = collections.defaultdict(int)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--enable-experimental-web-platform-features'
])
def WillNavigateToPage(self, page, tab):
self._timeline_controller.SetUp(page, tab)
self._timeline_controller.Start(tab)
self._speed_index.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
runner = action_runner.ActionRunner(tab)
# timeline_controller requires creation of at least a single interaction
# record. service_worker should be refactored to follow the
# timeline_based_measurement or it should not re-use timeline_controller
# logic for start & stop tracing.
with runner.CreateInteraction('_DummyInteraction'):
pass
tab.WaitForDocumentReadyStateToBeComplete(40)
self._timeline_controller.Stop(tab, results)
# Retrieve TRACE_EVENTs
timeline_metric = _ServiceWorkerTimelineMetric()
browser_process = self._timeline_controller.model.browser_process
filter_text = '(RegisterServiceWorker|'\
'UnregisterServiceWorker|'\
'ProcessAllocate|'\
'FindRegistrationForDocument|'\
'DispatchFetchEvent)'
timeline_metric.AddResultsOfEvents(
        browser_process, 'IOThread', filter_text, results)
# Record Speed Index
def SpeedIndexIsFinished():
return self._speed_index.IsFinished(tab)
util.WaitFor(SpeedIndexIsFinished, 60)
self._speed_index.Stop(page, tab)
# Distinguish the first and second load from the subsequent loads
url = str(page)
chart_prefix = 'page_load'
self._page_open_times[url] += 1
if self._page_open_times[url] == 1:
chart_prefix += '_1st'
elif self._page_open_times[url] == 2:
chart_prefix += '_2nd'
else:
chart_prefix += '_later'
self._speed_index.AddResults(tab, results, chart_prefix)
class _ServiceWorkerMicroBenchmarkMeasurement(page_test.PageTest):
"""Measure JS land values and TRACE_EVENTs"""
def __init__(self):
super(_ServiceWorkerMicroBenchmarkMeasurement, self).__init__()
self._timeline_controller = timeline_controller.TimelineController()
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--enable-experimental-web-platform-features'
])
def WillNavigateToPage(self, page, tab):
self._timeline_controller.SetUp(page, tab)
self._timeline_controller.Start(tab)
def ValidateAndMeasurePage(self, page, tab, results):
runner = action_runner.ActionRunner(tab)
# timeline_controller requires creation of at least a single interaction
# record. service_worker should be refactored to follow the
# timeline_based_measurement or it should not re-use timeline_controller
# logic for start & stop tracing.
with runner.CreateInteraction('_DummyInteraction'):
pass
tab.WaitForJavaScriptExpression('window.done', 40)
self._timeline_controller.Stop(tab, results)
# Measure JavaScript-land
json = tab.EvaluateJavaScript('window.results || {}')
for key, value in json.iteritems():
results.AddValue(scalar.ScalarValue(
results.current_page, key, value['units'], value['value']))
# Retrieve TRACE_EVENTs
timeline_metric = _ServiceWorkerTimelineMetric()
browser_process = self._timeline_controller.model.browser_process
filter_text = '(RegisterServiceWorker|'\
'UnregisterServiceWorker|'\
'ProcessAllocate|'\
'FindRegistrationForDocument|'\
'DispatchFetchEvent)'
timeline_metric.AddResultsOfEvents(
        browser_process, 'IOThread', filter_text, results)
class ServiceWorkerPerfTest(perf_benchmark.PerfBenchmark):
"""Performance test on public applications using ServiceWorker"""
test = _ServiceWorkerMeasurement
page_set = page_sets.ServiceWorkerPageSet
@classmethod
def Name(cls):
return 'service_worker.service_worker'
# Disabled due to redness on the tree. crbug.com/442752
# TODO(horo): Enable after the reference build newer than M39 will be rolled.
@benchmark.Disabled('reference')
class ServiceWorkerMicroBenchmarkPerfTest(perf_benchmark.PerfBenchmark):
"""This test measures the performance of pages using ServiceWorker.
  As a page set, two benchmark pages (many registrations, many concurrent
  fetches) and one application (Trained-to-thrill:
  https://jakearchibald.github.io/trained-to-thrill/) are included. Execution
  time of these pages is reported as Speed Index, and TRACE_EVENTs provide
  supplementary detail for investigating performance regressions.
"""
test = _ServiceWorkerMicroBenchmarkMeasurement
page_set = page_sets.ServiceWorkerMicroBenchmarkPageSet
@classmethod
def Name(cls):
return 'service_worker.service_worker_micro_benchmark'
|
tvalacarta/tvalacarta
|
refs/heads/master
|
python/main-classic/lib/youtube_dl/extractor/popcorntv.py
|
20
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
unified_timestamp,
)
class PopcornTVIE(InfoExtractor):
_VALID_URL = r'https?://[^/]+\.popcorntv\.it/guarda/(?P<display_id>[^/]+)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://animemanga.popcorntv.it/guarda/food-wars-battaglie-culinarie-episodio-01/9183',
'md5': '47d65a48d147caf692ab8562fe630b45',
'info_dict': {
'id': '9183',
'display_id': 'food-wars-battaglie-culinarie-episodio-01',
'ext': 'mp4',
'title': 'Food Wars, Battaglie Culinarie | Episodio 01',
'description': 'md5:b8bea378faae4651d3b34c6e112463d0',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1497610857,
'upload_date': '20170616',
'duration': 1440,
'view_count': int,
},
}, {
'url': 'https://cinema.popcorntv.it/guarda/smash-cut/10433',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id, video_id = mobj.group('display_id', 'id')
webpage = self._download_webpage(url, display_id)
m3u8_url = extract_attributes(
self._search_regex(
r'(<link[^>]+itemprop=["\'](?:content|embed)Url[^>]*>)',
webpage, 'content'
))['href']
formats = self._extract_m3u8_formats(
m3u8_url, display_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls')
title = self._search_regex(
r'<h1[^>]+itemprop=["\']name[^>]*>([^<]+)', webpage,
'title', default=None) or self._og_search_title(webpage)
description = self._html_search_regex(
r'(?s)<article[^>]+itemprop=["\']description[^>]*>(.+?)</article>',
webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
timestamp = unified_timestamp(self._html_search_meta(
'uploadDate', webpage, 'timestamp'))
duration = int_or_none(self._html_search_meta(
'duration', webpage), invscale=60)
view_count = int_or_none(self._html_search_meta(
'interactionCount', webpage, 'view count'))
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
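# Illustrative sketch (not part of the extractor): the general pattern used in
# _real_extract() above -- find the <link itemprop="contentUrl"/"embedUrl"> tag
# and read its href -- written with plain `re` instead of youtube-dl's
# extract_attributes() helper.  The function name is an assumption made for
# illustration only.
def _find_stream_url_sketch(webpage):
    link_tag = re.search(
        r'<link[^>]+itemprop=["\'](?:content|embed)Url[^>]*>', webpage)
    if not link_tag:
        return None
    href = re.search(r'href=["\']([^"\']+)', link_tag.group(0))
    return href.group(1) if href else None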
|
mattjmorrison/Pylint
|
refs/heads/master
|
test/input/func_operators.py
|
10
|
"""check operator use"""
#pylint: disable=C0103
#pylint: disable=W0104
__revision__ = 42
a = 1
a += 5
a = +a
b = ++a
++a
c = (++a) * b
a = 1
a -= 5
b = --a
b = a
--a
c = (--a) * b
|
fin09pcap/freight_forwarder
|
refs/heads/master
|
freight_forwarder/const.py
|
2
|
# -*- coding: utf-8; -*-
from __future__ import unicode_literals
VERSION = "1.0.3-dev"
# docker api
DOCKER_DEFAULT_TIMEOUT = 120
DOCKER_API_VERSION = '1.20'
# docker labels
PROJECT_LABEL = 'com.freight-forwarder.project'
TEAM_LABEL = 'com.freight-forwarder.team'
VERSION_LABEL = 'com.freight-forwarder.version'
GIT_LABEL = 'com.freight-forwarder.git_sha'
TYPE_LABEL = 'com.freight-forwarder.type'
TIMESTAMP_LABEL = 'com.freight-forwarder.time_stamp'
|
dnevels/heekscnc
|
refs/heads/master
|
nc/nclathe_read.py
|
35
|
################################################################################
# nc_read.py
#
# Base class for NC code parsing
#
# Hirutso Enni, 2009-01-13
################################################################################
class Parser:
def __init__(self):
self.currentx = 0.0
self.currenty = 0.0
self.currentz = 0.0
self.absolute_flag = True
############################################################################
## Internals
def files_open(self, name, oname=None):
if (oname == None ):
oname = (name+'.nc.xml')
self.file_in = open(name, 'r')
self.file_out = open(oname, 'w')
self.file_out.write('<?xml version="1.0" ?>\n')
self.file_out.write('<nccode>\n')
def files_close(self):
self.file_out.write('</nccode>\n')
self.file_in.close()
self.file_out.close()
def readline(self):
self.line = self.file_in.readline().rstrip()
if (len(self.line)) : return True
else : return False
def write(self, s):
self.file_out.write(s)
############################################################################
## Xml
def begin_ncblock(self):
self.file_out.write('\t<ncblock>\n')
def end_ncblock(self):
self.file_out.write('\t</ncblock>\n')
def add_text(self, s, col=None, cdata=False):
        s = s.replace('&', '&amp;')
        s = s.replace('"', '&quot;')
        s = s.replace('<', '&lt;')
        s = s.replace('>', '&gt;')
if (cdata) : (cd1, cd2) = ('<![CDATA[', ']]>')
else : (cd1, cd2) = ('', '')
if (col != None) : self.file_out.write('\t\t<text col="'+col+'">'+cd1+s+cd2+'</text>\n')
else : self.file_out.write('\t\t<text>'+cd1+s+cd2+'</text>\n')
def set_mode(self, units=None):
self.file_out.write('\t\t<mode')
if (units != None) : self.file_out.write(' units="'+str(units)+'"')
self.file_out.write(' />\n')
def set_tool(self, number=None):
self.file_out.write('\t\t<tool')
if (number != None) :
self.file_out.write(' number="'+str(number)+'"')
self.file_out.write(' />\n')
def begin_path(self, col=None):
if (col != None) : self.file_out.write('\t\t<path col="'+col+'">\n')
else : self.file_out.write('\t\t<path>\n')
def end_path(self):
self.file_out.write('\t\t</path>\n')
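    # Note on the axis mapping in add_line()/add_arc() below: this lathe variant
    # appears to emit machine Z into the viewer's "x" attribute and half of
    # machine X (diameter -> radius) into the viewer's "y" attribute, which is
    # why the attribute names look swapped relative to the G-code axes.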
def add_line(self, x=None, y=None, z=None, a=None, b=None, c=None):
if (x == None and y == None and z == None and a == None and b == None and c == None) : return
self.file_out.write('\t\t\t<line')
if (x != None) :
if self.absolute_flag: self.currentx = x
else: self.currentx = self.currentx + x
self.file_out.write(' y="%.6f"' % (self.currentx/2))
if (y != None) :
if self.absolute_flag: self.currenty = y
else: self.currenty = self.currenty + y
#self.file_out.write(' y="%.6f"' % self.currenty)
if (z != None) :
if self.absolute_flag: self.currentz = z
else: self.currentz = self.currentz + z
self.file_out.write(' x="%.6f"' % self.currentz)
if (a != None) : self.file_out.write(' a="%.6f"' % a)
if (b != None) : self.file_out.write(' b="%.6f"' % b)
if (c != None) : self.file_out.write(' c="%.6f"' % c)
self.file_out.write(' />\n')
def add_lathe_increment_line(self, u=None, w=None):
        # Needed for representing U and W moves in lathe code - these are non-modal incremental moves
# U == X and W == Z
if (u == None and w == None ) : return
self.file_out.write('\t\t\t<line')
if (u != None) :
self.currentx = self.currentx + u
self.file_out.write(' y="%.6f"' % (self.currentx/2))
if (w != None) :
self.currentz = self.currentz + w
self.file_out.write(' x="%.6f"' % self.currentz)
self.file_out.write(' />\n')
def add_arc(self, x=None, y=None, z=None, i=None, j=None, k=None, r=None, d=None):
if (x == None and y == None and z == None and i == None and j == None and k == None and r == None and d == None) : return
self.file_out.write('\t\t\t<arc')
if (x != None) :
if self.absolute_flag: self.currentx = x
else: self.currentx = self.currentx + x
self.file_out.write(' y="%.6f"' % (self.currentx/2))
if (y != None) :
if self.absolute_flag: self.currenty = y
else: self.currenty = self.currenty + y
#self.file_out.write(' y="%.6f"' % self.currenty)
if (z != None) :
if self.absolute_flag: self.currentz = z
else: self.currentz = self.currentz + z
self.file_out.write(' x="%.6f"' % self.currentz)
#if (j != None) : self.file_out.write(' i="%.6f"' % j)
#if (i != None) : self.file_out.write(' j="%.6f"' % i)
#if (k != None) : self.file_out.write(' k="%.6f"' % k)
if (k != None) : self.file_out.write(' i="%.6f"' % k)
if (i != None) : self.file_out.write(' j="%.6f"' % i)
if (j != None) : self.file_out.write(' k="%.6f"' % j)
if (r != None) : self.file_out.write(' r="%.6f"' % r)
if (d != None) : self.file_out.write(' d="%i"' % d)
self.file_out.write(' />\n')
def incremental(self):
self.absolute_flag = False
def absolute(self):
self.absolute_flag = True
|
FujiZ/ns-3
|
refs/heads/master
|
src/virtual-net-device/bindings/callbacks_list.py
|
40
|
callback_classes = [
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Packet>', 'ns3::Address const&', 'ns3::Address const&', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
laiqiqi886/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/encodings/shift_jisx0213.py
|
816
|
#
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jisx0213')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='shift_jisx0213',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
phillipbroberts/scotch
|
refs/heads/master
|
env/Lib/encodings/unicode_escape.py
|
852
|
""" Python 'unicode-escape' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_escape_encode
decode = codecs.unicode_escape_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.unicode_escape_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.unicode_escape_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='unicode-escape',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
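### Usage sketch (illustration only, not part of the codec module).  Once the
### encodings package registers this file, the codec is reached through the
### normal codecs APIs; a minimal example, assuming a standard CPython install
### where 'unicode_escape' resolves to this module:
###
###     import codecs
###     escaped = codecs.encode(u'tab\there', 'unicode_escape')   # backslash escapes
###     original = codecs.decode(escaped, 'unicode_escape')       # round-trips back
###     assert original == u'tab\there'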
|
jessp01/server
|
refs/heads/IX-9.19.2
|
plugins/content/document/scripts/pdfCreatorCmd.py
|
7
|
import ntsecuritycon
import win32security
import pywintypes
import win32print
import win32con
import win32api
import os.path
import ctypes
import time
import sys
def getDefaultPrinter():
try:
return win32print.GetDefaultPrinter()
except RuntimeError: # The default printer was not found.
return None
TH32CS_SNAPPROCESS = 0x00000002
class PROCESSENTRY32(ctypes.Structure):
_fields_ = [("dwSize", ctypes.c_ulong),
("cntUsage", ctypes.c_ulong),
("th32ProcessID", ctypes.c_ulong),
("th32DefaultHeapID", ctypes.c_ulong),
("th32ModuleID", ctypes.c_ulong),
("cntThreads", ctypes.c_ulong),
("th32ParentProcessID", ctypes.c_ulong),
("pcPriClassBase", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("szExeFile", ctypes.c_char * 260)]
def getProcessList():
# See http://msdn2.microsoft.com/en-us/library/ms686701.aspx
CreateToolhelp32Snapshot = ctypes.windll.kernel32.\
CreateToolhelp32Snapshot
Process32First = ctypes.windll.kernel32.Process32First
Process32Next = ctypes.windll.kernel32.Process32Next
CloseHandle = ctypes.windll.kernel32.CloseHandle
hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
pe32 = PROCESSENTRY32()
pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
if Process32First(hProcessSnap,
ctypes.byref(pe32)) == win32con.FALSE:
print >> sys.stderr, "Failed getting first process."
return
while True:
yield (pe32.szExeFile, pe32.th32ProcessID)
if Process32Next(hProcessSnap, ctypes.byref(pe32)) == win32con.FALSE:
break
CloseHandle(hProcessSnap)
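# Illustrative usage sketch (not part of the original script): getProcessList()
# is a generator of (exe_name, pid) tuples, so callers typically just iterate,
# as killProcessesByName() does below:
#
#     for exe_name, pid in getProcessList():
#         print exe_name, pid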
def adjustPrivilege(priv, enable = True):
flags = ntsecuritycon.TOKEN_ADJUST_PRIVILEGES | ntsecuritycon.TOKEN_QUERY
htoken = win32security.OpenProcessToken(win32api.GetCurrentProcess(), flags)
id = win32security.LookupPrivilegeValue(None, priv)
if enable:
newPrivileges = [(id, ntsecuritycon.SE_PRIVILEGE_ENABLED)]
else:
newPrivileges = [(id, 0)]
win32security.AdjustTokenPrivileges(htoken, 0, newPrivileges)
win32api.CloseHandle(htoken)
def killProcess(processId):
adjustPrivilege(ntsecuritycon.SE_DEBUG_NAME)
try:
handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, processId)
if handle:
win32api.TerminateProcess(handle, 0)
win32api.CloseHandle(handle)
except pywintypes.error:
pass
def killProcessesByName(exeNames):
result = False
for (exeName, processId) in getProcessList():
if exeName.lower() in exeNames:
print 'Killing %s %s' % (exeName, processId)
killProcess(processId)
result = True
return result
if len(sys.argv) < 4:
    print 'Wrong usage of this script. Usage: %s {inputFile} {outputFile} [--readonly]' % os.path.dirname(__file__)
sys.exit(1)
# build the command line
if sys.argv[-1] == '--readonly':
readOnly = True
inputFile = sys.argv[-3]
outputFile = sys.argv[-2]
commandParams = sys.argv[1:-3]
else:
readOnly = False
inputFile = sys.argv[-2]
outputFile = sys.argv[-1]
commandParams = sys.argv[1:-2]
# clean up any previous convert leftovers
if killProcessesByName(['powerpnt.exe', 'excel.exe', 'winword.exe', 'pdfcreator.exe', 'soffice.exe']):
time.sleep(5)
clearCacheCmd = '%s /CLEARCACHE /NoStart' % ' '.join(commandParams)
print '\nclearing cache: %s' % clearCacheCmd
os.system(clearCacheCmd)
# make sure the default printer is set appropriately
PDF_CREATOR_PRINTER = 'PDFCreator'
if getDefaultPrinter() != PDF_CREATOR_PRINTER:
print 'setting default printer to %s' % PDF_CREATOR_PRINTER
win32print.SetDefaultPrinter(PDF_CREATOR_PRINTER)
# build the command line
commandParams.append('/NoStart')
inputFileExt = os.path.splitext(inputFile)[1].lower()
if readOnly and inputFileExt == '.pdf':
commandParams.append('/IF"%s"' % inputFile)
commandParams.append('/OF"%s"' % outputFile)
else:
commandParams.append('/PF"%s"' % inputFile)
command = ' '.join(commandParams)
# execute the command
print '\ncommand: %s' % command
os.system(command)
# wait until the printer queue becomes empty
printer = win32print.OpenPrinter('PDFCreator')
while True:
if len(win32print.EnumJobs(printer, 0, 1, 2)) == 0:
break
time.sleep(.5)
|
trezor/micropython
|
refs/heads/trezor-v1.12
|
tests/cmdline/repl_emacs_keys.py
|
100
|
# REPL tests of GNU-ish readline navigation
# history buffer navigation
1
2
3
# input line motion
t = 12
'boofarfbar'
|
blindroot/django
|
refs/heads/master
|
tests/postgres_tests/array_default_migrations/0001_initial.py
|
377
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='IntegerArrayDefaultModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', django.contrib.postgres.fields.ArrayField(models.IntegerField(), size=None)),
],
options={
},
bases=(models.Model,),
),
]
|
jkshaver/virtualenv-1.8.2
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/utils/version.py
|
85
|
import django
import re
def get_svn_revision(path=None):
"""
Returns the SVN revision in the form SVN-XXXX,
where XXXX is the revision number.
Returns SVN-unknown if anything goes wrong, such as an unexpected
format of internal SVN files.
If path is provided, it should be a directory whose SVN info you want to
inspect. If it's not provided, this will use the root django/ package
directory.
"""
rev = None
if path is None:
path = django.__path__[0]
entries_path = '%s/.svn/entries' % path
try:
entries = open(entries_path, 'r').read()
except IOError:
pass
else:
# Versions >= 7 of the entries file are flat text. The first line is
# the version number. The next set of digits after 'dir' is the revision.
if re.match('(\d+)', entries):
rev_match = re.search('\d+\s+dir\s+(\d+)', entries)
if rev_match:
rev = rev_match.groups()[0]
# Older XML versions of the file specify revision as an attribute of
# the first entries node.
else:
from xml.dom import minidom
dom = minidom.parse(entries_path)
rev = dom.getElementsByTagName('entry')[0].getAttribute('revision')
if rev:
return u'SVN-%s' % rev
return u'SVN-unknown'
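# Illustrative sketch (not part of Django): the flat-text branch above reduces
# to two regexes over the .svn/entries contents.  A small helper that reuses
# the module's `re` import; with a made-up entries string such as
# "10\n\ndir\n12345\n" it would return '12345'.
def _svn_entries_revision_sketch(entries_text):
    if re.match('(\d+)', entries_text):
        rev_match = re.search('\d+\s+dir\s+(\d+)', entries_text)
        if rev_match:
            return rev_match.groups()[0]
    return None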
|
rosmo/ansible
|
refs/heads/devel
|
test/sanity/pylint/plugins/deprecated.py
|
13
|
# (c) 2018, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from distutils.version import LooseVersion
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from ansible.release import __version__ as ansible_version_raw
MSGS = {
'E9501': ("Deprecated version (%r) found in call to Display.deprecated "
"or AnsibleModule.deprecate",
"ansible-deprecated-version",
"Used when a call to Display.deprecated specifies a version "
"less than or equal to the current version of Ansible",
{'minversion': (2, 6)}),
'E9502': ("Display.deprecated call without a version",
"ansible-deprecated-no-version",
"Used when a call to Display.deprecated does not specify a "
"version",
{'minversion': (2, 6)}),
'E9503': ("Invalid deprecated version (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"ansible-invalid-deprecated-version",
"Used when a call to Display.deprecated specifies an invalid "
"version number",
{'minversion': (2, 6)}),
}
ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version_raw.split('.')[:3]))
def _get_expr_name(node):
"""Funciton to get either ``attrname`` or ``name`` from ``node.func.expr``
Created specifically for the case of ``display.deprecated`` or ``self._display.deprecated``
"""
try:
return node.func.expr.attrname
except AttributeError:
# If this fails too, we'll let it raise, the caller should catch it
return node.func.expr.name
class AnsibleDeprecatedChecker(BaseChecker):
"""Checks for Display.deprecated calls to ensure that the ``version``
has not passed or met the time for removal
"""
__implements__ = (IAstroidChecker,)
name = 'deprecated'
msgs = MSGS
@check_messages(*(MSGS.keys()))
def visit_call(self, node):
version = None
try:
if (node.func.attrname == 'deprecated' and 'display' in _get_expr_name(node) or
node.func.attrname == 'deprecate' and 'module' in _get_expr_name(node)):
if node.keywords:
for keyword in node.keywords:
if len(node.keywords) == 1 and keyword.arg is None:
# This is likely a **kwargs splat
return
elif keyword.arg == 'version':
if isinstance(keyword.value.value, astroid.Name):
# This is likely a variable
return
version = keyword.value.value
if not version:
try:
version = node.args[1].value
except IndexError:
self.add_message('ansible-deprecated-no-version', node=node)
return
try:
if ANSIBLE_VERSION >= LooseVersion(str(version)):
self.add_message('ansible-deprecated-version', node=node, args=(version,))
except ValueError:
self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
except AttributeError:
# Not the type of node we are interested in
pass
def register(linter):
"""required method to auto register this checker """
linter.register_checker(AnsibleDeprecatedChecker(linter))
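# Illustrative sketch (an assumption, not part of the plugin): the kinds of call this
# checker is written to catch. With a current Ansible version of, say, 2.8:
#
#     display.deprecated("use 'new_opt' instead", version='2.6')   # E9501 ansible-deprecated-version
#     display.deprecated("use 'new_opt' instead")                  # E9502 ansible-deprecated-no-version
#     module.deprecate("use 'new_opt' instead", version='lemon')   # intended to hit E9503 ansible-invalid-deprecated-version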
|
roshantha9/AbstractManycoreSim
|
refs/heads/master
|
src/libMappingAndScheduling/Decentralised/PSAlgorithmViewer.py
|
1
|
import pprint
import sys
import itertools
import simpy
import math, random
from operator import itemgetter
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import time
from SimParams import SimParams
from libDebug.Debug import Debug, DebugCat
#############################################################################################
## PS Algo main class
#############################################################################################
class PSAlgorithmViewer:
def __init__(self, env, RMInstance):
self.label = "PSAlgorithmViewer"
self.env = env
self.RMInstance = RMInstance
# figure instances
self.fig_ax_pcolors = None
# process instance
self.process_instance = None
# initialise algorithm
def init(self):
if(SimParams.PSALGO_VIEWER_ENABLED == True):
plt.ion()
empty_array = np.array(np.zeros((SimParams.NOC_H, SimParams.NOC_W)))
## pcolor plots (4x2 grid of panels)
# 0-plevel
# 1-qn-pos
# 2-qn-count
# 3-slack amount
# 4-nlatetasks(norm)
# 5-ntasks
# 6-thrhld-qn
# 7-imprtd-tasks
mesh_arr = []
cb_arr = []
f, axarr = plt.subplots(4, 2)
pc_mesh = axarr[0,0].pcolormesh(empty_array, cmap=plt.gray(), vmin=0, vmax=np.amax(empty_array), edgecolors='r')
mesh_arr.append(pc_mesh)
axarr[0,0].set_title('plevel')
cb = f.colorbar(pc_mesh, ax=axarr[0,0])
cb_arr.append(cb)
plt.axis('off')
pc_mesh = axarr[0,1].pcolormesh(empty_array, cmap=plt.gray(), vmin=0, vmax=np.amax(empty_array), edgecolors='r')
mesh_arr.append(pc_mesh)
axarr[0,1].set_title('qn-pos')
cb = f.colorbar(pc_mesh, ax=axarr[0,1])
cb_arr.append(cb)
plt.axis('off')
pc_mesh = axarr[1,0].pcolormesh(empty_array, cmap=plt.gray(), vmin=0, vmax=np.amax(empty_array), edgecolors='r')
mesh_arr.append(pc_mesh)
axarr[1,0].set_title('qn-count')
cb = f.colorbar(pc_mesh, ax=axarr[1,0])
cb_arr.append(cb)
plt.axis('off')
pc_mesh = axarr[1,1].pcolormesh(empty_array, cmap=plt.gray(), vmin=0, vmax=np.amax(empty_array), edgecolors='r')
mesh_arr.append(pc_mesh)
axarr[1,1].set_title('slack-amnt')
cb = f.colorbar(pc_mesh, ax=axarr[1,1])
cb_arr.append(cb)
plt.axis('off')
pc_mesh = axarr[2,0].pcolormesh(empty_array, cmap=plt.gray(), vmin=0, vmax=np.amax(empty_array), edgecolors='r')
mesh_arr.append(pc_mesh)
axarr[2,0].set_title('nlatetasks(norm)')
cb = f.colorbar(pc_mesh, ax=axarr[2,0])
cb_arr.append(cb)
plt.axis('off')
pc_mesh = axarr[2,1].pcolormesh(empty_array, cmap=plt.gray(), vmin=0, vmax=np.amax(empty_array), edgecolors='r')
mesh_arr.append(pc_mesh)
axarr[2,1].set_title('ntasks')
cb = f.colorbar(pc_mesh, ax=axarr[2,1])
cb_arr.append(cb)
plt.axis('off')
pc_mesh = axarr[3,0].pcolormesh(empty_array, cmap=plt.gray(), vmin=0, vmax=np.amax(empty_array), edgecolors='r')
mesh_arr.append(pc_mesh)
axarr[3,0].set_title('thrhld-qn')
cb = f.colorbar(pc_mesh, ax=axarr[3,0])
cb_arr.append(cb)
plt.axis('off')
pc_mesh = axarr[3,1].pcolormesh(empty_array, cmap=plt.gray(), vmin=0, vmax=np.amax(empty_array), edgecolors='r')
mesh_arr.append(pc_mesh)
axarr[3,1].set_title('imprtd-tasks')
cb = f.colorbar(pc_mesh, ax=axarr[3,1])
cb_arr.append(cb)
plt.axis('off')
self.fig_ax_pcolors = {
'fig' : f,
'axes': axarr,
'pc_meshes' : mesh_arr,
'col_bars' : cb_arr
}
# start process
self.process_instance = self.env.process(self.updatelevels())
def updatelevels(self):
while True:
if(self.env.now > 0):
# update figure title with timestamp
title = self._getPSAlgoTimestamps()
if title != None:
self.fig_ax_pcolors['fig'].canvas.set_window_title(title)
new_data = self._getPcolorData()
# print "----"
# pprint.pprint(new_data)
# print "----"
# plevels
if(np.amin(new_data['newdata_plevels']) == np.amax(new_data['newdata_plevels'])):
if(np.amin(new_data['newdata_plevels']) > np.amax(new_data['newdata_plevels'])):
val_min = np.amax(new_data['newdata_plevels'])
val_max = 0.0
else:
val_min = 0.0
val_max = np.amax(new_data['newdata_plevels'])
else:
val_min = np.amin(new_data['newdata_plevels'])
val_max = np.amax(new_data['newdata_plevels'])
self.fig_ax_pcolors['pc_meshes'][0].set_array(new_data['newdata_plevels'].ravel())
self.fig_ax_pcolors['pc_meshes'][0].set_clim(vmin=val_min, vmax=val_max)
self.fig_ax_pcolors['col_bars'][0].set_clim(vmin=val_min, vmax=val_max)
plt.draw()
self.fig_ax_pcolors['col_bars'][0].draw_all()
# qnpos
self.fig_ax_pcolors['pc_meshes'][1].set_array(new_data['newdata_qnpos'].ravel())
self.fig_ax_pcolors['pc_meshes'][1].set_clim(vmin=1, vmax=2)
self.fig_ax_pcolors['col_bars'][1].set_clim(vmin=1, vmax=2)
plt.draw()
self.fig_ax_pcolors['col_bars'][1].draw_all()
# qncount
if(np.amin(new_data['newdata_qncount']) == np.amax(new_data['newdata_qncount'])):
if(np.amin(new_data['newdata_qncount']) > np.amax(new_data['newdata_qncount'])):
val_min = np.amax(new_data['newdata_qncount'])
val_max = 0.0
else:
val_min = 0.0
val_max = np.amax(new_data['newdata_qncount'])
else:
val_min = np.amin(new_data['newdata_qncount'])
val_max = np.amax(new_data['newdata_qncount'])
self.fig_ax_pcolors['pc_meshes'][2].set_array(new_data['newdata_qncount'].ravel())
self.fig_ax_pcolors['pc_meshes'][2].set_clim(vmin=val_min, vmax=val_max)
self.fig_ax_pcolors['col_bars'][2].set_clim(vmin=val_min, vmax=val_max)
plt.draw()
self.fig_ax_pcolors['col_bars'][2].draw_all()
# slack
if(np.amin(new_data['newdata_slack']) == np.amax(new_data['newdata_slack'])):
if(np.amin(new_data['newdata_slack']) > np.amax(new_data['newdata_slack'])):
val_min = np.amax(new_data['newdata_slack'])
val_max = 0.0
else:
val_min = 0.0
val_max = np.amax(new_data['newdata_slack'])
else:
val_min = np.amin(new_data['newdata_slack'])
val_max = np.amax(new_data['newdata_slack'])
self.fig_ax_pcolors['pc_meshes'][3].set_array(new_data['newdata_slack'].ravel())
self.fig_ax_pcolors['pc_meshes'][3].set_clim(vmin=val_min, vmax=val_max)
self.fig_ax_pcolors['col_bars'][3].set_clim(vmin=val_min, vmax=val_max)
plt.draw()
self.fig_ax_pcolors['col_bars'][3].draw_all()
# numlatetasks
if(np.amin(new_data['newdata_numlatetasks']) == np.amax(new_data['newdata_numlatetasks'])):
if(np.amin(new_data['newdata_numlatetasks']) > np.amax(new_data['newdata_numlatetasks'])):
val_min = np.amax(new_data['newdata_numlatetasks'])
val_max = 0.0
else:
val_min = 0.0
val_max = np.amax(new_data['newdata_numlatetasks'])
else:
val_min = np.amin(new_data['newdata_numlatetasks'])
val_max = np.amax(new_data['newdata_numlatetasks'])
self.fig_ax_pcolors['pc_meshes'][4].set_array(new_data['newdata_numlatetasks'].ravel())
self.fig_ax_pcolors['pc_meshes'][4].set_clim(vmin=val_min, vmax=val_max)
self.fig_ax_pcolors['col_bars'][4].set_clim(vmin=val_min, vmax=val_max)
plt.draw()
self.fig_ax_pcolors['col_bars'][4].draw_all()
# numtasks
if(np.amin(new_data['newdata_tqsize']) == np.amax(new_data['newdata_tqsize'])):
if(np.amin(new_data['newdata_tqsize']) > np.amax(new_data['newdata_tqsize'])):
val_min = np.amax(new_data['newdata_tqsize'])
val_max = 0.0
else:
val_min = 0.0
val_max = np.amax(new_data['newdata_tqsize'])
else:
val_min = np.amin(new_data['newdata_tqsize'])
val_max = np.amax(new_data['newdata_tqsize'])
self.fig_ax_pcolors['pc_meshes'][5].set_array(new_data['newdata_tqsize'].ravel())
self.fig_ax_pcolors['pc_meshes'][5].set_clim(vmin=val_min, vmax=val_max)
self.fig_ax_pcolors['col_bars'][5].set_clim(vmin=val_min, vmax=val_max)
plt.draw()
self.fig_ax_pcolors['col_bars'][5].draw_all()
# thresholdqn
# if(np.amin(new_data['newdata_thresholdqn']) == np.amax(new_data['newdata_thresholdqn'])):
# if(np.amin(new_data['newdata_thresholdqn']) > np.amax(new_data['newdata_thresholdqn'])):
# val_min = np.amax(new_data['newdata_thresholdqn'])
# val_max = 0.0
# else:
# val_min = 0.0
# val_max = np.amax(new_data['newdata_thresholdqn'])
# else:
val_min = np.amin(new_data['newdata_thresholdqn'])
val_max = np.amax(new_data['newdata_thresholdqn'])
print val_min
print val_max
self.fig_ax_pcolors['pc_meshes'][6].set_array(new_data['newdata_thresholdqn'].ravel())
self.fig_ax_pcolors['pc_meshes'][6].set_clim(vmin=val_min, vmax=val_max)
self.fig_ax_pcolors['col_bars'][6].set_clim(vmin=val_min, vmax=val_max)
plt.draw()
self.fig_ax_pcolors['col_bars'][6].draw_all()
# imported tasks
if(np.amin(new_data['newdata_numimportedtasks']) == np.amax(new_data['newdata_numimportedtasks'])):
if(np.amin(new_data['newdata_numimportedtasks']) > np.amax(new_data['newdata_numimportedtasks'])):
val_min = np.amax(new_data['newdata_numimportedtasks'])
val_max = 0.0
else:
val_min = 0.0
val_max = np.amax(new_data['newdata_numimportedtasks'])
else:
val_min = np.amin(new_data['newdata_numimportedtasks'])
val_max = np.amax(new_data['newdata_numimportedtasks'])
self.fig_ax_pcolors['pc_meshes'][7].set_array(new_data['newdata_numimportedtasks'].ravel())
self.fig_ax_pcolors['pc_meshes'][7].set_clim(vmin=val_min, vmax=val_max)
self.fig_ax_pcolors['col_bars'][7].set_clim(vmin=val_min, vmax=val_max)
plt.draw()
self.fig_ax_pcolors['col_bars'][7].draw_all()
# self.fig_ax_pcolors['axes'][0,0].draw_artist(self.fig_ax_pcolors['pc_meshes'][0])
# self.fig_ax_pcolors['axes'][0,1].draw_artist(self.fig_ax_pcolors['pc_meshes'][1])
# self.fig_ax_pcolors['axes'][1,0].draw_artist(self.fig_ax_pcolors['pc_meshes'][2])
# self.fig_ax_pcolors['axes'][1,1].draw_artist(self.fig_ax_pcolors['pc_meshes'][3])
if(SimParams.PSALGO_VIEWER_BLOCKONUSERINPUT == True):
foo = raw_input("- press any key to continue -\n")
plt.pause(0.01)
time.sleep(0.01)
else:
plt.pause(0.01)
time.sleep(0.01)
plt.show()
#print "here 00000000000000"
# wait for TQN seconds
yield self.env.timeout(SimParams.PSALGO_VIEWER_SAMPLERATE)
def _getPcolorData(self):
all_node_plevels= []
all_node_qnpos = []
all_node_qncount = []
all_node_slack = []
all_node_numlatetasks = []
all_node_tqsize = []
all_node_thresholdqn = []
all_node_numimportedtasks = []
all_node_norm_numlatetasks = []
for each_node in self.RMInstance.node_network.get_Nodes():
# plevels
all_node_plevels.append(each_node.psalgoprops.get_pheramone_level())
# qn pos
all_node_qnpos.append(each_node.psalgoprops.get_node_type())
# qncount
all_node_qncount.append(each_node.psalgoprops.get_qn_count())
# slack amount
all_node_slack.append(each_node.calculate_SystemSlack(normalised=True))
# number of late tasks
num_late_tasks = each_node.numLateTasksInTQ(lateness_ratio=SimParams.DYNAMIC_TASK_REMAPPING_TASK_LATE_ESTIM_LATENESS_RATIO)
all_node_numlatetasks.append(num_late_tasks)
# number of tasks in tq
all_node_tqsize.append(each_node.get_NumTasksInTQ())
# thresholdqn
all_node_thresholdqn.append(each_node.psalgoprops.get_threshold_qn())
# imported tasks - due to remapping
all_node_numimportedtasks.append(each_node.totalImportedTasks())
# normalised_num_late_tasks
if each_node.get_NumTasksInTQ() >0:
all_node_norm_numlatetasks.append(float(float(num_late_tasks)/float(each_node.get_NumTasksInTQ())))
else:
all_node_norm_numlatetasks.append(float(0.0))
all_node_plevels_reshaped = np.array(np.reshape(all_node_plevels, (SimParams.NOC_H, SimParams.NOC_W)))
all_node_qnpos_reshaped = np.array(np.reshape(all_node_qnpos, (SimParams.NOC_H, SimParams.NOC_W)))
all_node_qncount_reshaped = np.array(np.reshape(all_node_qncount, (SimParams.NOC_H, SimParams.NOC_W)))
all_node_slack_reshaped = np.array(np.reshape(all_node_slack, (SimParams.NOC_H, SimParams.NOC_W)))
all_node_numlatetasks_reshaped = np.array(np.reshape(all_node_numlatetasks, (SimParams.NOC_H, SimParams.NOC_W)))
all_node_tqsize_reshaped = np.array(np.reshape(all_node_tqsize, (SimParams.NOC_H, SimParams.NOC_W)))
all_node_thresholdqn_reshaped = np.array(np.reshape(all_node_thresholdqn, (SimParams.NOC_H, SimParams.NOC_W)))
all_node_numimportedtasks_reshaped = np.array(np.reshape(all_node_numimportedtasks, (SimParams.NOC_H, SimParams.NOC_W)))
all_node_norm_numlatetasks_reshaped = np.array(np.reshape(all_node_norm_numlatetasks, (SimParams.NOC_H, SimParams.NOC_W)), dtype=float)
result = {
'newdata_plevels' : all_node_plevels_reshaped,
'newdata_qnpos' : all_node_qnpos_reshaped,
'newdata_qncount' : all_node_qncount_reshaped,
'newdata_slack' : all_node_slack_reshaped,
'newdata_numlatetasks' : all_node_norm_numlatetasks_reshaped,
'newdata_tqsize' : all_node_tqsize_reshaped,
'newdata_thresholdqn' : all_node_thresholdqn_reshaped,
'newdata_numimportedtasks' : all_node_numimportedtasks_reshaped,
}
return result
def _getPSAlgoTimestamps(self):
# get number of prop cycles executed
num_propcycles = len(self.RMInstance.PSAlgo.track_propcycles)
last_propcycle = self.RMInstance.PSAlgo.track_propcycles[-1] if len(self.RMInstance.PSAlgo.track_propcycles)>0 else None
# get number of decay cycles executed
num_decaycycles = len(self.RMInstance.PSAlgo.track_decaycycles)
last_decaycycle = self.RMInstance.PSAlgo.track_decaycycles[-1] if len(self.RMInstance.PSAlgo.track_decaycycles)>0 else None
title = "Time: " + str(self.env.now) + ", prop: " + str(num_propcycles) + "[" + str(last_propcycle) + "]" + \
", dec: " + str(num_decaycycles) + "[" + str(last_decaycycle) + "]"
return title
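# Hedged refactoring sketch (not part of the original class): each of the eight per-plot
# blocks in updatelevels() computes its colour-scale limits the same way; a module-level
# helper such as the one below would express that shared rule. When every cell holds the
# same value the lower limit is pinned to 0.0 so the colour bar still spans a range;
# otherwise the data's own min/max are used.
def _colour_limits(data):
    if np.amin(data) == np.amax(data):
        return 0.0, np.amax(data)
    return np.amin(data), np.amax(data)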
|
sternshus/Arelle
|
refs/heads/master
|
arelle/ValidateInfoset.py
|
1
|
'''
Created on Feb 15, 2012
@author: Mark V Systems Limited
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
from collections import defaultdict
from arelle.ModelDocument import Type
from arelle.ModelValue import qname
from arelle import XmlUtil, XbrlConst
from arelle.ValidateXbrlCalcs import inferredPrecision, inferredDecimals
def validate(val, modelXbrl, infosetModelXbrl):
infoset = infosetModelXbrl.modelDocument
if infoset.type == Type.INSTANCE:
# compare facts (assumed out of order)
infosetFacts = defaultdict(list)
for fact in infosetModelXbrl.facts:
infosetFacts[fact.qname].append(fact)
if len(modelXbrl.factsInInstance) != len(infosetModelXbrl.factsInInstance):
modelXbrl.error("arelle:infosetTest",
_("Fact counts mismatch, testcase instance %(foundFactCount)s, infoset instance %(expectedFactCount)s"),
modelObject=(modelXbrl.modelDocument, infosetModelXbrl.modelDocument),
foundFactCount=len(modelXbrl.factsInInstance),
expectedFactCount=len(infosetModelXbrl.factsInInstance))
else:
for i, instFact in enumerate(modelXbrl.facts):
infosetFact = None
for fact in infosetFacts[instFact.qname]:
if fact.isTuple and fact.isDuplicateOf(instFact, deemP0Equal=True):
infosetFact = fact
break
elif fact.isItem and fact.isVEqualTo(instFact, deemP0Equal=True):
infosetFact = fact
break
if infosetFact is None: # takes precision/decimals into account
if fact is not None:
    # the comparison result is discarded here; the call appears to be retained as a debugging hook
    fact.isVEqualTo(instFact, deemP0Equal=True)
modelXbrl.error("arelle:infosetTest",
_("Fact %(factNumber)s mismatch %(concept)s"),
modelObject=instFact,
factNumber=(i+1),
concept=instFact.qname)
else:
ptvPeriodType = infosetFact.get("{http://www.xbrl.org/2003/ptv}periodType")
ptvBalance = infosetFact.get("{http://www.xbrl.org/2003/ptv}balance")
ptvDecimals = infosetFact.get("{http://www.xbrl.org/2003/ptv}decimals")
ptvPrecision = infosetFact.get("{http://www.xbrl.org/2003/ptv}precision")
if ptvPeriodType and ptvPeriodType != instFact.concept.periodType:
modelXbrl.error("arelle:infosetTest",
_("Fact %(factNumber)s periodType mismatch %(concept)s expected %(expectedPeriodType)s found %(foundPeriodType)s"),
modelObject=(instFact, infosetFact),
factNumber=(i+1),
concept=instFact.qname,
expectedPeriodType=ptvPeriodType,
foundPeriodType=instFact.concept.periodType)
if ptvBalance and ptvBalance != instFact.concept.balance:
modelXbrl.error("arelle:infosetTest",
_("Fact %(factNumber)s balance mismatch %(concept)s expected %(expectedBalance)s found %(foundBalance)s"),
modelObject=(instFact, infosetFact),
factNumber=(i+1),
concept=instFact.qname,
expectedBalance=ptvBalance,
foundBalance=instFact.concept.balance)
if ptvDecimals and ptvDecimals != str(inferredDecimals(fact)):
modelXbrl.error("arelle:infosetTest",
_("Fact %(factNumber)s inferred decimals mismatch %(concept)s expected %(expectedDecimals)s found %(inferredDecimals)s"),
modelObject=(instFact, infosetFact),
factNumber=(i+1),
concept=instFact.qname,
expectedDecimals=ptvDecimals,
inferredDecimals=str(inferredDecimals(fact)))
if ptvPrecision and ptvPrecision != str(inferredPrecision(fact)):
modelXbrl.error("arelle:infosetTest",
_("Fact %(factNumber)s inferred precision mismatch %(concept)s expected %(expectedPrecision)s found %(inferredPrecision)s"),
modelObject=(instFact, infosetFact),
factNumber=(i+1),
concept=instFact.qname,
expectedPrecision=ptvPrecision,
inferredPrecision=str(inferredPrecision(fact)))
elif infoset.type == Type.ARCSINFOSET:
# compare arcs
for arcElt in XmlUtil.children(infoset.xmlRootElement, "http://www.xbrl.org/2003/ptv", "arc"):
linkType = arcElt.get("linkType")
arcRole = arcElt.get("arcRole")
extRole = arcElt.get("extRole")
fromObj = resolvePath(modelXbrl, arcElt.get("fromPath"))
if fromObj is None:
modelXbrl.error("arelle:infosetTest",
_("Arc fromPath not found: %(fromPath)s"),
modelObject=arcElt, fromPath=arcElt.get("fromPath"))
continue
if linkType in ("label", "reference"):
labelLang = arcElt.get("labelLang")
resRole = arcElt.get("resRole")
if linkType == "label":
expectedLabel = XmlUtil.text(arcElt)
foundLabel = fromObj.label(preferredLabel=resRole,fallbackToQname=False,lang=None,strip=True,linkrole=extRole)
if foundLabel != expectedLabel:
modelXbrl.error("arelle:infosetTest",
_("Label expected='%(expectedLabel)s', found='%(foundLabel)s'"),
modelObject=arcElt, expectedLabel=expectedLabel, foundLabel=foundLabel)
continue
elif linkType == "reference":
expectedRef = XmlUtil.innerText(arcElt)
referenceFound = False
for refrel in modelXbrl.relationshipSet(XbrlConst.conceptReference,extRole).fromModelObject(fromObj):
ref = refrel.toModelObject
if resRole == ref.role:
foundRef = XmlUtil.innerText(ref)
if foundRef != expectedRef:
modelXbrl.error("arelle:infosetTest",
_("Reference inner text expected='%(expectedRef)s, found='%(foundRef)s'"),
modelObject=arcElt, expectedRef=expectedRef, foundRef=foundRef)
referenceFound = True
break
if referenceFound:
continue
modelXbrl.error("arelle:infosetTest",
_("%(linkType)s not found containing '%(text)s' linkRole %(linkRole)s"),
modelObject=arcElt, linkType=linkType.title(), text=XmlUtil.innerText(arcElt), linkRole=extRole)
else:
toObj = resolvePath(modelXbrl, arcElt.get("toPath"))
if toObj is None:
modelXbrl.error("arelle:infosetTest",
_("Arc toPath not found: %(toPath)s"),
modelObject=arcElt, toPath=arcElt.get("toPath"))
continue
weight = arcElt.get("weight")
if weight is not None:
weight = float(weight)
order = arcElt.get("order")
if order is not None:
order = float(order)
preferredLabel = arcElt.get("preferredLabel")
found = False
for rel in modelXbrl.relationshipSet(arcRole, extRole).fromModelObject(fromObj):
if (rel.toModelObject == toObj and
(weight is None or rel.weight == weight) and
(order is None or rel.order == order)):
found = True
if not found:
modelXbrl.error("arelle:infosetTest",
_("Arc not found: from %(fromPath)s, to %(toPath)s, role %(arcRole)s, linkRole $(extRole)s"),
modelObject=arcElt, fromPath=arcElt.get("fromPath"), toPath=arcElt.get("toPath"), arcRole=arcRole, linkRole=extRole)
continue
# validate dimensions of each fact
factElts = XmlUtil.children(modelXbrl.modelDocument.xmlRootElement, None, "*")
for itemElt in XmlUtil.children(infoset.xmlRootElement, None, "item"):
try:
qnElt = XmlUtil.child(itemElt,None,"qnElement")
factQname = qname(qnElt, XmlUtil.text(qnElt))
sPointer = int(XmlUtil.child(itemElt,None,"sPointer").text)
factElt = factElts[sPointer - 1] # 1-based xpath indexing
if factElt.qname != factQname:
modelXbrl.error("arelle:infosetTest",
_("Fact %(sPointer)s mismatch Qname, expected %(qnElt)s, observed %(factQname)s"),
modelObject=itemElt, sPointer=sPointer, qnElt=factQname, factQname=factElt.qname)
elif not factElt.isItem or factElt.context is None:
modelXbrl.error("arelle:infosetTest",
_("Fact %(sPointer)s has no context: %(qnElt)s"),
modelObject=(itemElt,factElt), sPointer=sPointer, qnElt=factQname)
else:
context = factElt.context
memberElts = XmlUtil.children(itemElt,None,"member")
numNonDefaults = 0
for memberElt in memberElts:
dimElt = XmlUtil.child(memberElt, None, "qnDimension")
qnDim = qname(dimElt, XmlUtil.text(dimElt))
isDefault = XmlUtil.text(XmlUtil.child(memberElt, None, "bDefaulted")) == "true"
if not isDefault:
numNonDefaults += 1
if not ((qnDim in context.qnameDims and not isDefault) or
(qnDim in factElt.modelXbrl.qnameDimensionDefaults and isDefault)):
modelXbrl.error("arelle:infosetTest",
_("Fact %(sPointer)s (qnElt)s dimension mismatch %(qnDim)s"),
modelObject=(itemElt, factElt, context), sPointer=sPointer, qnElt=factQname, qnDim=qnDim)
if numNonDefaults != len(context.qnameDims):
modelXbrl.error("arelle:infosetTest",
_("Fact %(sPointer)s (qnElt)s dimensions count mismatch"),
modelObject=(itemElt, factElt, context), sPointer=sPointer, qnElt=factQname)
except (IndexError, ValueError, AttributeError) as err:
modelXbrl.error("arelle:infosetTest",
_("Invalid entity fact dimensions infoset sPointer: %(test)s, error details: %(error)s"),
modelObject=itemElt, test=XmlUtil.innerTextList(itemElt), error=str(err))
def resolvePath(modelXbrl, namespaceId):
ns, sep, id = (namespaceId or "#").partition("#")
docs = modelXbrl.namespaceDocs.get(ns)
if docs: # a list of schema modelDocs with this namespace
doc = docs[0]
if id in doc.idObjects:
return doc.idObjects[id]
return None
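# Hedged usage note (an assumption, not part of the original module): resolvePath expects a
# "namespaceURI#id" string, e.g.
#
#     obj = resolvePath(modelXbrl, "http://www.example.com/taxonomy#elementId")
#
# and returns the identified object from the first schema document loaded for that
# namespace, or None when the namespace or id is unknown.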
def validateRenderingInfoset(modelXbrl, comparisonFile, sourceDoc):
from lxml import etree
try:
comparisonDoc = etree.parse(comparisonFile)
sourceIter = sourceDoc.iter()
comparisonIter = comparisonDoc.iter()
sourceElt = next(sourceIter, None)
comparisonElt = next(comparisonIter, None)
# skip over nsmap elements used to create output trees
while (sourceElt is not None and sourceElt.tag == "nsmap"):
sourceElt = next(sourceIter, None)
while (comparisonElt is not None and comparisonElt.tag == "nsmap"):
comparisonElt = next(comparisonIter, None)
while (sourceElt is not None and comparisonElt is not None):
while (isinstance(sourceElt, etree._Comment)):
sourceElt = next(sourceIter, None)
while (isinstance(comparisonElt, etree._Comment)):
comparisonElt = next(comparisonIter, None)
sourceEltTag = sourceElt.tag if sourceElt is not None else '(no more elements)'
comparisonEltTag = comparisonElt.tag if comparisonElt is not None else '(no more elements)'
if sourceEltTag != comparisonEltTag:
modelXbrl.error("arelle:infosetElementMismatch",
_("Infoset expecting %(elt1)s found %(elt2)s source line %(elt1line)s comparison line %(elt2line)s"),
modelObject=modelXbrl, elt1=sourceEltTag, elt2=comparisonEltTag,
elt1line=sourceElt.sourceline, elt2line=comparisonElt.sourceline)
else:
text1 = (sourceElt.text or '').strip() or '(none)'
text2 = (comparisonElt.text or '').strip() or '(none)'
if text1 != text2:
modelXbrl.error("arelle:infosetTextMismatch",
_("Infoset comparison element %(elt)s expecting text %(text1)s found %(text2)s source line %(elt1line)s comparison line %(elt2line)s"),
modelObject=modelXbrl, elt=sourceElt.tag, text1=text1, text2=text2,
elt1line=sourceElt.sourceline, elt2line=comparisonElt.sourceline)
attrs1 = dict(sourceElt.items())
attrs2 = dict(comparisonElt.items())
# remove attributes not to be compared
for attr in ("{http://www.w3.org/XML/1998/namespace}base",
):
if attr in attrs1: del attrs1[attr]
if attr in attrs2: del attrs2[attr]
if attrs1 != attrs2:
modelXbrl.error("arelle:infosetAttributesMismatch",
_("Infoset comparison element %(elt)s expecting attributes %(attrs1)s found %(attrs2)s source line %(elt1line)s comparison line %(elt2line)s"),
modelObject=modelXbrl, elt=sourceElt.tag,
attrs1=', '.join('{0}="{1}"'.format(k,v) for k,v in sorted(attrs1.items())),
attrs2=', '.join('{0}="{1}"'.format(k,v) for k,v in sorted(attrs2.items())),
elt1line=sourceElt.sourceline, elt2line=comparisonElt.sourceline)
sourceElt = next(sourceIter, None)
comparisonElt = next(comparisonIter, None)
except (IOError, etree.LxmlError) as err:
modelXbrl.error("arelle:infosetFileError",
_("Infoset comparison file %(xmlfile)s error %(error)s"),
modelObject=modelXbrl, xmlfile=comparisonFile, error=str(err))
|
chrisxue815/leetcode_python
|
refs/heads/master
|
problems/test_0200_dfs.py
|
1
|
import unittest
def _set0(grid, i, j):
grid[i][j] = '0'
if i >= 1 and grid[i - 1][j] == '1':
_set0(grid, i - 1, j)
if i + 1 < len(grid) and grid[i + 1][j] == '1':
_set0(grid, i + 1, j)
if j >= 1 and grid[i][j - 1] == '1':
_set0(grid, i, j - 1)
if j + 1 < len(grid[0]) and grid[i][j + 1] == '1':
_set0(grid, i, j + 1)
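# Hedged alternative sketch (not part of the original solution): the same flood fill with
# an explicit stack, avoiding deep recursion on large grids.
def _set0_iterative(grid, i, j):
    stack = [(i, j)]
    while stack:
        r, c = stack.pop()
        if 0 <= r < len(grid) and 0 <= c < len(grid[0]) and grid[r][c] == '1':
            grid[r][c] = '0'
            stack.extend([(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)])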
# O(m*n) time. O(m*n) space for the recursion stack in the worst case. DFS.
class Solution:
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
result = 0
for i in range(len(grid)):
row = grid[i]
for j in range(len(row)):
if row[j] == '1':
result += 1
_set0(grid, i, j)
return result
class Test(unittest.TestCase):
def test(self):
self._test([
'11110',
'11010',
'11000',
'00000',
], 1)
self._test([
'11000',
'11000',
'00100',
'00011',
], 3)
def _test(self, grid, expected):
grid = [list(row) for row in grid]
actual = Solution().numIslands(grid)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
ruohoruotsi/Wavelet-Tree-Synth
|
refs/heads/master
|
nnet/keeper_LSTMVRAE-JayHack-RyotaKatoh-chainer/midi/utils.py
|
1
|
# Author: Nicolas Boulanger-Lewandowski
# University of Montreal (2013)
# RNN-RBM deep learning tutorial
#
# Implements midiread and midiwrite functions to read/write MIDI files to/from piano-rolls
from MidiOutFile import MidiOutFile
from MidiInFile import MidiInFile
from MidiOutStream import MidiOutStream
import midi # to print out note values
import numpy
class midiread(MidiOutStream):
def __init__(self, filename, r=(21, 109), dt=0.2):
self.notes = []
self._tempo = 500000
self.beat = 0
self.time = 0.0
midi_in = MidiInFile(self, filename)
midi_in.read()
self.notes = [n for n in self.notes if n[2] is not None] # purge incomplete notes
length = int(numpy.ceil(max(zip(*self.notes)[2]) / dt)) # create piano-roll
self.piano_roll = numpy.zeros((length, r[1]-r[0]))
for n in self.notes:
self.piano_roll[int(numpy.ceil(n[1]/dt)) : int(numpy.ceil(n[2]/dt)), n[0]-r[0]] = 1
def abs_time_in_seconds(self):
return self.time + self._tempo * (self.abs_time() - self.beat) * 1e-6 / self.div
def tempo(self, value):
self.time = self.abs_time_in_seconds()
self.beat = self.abs_time()
self._tempo = value
def header(self, format=0, nTracks=1, division=96):
self.div = division
def note_on(self, channel=0, note=0x40, velocity=0x40):
time = self.abs_time_in_seconds()
self.notes.append([note, time, None])
# IOHAVOC -- trying to see what notes we have
# print(midi.NOTE_VALUE_MAP_SHARP[note], time)
# print(note, time)
def note_off(self, channel=0, note=0x40, velocity=0x40):
i = len(self.notes) - 1
while i >= 0 and self.notes[i][0] != note:
i -= 1
if i >= 0 and self.notes[i][2] is None:
self.notes[i][2] = self.abs_time_in_seconds()
def sysex_event(*args):
pass
def device_name(*args):
pass
def midiwrite(filename, piano_roll, r=(21, 109), dt=0.2, patch=0):
midi = MidiOutFile(filename)
midi.header(division=100)
midi.start_of_track()
midi.patch_change(channel=0, patch=patch)
t = 0
samples = [i.nonzero()[0] + r[0] for i in piano_roll]
for i in xrange(len(samples)):
for f in samples[i]:
if i==0 or f not in samples[i-1]:
midi.update_time(t)
midi.note_on(channel=0, note=f, velocity=90)
t = 0
t += int(dt*200)
for f in samples[i]:
if i==len(samples)-1 or f not in samples[i+1]:
midi.update_time(t)
midi.note_off(channel=0, note=f, velocity=0)
t = 0
midi.update_time(0)
midi.end_of_track()
midi.eof()
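# Hedged usage sketch (an assumption; 'input.mid' and 'output.mid' are hypothetical paths,
# not part of the original module): round-trip a MIDI file through the piano-roll
# representation defined above.
if __name__ == '__main__':
    roll = midiread('input.mid', r=(21, 109), dt=0.2).piano_roll  # binary array, shape (timesteps, 88)
    midiwrite('output.mid', roll, r=(21, 109), dt=0.2)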
|
Stanford-Online/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/password_policy/settings/aws.py
|
24
|
"""
Production settings for the password_policy app.
"""
def plugin_settings(settings):
"""
Override the default password_policy app settings with production settings.
"""
config = dict(settings.PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG)
config.update(settings.ENV_TOKENS.get('PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG', {}))
settings.PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG = config
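# Hedged sketch (an assumption, not part of the original module): the merge gives
# ENV_TOKENS precedence over the in-code defaults. For example, with
#
#     settings.PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG = {'ENFORCE_COMPLIANCE_ON_LOGIN': False}
#     settings.ENV_TOKENS = {'PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG': {'ENFORCE_COMPLIANCE_ON_LOGIN': True}}
#
# plugin_settings(settings) leaves {'ENFORCE_COMPLIANCE_ON_LOGIN': True} in
# settings.PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG (the key name here is hypothetical).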
|