repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
syci/OCB
|
refs/heads/9.0
|
addons/crm_partner_assign/crm_lead.py
|
46
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
from openerp.exceptions import UserError
class crm_lead(osv.osv):
    _inherit = 'crm.lead'

    def get_interested_action(self, cr, uid, interested, context=None):
        """Return the 'channel interested' window action with the
        ``interested`` flag injected into its context."""
        ir_model_data = self.pool.get('ir.model.data')
        try:
            model, action_id = ir_model_data.get_object_reference(
                cr, uid, 'crm_partner_assign', 'crm_lead_channel_interested_act')
        except ValueError:
            raise UserError(_("The CRM Channel Interested Action is missing"))
        action = self.pool[model].read(cr, uid, [action_id], context=context)[0]
        # 'eval' is safe_eval (aliased at import time), not the builtin.
        action_ctx = eval(action['context'])
        action_ctx['interested'] = interested
        action['context'] = str(action_ctx)
        return action

    def case_interested(self, cr, uid, ids, context=None):
        """Open the interested-channel wizard with interested=True."""
        return self.get_interested_action(cr, uid, True, context=context)

    def case_disinterested(self, cr, uid, ids, context=None):
        """Open the interested-channel wizard with interested=False."""
        return self.get_interested_action(cr, uid, False, context=context)

    def assign_salesman_of_assigned_partner(self, cr, uid, ids, context=None):
        """For leads still open (probability strictly between 0 and 100, or in
        the first stage), hand each lead over to the salesman of its assigned
        partner when that salesman differs from the current one."""
        salesmans_leads = {}
        for lead in self.browse(cr, uid, ids, context=context):
            stage = lead.stage_id
            if not (0 < stage.probability < 100 or stage.sequence == 1):
                continue
            partner = lead.partner_assigned_id
            if partner and partner.user_id and partner.user_id != lead.user_id:
                salesmans_leads.setdefault(partner.user_id.id, []).append(lead.id)
        for salesman_id, lead_ids in salesmans_leads.items():
            team_id = self.on_change_user(
                cr, uid, lead_ids, salesman_id, context=None)['value'].get('team_id')
            self.write(cr, uid, lead_ids,
                       {'user_id': salesman_id, 'team_id': team_id},
                       context=context)
|
evanv/simhash-db-py
|
refs/heads/master
|
bench.py
|
2
|
#! /usr/bin/env python
'''This is a utility to run a benchmark against a backend'''
import os
import time
import psutil
import random
import argparse
from simhash_db import Client, GeneralException
# Command-line configuration for the benchmark run.
parser = argparse.ArgumentParser(description='Run benchmarks on simhash_db')
parser.add_argument('--count', type=int, default=1000,
    help='How many thousands of keys should be inserted per process')
parser.add_argument('--name', type=str, default='testing',
    help='The name of the set of simhashes to use')
# NOTE(review): psutil.NUM_CPUS was removed in psutil 2.0+ (replaced by
# psutil.cpu_count()) — confirm the pinned psutil version supports it.
parser.add_argument('--processes', type=int, default=(2 * psutil.NUM_CPUS),
    help='How many processes should be forked (defaults to 2 x NUM_CPUS)')
parser.add_argument('--num-blocks', dest='num_blocks', type=int, default=6,
    help='How many blocks to configure the client to use')
parser.add_argument('--num-bits', dest='num_bits', type=int, default=3,
    help='How many bits to configure the client to use')
parser.add_argument('--backend', type=str, required=True,
    help='Which backend to use')
parser.add_argument('--config', type=str, required=False,
    help='Path to a yaml file with the host configuration')
args = parser.parse_args()
# If a configuration file was provided, we should use it
if args.config:
    from yaml import load
    with open(args.config) as fin:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; prefer yaml.safe_load unless the config
        # file is fully trusted.
        kwargs = load(fin.read())
else:
    kwargs = {}
def make_seeds():
    '''Return args.count (seed, skip) pairs of random integers.

    Holding (or generating) every hash up front would cost too much memory
    and time, so each pair describes an arithmetic sequence: batch i uses the
    hashes seed + i * skip.'''
    return [
        (random.randint(0, 2 ** 64), random.randint(1, 1000))
        for _ in range(args.count)
    ]
def insert():
'''Run the timing numbers for each of the provided seeds for insertion'''
seeds = make_seeds()
client = Client(args.backend, args.name, args.num_blocks, args.num_bits,
**kwargs)
for i in range(1000):
if i % 25 == 0:
print 'Inserting batch %i' % i
# We want to get a relatively good mix each time we insert data
hashes = [(start + i * interval) for start, interval in seeds]
try:
results = client.insert(hashes)
except GeneralException as exc:
print '---> Client exception: %s' % repr(exc)
# Since this is meant to be run in a subprocess...
exit(0)
def query():
'''Run the timing numbers for each of the provided seeds for query all'''
seeds = make_seeds()
client = Client(args.backend, args.name, args.num_blocks, args.num_bits,
**kwargs)
for i in range(1000):
if i % 25 == 0:
print 'Querying batch %i' % i
# We want to get a relatively good mix each time we insert data
hashes = [(start + i * interval) for start, interval in seeds]
try:
results = client.find_all(hashes)
except GeneralException as exc:
print '---> Client exception: %s' % repr(exc)
def time_children(processes, function, *args, **kwargs):
'''Run `processes` number of forked processes, each running `function` with
the provided arguments, and return the timing information'''
start = -time.time()
times = []
for i in range(processes):
pid = os.fork()
if pid == 0:
function(*args, **kwargs)
else:
print '---> Started %i' % pid
# And now wait for them, and collect the timing information
for i in range(processes):
pid, status = os.wait()
times.append(start + time.time())
print '---> %i finished in %fs' % (pid, times[-1])
return times
# Now run the benchmark itself and print out the results
insert_times = time_children(args.processes, insert)
query_times = time_children(args.processes, query)
# This is how many hashes we actually tried to insert
# (each process runs 1000 batches of args.count hashes).
count = 1000 * args.count
print 'Insertion:'
print '    Times (min | avg | max): %10.5f s  | %10.5f s  | %10.5f s ' % (
    min(insert_times), sum(insert_times) / len(insert_times),
    max(insert_times))
# Rates are hashes/second: slowest child bounds the min rate, fastest the max.
print '    Rate  (min | avg | max): %10i / s | %10i / s | %10i / s' % (
    count / max(insert_times), count * len(insert_times) / sum(insert_times),
    count / min(insert_times))
print 'Query:'
print '    Times (min | avg | max): %10.5f s  | %10.5f s  | %10.5f s ' % (
    min(query_times), sum(query_times) / len(query_times), max(query_times))
print '    Rate  (min | avg | max): %10i / s | %10i / s | %10i / s' % (
    count / max(query_times), count * len(query_times) / sum(query_times),
    count / min(query_times))
|
Elico-Corp/odoo_OCB
|
refs/heads/9.0
|
addons/sale_stock/company.py
|
44
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import fields, models
class company(models.Model):
    """Extend res.company with the sales safety-margin setting."""
    _inherit = 'res.company'

    # Days subtracted from customer-promised dates when scheduling.
    security_lead = fields.Float(
        'Sales Safety Days', required=True, default=0.0,
        help="Margin of error for dates promised to customers. "
             "Products will be scheduled for procurement and delivery "
             "that many days earlier than the actual promised date, to "
             "cope with unexpected delays in the supply chain.")
|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
instal/script.module.liveresolver/lib/js2py/constructors/jsarray.py
|
31
|
from js2py.base import *
@Js
def Array():
    """Implementation of the JS ``Array`` constructor (ES5 15.4.1 / 15.4.2).

    - zero or 2+ arguments: array of the arguments themselves;
    - one Number argument ``len``: new empty array whose length is
      ``ToUint32(len)``; RangeError when that differs from ``len``;
    - one non-Number argument: one-element array wrapping it.
    """
    if len(arguments) == 0 or len(arguments) > 1:
        return arguments.to_list()
    a = arguments[0]
    # BUG FIX: per ES5 15.4.2.2 the length branch applies to a single *Number*
    # argument; this previously tested PyJsString, so Array(5) wrongly
    # produced [5] instead of a length-5 array (upstream js2py uses
    # PyJsNumber here).
    if isinstance(a, PyJsNumber):
        length = a.to_uint32()
        if length != a.value:
            raise MakeError('RangeError', 'Invalid array length')
        temp = Js([])
        temp.put('length', a)
        return temp
    return [a]

Array.create = Array
# The Array constructor's .length property is 1.
Array.own['length']['value'] = Js(1)
@Js
def isArray(arg):
    """Array.isArray: true iff the argument's [[Class]] is 'Array'."""
    return arg.Class == 'Array'

# Wire the constructor/prototype pair together with the standard JS
# property attributes.
_method_attrs = {'enumerable': False, 'writable': True, 'configurable': True}
Array.define_own_property('isArray', dict(_method_attrs, value=isArray))
Array.define_own_property('prototype', {'value': ArrayPrototype,
                                        'enumerable': False,
                                        'writable': False,
                                        'configurable': False})
ArrayPrototype.define_own_property('constructor',
                                   dict(_method_attrs, value=Array))
|
h3biomed/ansible
|
refs/heads/h3
|
contrib/inventory/rackhd.py
|
52
|
#!/usr/bin/env python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import requests
import argparse
RACKHD_URL = 'http://localhost:8080'
class RackhdInventory(object):
    """Build and print (as JSON) an Ansible inventory for the given RackHD
    node ids."""

    def __init__(self, nodeids):
        self._inventory = {}
        for nodeid in nodeids:
            self._load_inventory_data(nodeid)
        inventory = {}
        for (nodeid, info) in self._inventory.items():
            inventory[nodeid] = self._format_output(nodeid, info)
        print(json.dumps(inventory))

    def _load_inventory_data(self, nodeid):
        """Fetch the raw ohai catalog and lookup record for one node and
        store the response bodies in self._inventory[nodeid]."""
        info = {}
        info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid)
        info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid)
        results = {}
        for (key, url) in info.items():
            # NOTE(review): TLS verification is disabled; confirm this is
            # acceptable for the target deployment before keeping verify=False.
            r = requests.get(url, verify=False)
            results[key] = r.text
        self._inventory[nodeid] = results

    def _format_output(self, nodeid, info):
        """Convert one node's raw info into an inventory group dict.

        Returns {} when the expected keys are missing. BUG FIX: previously
        ``output`` was only assigned inside the try block, so a KeyError
        raised before that assignment left ``return output`` referencing an
        unbound local (UnboundLocalError) instead of returning gracefully.
        """
        output = {}
        try:
            node_info = json.loads(info['lookup'])
            ipaddress = ''
            if len(node_info) > 0:
                ipaddress = node_info[0]['ipAddress']
            output = {'hosts': [ipaddress], 'vars': {}}
            for (key, result) in info.items():
                output['vars'][key] = json.loads(result)
            output['vars']['ansible_ssh_user'] = 'monorail'
        except KeyError:
            pass
        return output
def parse_args():
    """Parse the standard Ansible dynamic-inventory flags (--host, --list)."""
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--host')
    argparser.add_argument('--list', action='store_true')
    return argparser.parse_args()
# Allow the RackHD endpoint to be overridden from the environment; fall back
# to the default defined above when the variable is absent.
try:
    # check if rackhd url(ie:10.1.1.45:8080) is specified in the environment
    RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
except Exception:
    # use default values
    pass
# Use the nodeid specified in the environment to limit the data returned
# or return data for all available nodes
nodeids = []
if (parse_args().host):
    try:
        nodeids += parse_args().host.split(',')
        RackhdInventory(nodeids)
    except Exception:
        # NOTE(review): failures are swallowed silently, so Ansible sees no
        # output instead of a diagnostic — consider logging to stderr.
        pass
if (parse_args().list):
    try:
        url = RACKHD_URL + '/api/common/nodes'
        r = requests.get(url, verify=False)
        data = json.loads(r.text)
        # Only compute nodes are exposed to the inventory.
        for entry in data:
            if entry['type'] == 'compute':
                nodeids.append(entry['id'])
        RackhdInventory(nodeids)
    except Exception:
        pass
|
efeguney/gemmlowp
|
refs/heads/master
|
meta/generators/streams_arm_32.py
|
7
|
# Copyright 2016 The Gemmlowp Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the arm32 headers used by the gemm/gemv lib."""
import cc_emitter
import common
import neon_emitter
import streams_common
def Main():
  """Emit the arm32 stream-header source (guarded by GEMMLOWP_NEON_32)."""
  emitter = cc_emitter.CCEmitter()
  common.GenerateHeader(emitter, 'gemmlowp_meta_streams_arm_32',
                        'GEMMLOWP_NEON_32')
  emitter.EmitNamespaceBegin('gemmlowp')
  emitter.EmitNamespaceBegin('meta')
  emitter.EmitNewline()
  # Generate the 8-lane uint8 stream bodies with the 32-bit NEON emitter.
  streams_common.GenerateUInt8x8Streams(emitter, neon_emitter.NeonEmitter(), 8)
  emitter.EmitNamespaceEnd()
  emitter.EmitNamespaceEnd()
  emitter.EmitNewline()
  common.GenerateFooter(emitter,
                        'Meta gemm for arm32 requires: GEMMLOWP_NEON_32!')


if __name__ == '__main__':
  Main()
|
petxo/clitellum
|
refs/heads/master
|
clitellum/core/bus.py
|
1
|
# author = sbermudel
# package description
from datetime import date
import datetime
class Context:
    """Lazily-created process-wide singleton context."""

    _instance = None

    def __init__(self):
        pass

    @classmethod
    def create(cls):
        """(Re)build the shared instance."""
        cls._instance = Context()

    @classmethod
    def instance(cls):
        """Return the shared Context, creating it on first use."""
        if cls._instance is None:
            cls.create()
        return cls._instance
class Identification:
    """Factory for service-identification dicts."""

    def __init__(self):
        pass

    @classmethod
    def create(cls, id_service, type_service):
        """Return an {'Id', 'Type'} mapping describing a service."""
        return {'Id': id_service, 'Type': type_service}
class MessageHeader:
    """Factory for message-header dicts."""

    def __init__(self):
        pass

    @classmethod
    def create(cls, body_type, id_service, type_service):
        """Build a header carrying the body type, a default priority, the
        creation timestamp (ISO format) and the service identification."""
        return {
            'BodyType': body_type,
            'Priority': 0,
            'CreatedAt': datetime.datetime.now().isoformat(),
            'CallContext': dict(),
            'IdentificationService': Identification.create(id_service,
                                                           type_service),
        }
class MessageBus:
    """Factory for the message envelope sent to the bus."""

    def __init__(self):
        pass

    # Returns the dict for the message that will be sent to the bus.
    @classmethod
    def create(cls, body, body_type, id_service, type_service):
        return {
            'Body': body,
            'Header': MessageHeader.create(body_type, id_service,
                                           type_service),
        }
|
theguardian/headphones
|
refs/heads/master
|
lib/html5lib/treeadapters/sax.py
|
1835
|
from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
# Map namespace prefixes (from the foreign-attribute adjustment table) to
# their namespace URIs; entries without a prefix are skipped.
prefix_mapping = {prefix: namespace
                  for prefix, localName, namespace
                  in adjustForeignAttributes.values()
                  if prefix is not None}
def to_sax(walker, handler):
    """Replay the token stream from ``walker`` as SAX events on ``handler``."""
    handler.startDocument()
    for pfx, ns in prefix_mapping.items():
        handler.startPrefixMapping(pfx, ns)
    for token in walker:
        kind = token["type"]
        if kind == "Doctype":
            # No corresponding event on a plain content handler.
            continue
        if kind in ("StartTag", "EmptyTag"):
            qname = (token["namespace"], token["name"])
            handler.startElementNS(
                qname, token["name"],
                AttributesNSImpl(token["data"], unadjustForeignAttributes))
            if kind == "EmptyTag":
                # Empty elements open and close in one token.
                handler.endElementNS(qname, token["name"])
        elif kind == "EndTag":
            handler.endElementNS((token["namespace"], token["name"]),
                                 token["name"])
        elif kind in ("Characters", "SpaceCharacters"):
            handler.characters(token["data"])
        elif kind == "Comment":
            pass
        else:
            assert False, "Unknown token type"
    for pfx, ns in prefix_mapping.items():
        handler.endPrefixMapping(pfx)
    handler.endDocument()
|
lahosken/pants
|
refs/heads/master
|
src/python/pants/backend/docgen/register.py
|
15
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.docgen.targets.doc import Page, Wiki, WikiArtifact
from pants.backend.docgen.tasks.confluence_publish import ConfluencePublish
from pants.backend.docgen.tasks.generate_pants_reference import GeneratePantsReference
from pants.backend.docgen.tasks.markdown_to_html import MarkdownToHtml
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
    """Expose the docgen targets and objects to BUILD files."""
    target_aliases = {
        'page': Page,
    }
    object_aliases = {
        # TODO: This is a Task subclass, and so should definitely not be a BUILD file object.
        # Presumably it is to allow it to be subclassed in a BUILD file, and we do NOT endorse that kind of thing.
        'ConfluencePublish': ConfluencePublish,
        'wiki_artifact': WikiArtifact,
        # TODO: Why is this capitalized?
        'Wiki': Wiki,
    }
    return BuildFileAliases(targets=target_aliases, objects=object_aliases)
def register_goals():
    """Register the docgen task goals."""
    # FIX: a stray trailing comma previously turned the first install() call
    # into a discarded one-element tuple — harmless at runtime, but clearly a
    # typo.
    task(name='markdown', action=MarkdownToHtml).install()
    task(name='reference', action=GeneratePantsReference).install()
|
sanyaade-teachings/gyp
|
refs/heads/master
|
test/msvs/config_attrs/gyptest-config_attrs.py
|
34
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that msvs_configuration_attributes and
msbuild_configuration_attributes are applied by using
them to set the OutputDirectory.
"""
import TestGyp
import os
import sys
# The test only applies on Windows, and is currently disabled there too.
if sys.platform == 'win32':
    print "This test is currently disabled: https://crbug.com/483696."
    sys.exit(0)
test = TestGyp.TestGyp(workdir='workarea_all',formats=['msvs'])
# The expected OutputDirectory name depends on the MSVS toolchain version:
# VC90 for pre-2010 versions, VC100 for 2010 and later.
vc_version = 'VC90'
if os.getenv('GYP_MSVS_VERSION'):
    vc_version = ['VC90','VC100'][int(os.getenv('GYP_MSVS_VERSION')) >= 2010]
expected_exe_file = os.path.join(test.workdir, vc_version, 'hello.exe')
test.run_gyp('hello.gyp')
test.build('hello.gyp')
# The configuration attributes redirect output; the exe must land there.
test.must_exist(expected_exe_file)
test.pass_test()
|
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/tests/regressiontests/multiple_database/models.py
|
109
|
from __future__ import absolute_import
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Review(models.Model):
    """A review attached to any model instance via a generic foreign key."""
    source = models.CharField(max_length=100)
    # (content_type, object_id) pair resolved by content_object below.
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()

    def __str__(self):
        return self.source

    class Meta:
        ordering = ('source',)
class PersonManager(models.Manager):
    """Manager enabling natural-key (name) lookups for serialization."""
    def get_by_natural_key(self, name):
        return self.get(name=name)
@python_2_unicode_compatible
class Person(models.Model):
    """A person identified by name; supports natural-key lookup."""
    objects = PersonManager()
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
# This book manager doesn't do anything interesting; it just
# exists to strip out the 'extra_arg' argument to certain
# calls. This argument is used to establish that the BookManager
# is actually getting used when it should be.
class BookManager(models.Manager):
    """Manager that strips the test-only 'extra_arg' keyword before
    delegating, proving the custom manager is actually in use."""
    def create(self, *args, **kwargs):
        kwargs.pop('extra_arg', None)
        return super(BookManager, self).create(*args, **kwargs)

    def get_or_create(self, *args, **kwargs):
        kwargs.pop('extra_arg', None)
        return super(BookManager, self).get_or_create(*args, **kwargs)
@python_2_unicode_compatible
class Book(models.Model):
    """A book with authors, an optional editor and generic reviews."""
    objects = BookManager()
    title = models.CharField(max_length=100)
    published = models.DateField()
    authors = models.ManyToManyField(Person)
    # Nullable so a book can exist without an editor; reverse name 'edited'.
    editor = models.ForeignKey(Person, null=True, related_name='edited')
    reviews = generic.GenericRelation(Review)
    pages = models.IntegerField(default=100)

    def __str__(self):
        return self.title

    class Meta:
        ordering = ('title',)
@python_2_unicode_compatible
class Pet(models.Model):
    """A pet owned by a Person."""
    name = models.CharField(max_length=100)
    owner = models.ForeignKey(Person)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
class UserProfile(models.Model):
    """One-to-one profile extension of the auth User (user is nullable)."""
    user = models.OneToOneField(User, null=True)
    flavor = models.CharField(max_length=100)

    class Meta:
        ordering = ('flavor',)
|
amitsaha/learning
|
refs/heads/master
|
python/graphs/task_ordering.py
|
1
|
'''
Find the task ordering from a set of dependency rules:
Example rules:
3 -> 1 5
2 -> 5 3
4 -> 3
5 -> 1
'''
from __future__ import print_function
class Node:
    """A labelled vertex whose ``neighbors`` are the vertices it points to.

    BUG FIX: both methods previously used a mutable default argument
    (``neighbors=[]``); every Node created without an explicit neighbor list
    shared — and mutated — the *same* list object.
    """
    def __init__(self, label, neighbors=None):
        self.label = label
        # Fresh list per instance instead of a shared default.
        self.neighbors = [] if neighbors is None else neighbors

    def __str__(self):
        return self.label + '->' + str([n.label for n in self.neighbors])

    def update_neighbors(self, neighbors=None):
        """Replace this node's outgoing-edge list."""
        self.neighbors = [] if neighbors is None else neighbors
def toposort():
    """Topologically order the five example tasks (Kahn's algorithm) and
    print the resulting label order, or 'Error' when a cycle remains."""
    one = Node('1')
    two = Node('2')
    three = Node('3')
    four = Node('4')
    five = Node('5')
    # Edges run from a dependency to its dependents, so a topological order
    # of this graph is a valid task ordering.
    one.update_neighbors([five])
    five.update_neighbors([two, three])
    three.update_neighbors([two, four])
    vertices = [one, two, three, four, five]

    def vertices_with_incoming():
        # A vertex has an incoming edge iff it appears in some neighbor list.
        targets = []
        for vertex in vertices:
            if vertex.neighbors:
                targets.extend(vertex.neighbors)
        return set(targets)

    ordered = []
    pending = [v for v in vertices if v not in vertices_with_incoming()]
    while pending:
        current = pending.pop()
        ordered.append(current)
        for succ in list(current.neighbors):
            # Remove the edge current -> succ, then re-check succ's degree.
            current.neighbors.remove(succ)
            if succ not in vertices_with_incoming():
                pending.append(succ)
    if vertices_with_incoming():
        print('Error')
    else:
        print([v.label for v in ordered])
# Script entry point.
if __name__ == '__main__':
    toposort()
|
DARKPOP/external_chromium_org
|
refs/heads/dark-5.1
|
chrome/common/extensions/PRESUBMIT.py
|
56
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting extensions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import fnmatch
import os
import re
# Repository-relative layout of the extensions documentation tree.
EXTENSIONS_PATH = os.path.join('chrome', 'common', 'extensions')
DOCS_PATH = os.path.join(EXTENSIONS_PATH, 'docs')
SERVER2_PATH = os.path.join(DOCS_PATH, 'server2')
API_PATH = os.path.join(EXTENSIONS_PATH, 'api')
TEMPLATES_PATH = os.path.join(DOCS_PATH, 'templates')
PRIVATE_TEMPLATES_PATH = os.path.join(TEMPLATES_PATH, 'private')
PUBLIC_TEMPLATES_PATH = os.path.join(TEMPLATES_PATH, 'public')
INTROS_PATH = os.path.join(TEMPLATES_PATH, 'intros')
ARTICLES_PATH = os.path.join(TEMPLATES_PATH, 'articles')
# Same public-templates directory, relative to this PRESUBMIT's own dir.
LOCAL_PUBLIC_TEMPLATES_PATH = os.path.join('docs',
                                           'templates',
                                           'public')
# Extensions stripped to produce the docserver's clean (extensionless) URLs.
EXTENSIONS_TO_REMOVE_FOR_CLEAN_URLS = ('.md', '.html')
def _ReadFile(filename):
with open(filename) as f:
return f.read()
def _ListFilesInPublic():
    """List every file under the public templates tree, with paths made
    relative to that tree's root."""
    strip = len(LOCAL_PUBLIC_TEMPLATES_PATH + os.sep)
    found = []
    for dirpath, _, filenames in os.walk(LOCAL_PUBLIC_TEMPLATES_PATH):
        for filename in filenames:
            found.append(os.path.join(dirpath, filename)[strip:])
    return found
def _UnixName(name):
name = os.path.splitext(name)[0]
s1 = re.sub('([a-z])([A-Z])', r'\1_\2', name)
s2 = re.sub('([A-Z]+)([A-Z][a-z])', r'\1_\2', s1)
return s2.replace('.', '_').lower()
def _FindMatchingTemplates(template_name, template_path_list):
    """Return entries of ``template_path_list`` whose basename unix-name
    matches ``template_name``'s unix-name."""
    wanted = _UnixName(template_name)
    matches = []
    for template in template_path_list:
        if _UnixName(template.split(os.sep)[-1]) != wanted:
            continue
        basename, ext = os.path.splitext(template)
        # The docserver expects clean (extensionless) template URLs, so doc
        # extensions are stripped when building the match list.
        matches.append(basename if ext in EXTENSIONS_TO_REMOVE_FOR_CLEAN_URLS
                       else template)
    return matches
def _SanitizeAPIName(name, api_path):
if not api_path.endswith(os.sep):
api_path += os.sep
filename = os.path.splitext(name)[0][len(api_path):].replace(os.sep, '_')
if 'experimental' in filename:
filename = 'experimental_' + filename.replace('experimental_', '')
return filename
def _CreateIntegrationTestArgs(affected_files):
    """Map the changed files to docserver integration-test arguments."""
    # Any server code or private-template change means: run everything.
    if (any(fnmatch.fnmatch(name, '%s*.py' % SERVER2_PATH)
            for name in affected_files) or
        any(fnmatch.fnmatch(name, '%s*' % PRIVATE_TEMPLATES_PATH)
            for name in affected_files)):
        return ['-a']
    args = []
    public_files = None
    for name in affected_files:
        if (fnmatch.fnmatch(name, '%s*' % PUBLIC_TEMPLATES_PATH) or
            fnmatch.fnmatch(name, '%s*' % INTROS_PATH) or
            fnmatch.fnmatch(name, '%s*' % ARTICLES_PATH)):
            args.extend(_FindMatchingTemplates(name.split(os.sep)[-1],
                                               _ListFilesInPublic()))
        if fnmatch.fnmatch(name, '%s*' % API_PATH):
            args.extend(_FindMatchingTemplates(_SanitizeAPIName(name, API_PATH),
                                               _ListFilesInPublic()))
    return args
def _CheckHeadingIDs(input_api):
    """Return doc files in which some <h2>/<h3> heading lacks an id."""
    ids_re = re.compile('<h[23].*id=.*?>')
    headings_re = re.compile('<h[23].*?>')
    bad_files = []
    for name in input_api.AbsoluteLocalPaths():
        if not os.path.exists(name):
            continue
        if not (fnmatch.fnmatch(name, '*%s*' % INTROS_PATH) or
                fnmatch.fnmatch(name, '*%s*' % ARTICLES_PATH)):
            continue
        contents = input_api.ReadFile(name)
        # Every heading must carry an id: the counts should match exactly.
        if len(headings_re.findall(contents)) != len(ids_re.findall(contents)):
            bad_files.append(name)
    return bad_files
def _CheckLinks(input_api, output_api, results):
    """Warn about old-style <a> links in doc files.

    Runs docs/server2/link_converter.py over each affected doc/API file and,
    when the converter's output differs from the file's contents, appends a
    PresubmitPromptWarning (with a line-by-line diff) to ``results``.
    """
    for affected_file in input_api.AffectedFiles():
        name = affected_file.LocalPath()
        absolute_path = affected_file.AbsoluteLocalPath()
        if not os.path.exists(absolute_path):
            continue
        # Only doc templates, intros, articles, and API definitions.
        if (fnmatch.fnmatch(name, '%s*' % PUBLIC_TEMPLATES_PATH) or
            fnmatch.fnmatch(name, '%s*' % INTROS_PATH) or
            fnmatch.fnmatch(name, '%s*' % ARTICLES_PATH) or
            fnmatch.fnmatch(name, '%s*' % API_PATH)):
            contents = _ReadFile(absolute_path)
            args = []
            # On Windows the script is not directly executable; invoke it
            # through the Python interpreter explicitly.
            if input_api.platform == 'win32':
                args = [input_api.python_executable]
            args.extend([os.path.join('docs', 'server2', 'link_converter.py'),
                         '-o',
                         '-f',
                         absolute_path])
            output = input_api.subprocess.check_output(
                args,
                cwd=input_api.PresubmitLocalPath(),
                universal_newlines=True)
            # Any difference between converter output and the file means an
            # unconverted link exists; build a compact diff for the warning.
            if output != contents:
                changes = ''
                for i, (line1, line2) in enumerate(
                        zip(contents.split('\n'), output.split('\n'))):
                    if line1 != line2:
                        changes = ('%s\nLine %d:\n-%s\n+%s\n' %
                                   (changes, i + 1, line1, line2))
                if changes:
                    results.append(output_api.PresubmitPromptWarning(
                        'File %s may have an old-style <a> link to an API page. Please '
                        'run docs/server2/link_converter.py to convert the link[s], or '
                        'convert them manually.\n\nSuggested changes are: %s' %
                        (name, changes)))
def _CheckChange(input_api, output_api):
    """Run the heading-id check and the docserver integration test."""
    results = [
        output_api.PresubmitError('File %s needs an id for each heading.' % name)
        for name in _CheckHeadingIDs(input_api)]
    cmd = []
    # From depot_tools/presubmit_canned_checks.py:529
    if input_api.platform == 'win32':
        cmd = [input_api.python_executable]
    cmd.append(os.path.join('docs', 'server2', 'integration_test.py'))
    cmd.extend(_CreateIntegrationTestArgs(input_api.LocalPaths()))
    try:
        input_api.subprocess.check_call(cmd,
                                        cwd=input_api.PresubmitLocalPath())
    except input_api.subprocess.CalledProcessError:
        results.append(output_api.PresubmitError('IntegrationTest failed!'))
    # TODO(kalman): Re-enable this check, or decide to delete it forever. Now
    # that we have multiple directories it no longer works.
    # See http://crbug.com/297178.
    #_CheckLinks(input_api, output_api, results)
    return results
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit hook (upload): patch formatting plus the shared doc checks."""
    return (input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
            + _CheckChange(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit hook (commit): the shared doc checks only."""
    return _CheckChange(input_api, output_api)
|
AhmadHamzeei/Amir-Accounting
|
refs/heads/master
|
amir/dbconfig.py
|
1
|
import database
from share import share
# Module-level alias to the shared application configuration/session holder.
# NOTE(review): presumably share.config carries the open DB session (used as
# config.db.session below) — confirm against the share module.
config = share.config
## \defgroup Controller
## @{
class dbConfig:
    """Access layer for key/value configuration rows stored in the database."""
    # Built-in defaults keyed by config name. Values are stored as strings;
    # most appear to be subject/account ids used elsewhere in the app.
    data = {
        'co-name' :'Enter Company name',
        'co-logo' :'',
        'custSubject' : '4',
        'bank' : '1',
        'cash' :'14',
        'buy' :'17',
        'sell' :'18',
        'sell-discount' :'25',
        'tax' :'33',
        'partners' : '8',
        'cost' : '2',
        'bank-wage' : '31',
        'sell-adds' : '7',
        #"fund": '9',
        #"acc-receivable": '10',
        #"commission": '11',
    }

    def get_default(self, key):
        # Return the built-in default for *key*, or '' when unknown.
        try:
            return self.data[key]
        except KeyError:
            return ''

    def get_value(self, key):
        # Fetch the stored value for *key* (matched against cfgKey).
        # NOTE(review): if no row matches, query.first() is None and this
        # raises AttributeError — callers seem expected to call exists()
        # first; confirm.
        key = unicode(key)
        query = config.db.session.query(database.Config)
        query = query.filter(database.Config.cfgKey == key)
        return query.first().cfgValue

    def exists(self, key):
        # True when a row with this cfgKey is present.
        query = config.db.session.query(database.Config)
        query = query.filter(database.Config.cfgKey == key)
        if query.first():
            return True
        return False

    def set_value(self, key, val, commit=True):
        # Update a row's value.
        # NOTE(review): this filters on cfgId while get_value filters on
        # cfgKey, so *key* here must actually be the numeric row id — verify
        # this asymmetry is intentional.
        val = unicode(val)
        query = config.db.session.query(database.Config)
        query = query.filter(database.Config.cfgId == key)
        query = query.update({u'cfgValue':val})
        if commit: # commit all of the at once for more speed
            config.db.session.commit()

    def add(self, key, mode, desc):
        # Insert a new config row with an empty value.
        if self.exists(key):
            raise Exception('Key already exists')
        # NOTE(review): positional Config(...) args assumed to be
        # (key, value, description, mode, category) — confirm against the
        # database.Config model.
        row = database.Config(unicode(key), u'', unicode(desc), mode, 2)
        config.db.session.add(row)
        config.db.session.commit()

    def delete(self, id):
        # Remove the row with the given cfgId.
        query = config.db.session.query(database.Config).filter(database.Config.cfgId == id).first()
        config.db.session.delete(query)
        config.db.session.commit()

    def get_int(self ,key):
        # Stored value parsed as int, or None when it is not numeric.
        try:
            return int(self.get_value(key))
        except ValueError:
            return None

    def get_int_list(self, key):
        # Stored comma-separated value parsed as a list of ints; None when
        # any non-empty item is not numeric.
        val = []
        try:
            for item in self.get_value(key).split(','):
                if len(item) != 0:
                    val.append(int(item))
        except ValueError:
            return None
        return val
# Create the module-level singleton only when it does not already exist
# (e.g. if this module's body is executed more than once).
try:
    dbconf
except NameError:
    dbconf = dbConfig()
## @}
|
GoSteven/Diary
|
refs/heads/master
|
django/contrib/admindocs/models.py
|
12
|
# Empty models.py to allow for specifying admindocs as a test label.
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/cloud/gkehub/v1beta1/gkehub-v1beta1-py/google/cloud/gkehub_v1beta1/services/gke_hub_membership_service/pagers.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.gkehub_v1beta1.types import membership
class ListMembershipsPager:
    """Pager over ``list_memberships`` results.

    Wraps an initial :class:`google.cloud.gkehub_v1beta1.types.ListMembershipsResponse`
    and exposes ``__iter__`` over its ``resources`` field, transparently
    issuing follow-up ``ListMemberships`` requests while a
    ``next_page_token`` is present. Attribute access is proxied to the most
    recently fetched response.
    """

    def __init__(self,
            method: Callable[..., membership.ListMembershipsResponse],
            request: membership.ListMembershipsRequest,
            response: membership.ListMembershipsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.gkehub_v1beta1.types.ListMembershipsRequest):
                The initial request object.
            response (google.cloud.gkehub_v1beta1.types.ListMembershipsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = membership.ListMembershipsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes resolve against the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[membership.ListMembershipsResponse]:
        """Yield each response page, fetching successors on demand."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterable[membership.Membership]:
        for page in self.pages:
            for resource in page.resources:
                yield resource

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListMembershipsAsyncPager:
    """Async pager for ``list_memberships`` requests.

    Thinly wraps an initial
    :class:`google.cloud.gkehub_v1beta1.types.ListMembershipsResponse` and
    exposes ``__aiter__`` to walk the ``resources`` field, transparently
    issuing further ``ListMemberships`` calls while the service reports
    another page.

    Attribute access is proxied to the most recent response, so the usual
    response attributes remain available on the pager itself.
    """
    def __init__(self,
            method: Callable[..., Awaitable[membership.ListMembershipsResponse]],
            request: membership.ListMembershipsRequest,
            response: membership.ListMembershipsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Set up the async pager.

        Args:
            method (Callable): The awaitable API method that produced
                ``response``; reused to fetch subsequent pages.
            request (google.cloud.gkehub_v1beta1.types.ListMembershipsRequest):
                The initial request object.
            response (google.cloud.gkehub_v1beta1.types.ListMembershipsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Extra metadata strings sent
                along with every request.
        """
        self._metadata = metadata
        self._response = response
        # Re-wrap so later page_token mutations never touch the caller's copy.
        self._request = membership.ListMembershipsRequest(request)
        self._method = method
    def __getattr__(self, name: str) -> Any:
        """Proxy unknown attribute lookups to the most recent response."""
        response = self._response
        return getattr(response, name)
    @property
    async def pages(self) -> AsyncIterable[membership.ListMembershipsResponse]:
        """Asynchronously yield one response per page, fetching lazily."""
        while True:
            yield self._response
            token = self._response.next_page_token
            if not token:
                return
            self._request.page_token = token
            self._response = await self._method(self._request, metadata=self._metadata)
    def __aiter__(self) -> AsyncIterable[membership.Membership]:
        async def _flatten():
            async for page in self.pages:
                for resource in page.resources:
                    yield resource
        return _flatten()
    def __repr__(self) -> str:
        """Show the pager type wrapping the latest response."""
        return f'{self.__class__.__name__}<{self._response!r}>'
|
whitehorse-io/encarnia
|
refs/heads/master
|
pyenv/lib/python2.7/site-packages/twisted/trial/test/test_output.py
|
18
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the output generated by trial.
"""
from __future__ import absolute_import, division, print_function
import os
from twisted.python.compat import NativeStringIO, _PY3
from twisted.scripts import trial
from twisted.trial import runner
from twisted.trial.test import packages
# Python 3 quotes the missing module name in ImportError messages;
# Python 2 does not.  Keep the spelling matching the running interpreter.
_noModuleError = (
    "No module named 'frotz'" if _PY3 else "No module named frotz")
def runTrial(*args):
    """Run trial with *args* as its command line and return the text output."""
    from twisted.trial import reporter
    options = trial.Options()
    options.parseOptions(args)
    stream = NativeStringIO()
    trial_runner = runner.TrialRunner(
        reporter.VerboseTextReporter,
        stream=stream,
        workingDirectory=options['temp-directory'])
    trial_runner.run(trial._getSuite(options))
    return stream.getvalue()
class ImportErrorsTests(packages.SysPathManglingTest):
    """Actually run trial as if on the command line and check that the output
    is what we expect.
    """
    debug = False
    # Name of the directory (created by SysPathManglingTest) that holds the
    # synthetic test packages these tests import from.
    parent = "_testImportErrors"
    def runTrial(self, *args):
        # Run trial in a fresh temp directory so runs don't interfere.
        return runTrial('--temp-directory', self.mktemp(), *args)
    def _print(self, stuff):
        # Debugging helper: print and pass the value through unchanged.
        print(stuff)
        return stuff
    def assertIn(self, container, containee, *args, **kwargs):
        # redefined to be useful in callbacks
        # NOTE: argument order is (container, containee) -- the REVERSE of
        # the standard library's assertIn(member, container).
        super(ImportErrorsTests, self).assertIn(
            containee, container, *args, **kwargs)
        return container
    def assertNotIn(self, container, containee, *args, **kwargs):
        # redefined to be useful in callbacks
        # NOTE: argument order is reversed, matching assertIn above.
        super(ImportErrorsTests, self).assertNotIn(
            containee, container, *args, **kwargs)
        return container
    def test_trialRun(self):
        # Smoke test: trial with no arguments must not blow up.
        self.runTrial()
    def test_nonexistentModule(self):
        # A missing module inside an existing package is reported as an error.
        d = self.runTrial('twisted.doesntexist')
        self.assertIn(d, '[ERROR]')
        self.assertIn(d, 'twisted.doesntexist')
        return d
    def test_nonexistentPackage(self):
        d = self.runTrial('doesntexist')
        self.assertIn(d, 'doesntexist')
        self.assertIn(d, 'ModuleNotFound')
        self.assertIn(d, '[ERROR]')
        return d
    def test_nonexistentPackageWithModule(self):
        d = self.runTrial('doesntexist.barney')
        self.assertIn(d, 'doesntexist.barney')
        self.assertIn(d, 'ObjectNotFound')
        self.assertIn(d, '[ERROR]')
        return d
    def test_badpackage(self):
        # A package whose __init__ raises must error without a spurious
        # IOError in the report.
        d = self.runTrial('badpackage')
        self.assertIn(d, '[ERROR]')
        self.assertIn(d, 'badpackage')
        self.assertNotIn(d, 'IOError')
        return d
    def test_moduleInBadpackage(self):
        d = self.runTrial('badpackage.test_module')
        self.assertIn(d, "[ERROR]")
        self.assertIn(d, "badpackage.test_module")
        self.assertNotIn(d, 'IOError')
        return d
    def test_badmodule(self):
        # A module with a syntax/runtime error at import time.
        d = self.runTrial('package.test_bad_module')
        self.assertIn(d, '[ERROR]')
        self.assertIn(d, 'package.test_bad_module')
        self.assertNotIn(d, 'IOError')
        self.assertNotIn(d, '<module ')
        return d
    def test_badimport(self):
        # A module that imports a nonexistent dependency.
        d = self.runTrial('package.test_import_module')
        self.assertIn(d, '[ERROR]')
        self.assertIn(d, 'package.test_import_module')
        self.assertNotIn(d, 'IOError')
        self.assertNotIn(d, '<module ')
        return d
    def test_recurseImport(self):
        # Recursing into a package reports each broken module by name.
        d = self.runTrial('package')
        self.assertIn(d, '[ERROR]')
        self.assertIn(d, 'test_bad_module')
        self.assertIn(d, 'test_import_module')
        self.assertNotIn(d, '<module ')
        self.assertNotIn(d, 'IOError')
        return d
    def test_recurseImportErrors(self):
        d = self.runTrial('package2')
        self.assertIn(d, '[ERROR]')
        self.assertIn(d, 'package2')
        self.assertIn(d, 'test_module')
        self.assertIn(d, _noModuleError)
        self.assertNotIn(d, '<module ')
        self.assertNotIn(d, 'IOError')
        return d
    def test_nonRecurseImportErrors(self):
        # -N disables recursion; the import error must still surface.
        d = self.runTrial('-N', 'package2')
        self.assertIn(d, '[ERROR]')
        self.assertIn(d, _noModuleError)
        self.assertNotIn(d, '<module ')
        return d
    def test_regularRun(self):
        # A healthy module runs cleanly and reports one success.
        d = self.runTrial('package.test_module')
        self.assertNotIn(d, '[ERROR]')
        self.assertNotIn(d, 'IOError')
        self.assertIn(d, 'OK')
        self.assertIn(d, 'PASSED (successes=1)')
        return d
    def test_filename(self):
        # Tests can also be named by file path rather than module path.
        self.mangleSysPath(self.oldPath)
        d = self.runTrial(
            os.path.join(self.parent, 'package', 'test_module.py'))
        self.assertNotIn(d, '[ERROR]')
        self.assertNotIn(d, 'IOError')
        self.assertIn(d, 'OK')
        self.assertIn(d, 'PASSED (successes=1)')
        return d
    def test_dosFile(self):
        ## XXX -- not really an output test, more of a script test
        # A module with DOS (CRLF) line endings must still load and pass.
        self.mangleSysPath(self.oldPath)
        d = self.runTrial(
            os.path.join(self.parent,
                         'package', 'test_dos_module.py'))
        self.assertNotIn(d, '[ERROR]')
        self.assertNotIn(d, 'IOError')
        self.assertIn(d, 'OK')
        self.assertIn(d, 'PASSED (successes=1)')
        return d
|
abramhindle/UnnaturalCodeFork
|
refs/heads/master
|
python/testdata/launchpad/lib/lp/blueprints/model/specificationsearch.py
|
1
|
# Copyright 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Helper methods to search specifications."""
__metaclass__ = type
__all__ = [
'get_specification_filters',
'get_specification_active_product_filter',
'get_specification_privacy_filter',
'search_specifications',
]
from collections import defaultdict
from storm.expr import (
And,
Coalesce,
Join,
LeftJoin,
Not,
Or,
Select,
)
from storm.locals import (
Desc,
SQL,
)
from zope.component import getUtility
from lp.app.enums import PUBLIC_INFORMATION_TYPES
from lp.blueprints.enums import (
SpecificationDefinitionStatus,
SpecificationFilter,
SpecificationGoalStatus,
SpecificationImplementationStatus,
SpecificationSort,
)
from lp.blueprints.model.specification import Specification
from lp.blueprints.model.specificationbranch import SpecificationBranch
from lp.blueprints.model.specificationworkitem import SpecificationWorkItem
from lp.registry.interfaces.distribution import IDistribution
from lp.registry.interfaces.distroseries import IDistroSeries
from lp.registry.interfaces.person import IPersonSet
from lp.registry.interfaces.product import IProduct
from lp.registry.interfaces.productseries import IProductSeries
from lp.registry.interfaces.role import IPersonRoles
from lp.registry.model.teammembership import TeamParticipation
from lp.services.database.bulk import load_referencing
from lp.services.database.decoratedresultset import DecoratedResultSet
from lp.services.database.interfaces import IStore
from lp.services.database.stormexpr import (
Array,
ArrayAgg,
ArrayIntersects,
fti_search,
)
from lp.services.propertycache import get_property_cache
def search_specifications(context, base_clauses, user, sort=None,
                          quantity=None, spec_filter=None, tables=None,
                          default_acceptance=False, need_people=True,
                          need_branches=True, need_workitems=False):
    """Return a `DecoratedResultSet` of specifications matching the filters.

    :param context: The object the search is scoped to; only used to decide
        whether the active-product filter applies.
    :param base_clauses: Storm clauses every result must satisfy.
    :param user: The user the search runs as; constrains which private
        specifications are visible.
    :param sort: A `SpecificationSort` value, a raw Storm ordering
        expression, or None for the default (priority descending).
    :param quantity: Maximum number of rows, or None for no limit.
    :param spec_filter: A collection of `SpecificationFilter` values and/or
        text-search strings.
    :param tables: Optional list of Storm tables/joins to query from.
    :param default_acceptance: If True, default to goal-status filtering
        (ACCEPTED) instead of completeness filtering (INCOMPLETE).
    :param need_people: Preload assignee/approver/drafter persons.
    :param need_branches: Preload linked branches.
    :param need_workitems: Preload work items.
    """
    store = IStore(Specification)
    if not default_acceptance:
        default = SpecificationFilter.INCOMPLETE
        options = set([
            SpecificationFilter.COMPLETE, SpecificationFilter.INCOMPLETE])
    else:
        default = SpecificationFilter.ACCEPTED
        options = set([
            SpecificationFilter.ACCEPTED, SpecificationFilter.DECLINED,
            SpecificationFilter.PROPOSED])
    if not spec_filter:
        spec_filter = [default]
    if not set(spec_filter) & options:
        # Copy before appending so the caller's filter list is not mutated.
        spec_filter = list(spec_filter) + [default]
    if not tables:
        # A None default replaces the previous mutable default argument
        # (`tables=[]`); behaviour is identical since both are falsy.
        tables = [Specification]
    else:
        # Work on a copy so the caller's table list is not extended in place.
        tables = list(tables)
    # Copy so extending the clause list does not mutate base_clauses.
    clauses = list(base_clauses)
    product_table, product_clauses = get_specification_active_product_filter(
        context)
    tables.extend(product_table)
    for extend in (get_specification_privacy_filter(user),
                   get_specification_filters(spec_filter), product_clauses):
        clauses.extend(extend)
    # Sort by priority descending, by default.
    if sort is None or sort == SpecificationSort.PRIORITY:
        order = [
            Desc(Specification.priority), Specification.definition_status,
            Specification.name]
    elif sort == SpecificationSort.DATE:
        if SpecificationFilter.COMPLETE in spec_filter:
            # If we are showing completed, we care about date completed.
            order = [Desc(Specification.date_completed), Specification.id]
        else:
            # If not specially looking for complete, we care about date
            # registered.
            order = []
            show_proposed = set(
                [SpecificationFilter.ALL, SpecificationFilter.PROPOSED])
            if default_acceptance and not (set(spec_filter) & show_proposed):
                order.append(Desc(Specification.date_goal_decided))
            order.extend([Desc(Specification.datecreated), Specification.id])
    else:
        # The caller supplied a raw Storm ordering expression.
        order = [sort]
    # Set the _known_viewers property for each specification, as well as
    # preloading the objects involved, if asked.
    def preload_hook(rows):
        person_ids = set()
        work_items_by_spec = defaultdict(list)
        for spec in rows:
            if need_people:
                person_ids |= set(
                    [spec._assigneeID, spec._approverID, spec._drafterID])
            if need_branches:
                get_property_cache(spec).linked_branches = []
        if need_workitems:
            work_items = load_referencing(
                SpecificationWorkItem, rows, ['specification_id'],
                extra_conditions=[SpecificationWorkItem.deleted == False])
            for workitem in work_items:
                person_ids.add(workitem.assignee_id)
                work_items_by_spec[workitem.specification_id].append(workitem)
        person_ids -= set([None])
        if need_people:
            list(getUtility(IPersonSet).getPrecachedPersonsFromIDs(
                person_ids, need_validity=True))
        if need_workitems:
            for spec in rows:
                get_property_cache(spec).work_items = sorted(
                    work_items_by_spec[spec.id], key=lambda wi: wi.sequence)
        if need_branches:
            spec_branches = load_referencing(
                SpecificationBranch, rows, ['specificationID'])
            for sbranch in spec_branches:
                spec_cache = get_property_cache(sbranch.specification)
                spec_cache.linked_branches.append(sbranch)
    decorators = []
    if user is not None and not IPersonRoles(user).in_admin:
        # Non-admin users get each returned row stamped as viewable by them.
        decorators.append(_make_cache_user_can_view_spec(user))
    results = store.using(*tables).find(
        Specification, *clauses).order_by(*order).config(limit=quantity)
    return DecoratedResultSet(
        results,
        lambda row: reduce(lambda task, dec: dec(task), decorators, row),
        pre_iter_hook=preload_hook)
def get_specification_active_product_filter(context):
    """Return (tables, clauses) restricting specs to active products.

    For pillar-scoped contexts (distribution, distroseries, product,
    productseries) no extra filtering is applied.
    """
    context_is_pillar = (
        IDistribution.providedBy(context)
        or IDistroSeries.providedBy(context)
        or IProduct.providedBy(context)
        or IProductSeries.providedBy(context))
    if context_is_pillar:
        return [], []
    from lp.registry.model.product import Product
    product_join = LeftJoin(Product, Specification.productID == Product.id)
    active_clause = Or(
        Specification.product == None, Product.active == True)
    return [product_join], [active_clause]
def get_specification_privacy_filter(user):
    """Return Storm clauses limiting specs to those ``user`` may view."""
    # Circular imports.
    from lp.registry.model.accesspolicy import AccessPolicyGrant
    public_filter = (
        Specification.information_type.is_in(PUBLIC_INFORMATION_TYPES))
    if user is None:
        # Anonymous users only see public specifications.
        return [public_filter]
    if IPersonRoles.providedBy(user):
        user = user.person
    # Teams (including the user) whose direct artifact grants apply.
    user_teams = Select(
        ArrayAgg(TeamParticipation.teamID),
        tables=TeamParticipation,
        where=(TeamParticipation.person == user))
    artifact_grants = Coalesce(
        ArrayIntersects(SQL('Specification.access_grants'), user_teams),
        False)
    # Access policies granted to the user or any of their teams.
    granted_policies = Select(
        ArrayAgg(AccessPolicyGrant.policy_id),
        tables=(AccessPolicyGrant,
                Join(TeamParticipation,
                     TeamParticipation.teamID ==
                     AccessPolicyGrant.grantee_id)),
        where=(TeamParticipation.person == user))
    policy_grants = Coalesce(
        ArrayIntersects(
            Array(SQL('Specification.access_policy')), granted_policies),
        False)
    return [Or(public_filter, artifact_grants, policy_grants)]
def get_specification_filters(filter, goalstatus=True):
    """Return a list of Storm expressions for filtering Specifications.

    :param filter: A collection of SpecificationFilter values and/or
        strings; strings are treated as text searches.
    :param goalstatus: If True, also honour ACCEPTED/PROPOSED/DECLINED
        goal-status filters.
    """
    clauses = []
    # ALL is the trump card: no filtering at all.
    if SpecificationFilter.ALL in filter:
        return clauses
    # Look for informational specs.
    if SpecificationFilter.INFORMATIONAL in filter:
        clauses.append(
            Specification.implementation_status ==
            SpecificationImplementationStatus.INFORMATIONAL)
    # Filter based on completion. See the implementation of
    # Specification.is_complete() for more details.
    if SpecificationFilter.COMPLETE in filter:
        clauses.append(get_specification_completeness_clause())
    if SpecificationFilter.INCOMPLETE in filter:
        clauses.append(Not(get_specification_completeness_clause()))
    # Filter for goal status; the first matching filter wins, in the
    # priority order ACCEPTED, PROPOSED, DECLINED.
    if goalstatus:
        goalstatus = None
        for filter_value, status_value in (
                (SpecificationFilter.ACCEPTED,
                 SpecificationGoalStatus.ACCEPTED),
                (SpecificationFilter.PROPOSED,
                 SpecificationGoalStatus.PROPOSED),
                (SpecificationFilter.DECLINED,
                 SpecificationGoalStatus.DECLINED)):
            if filter_value in filter:
                goalstatus = status_value
                break
        if goalstatus:
            clauses.append(Specification.goalstatus == goalstatus)
    if SpecificationFilter.STARTED in filter:
        clauses.append(get_specification_started_clause())
    # Filter for validity. If we want valid specs only, then we should exclude
    # all OBSOLETE or SUPERSEDED specs.
    if SpecificationFilter.VALID in filter:
        clauses.append(Not(Specification.definition_status.is_in([
            SpecificationDefinitionStatus.OBSOLETE,
            SpecificationDefinitionStatus.SUPERSEDED])))
    # Any string in the filter is a full-text search constraint.
    for constraint in filter:
        if isinstance(constraint, basestring):
            clauses.append(fti_search(Specification, constraint))
    return clauses
def _make_cache_user_can_view_spec(user):
    """Return a row decorator that marks specs as viewable by ``user``."""
    viewer_id = user.id

    def cache_user_can_view_spec(spec):
        # Record the user as a known viewer so later visibility checks
        # can skip a query; the spec is returned unchanged.
        get_property_cache(spec)._known_viewers = set([viewer_id])
        return spec
    return cache_user_can_view_spec
def get_specification_started_clause():
    """Return the Storm clause matching specs whose work has started."""
    not_started_statuses = [
        SpecificationImplementationStatus.UNKNOWN,
        SpecificationImplementationStatus.NOTSTARTED,
        SpecificationImplementationStatus.DEFERRED,
        SpecificationImplementationStatus.INFORMATIONAL]
    # INFORMATIONAL specs count as started once they are APPROVED.
    informational_and_approved = And(
        Specification.implementation_status ==
        SpecificationImplementationStatus.INFORMATIONAL,
        Specification.definition_status ==
        SpecificationDefinitionStatus.APPROVED)
    return Or(
        Not(Specification.implementation_status.is_in(not_started_statuses)),
        informational_and_approved)
def get_specification_completeness_clause():
    """Return the Storm clause matching 'complete' specifications."""
    implemented = (
        Specification.implementation_status ==
        SpecificationImplementationStatus.IMPLEMENTED)
    obsolete_or_superseded = Specification.definition_status.is_in([
        SpecificationDefinitionStatus.OBSOLETE,
        SpecificationDefinitionStatus.SUPERSEDED,
        ])
    # An approved, informational spec is also considered complete.
    informational_approved = And(
        Specification.implementation_status ==
        SpecificationImplementationStatus.INFORMATIONAL,
        Specification.definition_status ==
        SpecificationDefinitionStatus.APPROVED)
    return Or(implemented, obsolete_or_superseded, informational_approved)
|
DarkPhoenix6/My_Libraries
|
refs/heads/master
|
Python/AES/StateArray.py
|
1
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from future import standard_library
standard_library.install_aliases()
import os
from AES.matrix import Matrix, MISSING
from AES.gen_key_schedule import get_round_keys, set_key
from AES import SubBytesAndMixColumns as SBMC
from Utils import utils
class StateArray(Matrix):
    """A 4x4 AES state with round operations and an experimental hash.

    Subclasses :class:`Matrix`, adding the AES building blocks (SubBytes,
    ShiftRows, MixColumns, AddRoundKey) plus a compression-function hash
    built from the same primitives.
    """
    # Class-level lookup tables shared by every instance.
    MIX_COLUMNS_ARRAY = SBMC.MIX_COLUMNS_ARRAY
    INV_MIX_COLUMNS_ARRAY = SBMC.INV_MIX_COLUMNS_ARRAY
    BYTES_SUB_TABLE = SBMC.BYTES_SUB_TABLE
    BYTES_SUB_TABLE_INV = SBMC.BYTES_SUB_TABLE_INV
    def __init__(self, matrix_list=(0,) * 16, rounds=14, key=MISSING):
        """Initialise the state.

        :param matrix_list: 16 byte values filling the state.  A tuple
            default replaces the previous mutable ``[0] * 16`` default.
        :param rounds: Requested round count; overridden by the key length
            (32/24/16-byte keys force 14/12/10 rounds).
        :param key: Key bytes; a random key of the appropriate length is
            generated when omitted.
        """
        if len(matrix_list) != 16:
            # Wrong-sized input: fall back to an all-zero state.
            Matrix.__init__(self, [0] * 16, 4, 4)
        else:
            Matrix.__init__(self, list(matrix_list), 4, 4)
        self.rounds = rounds
        # Sentinel check uses identity (`is`), not equality: `key` may be
        # arbitrary bytes and must never be compared to the sentinel by
        # value (the previous `== MISSING` only worked by accident).
        if key is MISSING:
            if self.rounds == 14:
                k = 32
            elif self.rounds == 12:
                k = 24
            elif self.rounds == 10:
                k = 16
            else:
                k = 32
            self.key = os.urandom(k)
        else:
            self.key = key
        # The key length is authoritative for the round count.
        if len(self.key) == 32:
            self.rounds = 14
        elif len(self.key) == 24:
            self.rounds = 12
        else:
            self.rounds = 10
        self.round_keys = get_round_keys(self.key)
        self.transpose()
    def add_round_key(self, round_key, transpose=False):
        """XOR (GF add) ``round_key`` into the state, optionally transposed."""
        for i in range(self.rows):
            for j in range(self.columns):
                if transpose:
                    self.state[i][j] = Matrix.gf_add(self.state[i][j], round_key[j][i])
                else:
                    self.state[i][j] = Matrix.gf_add(self.state[i][j], round_key[i][j])
    def add_iv(self, iv):
        """XOR an initialisation vector into the state."""
        self.add_round_key(iv, False)
    def AES_encrypt(self, round_keys_key=MISSING, transpose_round_key=False):
        """Run the full AES round sequence over the state in place.

        ``round_keys_key`` is currently unused -- TODO confirm whether it
        was meant to re-key before encrypting.
        """
        for i in range(0, (self.rounds + 1)):
            # Assemble round key i from four words of the key schedule.
            t = []
            for j in self.round_keys[i * 4:i * 4 + 4]:
                t += j
            round_key = Matrix.generate_matrix(t, 4, 4)
            round_key.transpose()
            if i == 0:
                # Initial whitening round: AddRoundKey only.
                self.add_round_key(round_key, transpose_round_key)
                # self.print_state()
            elif i == self.rounds:
                # Final round omits MixColumns.
                self.subbytes()
                # self.print_state()
                self.shift_rows()
                # self.print_state()
                self.add_round_key(round_key, transpose_round_key)
                # self.print_state()
            else:
                self.subbytes()
                # self.print_state()
                self.shift_rows()
                # self.print_state()
                self.mix_columns()
                # self.print_state()
                self.add_round_key(round_key, transpose_round_key)
                # self.print_state()
    def subbytes(self):
        """Apply the S-box substitution to every state byte."""
        for i in range(self.rows):
            for j in range(self.columns):
                self.state[i][j] = StateArray.BYTES_SUB_TABLE[self.state[i][j]]
    def mix_columns(self):
        """Replace each column with its MixColumns product."""
        s = self.generate_empty_state_array()
        for j in range(4):
            for i in range(4):
                s[i][j] = StateArray.mul_column(self.state, i, j)
        self.state = s
    def generate_empty_state_array(self):
        """Return a fresh rows x columns array of zeros."""
        s = [[0 for x in range(self.columns)] for y in range(self.rows)]
        return s
    def shift_rows(self):
        """Rotate row i left by i positions (negative-index assignment)."""
        for i in range(4):
            self.state[i][0 - i], self.state[i][1 - i], self.state[i][2 - i], self.state[i][3 - i] = self.state[i][0], \
                                                                                                    self.state[i][1], \
                                                                                                    self.state[i][2], \
                                                                                                    self.state[i][3]
    def inv_mix_columns(self):
        """Apply the inverse MixColumns transform."""
        s = self.generate_empty_state_array()
        for j in range(4):
            for i in range(4):
                s[i][j] = StateArray.mul_column(self.state, i, j, StateArray.INV_MIX_COLUMNS_ARRAY)
        self.state = s
    @staticmethod
    def mul_column(state, row, column, mix_columns_array=MIX_COLUMNS_ARRAY):
        """Return the GF(2^8) dot product of a state column with a
        MixColumns matrix row."""
        s0 = StateArray.gf_mul(state[0][column], mix_columns_array.state[row][0])
        s1 = StateArray.gf_mul(state[1][column], mix_columns_array.state[row][1])
        s2 = StateArray.gf_mul(state[2][column], mix_columns_array.state[row][2])
        s3 = StateArray.gf_mul(state[3][column], mix_columns_array.state[row][3])
        s5 = StateArray.gf_add(s0, s1)
        s6 = StateArray.gf_add(s5, s2)
        return StateArray.gf_add(s6, s3)
    def shift_column_one_down(self):
        """Rotate column 1 down by one position."""
        self.state[0][1], self.state[1][1], self.state[2][1], self.state[3][1] = self.state[3][1], self.state[0][1], \
                                                                                 self.state[1][1], self.state[2][1],
    def CF_hash(self, transpose_round_key=False, iv=[[222 for x in range(4)] for y in range(4)]):
        """Run the experimental compression-function hash over the state.

        NOTE(review): ``iv`` is a mutable default; it is only read (via
        add_iv/add_round_key XORs into self.state), so sharing is harmless,
        but callers mutating it would affect later calls.
        """
        for i in range(0, (self.rounds + 1)):
            t = []
            for j in self.round_keys[i * 4:i * 4 + 4]:
                t += j
            round_key = Matrix.generate_matrix(t, 4, 4)
            if i == 0:
                self.add_iv(iv)
                self.subbytes()
                self.mix_columns()
                self.add_round_key(round_key, transpose_round_key)
            elif i == self.rounds:
                self.subbytes()
                self.CF_hash_block_stages()
                self.add_round_key(round_key, transpose_round_key)
            else:
                self.subbytes()
                self.CF_hash_block_stages()
                self.mix_columns()
                self.subbytes()
                self.add_round_key(round_key, transpose_round_key)
    def CF_hash_block_stages(self):
        """Run the three permutation stages used between hash rounds."""
        self.CF_hash_stage_1()
        self.CF_hash_stage_2()
        self.CF_hash_stage_3()
    def CF_hash_stage_3(self):
        self.shift_rows()
        self.rotate_matrix_counterclockwise()
        self.rows_down()
        self.transpose()
        self.shift_column_one_down()
    def CF_hash_stage_2(self):
        self.shift_rows()
        self.rows_down()
    def CF_hash_stage_1(self):
        self.CF_hash_stage_2()
        self.transpose()
def time_it(method_to_run):
    """Crudely benchmark ``method_to_run`` over one million iterations.

    Prints progress every 1000 iterations and the total timings at the end.
    Relies on module-level globals (``a``, ``o`` etc.) set up in
    ``__main__``.
    """
    global key3, b, t1, t2, a, k, p, t3, t4, o
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended high-resolution replacement for benchmarking.
    from time import perf_counter as now
    # b.AES_encrypt()
    t1 = now()
    t3 = now()
    t4 = now()
    k = 32
    p = 1000000
    for i in range(p):
        if i % 1000 == 0:
            t3 = now()
            print(i, a*i, o*i, t3-t1, t3-t4)
            t4 = t3
        # print('\n' + str(i))
        method_to_run()
        round_keys = get_round_keys(os.urandom(k))
    # b.CF_hash_block_stages()
    # b.CF_hash()
    # b.AES_encrypt()
    # b.shift_column_one_down()
    # b.print_state()
    # # print(b.round_keys[0:4])
    # print('\n')
    t2 = now()
    print(p, p*a, p*o, str(t2 - t1), t2-t3)
if __name__ == '__main__':
    # NOTE(review): `global` at module scope is a no-op; these names are
    # already module globals, which is how time_it() reaches them.
    global b, key3, a, o
    key2 = 'hello'
    key2 = set_key(key2, 256)
    key3 = bytes(key2, 'ascii')
    a = 14
    o = 15
    # 14-round state initialised with the bytes 0x01..0x10.
    b = StateArray([0x01, 0x02, 0x03, 0x04,
                    0x05, 0x06, 0x07, 0x08,
                    0x09, 0x0A, 0x0B, 0x0C,
                    0x0D, 0x0E, 0x0F, 0x10], rounds=a, key=key3)
    print(b.__dict__)
    b.print_state()
    time_it(b.AES_encrypt)
    b.print_state()
    # b.rows_down()
    # b.print_state()
    #
    # b.AES_encrypt()
    #
    # print('\n')
    # b.print_state()
    #
    c = StateArray([0x01] * 16, rounds=10, key=key3)
    print('\n')
    # c.print_state()
    #
    # for j in range(10):
    #     c.AES_encrypt()
    #
    # print('\n')
    # c.print_state()
    # The address printouts below show the lookup tables are class-level
    # attributes shared by both instances (identical addresses expected).
    print(utils.memory_address(b.BYTES_SUB_TABLE))
    print(utils.memory_address(c.BYTES_SUB_TABLE))
    print(utils.memory_address(b.BYTES_SUB_TABLE_INV))
    print(utils.memory_address(c.BYTES_SUB_TABLE_INV))
    print(utils.memory_address(b.MIX_COLUMNS_ARRAY))
    print(utils.memory_address(c.MIX_COLUMNS_ARRAY))
    print(utils.memory_address(b.INV_MIX_COLUMNS_ARRAY))
    print(utils.memory_address(c.INV_MIX_COLUMNS_ARRAY))
|
paulproteus/django
|
refs/heads/master
|
tests/modeltests/get_latest/tests.py
|
89
|
from __future__ import absolute_import
from datetime import datetime
from django.test import TestCase
from .models import Article, Person
class LatestTests(TestCase):
    """Exercise ``Manager.latest()`` with and without ``get_latest_by``."""

    def test_latest(self):
        # No Articles yet, so latest() raises the model's DoesNotExist.
        self.assertRaises(Article.DoesNotExist, Article.objects.latest)
        article1 = Article.objects.create(
            headline="Article 1",
            pub_date=datetime(2005, 7, 26),
            expire_date=datetime(2005, 9, 1),
        )
        article2 = Article.objects.create(
            headline="Article 2",
            pub_date=datetime(2005, 7, 27),
            expire_date=datetime(2005, 7, 28),
        )
        article3 = Article.objects.create(
            headline="Article 3",
            pub_date=datetime(2005, 7, 27),
            expire_date=datetime(2005, 8, 27),
        )
        article4 = Article.objects.create(
            headline="Article 4",
            pub_date=datetime(2005, 7, 28),
            expire_date=datetime(2005, 7, 30),
        )
        # With no arguments, latest() uses the model's get_latest_by field.
        self.assertEqual(Article.objects.latest(), article4)
        # latest() respects prior filtering.
        self.assertEqual(
            Article.objects.filter(pub_date__lt=datetime(2005, 7, 27)).latest(),
            article1,
        )
        # An explicit field name overrides get_latest_by.
        self.assertEqual(Article.objects.latest('expire_date'), article1)
        self.assertEqual(
            Article.objects.filter(
                pub_date__gt=datetime(2005, 7, 26)).latest('expire_date'),
            article3,
        )
        # latest() overrides any other ordering on the query. Refs #11283.
        self.assertEqual(Article.objects.order_by('id').latest(), article4)

    def test_latest_manual(self):
        # Models without get_latest_by still work when a field is given...
        ralph = Person.objects.create(
            name="Ralph", birthday=datetime(1950, 1, 1))
        stephanie = Person.objects.create(
            name="Stephanie", birthday=datetime(1960, 2, 3))
        # ...but calling latest() with no field at all is an error.
        self.assertRaises(AssertionError, Person.objects.latest)
        self.assertEqual(Person.objects.latest("birthday"), stephanie)
|
enormandeau/Scripts
|
refs/heads/master
|
fastq_correct.py
|
2
|
#!/usr/bin/env python
"""Add sequence name before quality string if missing.
Usage:
%program <input_file> <output_file>"""
import sys
try:
in_file = sys.argv[1]
out_file = sys.argv[2]
except:
print __doc__
sys.exit(0)
with open(in_file) as f:
with open(out_file, "w") as out_f:
for line in f:
if line.startswith("@"):
out_f.write(line)
temp = line.replace("@", "+")
elif line.startswith("+"):
out_f.write(temp)
else:
out_f.write(line)
|
johnkit/vtk-dev
|
refs/heads/master
|
Rendering/Core/Testing/Python/PickerWithLocator.py
|
21
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
#
# Do picking with a locator to speed things up
#
# renderer and interactor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# read the volume: 93 slices of 64x64 16-bit data from the headsq dataset
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.SetImageRange(1, 93)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT + '/Data/headsq/quarter')
v16.SetDataSpacing(3.2, 3.2, 1.5)
#---------------------------------------------------------
# Do the surface rendering
# Extract the bone isosurface at scalar value 1150.
boneExtractor = vtk.vtkMarchingCubes()
boneExtractor.SetInputConnection(v16.GetOutputPort())
boneExtractor.SetValue(0, 1150)
boneNormals = vtk.vtkPolyDataNormals()
boneNormals.SetInputConnection(boneExtractor.GetOutputPort())
boneNormals.SetFeatureAngle(60.0)
boneStripper = vtk.vtkStripper()
boneStripper.SetInputConnection(boneNormals.GetOutputPort())
boneStripper.SetMaximumLength(5)
# Cell locator used by the picker below; LazyEvaluationOn defers building
# its search structure until it is first needed.
boneLocator = vtk.vtkCellLocator()
boneLocator.SetDataSet(boneStripper.GetOutput())
boneLocator.LazyEvaluationOn()
boneMapper = vtk.vtkPolyDataMapper()
boneMapper.SetInputConnection(boneStripper.GetOutputPort())
boneMapper.ScalarVisibilityOff()
boneProperty = vtk.vtkProperty()
boneProperty.SetColor(1.0, 1.0, 0.9)
bone = vtk.vtkActor()
bone.SetMapper(boneMapper)
bone.SetProperty(boneProperty)
#---------------------------------------------------------
# Create an image actor: a grayscale slice through the volume
table = vtk.vtkLookupTable()
table.SetRange(0, 2000)
table.SetRampToLinear()
table.SetValueRange(0, 1)
table.SetHueRange(0, 0)
table.SetSaturationRange(0, 0)
mapToColors = vtk.vtkImageMapToColors()
mapToColors.SetInputConnection(v16.GetOutputPort())
mapToColors.SetLookupTable(table)
imageActor = vtk.vtkImageActor()
imageActor.GetMapper().SetInputConnection(mapToColors.GetOutputPort())
# Single sagittal slice at x index 32.
imageActor.SetDisplayExtent(32, 32, 0, 63, 0, 92)
#---------------------------------------------------------
# make a clipping plane that cuts the bone surface at (cx, cy, cz)
cx = 100.8
cy = 100.8
cz = 69.0
boneClip = vtk.vtkPlane()
boneClip.SetNormal(0, 1, 0)
boneClip.SetOrigin(cx, cy, cz)
boneMapper.AddClippingPlane(boneClip)
#---------------------------------------------------------
ren.AddViewProp(bone)
ren.AddViewProp(imageActor)
camera = ren.GetActiveCamera()
camera.SetFocalPoint(cx, cy, cz)
camera.SetPosition(cx + 400, cy + 100, cz - 100)
camera.SetViewUp(0, 0, -1)
ren.ResetCameraClippingRange()
renWin.Render()
#---------------------------------------------------------
# the cone should point along the Z axis
# (cone geometry used as a pick marker; apex at the origin)
coneSource = vtk.vtkConeSource()
coneSource.CappingOn()
coneSource.SetHeight(12)
coneSource.SetRadius(5)
coneSource.SetResolution(31)
coneSource.SetCenter(6, 0, 0)
coneSource.SetDirection(-1, 0, 0)
#---------------------------------------------------------
# Cell picker accelerated by the bone locator built above.
picker = vtk.vtkCellPicker()
picker.SetTolerance(1e-6)
picker.AddLocator(boneLocator)
def PointCone(actor, n):
    """Rotate ``actor`` so the cone marker points along the vector ``n``.

    Uses a 180-degree rotation about the averaged axis; vectors in the -X
    half-space get an extra flip about Y first to keep the axis non-zero.
    """
    nx, ny, nz = n[0], n[1], n[2]
    if nx < 0.0:
        actor.RotateWXYZ(180, 0, 1, 0)
        actor.RotateWXYZ(180, (nx - 1.0) * 0.5, ny * 0.5, nz * 0.5)
    else:
        actor.RotateWXYZ(180, (nx + 1.0) * 0.5, ny * 0.5, nz * 0.5)
# Pick the actor (the bone surface) at display coordinates (70, 120).
picker.Pick(70, 120, 0, ren)
#print picker
p = picker.GetPickPosition()
n = picker.GetPickNormal()
# Red cone marking the first pick point, oriented along the pick normal.
coneActor1 = vtk.vtkActor()
coneActor1.PickableOff()
coneMapper1 = vtk.vtkDataSetMapper()
coneMapper1.SetInputConnection(coneSource.GetOutputPort())
coneActor1.SetMapper(coneMapper1)
coneActor1.GetProperty().SetColor(1, 0, 0)
coneActor1.SetPosition(p)
PointCone(coneActor1, n)
ren.AddViewProp(coneActor1)
# Pick the image (the slice actor) at (170, 220).
picker.Pick(170, 220, 0, ren)
#print picker
p = picker.GetPickPosition()
n = picker.GetPickNormal()
coneActor2 = vtk.vtkActor()
coneActor2.PickableOff()
coneMapper2 = vtk.vtkDataSetMapper()
coneMapper2.SetInputConnection(coneSource.GetOutputPort())
coneActor2.SetMapper(coneMapper2)
coneActor2.GetProperty().SetColor(1, 0, 0)
coneActor2.SetPosition(p)
PointCone(coneActor2, n)
ren.AddViewProp(coneActor2)
# Pick the actor again, in a way that the ray gets clipped
# (exercises picking through the clipping plane added earlier).
picker.Pick(180, 220, 0, ren)
#print picker
p = picker.GetPickPosition()
n = picker.GetPickNormal()
coneActor3 = vtk.vtkActor()
coneActor3.PickableOff()
coneMapper3 = vtk.vtkDataSetMapper()
coneMapper3.SetInputConnection(coneSource.GetOutputPort())
coneActor3.SetMapper(coneMapper3)
coneActor3.GetProperty().SetColor(1, 0, 0)
coneActor3.SetPosition(p)
PointCone(coneActor3, n)
ren.AddViewProp(coneActor3)
ren.ResetCameraClippingRange()
renWin.Render()
#---------------------------------------------------------
# test-related code
def TkCheckAbort (__vtk__temp0=0, __vtk__temp1=0):
    # Abort the current render when another event is already pending.
    foo = renWin.GetEventPending()
    if (foo != 0):
        renWin.SetAbortRender(1)
    pass
renWin.AddObserver("AbortCheckEvent", TkCheckAbort)
|
fertozudo/umatoo
|
refs/heads/master
|
lib/django/core/handlers/wsgi.py
|
339
|
from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
# Module-level logger used by the WSGI handler code in this file.
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
    '''
    Wrap a stream so that at most ``limit`` bytes can ever be read from it.

    Bytes pulled from the underlying stream but not yet returned are kept
    in ``self.buffer`` (needed by ``readline``).
    '''
    def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
        self.stream = stream
        self.remaining = limit
        self.buffer = b''
        self.buf_size = buf_size
    def _read_limited(self, size=None):
        """Read up to ``size`` bytes, never exceeding the remaining budget."""
        budget = self.remaining
        if size is None or size > budget:
            size = budget
        if size == 0:
            return b''
        data = self.stream.read(size)
        self.remaining -= len(data)
        return data
    def read(self, size=None):
        """Read ``size`` bytes, or everything left when ``size`` is None."""
        if size is None:
            data = self.buffer + self._read_limited()
            self.buffer = b''
            return data
        if size < len(self.buffer):
            # Serve entirely from the buffered bytes.
            data, self.buffer = self.buffer[:size], self.buffer[size:]
            return data
        # size >= len(self.buffer): drain the buffer, then the stream.
        shortfall = size - len(self.buffer)
        data = self.buffer + self._read_limited(shortfall)
        self.buffer = b''
        return data
    def readline(self, size=None):
        """Read one line, optionally capped at ``size`` bytes."""
        # Pull chunks until the buffer holds a newline or enough bytes to
        # satisfy ``size``.
        while b'\n' not in self.buffer and \
                (size is None or len(self.buffer) < size):
            if size:
                # len(self.buffer) < size here, so the argument is positive.
                chunk = self._read_limited(size - len(self.buffer))
            else:
                chunk = self._read_limited()
            if not chunk:
                break
            self.buffer += chunk
        sio = BytesIO(self.buffer)
        line = sio.readline(size) if size else sio.readline()
        # Whatever follows the line stays buffered for the next call.
        self.buffer = sio.read()
        return line
class WSGIRequest(http.HttpRequest):
    """HttpRequest subclass populated from a WSGI environ dictionary."""
    def __init__(self, environ):
        script_name = get_script_name(environ)
        path_info = get_path_info(environ)
        if not path_info:
            # Sometimes PATH_INFO exists, but is empty (e.g. accessing
            # the SCRIPT_NAME URL without a trailing slash). We really need to
            # operate as if they'd requested '/'. Not amazingly nice to force
            # the path like this, but should be harmless.
            path_info = '/'
        self.environ = environ
        self.path_info = path_info
        # be careful to only replace the first slash in the path because of
        # http://test/something and http://test//something being different as
        # stated in http://www.ietf.org/rfc/rfc2396.txt
        self.path = '%s/%s' % (script_name.rstrip('/'),
                               path_info.replace('/', '', 1))
        self.META = environ
        self.META['PATH_INFO'] = path_info
        self.META['SCRIPT_NAME'] = script_name
        self.method = environ['REQUEST_METHOD'].upper()
        # Pull an explicit request charset (if any) out of the Content-Type
        # header; only accept it when Python actually knows the codec.
        _, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
        if 'charset' in content_params:
            try:
                codecs.lookup(content_params['charset'])
            except LookupError:
                pass
            else:
                self.encoding = content_params['charset']
        self._post_parse_error = False
        # A missing or malformed Content-Length is treated as 0, so the
        # LimitedStream below will never read a request body.
        try:
            content_length = int(environ.get('CONTENT_LENGTH'))
        except (ValueError, TypeError):
            content_length = 0
        self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
        self._read_started = False
        self.resolver_match = None
    def _get_scheme(self):
        # 'http' or 'https' as reported by the WSGI server.
        return self.environ.get('wsgi.url_scheme')
    @cached_property
    def GET(self):
        # The WSGI spec says 'QUERY_STRING' may be absent.
        raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
        return http.QueryDict(raw_query_string, encoding=self._encoding)
    def _get_post(self):
        # POST data is parsed lazily on first access.
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post
    def _set_post(self, post):
        self._post = post
    @cached_property
    def COOKIES(self):
        raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
        return http.parse_cookie(raw_cookie)
    def _get_files(self):
        # Uploaded files are parsed together with POST on first access.
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files
    POST = property(_get_post, _set_post)
    FILES = property(_get_files)
class WSGIHandler(base.BaseHandler):
    """WSGI application entry point: turns an environ into an HttpResponse."""
    # Guards the one-time middleware initialization across concurrent requests.
    initLock = Lock()
    request_class = WSGIRequest
    def __call__(self, environ, start_response):
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            with self.initLock:
                try:
                    # Check that middleware is still uninitialized.
                    if self._request_middleware is None:
                        self.load_middleware()
                except:
                    # Unload whatever middleware we got
                    self._request_middleware = None
                    raise
        set_script_prefix(get_script_name(environ))
        signals.request_started.send(sender=self.__class__, environ=environ)
        try:
            request = self.request_class(environ)
        except UnicodeDecodeError:
            # Request data that cannot be decoded gets a 400 instead of a
            # traceback bubbling up to the server.
            logger.warning('Bad Request (UnicodeDecodeError)',
                exc_info=sys.exc_info(),
                extra={
                    'status_code': 400,
                }
            )
            response = http.HttpResponseBadRequest()
        else:
            response = self.get_response(request)
        response._handler_class = self.__class__
        status = '%s %s' % (response.status_code, response.reason_phrase)
        # Per the WSGI spec, header names and values must be native strings.
        response_headers = [(str(k), str(v)) for k, v in response.items()]
        for c in response.cookies.values():
            response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
        start_response(force_str(status), response_headers)
        if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'):
            # Let the server stream file responses efficiently (e.g. sendfile).
            response = environ['wsgi.file_wrapper'](response.file_to_stream)
        return response
def get_path_info(environ):
    """Return the HTTP request's PATH_INFO as a unicode string."""
    raw_path = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
    return raw_path.decode(UTF_8)
def get_script_name(environ):
    """
    Return the equivalent of the HTTP request's SCRIPT_NAME environment
    variable.

    If Apache mod_rewrite has been used, this is what the script name would
    have been prior to any rewriting (i.e. the script name as seen from the
    client's perspective), unless the FORCE_SCRIPT_NAME setting is set (to
    anything), in which case that value wins.
    """
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_text(settings.FORCE_SCRIPT_NAME)
    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    rewritten_url = (get_bytes_from_wsgi(environ, 'SCRIPT_URL', '') or
                     get_bytes_from_wsgi(environ, 'REDIRECT_URL', ''))
    if rewritten_url:
        # Strip PATH_INFO off the end to recover just the script portion.
        trailing = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
        script_name = rewritten_url[:-len(trailing)] if trailing else rewritten_url
    else:
        script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
    return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as bytes.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    value = environ.get(str(key), str(default))
    if not six.PY3:
        return value
    # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
    # decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
    # is the default. Re-encode to recover the original bytestring.
    return value.encode(ISO_8859_1)
def get_str_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as str.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    raw_value = get_bytes_from_wsgi(environ, key, default)
    if not six.PY3:
        return raw_value
    # Decode back to text, replacing any bytes that are not valid UTF-8.
    return raw_value.decode(UTF_8, errors='replace')
|
vickenty/ookoobah
|
refs/heads/master
|
pyglet-c9188efc2e30/pyglet/libs/x11/xf86vmode.py
|
46
|
'''Wrapper for Xxf86vm
Generated with:
tools/genwrappers.py xf86vmode
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
import pyglet.lib
# Load the shared library implementing the XFree86 video-mode extension.
_lib = pyglet.lib.load_library('Xxf86vm')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
    # Some builds of ctypes apparently do not have c_int64
    # defined; it's a pretty good bet that these builds do not
    # have 64-bit pointers.
    _int_types += (ctypes.c_int64,)
for t in _int_types:
    # Pick an integer type the same width as size_t to stand in for the
    # C ptrdiff_t type (the last matching candidate wins).
    if sizeof(t) == sizeof(c_size_t):
        c_ptrdiff_t = t
class c_void(Structure):
    # c_void_p is a buggy return type, converting to int, so
    # POINTER(None) == c_void_p is actually written as
    # POINTER(c_void), so it can be treated as a real pointer.
    _fields_ = [('dummy', c_int)]
import pyglet.libs.x11.xlib
# Minor protocol opcodes for XF86VidMode extension requests (generated from
# the referenced header lines; do not edit by hand).
X_XF86VidModeQueryVersion = 0 # /usr/include/X11/extensions/xf86vmode.h:4885
X_XF86VidModeGetModeLine = 1 # /usr/include/X11/extensions/xf86vmode.h:4886
X_XF86VidModeModModeLine = 2 # /usr/include/X11/extensions/xf86vmode.h:4887
X_XF86VidModeSwitchMode = 3 # /usr/include/X11/extensions/xf86vmode.h:4888
X_XF86VidModeGetMonitor = 4 # /usr/include/X11/extensions/xf86vmode.h:4889
X_XF86VidModeLockModeSwitch = 5 # /usr/include/X11/extensions/xf86vmode.h:4890
X_XF86VidModeGetAllModeLines = 6 # /usr/include/X11/extensions/xf86vmode.h:4891
X_XF86VidModeAddModeLine = 7 # /usr/include/X11/extensions/xf86vmode.h:4892
X_XF86VidModeDeleteModeLine = 8 # /usr/include/X11/extensions/xf86vmode.h:4893
X_XF86VidModeValidateModeLine = 9 # /usr/include/X11/extensions/xf86vmode.h:4894
X_XF86VidModeSwitchToMode = 10 # /usr/include/X11/extensions/xf86vmode.h:4895
X_XF86VidModeGetViewPort = 11 # /usr/include/X11/extensions/xf86vmode.h:4896
X_XF86VidModeSetViewPort = 12 # /usr/include/X11/extensions/xf86vmode.h:4897
X_XF86VidModeGetDotClocks = 13 # /usr/include/X11/extensions/xf86vmode.h:4899
X_XF86VidModeSetClientVersion = 14 # /usr/include/X11/extensions/xf86vmode.h:4900
X_XF86VidModeSetGamma = 15 # /usr/include/X11/extensions/xf86vmode.h:4901
X_XF86VidModeGetGamma = 16 # /usr/include/X11/extensions/xf86vmode.h:4902
X_XF86VidModeGetGammaRamp = 17 # /usr/include/X11/extensions/xf86vmode.h:4903
X_XF86VidModeSetGammaRamp = 18 # /usr/include/X11/extensions/xf86vmode.h:4904
X_XF86VidModeGetGammaRampSize = 19 # /usr/include/X11/extensions/xf86vmode.h:4905
X_XF86VidModeGetPermissions = 20 # /usr/include/X11/extensions/xf86vmode.h:4906
CLKFLAG_PROGRAMABLE = 1 # /usr/include/X11/extensions/xf86vmode.h:4908
XF86VidModeNumberEvents = 0 # /usr/include/X11/extensions/xf86vmode.h:4919
# Error codes that the extension can report.
XF86VidModeBadClock = 0 # /usr/include/X11/extensions/xf86vmode.h:4922
XF86VidModeBadHTimings = 1 # /usr/include/X11/extensions/xf86vmode.h:4923
XF86VidModeBadVTimings = 2 # /usr/include/X11/extensions/xf86vmode.h:4924
XF86VidModeModeUnsuitable = 3 # /usr/include/X11/extensions/xf86vmode.h:4925
XF86VidModeExtensionDisabled = 4 # /usr/include/X11/extensions/xf86vmode.h:4926
XF86VidModeClientNotLocal = 5 # /usr/include/X11/extensions/xf86vmode.h:4927
XF86VidModeZoomLocked = 6 # /usr/include/X11/extensions/xf86vmode.h:4928
XF86VidModeNumberErrors = 7 # /usr/include/X11/extensions/xf86vmode.h:4929
# Permission bits (see XF86VidModeGetPermissions below).
XF86VM_READ_PERMISSION = 1 # /usr/include/X11/extensions/xf86vmode.h:4931
XF86VM_WRITE_PERMISSION = 2 # /usr/include/X11/extensions/xf86vmode.h:4932
# ctypes mirrors of the C structs in xf86vmode.h. The generator splits each
# into a class with __slots__ plus a later _fields_ assignment, then aliases
# the anonymous struct to its public name.
class struct_anon_93(Structure):
    __slots__ = [
        'hdisplay',
        'hsyncstart',
        'hsyncend',
        'htotal',
        'hskew',
        'vdisplay',
        'vsyncstart',
        'vsyncend',
        'vtotal',
        'flags',
        'privsize',
        'private',
    ]
INT32 = c_int # /usr/include/X11/Xmd.h:135
struct_anon_93._fields_ = [
    ('hdisplay', c_ushort),
    ('hsyncstart', c_ushort),
    ('hsyncend', c_ushort),
    ('htotal', c_ushort),
    ('hskew', c_ushort),
    ('vdisplay', c_ushort),
    ('vsyncstart', c_ushort),
    ('vsyncend', c_ushort),
    ('vtotal', c_ushort),
    ('flags', c_uint),
    ('privsize', c_int),
    ('private', POINTER(INT32)),
]
# Horizontal/vertical timing description of a video mode (no dot clock).
XF86VidModeModeLine = struct_anon_93 # /usr/include/X11/extensions/xf86vmode.h:4954
class struct_anon_94(Structure):
    __slots__ = [
        'dotclock',
        'hdisplay',
        'hsyncstart',
        'hsyncend',
        'htotal',
        'hskew',
        'vdisplay',
        'vsyncstart',
        'vsyncend',
        'vtotal',
        'flags',
        'privsize',
        'private',
    ]
struct_anon_94._fields_ = [
    ('dotclock', c_uint),
    ('hdisplay', c_ushort),
    ('hsyncstart', c_ushort),
    ('hsyncend', c_ushort),
    ('htotal', c_ushort),
    ('hskew', c_ushort),
    ('vdisplay', c_ushort),
    ('vsyncstart', c_ushort),
    ('vsyncend', c_ushort),
    ('vtotal', c_ushort),
    ('flags', c_uint),
    ('privsize', c_int),
    ('private', POINTER(INT32)),
]
# Like XF86VidModeModeLine, but includes the pixel (dot) clock.
XF86VidModeModeInfo = struct_anon_94 # /usr/include/X11/extensions/xf86vmode.h:4975
class struct_anon_95(Structure):
    __slots__ = [
        'hi',
        'lo',
    ]
struct_anon_95._fields_ = [
    ('hi', c_float),
    ('lo', c_float),
]
# Inclusive low/high bounds of a monitor sync-frequency range.
XF86VidModeSyncRange = struct_anon_95 # /usr/include/X11/extensions/xf86vmode.h:4980
class struct_anon_96(Structure):
    __slots__ = [
        'vendor',
        'model',
        'EMPTY',
        'nhsync',
        'hsync',
        'nvsync',
        'vsync',
    ]
struct_anon_96._fields_ = [
    ('vendor', c_char_p),
    ('model', c_char_p),
    ('EMPTY', c_float),
    ('nhsync', c_ubyte),
    ('hsync', POINTER(XF86VidModeSyncRange)),
    ('nvsync', c_ubyte),
    ('vsync', POINTER(XF86VidModeSyncRange)),
]
# Monitor description: vendor/model plus supported sync ranges.
XF86VidModeMonitor = struct_anon_96 # /usr/include/X11/extensions/xf86vmode.h:4990
class struct_anon_97(Structure):
    __slots__ = [
        'type',
        'serial',
        'send_event',
        'display',
        'root',
        'state',
        'kind',
        'forced',
        'time',
    ]
Display = pyglet.libs.x11.xlib.Display
Window = pyglet.libs.x11.xlib.Window
Time = pyglet.libs.x11.xlib.Time
struct_anon_97._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('root', Window),
    ('state', c_int),
    ('kind', c_int),
    ('forced', c_int),
    ('time', Time),
]
# X event delivered when the video mode changes.
XF86VidModeNotifyEvent = struct_anon_97 # /usr/include/X11/extensions/xf86vmode.h:5002
class struct_anon_98(Structure):
    __slots__ = [
        'red',
        'green',
        'blue',
    ]
struct_anon_98._fields_ = [
    ('red', c_float),
    ('green', c_float),
    ('blue', c_float),
]
# Per-channel gamma correction values.
XF86VidModeGamma = struct_anon_98 # /usr/include/X11/extensions/xf86vmode.h:5008
# ctypes prototypes for the Xxf86vm entry points. Each restype/argtypes pair
# mirrors the C prototype at the referenced header line; all calls return
# a Bool (nonzero on success) as c_int.
# /usr/include/X11/extensions/xf86vmode.h:5018
XF86VidModeQueryVersion = _lib.XF86VidModeQueryVersion
XF86VidModeQueryVersion.restype = c_int
XF86VidModeQueryVersion.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5024
XF86VidModeQueryExtension = _lib.XF86VidModeQueryExtension
XF86VidModeQueryExtension.restype = c_int
XF86VidModeQueryExtension.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5030
XF86VidModeSetClientVersion = _lib.XF86VidModeSetClientVersion
XF86VidModeSetClientVersion.restype = c_int
XF86VidModeSetClientVersion.argtypes = [POINTER(Display)]
# /usr/include/X11/extensions/xf86vmode.h:5034
XF86VidModeGetModeLine = _lib.XF86VidModeGetModeLine
XF86VidModeGetModeLine.restype = c_int
XF86VidModeGetModeLine.argtypes = [POINTER(Display), c_int, POINTER(c_int), POINTER(XF86VidModeModeLine)]
# /usr/include/X11/extensions/xf86vmode.h:5041
XF86VidModeGetAllModeLines = _lib.XF86VidModeGetAllModeLines
XF86VidModeGetAllModeLines.restype = c_int
XF86VidModeGetAllModeLines.argtypes = [POINTER(Display), c_int, POINTER(c_int), POINTER(POINTER(POINTER(XF86VidModeModeInfo)))]
# /usr/include/X11/extensions/xf86vmode.h:5048
XF86VidModeAddModeLine = _lib.XF86VidModeAddModeLine
XF86VidModeAddModeLine.restype = c_int
XF86VidModeAddModeLine.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeInfo), POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5055
XF86VidModeDeleteModeLine = _lib.XF86VidModeDeleteModeLine
XF86VidModeDeleteModeLine.restype = c_int
XF86VidModeDeleteModeLine.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5061
XF86VidModeModModeLine = _lib.XF86VidModeModModeLine
XF86VidModeModModeLine.restype = c_int
XF86VidModeModModeLine.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeLine)]
# /usr/include/X11/extensions/xf86vmode.h:5067
XF86VidModeValidateModeLine = _lib.XF86VidModeValidateModeLine
XF86VidModeValidateModeLine.restype = c_int
XF86VidModeValidateModeLine.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5073
XF86VidModeSwitchMode = _lib.XF86VidModeSwitchMode
XF86VidModeSwitchMode.restype = c_int
XF86VidModeSwitchMode.argtypes = [POINTER(Display), c_int, c_int]
# /usr/include/X11/extensions/xf86vmode.h:5079
XF86VidModeSwitchToMode = _lib.XF86VidModeSwitchToMode
XF86VidModeSwitchToMode.restype = c_int
XF86VidModeSwitchToMode.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5085
XF86VidModeLockModeSwitch = _lib.XF86VidModeLockModeSwitch
XF86VidModeLockModeSwitch.restype = c_int
XF86VidModeLockModeSwitch.argtypes = [POINTER(Display), c_int, c_int]
# /usr/include/X11/extensions/xf86vmode.h:5091
XF86VidModeGetMonitor = _lib.XF86VidModeGetMonitor
XF86VidModeGetMonitor.restype = c_int
XF86VidModeGetMonitor.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeMonitor)]
# /usr/include/X11/extensions/xf86vmode.h:5097
XF86VidModeGetViewPort = _lib.XF86VidModeGetViewPort
XF86VidModeGetViewPort.restype = c_int
XF86VidModeGetViewPort.argtypes = [POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5104
XF86VidModeSetViewPort = _lib.XF86VidModeSetViewPort
XF86VidModeSetViewPort.restype = c_int
XF86VidModeSetViewPort.argtypes = [POINTER(Display), c_int, c_int, c_int]
# /usr/include/X11/extensions/xf86vmode.h:5111
XF86VidModeGetDotClocks = _lib.XF86VidModeGetDotClocks
XF86VidModeGetDotClocks.restype = c_int
XF86VidModeGetDotClocks.argtypes = [POINTER(Display), c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(POINTER(c_int))]
# /usr/include/X11/extensions/xf86vmode.h:5120
XF86VidModeGetGamma = _lib.XF86VidModeGetGamma
XF86VidModeGetGamma.restype = c_int
XF86VidModeGetGamma.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeGamma)]
# /usr/include/X11/extensions/xf86vmode.h:5126
XF86VidModeSetGamma = _lib.XF86VidModeSetGamma
XF86VidModeSetGamma.restype = c_int
XF86VidModeSetGamma.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeGamma)]
# /usr/include/X11/extensions/xf86vmode.h:5132
XF86VidModeSetGammaRamp = _lib.XF86VidModeSetGammaRamp
XF86VidModeSetGammaRamp.restype = c_int
XF86VidModeSetGammaRamp.argtypes = [POINTER(Display), c_int, c_int, POINTER(c_ushort), POINTER(c_ushort), POINTER(c_ushort)]
# /usr/include/X11/extensions/xf86vmode.h:5141
XF86VidModeGetGammaRamp = _lib.XF86VidModeGetGammaRamp
XF86VidModeGetGammaRamp.restype = c_int
XF86VidModeGetGammaRamp.argtypes = [POINTER(Display), c_int, c_int, POINTER(c_ushort), POINTER(c_ushort), POINTER(c_ushort)]
# /usr/include/X11/extensions/xf86vmode.h:5150
XF86VidModeGetGammaRampSize = _lib.XF86VidModeGetGammaRampSize
XF86VidModeGetGammaRampSize.restype = c_int
XF86VidModeGetGammaRampSize.argtypes = [POINTER(Display), c_int, POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5156
XF86VidModeGetPermissions = _lib.XF86VidModeGetPermissions
XF86VidModeGetPermissions.restype = c_int
XF86VidModeGetPermissions.argtypes = [POINTER(Display), c_int, POINTER(c_int)]
# Public API of this generated wrapper module.
__all__ = ['X_XF86VidModeQueryVersion', 'X_XF86VidModeGetModeLine',
'X_XF86VidModeModModeLine', 'X_XF86VidModeSwitchMode',
'X_XF86VidModeGetMonitor', 'X_XF86VidModeLockModeSwitch',
'X_XF86VidModeGetAllModeLines', 'X_XF86VidModeAddModeLine',
'X_XF86VidModeDeleteModeLine', 'X_XF86VidModeValidateModeLine',
'X_XF86VidModeSwitchToMode', 'X_XF86VidModeGetViewPort',
'X_XF86VidModeSetViewPort', 'X_XF86VidModeGetDotClocks',
'X_XF86VidModeSetClientVersion', 'X_XF86VidModeSetGamma',
'X_XF86VidModeGetGamma', 'X_XF86VidModeGetGammaRamp',
'X_XF86VidModeSetGammaRamp', 'X_XF86VidModeGetGammaRampSize',
'X_XF86VidModeGetPermissions', 'CLKFLAG_PROGRAMABLE',
'XF86VidModeNumberEvents', 'XF86VidModeBadClock', 'XF86VidModeBadHTimings',
'XF86VidModeBadVTimings', 'XF86VidModeModeUnsuitable',
'XF86VidModeExtensionDisabled', 'XF86VidModeClientNotLocal',
'XF86VidModeZoomLocked', 'XF86VidModeNumberErrors', 'XF86VM_READ_PERMISSION',
'XF86VM_WRITE_PERMISSION', 'XF86VidModeModeLine', 'XF86VidModeModeInfo',
'XF86VidModeSyncRange', 'XF86VidModeMonitor', 'XF86VidModeNotifyEvent',
'XF86VidModeGamma', 'XF86VidModeQueryVersion', 'XF86VidModeQueryExtension',
'XF86VidModeSetClientVersion', 'XF86VidModeGetModeLine',
'XF86VidModeGetAllModeLines', 'XF86VidModeAddModeLine',
'XF86VidModeDeleteModeLine', 'XF86VidModeModModeLine',
'XF86VidModeValidateModeLine', 'XF86VidModeSwitchMode',
'XF86VidModeSwitchToMode', 'XF86VidModeLockModeSwitch',
'XF86VidModeGetMonitor', 'XF86VidModeGetViewPort', 'XF86VidModeSetViewPort',
'XF86VidModeGetDotClocks', 'XF86VidModeGetGamma', 'XF86VidModeSetGamma',
'XF86VidModeSetGammaRamp', 'XF86VidModeGetGammaRamp',
'XF86VidModeGetGammaRampSize', 'XF86VidModeGetPermissions']
|
kbdick/RecycleTracker
|
refs/heads/gh-pages
|
recyclecollector/scrap/gdata-2.0.18/build/lib.linux-x86_64-2.7/gdata/tlslite/utils/PyCrypto_RSAKey.py
|
361
|
"""PyCrypto RSA implementation."""
from cryptomath import *
from RSAKey import *
from Python_RSAKey import Python_RSAKey
if pycryptoLoaded:
    from Crypto.PublicKey import RSA
    class PyCrypto_RSAKey(RSAKey):
        """RSAKey backed by PyCrypto's RSA implementation.

        Only defined when PyCrypto is importable (``pycryptoLoaded``).
        """
        def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
            # Without a private exponent only (n, e) are passed; otherwise a
            # full private key is built from (n, e, d, p, q). Note that the
            # dP/dQ/qInv parameters are accepted but not forwarded here.
            if not d:
                self.rsa = RSA.construct( (n, e) )
            else:
                self.rsa = RSA.construct( (n, e, d, p, q) )
        def __getattr__(self, name):
            # Delegate key components (n, e, d, ...) to the wrapped object.
            return getattr(self.rsa, name)
        def hasPrivateKey(self):
            return self.rsa.has_private()
        def hash(self):
            # Reuse the pure-Python key's hashing so both backends agree.
            return Python_RSAKey(self.n, self.e).hash()
        def _rawPrivateKeyOp(self, m):
            # Raw (textbook) RSA private-key operation on integer m.
            s = numberToString(m)
            byteLength = numBytes(self.n)
            # Left-pad to the modulus width; anything else is a logic error.
            if len(s)== byteLength:
                pass
            elif len(s) == byteLength-1:
                s = '\0' + s
            else:
                raise AssertionError()
            # PyCrypto's decrypt() takes a tuple of ciphertext parts.
            c = stringToNumber(self.rsa.decrypt((s,)))
            return c
        def _rawPublicKeyOp(self, c):
            # Raw (textbook) RSA public-key operation on integer c.
            s = numberToString(c)
            byteLength = numBytes(self.n)
            # Left-pad to the modulus width; anything else is a logic error.
            if len(s)== byteLength:
                pass
            elif len(s) == byteLength-1:
                s = '\0' + s
            else:
                raise AssertionError()
            m = stringToNumber(self.rsa.encrypt(s, None)[0])
            return m
        def writeXMLPublicKey(self, indent=''):
            # Serialization is delegated to the pure-Python implementation.
            return Python_RSAKey(self.n, self.e).write(indent)
        def generate(bits):
            """Generate a fresh key pair of the given modulus size."""
            key = PyCrypto_RSAKey()
            def f(numBytes):
                # Random-byte callback in the format PyCrypto expects.
                return bytesToString(getRandomBytes(numBytes))
            key.rsa = RSA.generate(bits, f)
            return key
        # Pre-decorator-syntax staticmethod registration.
        generate = staticmethod(generate)
|
swannapa/erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/leave_allocation/leave_allocation.py
|
33
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, date_diff, formatdate
from frappe import _
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
from erpnext.hr.doctype.leave_application.leave_application import get_approved_leaves_for_period
# Domain-specific validation errors raised by the leave-allocation checks
# below; all derive from frappe.ValidationError so callers can catch either
# the specific failure or any validation failure.
class OverlapError(frappe.ValidationError): pass
class BackDatedAllocationError(frappe.ValidationError): pass
class OverAllocationError(frappe.ValidationError): pass
class LessAllocationError(frappe.ValidationError): pass
class ValueMultiplierError(frappe.ValidationError): pass
class LeaveAllocation(Document):
	"""Allocate leaves of one leave type to an employee for a date period,
	optionally carrying forward the unused balance of the previous
	allocation."""
	def validate(self):
		# Order matters: set_total_leaves_allocated() computes the value
		# that the later validations inspect.
		self.validate_period()
		self.validate_new_leaves_allocated_value()
		self.validate_allocation_overlap()
		self.validate_back_dated_allocation()
		self.set_total_leaves_allocated()
		self.validate_total_leaves_allocated()
		self.validate_lwp()
		set_employee_name(self)
	def on_update_after_submit(self):
		# The allocation may be amended after submission: recompute the
		# totals, persist them directly, then re-check approved leaves.
		self.validate_new_leaves_allocated_value()
		self.set_total_leaves_allocated()
		frappe.db.set(self,'carry_forwarded_leaves', flt(self.carry_forwarded_leaves))
		frappe.db.set(self,'total_leaves_allocated',flt(self.total_leaves_allocated))
		self.validate_against_leave_applications()
	def validate_period(self):
		# Zero-length or inverted periods are rejected.
		if date_diff(self.to_date, self.from_date) <= 0:
			frappe.throw(_("To date cannot be before from date"))
	def validate_lwp(self):
		# Leave-without-pay types are never allocated.
		if frappe.db.get_value("Leave Type", self.leave_type, "is_lwp"):
			frappe.throw(_("Leave Type {0} cannot be allocated since it is leave without pay").format(self.leave_type))
	def validate_new_leaves_allocated_value(self):
		"""validate that leave allocation is in multiples of 0.5"""
		if flt(self.new_leaves_allocated) % 0.5:
			frappe.throw(_("Leaves must be allocated in multiples of 0.5"), ValueMultiplierError)
	def validate_allocation_overlap(self):
		# Reject a second submitted allocation of the same leave type whose
		# period intersects this one.
		leave_allocation = frappe.db.sql("""
			select name from `tabLeave Allocation`
			where employee=%s and leave_type=%s and docstatus=1
			and to_date >= %s and from_date <= %s""",
			(self.employee, self.leave_type, self.from_date, self.to_date))
		if leave_allocation:
			frappe.msgprint(_("{0} already allocated for Employee {1} for period {2} to {3}")
				.format(self.leave_type, self.employee, formatdate(self.from_date), formatdate(self.to_date)))
			frappe.throw(_('Reference') + ': <a href="#Form/Leave Allocation/{0}">{0}</a>'
				.format(leave_allocation[0][0]), OverlapError)
	def validate_back_dated_allocation(self):
		# A later carry-forward allocation may already have consumed this
		# period's balance; allocating before it would double-count leaves.
		future_allocation = frappe.db.sql("""select name, from_date from `tabLeave Allocation`
			where employee=%s and leave_type=%s and docstatus=1 and from_date > %s
			and carry_forward=1""", (self.employee, self.leave_type, self.to_date), as_dict=1)
		if future_allocation:
			frappe.throw(_("Leave cannot be allocated before {0}, as leave balance has already been carry-forwarded in the future leave allocation record {1}")
				.format(formatdate(future_allocation[0].from_date), future_allocation[0].name),
				BackDatedAllocationError)
	def set_total_leaves_allocated(self):
		# total = carried-forward balance (0 unless carry_forward is set)
		# plus the newly allocated leaves.
		self.carry_forwarded_leaves = get_carry_forwarded_leaves(self.employee,
			self.leave_type, self.from_date, self.carry_forward)
		self.total_leaves_allocated = flt(self.carry_forwarded_leaves) + flt(self.new_leaves_allocated)
		if not self.total_leaves_allocated:
			frappe.throw(_("Total leaves allocated is mandatory"))
	def validate_total_leaves_allocated(self):
		# Adding a day to include To Date in the difference
		date_difference = date_diff(self.to_date, self.from_date) + 1
		if date_difference < self.total_leaves_allocated:
			frappe.throw(_("Total allocated leaves are more than days in the period"), OverAllocationError)
	def validate_against_leave_applications(self):
		# The (possibly reduced) allocation must still cover leaves already
		# approved in the period, unless the leave type allows a negative
		# balance, in which case only a warning is shown.
		leaves_taken = get_approved_leaves_for_period(self.employee, self.leave_type,
			self.from_date, self.to_date)
		if flt(leaves_taken) > flt(self.total_leaves_allocated):
			if frappe.db.get_value("Leave Type", self.leave_type, "allow_negative"):
				frappe.msgprint(_("Note: Total allocated leaves {0} shouldn't be less than already approved leaves {1} for the period").format(self.total_leaves_allocated, leaves_taken))
			else:
				frappe.throw(_("Total allocated leaves {0} cannot be less than already approved leaves {1} for the period").format(self.total_leaves_allocated, leaves_taken), LessAllocationError)
@frappe.whitelist()
def get_carry_forwarded_leaves(employee, leave_type, date, carry_forward=None):
	"""Return the leave balance carried over from the employee's most
	recent submitted allocation of this leave type ending before *date*.

	Returns 0 when carry-forward is not requested or when no earlier
	allocation exists. Whitelisted, so it is callable from the client.
	"""
	carry_forwarded_leaves = 0
	if carry_forward:
		validate_carry_forward(leave_type)
		previous_allocation = frappe.db.sql("""
			select name, from_date, to_date, total_leaves_allocated
			from `tabLeave Allocation`
			where employee=%s and leave_type=%s and docstatus=1 and to_date < %s
			order by to_date desc limit 1
		""", (employee, leave_type, date), as_dict=1)
		if previous_allocation:
			# balance = what was allocated minus what was approved/taken
			# during the previous allocation's period.
			leaves_taken = get_approved_leaves_for_period(employee, leave_type,
				previous_allocation[0].from_date, previous_allocation[0].to_date)
			carry_forwarded_leaves = flt(previous_allocation[0].total_leaves_allocated) - flt(leaves_taken)
	return carry_forwarded_leaves
def validate_carry_forward(leave_type):
	"""Raise if the Leave Type is not configured to allow carry-forward."""
	if not frappe.db.get_value("Leave Type", leave_type, "is_carry_forward"):
		frappe.throw(_("Leave Type {0} cannot be carry-forwarded").format(leave_type))
|
int19h/PTVS
|
refs/heads/master
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/cryptography/hazmat/primitives/kdf/kbkdf.py
|
13
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from enum import Enum
from six.moves import range
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.primitives import constant_time, hashes, hmac
from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
class Mode(Enum):
    """KBKDF operating mode; only counter mode is implemented here."""
    CounterMode = "ctr"
class CounterLocation(Enum):
    """Where the iteration counter is placed relative to the fixed input."""
    BeforeFixed = "before_fixed"
    AfterFixed = "after_fixed"
@utils.register_interface(KeyDerivationFunction)
class KBKDFHMAC(object):
    """Counter-mode KBKDF using HMAC as the PRF (NIST SP 800-108).

    The PRF input for each round is: [counter] || label || 0x00 || context
    || L (or caller-supplied ``fixed`` data instead of label/context), with
    the counter placed before or after per ``location``.
    """
    def __init__(self, algorithm, mode, length, rlen, llen,
                 location, label, context, fixed, backend):
        if not isinstance(backend, HMACBackend):
            raise UnsupportedAlgorithm(
                "Backend object does not implement HMACBackend.",
                _Reasons.BACKEND_MISSING_INTERFACE
            )
        if not isinstance(algorithm, hashes.HashAlgorithm):
            raise UnsupportedAlgorithm(
                "Algorithm supplied is not a supported hash algorithm.",
                _Reasons.UNSUPPORTED_HASH
            )
        if not backend.hmac_supported(algorithm):
            raise UnsupportedAlgorithm(
                "Algorithm supplied is not a supported hmac algorithm.",
                _Reasons.UNSUPPORTED_HASH
            )
        if not isinstance(mode, Mode):
            raise TypeError("mode must be of type Mode")
        if not isinstance(location, CounterLocation):
            raise TypeError("location must be of type CounterLocation")
        # ``fixed`` replaces the label/context encoding entirely, so
        # supplying both is almost certainly a caller mistake.
        if (label or context) and fixed:
            raise ValueError("When supplying fixed data, "
                             "label and context are ignored.")
        # rlen: byte width of the round counter (1-4 per SP 800-108).
        if rlen is None or not self._valid_byte_length(rlen):
            raise ValueError("rlen must be between 1 and 4")
        # llen: byte width of the encoded output length L; unneeded when
        # the caller supplies the fixed data directly.
        if llen is None and fixed is None:
            raise ValueError("Please specify an llen")
        if llen is not None and not isinstance(llen, int):
            raise TypeError("llen must be an integer")
        if label is None:
            label = b''
        if context is None:
            context = b''
        utils._check_bytes("label", label)
        utils._check_bytes("context", context)
        self._algorithm = algorithm
        self._mode = mode
        self._length = length
        self._rlen = rlen
        self._llen = llen
        self._location = location
        self._label = label
        self._context = context
        self._backend = backend
        self._used = False
        self._fixed_data = fixed
    def _valid_byte_length(self, value):
        """Return True when ``value`` is a usable counter width (1-4)."""
        if not isinstance(value, int):
            raise TypeError('value must be of type int')
        # Renders a field of ``value`` bytes; reject widths outside 1-4.
        value_bin = utils.int_to_bytes(1, value)
        if not 1 <= len(value_bin) <= 4:
            return False
        return True
    def derive(self, key_material):
        """Derive ``length`` bytes of key from ``key_material`` (one-shot)."""
        if self._used:
            raise AlreadyFinalized
        utils._check_byteslike("key_material", key_material)
        self._used = True
        # inverse floor division (equivalent to ceiling)
        rounds = -(-self._length // self._algorithm.digest_size)
        output = [b'']
        # For counter mode, the number of iterations shall not be
        # larger than 2^r-1, where r <= 32 is the binary length of the counter
        # This ensures that the counter values used as an input to the
        # PRF will not repeat during a particular call to the KDF function.
        r_bin = utils.int_to_bytes(1, self._rlen)
        if rounds > pow(2, len(r_bin) * 8) - 1:
            raise ValueError('There are too many iterations.')
        for i in range(1, rounds + 1):
            h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
            counter = utils.int_to_bytes(i, self._rlen)
            if self._location == CounterLocation.BeforeFixed:
                h.update(counter)
            h.update(self._generate_fixed_input())
            if self._location == CounterLocation.AfterFixed:
                h.update(counter)
            output.append(h.finalize())
        # Concatenate the PRF blocks and truncate to the requested length.
        return b''.join(output)[:self._length]
    def _generate_fixed_input(self):
        # Caller-supplied fixed data takes precedence over label/context.
        if self._fixed_data and isinstance(self._fixed_data, bytes):
            return self._fixed_data
        # L is the requested output length in *bits*, llen bytes wide.
        l_val = utils.int_to_bytes(self._length * 8, self._llen)
        return b"".join([self._label, b"\x00", self._context, l_val])
    def verify(self, key_material, expected_key):
        """Raise InvalidKey unless deriving key_material yields expected_key."""
        if not constant_time.bytes_eq(self.derive(key_material), expected_key):
            raise InvalidKey
|
bbozhev/flask-test
|
refs/heads/master
|
flask/lib/python2.7/site-packages/whoosh/analysis/__init__.py
|
96
|
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""Classes and functions for turning a piece of text into an indexable stream
of "tokens" (usually equivalent to words). There are three general classes
involved in analysis:
* Tokenizers are always at the start of the text processing pipeline. They take
a string and yield Token objects (actually, the same token object over and
over, for performance reasons) corresponding to the tokens (words) in the
text.
Every tokenizer is a callable that takes a string and returns an iterator of
tokens.
* Filters take the tokens from the tokenizer and perform various
transformations on them. For example, the LowercaseFilter converts all tokens
to lowercase, which is usually necessary when indexing regular English text.
Every filter is a callable that takes a token generator and returns a token
generator.
* Analyzers are convenience functions/classes that "package up" a tokenizer and
zero or more filters into a single unit. For example, the StandardAnalyzer
combines a RegexTokenizer, LowercaseFilter, and StopFilter.
Every analyzer is a callable that takes a string and returns a token
iterator. (So Tokenizers can be used as Analyzers if you don't need any
filtering).
You can compose tokenizers and filters together using the ``|`` character::
my_analyzer = RegexTokenizer() | LowercaseFilter() | StopFilter()
The first item must be a tokenizer and the rest must be filters (you can't put
a filter first or a tokenizer after the first item).
"""
from whoosh.analysis.acore import *
from whoosh.analysis.tokenizers import *
from whoosh.analysis.filters import *
from whoosh.analysis.morph import *
from whoosh.analysis.intraword import *
from whoosh.analysis.ngrams import *
from whoosh.analysis.analyzers import *
|
xianjunzhengbackup/code
|
refs/heads/master
|
http/REST/Building-RESTful-Python-Web-Services-master/Chapter 4/restful_python_chapter_04_01/gamesapi/gamesapi/wsgi.py
|
15
|
"""
WSGI config for gamesapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before the app is built;
# setdefault keeps any value already exported in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gamesapi.settings")

# Module-level WSGI callable that servers (gunicorn, uWSGI, mod_wsgi) import.
application = get_wsgi_application()
|
evangeline97/localwiki-backend-server
|
refs/heads/master
|
localwiki/regions/tests/__init__.py
|
12
|
from .test_main import *
from .test_api import *
|
UQ-UQx/edx-platform_lti
|
refs/heads/master
|
lms/djangoapps/dashboard/models.py
|
12
|
"""Models for dashboard application"""
import mongoengine
from xmodule.modulestore.mongoengine_fields import CourseKeyField
class CourseImportLog(mongoengine.Document):
    """Mongoengine model for git log"""
    # pylint: disable=incomplete-protocol
    # Opaque course key (serialized CourseKey), e.g. "org/course/run".
    course_id = CourseKeyField(max_length=128)
    # NOTE: this location is not a Location object but a pathname
    location = mongoengine.StringField(max_length=168)
    # Captured output of the course import; 20 * 65535 allows very long logs.
    import_log = mongoengine.StringField(max_length=20 * 65535)
    # Output of the git operation (clone/pull) that fetched the course.
    git_log = mongoengine.StringField(max_length=65535)
    # Directory name of the local git checkout the import ran from.
    repo_dir = mongoengine.StringField(max_length=128)
    # Timestamp of the import; presumably set by the caller at insert time —
    # no default is declared here, so confirm against the writing code.
    created = mongoengine.DateTimeField()
    # Secondary indexes support the dashboard's per-course, newest-first queries.
    meta = {'indexes': ['course_id', 'created'],
            'allow_inheritance': False}
|
vdmann/cse-360-image-hosting-website
|
refs/heads/master
|
lib/python2.7/site-packages/south/management/commands/convert_to_south.py
|
129
|
"""
Quick conversion command module.
"""
from __future__ import print_function
from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.conf import settings
from django.db import models
from django.core import management
from django.core.exceptions import ImproperlyConfigured
from south.migration import Migrations
from south.hacks import hacks
from south.exceptions import NoMigrations
class Command(BaseCommand):
    """Management command: convert an app managed by syncdb to South.

    Runs ``schemamigration --initial`` for the app and then fake-applies
    migration 0001, so South's history matches the existing database tables.
    """

    option_list = BaseCommand.option_list
    # Older Django versions don't define --verbosity on BaseCommand; add it
    # ourselves only when absent so we never register the option twice.
    if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]:
        option_list += (
            make_option('--verbosity', action='store', dest='verbosity', default='1',
                type='choice', choices=['0', '1', '2'],
                help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
        )
    option_list += (
        make_option('--delete-ghost-migrations', action='store_true', dest='delete_ghosts', default=False,
            help="Tells South to delete any 'ghost' migrations (ones in the database but not on disk)."),
        make_option('--ignore-ghost-migrations', action='store_true', dest='ignore_ghosts', default=False,
            help="Tells South to ignore any 'ghost' migrations (ones in the database but not on disk) and continue to apply new migrations."),
    )
    help = "Quickly converts the named application to use South if it is currently using syncdb."

    def handle(self, app=None, *args, **options):
        """Validate the app, then run schemamigration + fake migrate.

        ``app`` is the application label (a dotted path is accepted; only
        the last component is used). Prints a message and returns early on
        any precondition failure instead of raising.
        """
        # Make sure we have an app
        if not app:
            print("Please specify an app to convert.")
            return
        # See if the app exists
        app = app.split(".")[-1]
        try:
            app_module = models.get_app(app)
        except ImproperlyConfigured:
            print("There is no enabled application matching '%s'." % app)
            return
        # Try to get its list of models
        model_list = models.get_models(app_module)
        if not model_list:
            print("This application has no models; this command is for applications that already have models syncdb'd.")
            print("Make some models, and then use ./manage.py schemamigration %s --initial instead." % app)
            return
        # Ask South if it thinks it's already got migrations
        # (NoMigrations means it doesn't — that's the state we want here).
        try:
            Migrations(app)
        except NoMigrations:
            pass
        else:
            print("This application is already managed by South.")
            return
        # Finally! It seems we've got a candidate, so do the two-command trick
        verbosity = int(options.get('verbosity', 0))
        management.call_command("schemamigration", app, initial=True, verbosity=verbosity)
        # Now, we need to re-clean and sanitise appcache
        # (schemamigration created a migrations package; Django's app cache
        # must be rebuilt so the next command sees it).
        hacks.clear_app_cache()
        hacks.repopulate_app_cache()
        # And also clear our cached Migration classes
        Migrations._clear_cache()
        # Now, migrate — fake=True records 0001 as applied without touching
        # the database, since the tables already exist from syncdb.
        management.call_command(
            "migrate",
            app,
            "0001",
            fake=True,
            verbosity=verbosity,
            ignore_ghosts=options.get("ignore_ghosts", False),
            delete_ghosts=options.get("delete_ghosts", False),
        )
        print()
        print("App '%s' converted. Note that South assumed the application's models matched the database" % app)
        print("(i.e. you haven't changed it since last syncdb); if you have, you should delete the %s/migrations" % app)
        print("directory, revert models.py so it matches the database, and try again.")
|
40223220/2015cc
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/pydoc_data/topics.py
|
694
|
# -*- coding: utf-8 -*-
# Autogenerated by Sphinx on Sat Mar 23 15:42:31 2013
topics = {'assert': '\nThe ``assert`` statement\n************************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, ``assert expression``, is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, ``assert expression1, expression2``, is equivalent\nto\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that ``__debug__`` and ``AssertionError``\nrefer to the built-in variables with those names. In the current\nimplementation, the built-in variable ``__debug__`` is ``True`` under\nnormal circumstances, ``False`` when optimization is requested\n(command line option -O). The current code generator emits no code\nfor an assert statement when optimization is requested at compile\ntime. Note that it is unnecessary to include the source code for the\nexpression that failed in the error message; it will be displayed as\npart of the stack trace.\n\nAssignments to ``__debug__`` are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n',
'assignment': '\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an asterisk,\n called a "starred" target: The object must be a sequence with at\n least as many items as there are targets in the target list, minus\n one. 
The first items of the sequence are assigned, from left to\n right, to the targets before the starred target. The final items\n of the sequence are assigned to the targets after the starred\n target. A list of the remaining items in the sequence is then\n assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of items\n as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` or ``nonlocal``\n statement in the current code block: the name is bound to the\n object in the current local namespace.\n\n * Otherwise: the name is bound to the object in the global namespace\n or the outer namespace determined by ``nonlocal``, respectively.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. 
That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n ``a.x`` can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target ``a.x`` is\n always set as an instance attribute, creating it if necessary.\n Thus, the two occurrences of ``a.x`` do not necessarily refer to the\n same attribute: if the RHS expression refers to a class attribute,\n the LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with ``property()``.\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, ``IndexError`` is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. 
This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the ``__setitem__()`` method is called\n with appropriate arguments.\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to integers. If either bound is\n negative, the sequence\'s length is added to it. The resulting\n bounds are clipped to lie between zero and the sequence\'s length,\n inclusive. Finally, the sequence object is asked to replace the\n slice with the items of the assigned sequence. The length of the\n slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print(x)\n\nSee also:\n\n **PEP 3132** - Extended Iterable Unpacking\n The specification for the ``*target`` feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
'atom-identifiers': '\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name in front of the name, with leading underscores removed, and\na single underscore inserted in front of the class name. For example,\nthe identifier ``__spam`` occurring in a class named ``Ham`` will be\ntransformed to ``_Ham__spam``. This transformation is independent of\nthe syntactical context in which the identifier is used. If the\ntransformed name is extremely long (longer than 255 characters),\nimplementation defined truncation may happen. If the class name\nconsists only of underscores, no transformation is done.\n',
'atom-literals': "\nLiterals\n********\n\nPython supports string and bytes literals and various numeric\nliterals:\n\n literal ::= stringliteral | bytesliteral\n | integer | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\nbytes, integer, floating point number, complex number) with the given\nvalue. The value may be approximated in the case of floating point\nand imaginary (complex) literals. See section *Literals* for details.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n",
'attribute-access': '\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. 
In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when ``dir()`` is called on the object. A sequence must be\n returned. ``dir()`` converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). 
*owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. 
Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to an object instance, ``a.x`` is transformed into the\n call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a class, ``A.x`` is transformed into the call:\n ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. 
If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``int``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n',
'attribute-references': '\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, which most objects do. This object is then\nasked to produce the attribute whose name is the identifier (which can\nbe customized by overriding the ``__getattr__()`` method). If this\nattribute is not available, the exception ``AttributeError`` is\nraised. Otherwise, the type and value of the object produced is\ndetermined by the object. Multiple evaluations of the same attribute\nreference may yield different objects.\n',
'augassign': '\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
'binary': '\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe ``*`` (multiplication) operator yields the product of its\narguments. The arguments must either both be numbers, or one argument\nmust be an integer and the other must be a sequence. In the former\ncase, the numbers are converted to a common type and then multiplied\ntogether. In the latter case, sequence repetition is performed; a\nnegative repetition factor yields an empty sequence.\n\nThe ``/`` (division) and ``//`` (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Integer division yields a float, while\nfloor division of integers results in an integer; the result is that\nof mathematical division with the \'floor\' function applied to the\nresult. Division by zero raises the ``ZeroDivisionError`` exception.\n\nThe ``%`` (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n``ZeroDivisionError`` exception. The arguments may be floating point\nnumbers, e.g., ``3.14%0.7`` equals ``0.34`` (since ``3.14`` equals\n``4*0.7 + 0.34``.) The modulo operator always yields a result with\nthe same sign as its second operand (or zero); the absolute value of\nthe result is strictly smaller than the absolute value of the second\noperand [1].\n\nThe floor division and modulo operators are connected by the following\nidentity: ``x == (x//y)*y + (x%y)``. 
Floor division and modulo are\nalso connected with the built-in function ``divmod()``: ``divmod(x, y)\n== (x//y, x%y)``. [2].\n\nIn addition to performing the modulo operation on numbers, the ``%``\noperator is also overloaded by string objects to perform old-style\nstring formatting (also known as interpolation). The syntax for\nstring formatting is described in the Python Library Reference,\nsection *printf-style String Formatting*.\n\nThe floor division operator, the modulo operator, and the ``divmod()``\nfunction are not defined for complex numbers. Instead, convert to a\nfloating point number using the ``abs()`` function if appropriate.\n\nThe ``+`` (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe ``-`` (subtraction) operator yields the difference of its\narguments. The numeric arguments are first converted to a common\ntype.\n',
'bitwise': '\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe ``&`` operator yields the bitwise AND of its arguments, which must\nbe integers.\n\nThe ``^`` operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be integers.\n\nThe ``|`` operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be integers.\n',
'bltin-code-objects': '\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin ``compile()`` function and can be extracted from function objects\nthrough their ``__code__`` attribute. See also the ``code`` module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the ``exec()`` or ``eval()`` built-in functions.\n\nSee *The standard type hierarchy* for more information.\n',
'bltin-ellipsis-object': '\nThe Ellipsis Object\n*******************\n\nThis object is commonly used by slicing (see *Slicings*). It supports\nno special operations. There is exactly one ellipsis object, named\n``Ellipsis`` (a built-in name). ``type(Ellipsis)()`` produces the\n``Ellipsis`` singleton.\n\nIt is written as ``Ellipsis`` or ``...``.\n',
'bltin-null-object': "\nThe Null Object\n***************\n\nThis object is returned by functions that don't explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named ``None`` (a built-in name). ``type(None)()`` produces\nthe same singleton.\n\nIt is written as ``None``.\n",
'bltin-type-objects': "\nType Objects\n************\n\nType objects represent the various object types. An object's type is\naccessed by the built-in function ``type()``. There are no special\noperations on types. The standard module ``types`` defines names for\nall standard built-in types.\n\nTypes are written like this: ``<class 'int'>``.\n",
'booleans': '\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: ``False``, ``None``, numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. User-defined objects can customize their truth value by\nproviding a ``__bool__()`` method.\n\nThe operator ``not`` yields ``True`` if its argument is false,\n``False`` otherwise.\n\nThe expression ``x and y`` first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression ``x or y`` first evaluates *x*; if *x* is true, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\n(Note that neither ``and`` nor ``or`` restrict the value and type they\nreturn to ``False`` and ``True``, but rather return the last evaluated\nargument. This is sometimes useful, e.g., if ``s`` is a string that\nshould be replaced by a default value if it is empty, the expression\n``s or \'foo\'`` yields the desired value. Because ``not`` has to\ninvent a value anyway, it does not bother to return a value of the\nsame type as its argument, so e.g., ``not \'foo\'`` yields ``False``,\nnot ``\'\'``.)\n',
'break': '\nThe ``break`` statement\n***********************\n\n break_stmt ::= "break"\n\n``break`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition\nwithin that loop.\n\nIt terminates the nearest enclosing loop, skipping the optional\n``else`` clause if the loop has one.\n\nIf a ``for`` loop is terminated by ``break``, the loop control target\nkeeps its current value.\n\nWhen ``break`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the loop.\n',
'callable-types': '\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n',
'calls': '\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n``__call__()`` method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a ``TypeError`` exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is ``None``, it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. 
(Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a\n``TypeError`` exception is raised. Otherwise, the list of filled\nslots is used as the argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use ``PyArg_ParseTuple()`` to\nparse their arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``*identifier`` is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``**identifier`` is present; in this case, that\nformal parameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax ``*expression`` appears in the function call,\n``expression`` must evaluate to an iterable. 
Elements from this\niterable are treated as if they were additional positional arguments;\nif there are positional arguments *x1*, ..., *xN*, and ``expression``\nevaluates to a sequence *y1*, ..., *yM*, this is equivalent to a call\nwith M+N positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the ``*expression`` syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the ``**expression`` argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the ``*expression``\nsyntax to be used in the same call, so in practice this confusion does\nnot arise.\n\nIf the syntax ``**expression`` appears in the function call,\n``expression`` must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both ``expression`` and as an explicit keyword argument,\na ``TypeError`` exception is raised.\n\nFormal parameters using the syntax ``*identifier`` or ``**identifier``\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly ``None``, unless it raises\nan exception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. 
When the code block executes a ``return``\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a ``__call__()`` method; the effect is then\n the same as if that method was called.\n',
'class': '\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class ``object``; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with ``self.name = value``. 
Both class and\ninstance attributes are accessible through the notation\n"``self.name``", and an instance attribute hides a class attribute\nwith the same name when accessed in this way. Class attributes can be\nused as defaults for instance attributes, but using mutable values\nthere can lead to unexpected results. *Descriptors* can be used to\ncreate instance variables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n',
'comparisons': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nthe ``==`` and ``!=`` operators *always* consider objects of different\ntypes to be unequal, while the ``<``, ``>``, ``>=`` and ``<=``\noperators raise a ``TypeError`` when comparing objects of different\ntypes that do not implement these operators for the given pair of\ntypes. 
You can control comparison behavior of objects of non-built-in\ntypes by defining rich comparison methods like ``__gt__()``, described\nin section *Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values ``float(\'NaN\')`` and ``Decimal(\'NaN\')`` are special. The\n are identical to themselves, ``x is x`` but are not equal to\n themselves, ``x != x``. Additionally, comparing any value to a\n not-a-number value will return ``False``. For example, both ``3 <\n float(\'NaN\')`` and ``float(\'NaN\') < 3`` will return ``False``.\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``[1,2,x] <= [1,2,y]`` has the\n same value as ``x <= y``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same ``(key, value)`` pairs. Order comparisons ``(\'<\', \'<=\', \'>=\',\n \'>\')`` raise ``TypeError``.\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets ``{1,2}`` and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). 
Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, ``min()``, ``max()``, and ``sorted()`` produce\n undefined results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nComparison of objects of the differing types depends on whether either\nof the types provide explicit support for the comparison. Most\nnumeric types can be compared with one another. When cross-type\ncomparison is not supported, the comparison method returns\n``NotImplemented``.\n\nThe operators ``in`` and ``not in`` test for membership. ``x in s``\nevaluates to true if *x* is a member of *s*, and false otherwise. ``x\nnot in s`` returns the negation of ``x in s``. All built-in sequences\nand set types support this as well as dictionary, for which ``in``\ntests whether a the dictionary has a given key. For container types\nsuch as list, tuple, set, frozenset, dict, or collections.deque, the\nexpression ``x in y`` is equivalent to ``any(x is e or x == e for e in\ny)``.\n\nFor the string and bytes types, ``x in y`` is true if and only if *x*\nis a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nEmpty strings are always considered to be a substring of any other\nstring, so ``"" in "abc"`` will return ``True``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. 
If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [4]\n',
'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements, while the ``with`` statement\nallows the execution of initialization and finalization code around a\nblock of code. Function and class definitions are also syntactically\ncompound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. 
Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print()`` calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" 
suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a ``StopIteration``\nexception), the suite in the ``else`` clause, if present, is executed,\nand the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. 
Hint: the built-in function ``range()`` returns an\niterator of integers suitable to emulate the effect of Pascal\'s ``for\ni := a to b do``; e.g., ``list(range(3))`` returns the list ``[0, 1,\n2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. 
For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the ``as`` keyword in that except clause,\nif present, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using ``as target``, it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. 
Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the ``sys`` module and can be access via\n``sys.exc_info()``. ``sys.exc_info()`` returns a 3-tuple consisting of\nthe exception class, the exception instance and a traceback object\n(see section *The standard type hierarchy*) identifying the point in\nthe program where the exception occurred. ``sys.exc_info()`` values\nare restored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception it is re-raised at the end of\nthe ``finally`` clause. If the ``finally`` clause raises another\nexception, the saved exception is set as the context of the new\nexception. If the ``finally`` clause executes a ``return`` or\n``break`` statement, the saved exception is discarded:\n\n def f():\n try:\n 1/0\n finally:\n return 42\n\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. 
(The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. 
Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. 
If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have "return" annotation of the form "``-> expression``" after the\nparameter list. These annotations can be any valid Python expression\nand are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. 
See section *Naming and binding* for details.\n\nSee also:\n\n **PEP 3107** - Function Annotations\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class ``object``; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. 
Instance attributes\ncan be set in a method with ``self.name = value``. Both class and\ninstance attributes are accessible through the notation\n"``self.name``", and an instance attribute hides a class attribute\nwith the same name when accessed in this way. Class attributes can be\nused as defaults for instance attributes, but using mutable values\nthere can lead to unexpected results. *Descriptors* can be used to\ncreate instance variables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n',
'context-managers': '\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n',
'continue': '\nThe ``continue`` statement\n**************************\n\n continue_stmt ::= "continue"\n\n``continue`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition or\n``finally`` clause within that loop. It continues with the next cycle\nof the nearest enclosing loop.\n\nWhen ``continue`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nstarting the next loop cycle.\n',
'conversions': '\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works that way:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n',
'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. 
As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_info()[2]`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.last_traceback``. Circular references which are garbage are\n detected when the option cycle detector is enabled (it\'s on by\n default), but can only be cleaned up if there are no Python-\n level ``__del__()`` methods involved. 
Refer to the documentation\n for the ``gc`` module for more information about how\n ``__del__()`` methods are handled by the cycle detector,\n particularly the description of the ``garbage`` value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function to compute the\n "official" string representation of an object. If at all possible,\n this should look like a valid Python expression that could be used\n to recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n ``<...some useful description...>`` should be returned. The return\n value must be a string object. 
If a class defines ``__repr__()``\n but not ``__str__()``, then ``__repr__()`` is also used when an\n "informal" string representation of instances of that class is\n required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by ``str(object)`` and the built-in functions ``format()``\n and ``print()`` to compute the "informal" or nicely printable\n string representation of an object. The return value must be a\n *string* object.\n\n This method differs from ``object.__repr__()`` in that there is no\n expectation that ``__str__()`` return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type ``object``\n calls ``object.__repr__()``.\n\nobject.__bytes__(self)\n\n Called by ``bytes()`` to compute a byte-string representation of an\n object. This should return a ``bytes`` object.\n\nobject.__format__(self, format_spec)\n\n Called by the ``format()`` built-in function (and by extension, the\n ``str.format()`` method of class ``str``) to produce a "formatted"\n string representation of an object. The ``format_spec`` argument is\n a string that contains a description of the formatting options\n desired. The interpretation of the ``format_spec`` argument is up\n to the type implementing ``__format__()``, however most classes\n will either delegate formatting to one of the built-in types, or\n use a similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. 
The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` calls\n ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and ``x>=y`` calls\n ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. 
The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define an ``__eq__()`` method it should not\n define a ``__hash__()`` operation either; if it defines\n ``__eq__()`` but not ``__hash__()``, its instances will not be\n usable as items in hashable collections. If a class defines\n mutable objects and implements an ``__eq__()`` method, it should\n not implement ``__hash__()``, since the implementation of hashable\n collections requires that a key\'s hash value is immutable (if the\n object\'s hash value changes, it will be in the wrong hash bucket).\n\n User-defined classes have ``__eq__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns an appropriate value such\n that ``x == y`` implies both that ``x is y`` and ``hash(x) ==\n hash(y)``.\n\n A class that overrides ``__eq__()`` and does not define\n ``__hash__()`` will have its ``__hash__()`` implicitly set to\n ``None``. When the ``__hash__()`` method of a class is ``None``,\n instances of the class will raise an appropriate ``TypeError`` when\n a program attempts to retrieve their hash value, and will also be\n correctly identified as unhashable when checking ``isinstance(obj,\n collections.Hashable``).\n\n If a class that overrides ``__eq__()`` needs to retain the\n implementation of ``__hash__()`` from a parent class, the\n interpreter must be told this explicitly by setting ``__hash__ =\n <ParentClass>.__hash__``.\n\n If a class that does not override ``__eq__()`` wishes to suppress\n hash support, it should include ``__hash__ = None`` in the class\n definition. 
A class which defines its own ``__hash__()`` that\n explicitly raises a ``TypeError`` would be incorrectly identified\n as hashable by an ``isinstance(obj, collections.Hashable)`` call.\n\n Note: By default, the ``__hash__()`` values of str, bytes and datetime\n objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also ``PYTHONHASHSEED``.\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``. When this method\n is not defined, ``__len__()`` is called, if it is defined, and the\n object is considered true if its result is nonzero. If a class\n defines neither ``__len__()`` nor ``__bool__()``, all its instances\n are considered true.\n',
'debugger': '\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the ``readline`` module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the ``print`` command.\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. 
Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: ``pdb.py`` now accepts a ``-c`` option that\nexecutes commands as if given in a ``.pdbrc`` file, see *Debugger\nCommands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``continue`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type ``continue``, or you can\n step through the statement using ``step`` or ``next`` (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module ``__main__`` is used. (See\n the explanation of the built-in ``exec()`` or ``eval()``\n functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When ``runeval()`` returns, it returns the\n value of the expression. 
Otherwise this function is similar to\n ``run()``.\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. If you want to access further features, you have to do this\nyourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n ``continue`` command. This allows you to break into the debugger\n again by pressing Ctrl-C. 
If you want Pdb not to touch the SIGINT\n handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n``h(elp)`` means that either ``h`` or ``help`` can be used to enter\nthe help command (but not ``he`` or ``hel``, nor ``H`` or ``Help`` or\n``HELP``). Arguments to commands must be separated by whitespace\n(spaces or tabs). Optional arguments are enclosed in square brackets\n(``[]``) in the command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n(``|``).\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a ``list`` command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint (``!``). This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by\n``;;``. 
(A single ``;`` is not used as it is the separator for\nmultiple commands in a line that is passed to the Python parser.) No\nintelligence is applied to separating the commands; the input is split\nat the first ``;;`` pair, even if it is in the middle of a quoted\nstring.\n\nIf a file ``.pdbrc`` exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ``.pdbrc`` can now contain commands that\ncontinue debugging, such as ``continue`` or ``next``. Previously,\nthese commands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. ``help pdb``\n displays the full documentation (the docstring of the ``pdb``\n module). Since the *command* argument must be an identifier,\n ``help exec`` must be entered to get help on the ``!`` command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on ``sys.path``. 
Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for ``break``.\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. 
The\n commands themselves appear on the following lines. Type a line\n containing just ``end`` to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) print some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with ``end``; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. 
(The difference between ``next`` and\n ``step`` is that ``step`` stops inside a called function, while\n ``next`` executes called functions at (nearly) full speed, only\n stopping at the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a ``for`` loop or out\n of a ``finally`` clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With ``.`` as argument, list 11 lines around the current line.\n With one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by ``->``. 
If\n an exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ``>>``, if it\n differs from the current line.\n\n New in version 3.2: The ``>>`` marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for ``list``.\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np(rint) expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\npp expression\n\n Like the ``print`` command, except the value of the expression is\n pretty-printed using the ``pprint`` module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the ``code`` module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by ``%1``, ``%2``, and so on, while ``%*`` is replaced by\n all the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. 
Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ``.pdbrc`` file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. To set a\n global variable, you can prefix the assignment command with a\n ``global`` statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with ``shlex`` and the result is used as the new\n ``sys.argv``. History, breakpoints, actions and debugger options\n are preserved. ``restart`` is an alias for ``run``.\n\nq(uit)\n\n Quit from the debugger. The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module is\n determined by the ``__name__`` in the frame globals.\n',
'del': '\nThe ``del`` statement\n*********************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a ``global``\nstatement in the same code block. If the name is unbound, a\n``NameError`` exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n',
'dict': '\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n',
'dynamic-features': '\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n',
'else': '\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n',
'exceptions': '\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nNote: Exception messages are not part of the Python API. 
Their contents\n may change from one version of Python to the next without warning\n and should not be relied on by code which will run under multiple\n versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n',
'execmodel': '\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions ``eval()`` and ``exec()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. 
The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as ``nonlocal``. If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, or\nafter ``as`` in a ``with`` statement or ``except`` clause. The\n``import`` statement of the form ``from ... import *`` binds all names\ndefined in the imported module, except those beginning with an\nunderscore. This form may only be used at the module level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. 
The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the ``global`` statement occurs within a block, all uses of the\nname specified in the statement refer to the binding of that name in\nthe top-level namespace. Names are resolved in the top-level\nnamespace by searching the global namespace, i.e. the namespace of the\nmodule containing the code block, and the builtins namespace, the\nnamespace of the module ``builtins``. The global namespace is\nsearched first. If the name is not found there, the builtins\nnamespace is searched. The global statement must precede all uses of\nthe name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module\'s dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``builtins``; when in any other module, ``__builtins__`` is an alias\nfor the dictionary of the ``builtins`` module itself.\n``__builtins__`` can be set to a user-created dictionary to create a\nweak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``builtins`` module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. 
These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. 
The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nNote: Exception messages are not part of the Python API. Their contents\n may change from one version of Python to the next without warning\n and should not be relied on by code which will run under multiple\n versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n',
'exprlists': '\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: ``()``.)\n',
'floating': '\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, ``077e010`` is legal, and denotes the same\nnumber as ``77e10``. The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator ``-`` and the\nliteral ``1``.\n',
'for': '\nThe ``for`` statement\n*********************\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a ``StopIteration``\nexception), the suite in the ``else`` clause, if present, is executed,\nand the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. Hint: the built-in function ``range()`` returns an\niterator of integers suitable to emulate the effect of Pascal\'s ``for\ni := a to b do``; e.g., ``list(range(3))`` returns the list ``[0, 1,\n2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. 
When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n',
'formatstrings': '\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s" | "a"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... 
will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings ``\'10\'`` or\n``\':-]\'``) within a format string. The *arg_name* can be followed by\nany number of index or attribute expressions. An expression of the\nform ``\'.name\'`` selects the named attribute using ``getattr()``,\nwhile an expression of the form ``\'[index]\'`` does an index lookup\nusing ``__getitem__()``.\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. 
By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nThree conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, ``\'!r\'`` which calls ``repr()`` and ``\'!a\'``\nwhich calls ``ascii()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n "More {!a}" # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. 
Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <a character other than \'{\' or \'}\'>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'{\' or \'}\'. The\npresence of a fill character is signaled by the character following\nit, which must be one of the alignment options. If the second\ncharacter of *format_spec* is not a valid alignment option, then it is\nassumed that both the fill character and the alignment option are\nabsent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. 
This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option causes the "alternate form" to be used for the\nconversion. The alternate form is defined differently for different\ntypes. This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective ``\'0b\'``, ``\'0o\'``, or\n``\'0x\'`` to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. 
Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. In addition, for ``\'g\'`` and ``\'G\'``\nconversions, trailing zeros are not removed from the result.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 3.1: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero (``\'0\'``) character enables\nsign-aware zero-padding for numeric types. This is equivalent to a\n*fill* character of ``\'0\'`` with an *alignment* type of ``\'=\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. 
|\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). 
When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``, but converts ``nan`` to |\n | | ``NAN`` and ``inf`` to ``INF``. |\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. 
Positive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | Similar to ``\'g\'``, except with at least one digit past |\n | | the decimal point and a default precision of 12. This is |\n | | intended to match ``str()``, except you can add the other |\n | | format modifiers. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most of the cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. 
For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 3.1+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point:\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text 
in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12): #doctest: +NORMALIZE_WHITESPACE\n ... for base in \'dXob\':\n ... print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n ... print()\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n',
'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. 
Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have "return" annotation of the form "``-> expression``" after the\nparameter list. These annotations can be any valid Python expression\nand are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also:\n\n **PEP 3107** - Function Annotations\n The original specification for function annotations.\n',
'global': '\nThe ``global`` statement\n************************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe ``global`` statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without ``global``, although free variables may refer to\nglobals without being declared global.\n\nNames listed in a ``global`` statement must not be used in the same\ncode block textually preceding that ``global`` statement.\n\nNames listed in a ``global`` statement must not be defined as formal\nparameters or in a ``for`` loop control target, ``class`` definition,\nfunction definition, or ``import`` statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the ``global`` is a directive to the parser.\nIt applies only to code parsed at the same time as the ``global``\nstatement. In particular, a ``global`` statement contained in a string\nor code object supplied to the built-in ``exec()`` function does not\naffect the code block *containing* the function call, and code\ncontained in such a string is unaffected by ``global`` statements in\nthe code containing the function call. The same applies to the\n``eval()`` and ``compile()`` functions.\n',
'id-classes': '\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``builtins`` module. When\n not in interactive mode, ``_`` has no special meaning and is not\n defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n',
'identifiers': '\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters ``A`` through ``Z``, the underscore ``_`` and, except for the\nfirst character, the digits ``0`` through ``9``.\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**). For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n``unicodedata`` module.\n\nIdentifiers are unlimited in length. Case is significant.\n\n identifier ::= xid_start xid_continue*\n id_start ::= <all characters in general categories Lu, Ll, Lt, Lm, Lo, Nl, the underscore, and characters with the Other_ID_Start property>\n id_continue ::= <all characters in id_start, plus characters in the categories Mn, Mc, Nd, Pc and others with the Other_ID_Continue property>\n xid_start ::= <all characters in id_start whose NFKC normalization is in "id_start xid_continue*">\n xid_continue ::= <all characters in id_continue whose NFKC normalization is in "id_continue*">\n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while 
parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n False class finally is return\n None continue for lambda try\n True def from nonlocal while\n and del global not with\n as elif if or yield\n assert else import pass\n break except in raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``builtins`` module. When\n not in interactive mode, ``_`` has no special meaning and is not\n defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. 
Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n',
'if': '\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n',
'imaginary': '\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., ``(3+4j)``. Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n',
'import': '\nThe ``import`` statement\n************************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nThe basic import statement (no ``from`` clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope where\n the ``import`` statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individiual import statements.\n\nThe details of the first step, finding and loading modules is\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. 
Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by ``as``, then the name following\n ``as`` is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n top level module, the module\'s name is bound in the local namespace\n as a reference to the imported module\n\n* If the module being imported is *not* a top level module, then the\n name of the top level package that contains the module is bound in\n the local namespace as a reference to the top level package. The\n imported module must be accessed using its full qualified name\n rather than directly\n\nThe ``from`` form uses a slightly more complex process:\n\n1. find the module specified in the ``from`` clause loading and\n initializing it if necessary;\n\n2. for each of the identifiers specified in the ``import`` clauses:\n\n 1. check if the imported module has an attribute by that name\n\n 2. if not, attempt to import a submodule with that name and then\n check the imported module again for that attribute\n\n 3. if the attribute is not found, ``ImportError`` is raised.\n\n 4. 
otherwise, a reference to that value is bound in the local\n namespace, using the name in the ``as`` clause if it is present,\n otherwise using the attribute name\n\nExamples:\n\n import foo # foo imported and bound locally\n import foo.bar.baz # foo.bar.baz imported, foo bound locally\n import foo.bar.baz as fbb # foo.bar.baz imported and bound as fbb\n from foo.bar import baz # foo.bar.baz imported and bound as baz\n from foo import attr # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star (``\'*\'``), all public\nnames defined in the module are bound in the local namespace for the\nscope where the ``import`` statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope.\nAttempting to use it in class or function definitions will raise a\n``SyntaxError``.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. 
If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope. The\nwild card form of import --- ``import *`` --- is only allowed at the\nmodule level. Attempting to use it in class or function definitions\nwill raise a ``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. If you execute ``from ..subpkg2\nimport mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. The specification for relative imports is\ncontained within **PEP 328**.\n\n``importlib.import_module()`` is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. 
The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are ``absolute_import``,\n``division``, ``generators``, ``unicode_literals``,\n``print_function``, ``nested_scopes`` and ``with_statement``. They\nare all redundant because they are always enabled, and only kept for\nbackwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. 
Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions ``exec()`` and\n``compile()`` that occur in a module ``M`` containing a future\nstatement will, by default, use the new syntax or semantics associated\nwith the future statement. This can be controlled by optional\narguments to ``compile()`` --- see the documentation of that function\nfor details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n',
'in': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nthe ``==`` and ``!=`` operators *always* consider objects of different\ntypes to be unequal, while the ``<``, ``>``, ``>=`` and ``<=``\noperators raise a ``TypeError`` when comparing objects of different\ntypes that do not implement these operators for the given pair of\ntypes. 
You can control comparison behavior of objects of non-built-in\ntypes by defining rich comparison methods like ``__gt__()``, described\nin section *Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values ``float(\'NaN\')`` and ``Decimal(\'NaN\')`` are special. The\n are identical to themselves, ``x is x`` but are not equal to\n themselves, ``x != x``. Additionally, comparing any value to a\n not-a-number value will return ``False``. For example, both ``3 <\n float(\'NaN\')`` and ``float(\'NaN\') < 3`` will return ``False``.\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``[1,2,x] <= [1,2,y]`` has the\n same value as ``x <= y``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same ``(key, value)`` pairs. Order comparisons ``(\'<\', \'<=\', \'>=\',\n \'>\')`` raise ``TypeError``.\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets ``{1,2}`` and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). 
Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, ``min()``, ``max()``, and ``sorted()`` produce\n undefined results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nComparison of objects of the differing types depends on whether either\nof the types provide explicit support for the comparison. Most\nnumeric types can be compared with one another. When cross-type\ncomparison is not supported, the comparison method returns\n``NotImplemented``.\n\nThe operators ``in`` and ``not in`` test for membership. ``x in s``\nevaluates to true if *x* is a member of *s*, and false otherwise. ``x\nnot in s`` returns the negation of ``x in s``. All built-in sequences\nand set types support this as well as dictionary, for which ``in``\ntests whether a the dictionary has a given key. For container types\nsuch as list, tuple, set, frozenset, dict, or collections.deque, the\nexpression ``x in y`` is equivalent to ``any(x is e or x == e for e in\ny)``.\n\nFor the string and bytes types, ``x in y`` is true if and only if *x*\nis a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nEmpty strings are always considered to be a substring of any other\nstring, so ``"" in "abc"`` will return ``True``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. 
If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [4]\n',
'integers': '\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"+\n nonzerodigit ::= "1"..."9"\n digit ::= "0"..."9"\n octinteger ::= "0" ("o" | "O") octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n octdigit ::= "0"..."7"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n bindigit ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n 7 2147483647 0o177 0b100110111\n 3 79228162514264337593543950336 0o377 0x100000000\n 79228162514264337593543950336 0xdeadbeef\n',
'lambda': '\nLambdas\n*******\n\n lambda_form ::= "lambda" [parameter_list]: expression\n lambda_form_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda forms (lambda expressions) have the same syntactic position as\nexpressions. They are a shorthand to create anonymous functions; the\nexpression ``lambda arguments: expression`` yields a function object.\nThe unnamed object behaves like a function object defined with\n\n def <lambda>(arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda forms cannot contain\nstatements or annotations.\n',
'lists': '\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension. When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n',
'naming': "\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the '**-c**' option) is a code block. The string argument passed\nto the built-in functions ``eval()`` and ``exec()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block's execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. 
The set of all such scopes visible to a code block\nis called the block's *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as ``nonlocal``. If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, or\nafter ``as`` in a ``with`` statement or ``except`` clause. The\n``import`` statement of the form ``from ... import *`` binds all names\ndefined in the imported module, except those beginning with an\nunderscore. This form may only be used at the module level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. 
The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the ``global`` statement occurs within a block, all uses of the\nname specified in the statement refer to the binding of that name in\nthe top-level namespace. Names are resolved in the top-level\nnamespace by searching the global namespace, i.e. the namespace of the\nmodule containing the code block, and the builtins namespace, the\nnamespace of the module ``builtins``. The global namespace is\nsearched first. If the name is not found there, the builtins\nnamespace is searched. The global statement must precede all uses of\nthe name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module's dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``builtins``; when in any other module, ``__builtins__`` is an alias\nfor the dictionary of the ``builtins`` module itself.\n``__builtins__`` can be set to a user-created dictionary to create a\nweak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``builtins`` module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. 
These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n",
'nonlocal': '\nThe ``nonlocal`` statement\n**************************\n\n nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe ``nonlocal`` statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope. This is\nimportant because the default behavior for binding is to search the\nlocal namespace first. The statement allows encapsulated code to\nrebind variables outside of the local scope besides the global\n(module) scope.\n\nNames listed in a ``nonlocal`` statement, unlike to those listed in a\n``global`` statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a ``nonlocal`` statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also:\n\n **PEP 3104** - Access to Names in Outer Scopes\n The specification for the ``nonlocal`` statement.\n',
'numbers': "\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers. There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator '``-``' and\nthe literal ``1``.\n",
'numeric-types': "\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``). For instance, to evaluate the expression ``x + y``, where\n *x* is an instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()``. 
Note that\n ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``) with reflected (swapped) operands. These functions are only\n called if the left operand does not support the corresponding\n operation and the operands are of different types. [2] For\n instance, to evaluate the expression ``x - y``, where *y* is an\n instance of a class that has an ``__rsub__()`` method,\n ``y.__rsub__(x)`` is called if ``x.__sub__(y)`` returns\n *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand's type is a subclass of the left operand's\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand's\n non-reflected method. 
This behavior allows subclasses to\n override their ancestors' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``float()`` and ``round()``. Should return a value of\n the appropriate type.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing, or in the\n built-in ``bin()``, ``hex()`` and ``oct()`` functions). Must return\n an integer.\n",
'objects': '\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'``is``\' operator compares the\nidentity of two objects; the ``id()`` function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, ``id(x)`` is the\nmemory address where ``x`` is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type. The ``type()`` function returns an object\'s\ntype (which is an object itself). Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change. Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. (The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. 
An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the ``gc`` module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'``try``...``except``\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a ``close()`` method. Programs\nare strongly recommended to explicitly close such objects. The\n\'``try``...``finally``\' statement and the \'``with``\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. 
In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after ``a = 1; b =\n1``, ``a`` and ``b`` may or may not refer to the same object with the\nvalue one, depending on the implementation, but after ``c = []; d =\n[]``, ``c`` and ``d`` are guaranteed to refer to two different,\nunique, newly created empty lists. (Note that ``c = d = []`` assigns\nthe same object to both ``c`` and ``d``.)\n',
'operator-summary': '\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``if`` -- ``else`` | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` ``x`` | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not in``, ``is``, ``is not``, ``<``, | Comparisons, including membership |\n| ``<=``, ``>``, ``>=``, ``!=``, ``==`` | tests and identity tests, |\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND 
|\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, ``//``, ``%`` | Multiplication, division, remainder |\n| | [5] |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key: value...}``, ``{expressions...}`` | display, dictionary display, set |\n| | display |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. The function\n ``math.fmod()`` returns a result whose sign matches the sign of\n the first argument instead, and so returns ``-1e-100`` in this\n case. 
Which approach is more appropriate depends on the\n application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n possible for ``x//y`` to be one larger than ``(x-x%y)//y`` due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n close to ``x``.\n\n[3] While comparisons between strings make sense at the byte level,\n they may be counter-intuitive to users. For example, the strings\n ``"\\u00C7"`` and ``"\\u0327\\u0043"`` compare differently, even\n though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using ``unicodedata.normalize()``.\n\n[4] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the ``is`` operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[5] The ``%`` operator is also used for string formatting; the same\n precedence applies.\n\n[6] The power operator ``**`` binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, ``2**-1`` is\n ``0.5``.\n',
'pass': '\nThe ``pass`` statement\n**********************\n\n pass_stmt ::= "pass"\n\n``pass`` is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n',
'power': '\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): ``-1**2`` results in ``-1``.\n\nThe power operator has the same semantics as the built-in ``pow()``\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. For example,\n``10**2`` returns ``100``, but ``10**-2`` returns ``0.01``.\n\nRaising ``0.0`` to a negative power results in a\n``ZeroDivisionError``. Raising a negative number to a fractional power\nresults in a ``complex`` number. (In earlier versions it raised a\n``ValueError``.)\n',
'raise': '\nThe ``raise`` statement\n***********************\n\n raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, ``raise`` re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a ``RuntimeError`` exception is raised indicating\nthat this is an error.\n\nOtherwise, ``raise`` evaluates the first expression as the exception\nobject. It must be either a subclass or an instance of\n``BaseException``. If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the ``__traceback__`` attribute, which\nis writable. You can create an exception and set your own traceback in\none step using the ``with_traceback()`` exception method (which\nreturns the same exception instance, with its traceback set to its\nargument), like so:\n\n raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe ``from`` clause is used for exception chaining: if given, the\nsecond *expression* must be another exception class or instance, which\nwill then be attached to the raised exception as the ``__cause__``\nattribute (which is writable). If the raised exception is not\nhandled, both exceptions will be printed:\n\n >>> try:\n ... print(1 / 0)\n ... except Exception as exc:\n ... 
raise RuntimeError("Something bad happened") from exc\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n The above exception was the direct cause of the following exception:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler: the previous exception is then attached as the\nnew exception\'s ``__context__`` attribute:\n\n >>> try:\n ... print(1 / 0)\n ... except:\n ... raise RuntimeError("Something bad happened")\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n During handling of the above exception, another exception occurred:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n',
'return': '\nThe ``return`` statement\n************************\n\n return_stmt ::= "return" [expression_list]\n\n``return`` may only occur syntactically nested in a function\ndefinition, not within a nested class definition.\n\nIf an expression list is present, it is evaluated, else ``None`` is\nsubstituted.\n\n``return`` leaves the current function call with the expression list\n(or ``None``) as return value.\n\nWhen ``return`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the function.\n\nIn a generator function, the ``return`` statement indicates that the\ngenerator is done and will cause ``StopIteration`` to be raised. The\nreturned value (if any) is used as an argument to construct\n``StopIteration`` and becomes the ``StopIteration.value`` attribute.\n',
'sequence-types': "\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``get()``,\n``clear()``, ``setdefault()``, ``pop()``, ``popitem()``, ``copy()``,\nand ``update()`` behaving similar to those for Python's standard\ndictionary objects. The ``collections`` module provides a\n``MutableMapping`` abstract base class to help create those methods\nfrom a base set of ``__getitem__()``, ``__setitem__()``,\n``__delitem__()``, and ``keys()``. Mutable sequences should provide\nmethods ``append()``, ``count()``, ``index()``, ``extend()``,\n``insert()``, ``pop()``, ``remove()``, ``reverse()`` and ``sort()``,\nlike Python standard list objects. Finally, sequence types should\nimplement addition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods ``__add__()``, ``__radd__()``,\n``__iadd__()``, ``__mul__()``, ``__rmul__()`` and ``__imul__()``\ndescribed below; they should not define other numerical operators. It\nis recommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should search the mapping's keys; for\nsequences, it should search through the values. 
It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``keys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn't define a ``__bool__()`` method and whose ``__len__()``\n method returns zero is considered to be false in a Boolean context.\n\nNote: Slicing is done exclusively with the following three methods. A\n call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with\n ``None``.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. 
The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``keys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. 
For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don't define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n",
'shifting': '\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by ``pow(2,n)``. A\nleft shift by *n* bits is defined as multiplication with ``pow(2,n)``.\n\nNote: In the current implementation, the right-hand operand is required to\n be at most ``sys.maxsize``. If the right-hand operand is larger\n than ``sys.maxsize`` an ``OverflowError`` exception is raised.\n',
'slicings': '\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or ``del`` statements. The syntax for a\nslicing:\n\n slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice\n proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows. The primary must evaluate\nto a mapping object, and it is indexed (using the same\n``__getitem__()`` method as normal subscription) with a key that is\nconstructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of a proper slice is a\nslice object (see section *The standard type hierarchy*) whose\n``start``, ``stop`` and ``step`` attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting ``None`` for missing expressions.\n',
'specialattrs': '\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the ``dir()`` built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nclass.__qualname__\n\n The *qualified name* of the class or type.\n\n New in version 3.3.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in ``__mro__``.\n\nclass.__subclasses__()\n\n Each class keeps a list of weak references to its immediate\n subclasses. This method returns a list of all those references\n still alive. Example:\n\n >>> int.__subclasses__()\n [<class \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list ``[1, 2]`` is considered equal to\n ``[1.0, 2.0]``, and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property being\n one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase), or "Lt"\n (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a singleton\n tuple whose only element is the tuple to be formatted.\n',
'specialnames': '\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``type(x).__getitem__(x,\ni)``. Except where mentioned, attempts to execute an operation raise\nan exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) 
for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_info()[2]`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.last_traceback``. Circular references which are garbage are\n detected when the option cycle detector is enabled (it\'s on by\n default), but can only be cleaned up if there are no Python-\n level ``__del__()`` methods involved. 
Refer to the documentation\n for the ``gc`` module for more information about how\n ``__del__()`` methods are handled by the cycle detector,\n particularly the description of the ``garbage`` value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function to compute the\n "official" string representation of an object. If at all possible,\n this should look like a valid Python expression that could be used\n to recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n ``<...some useful description...>`` should be returned. The return\n value must be a string object. 
If a class defines ``__repr__()``\n but not ``__str__()``, then ``__repr__()`` is also used when an\n "informal" string representation of instances of that class is\n required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by ``str(object)`` and the built-in functions ``format()``\n and ``print()`` to compute the "informal" or nicely printable\n string representation of an object. The return value must be a\n *string* object.\n\n This method differs from ``object.__repr__()`` in that there is no\n expectation that ``__str__()`` return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type ``object``\n calls ``object.__repr__()``.\n\nobject.__bytes__(self)\n\n Called by ``bytes()`` to compute a byte-string representation of an\n object. This should return a ``bytes`` object.\n\nobject.__format__(self, format_spec)\n\n Called by the ``format()`` built-in function (and by extension, the\n ``str.format()`` method of class ``str``) to produce a "formatted"\n string representation of an object. The ``format_spec`` argument is\n a string that contains a description of the formatting options\n desired. The interpretation of the ``format_spec`` argument is up\n to the type implementing ``__format__()``, however most classes\n will either delegate formatting to one of the built-in types, or\n use a similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. 
The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` calls\n ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and ``x>=y`` calls\n ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. 
The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define an ``__eq__()`` method it should not\n define a ``__hash__()`` operation either; if it defines\n ``__eq__()`` but not ``__hash__()``, its instances will not be\n usable as items in hashable collections. If a class defines\n mutable objects and implements an ``__eq__()`` method, it should\n not implement ``__hash__()``, since the implementation of hashable\n collections requires that a key\'s hash value is immutable (if the\n object\'s hash value changes, it will be in the wrong hash bucket).\n\n User-defined classes have ``__eq__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns an appropriate value such\n that ``x == y`` implies both that ``x is y`` and ``hash(x) ==\n hash(y)``.\n\n A class that overrides ``__eq__()`` and does not define\n ``__hash__()`` will have its ``__hash__()`` implicitly set to\n ``None``. When the ``__hash__()`` method of a class is ``None``,\n instances of the class will raise an appropriate ``TypeError`` when\n a program attempts to retrieve their hash value, and will also be\n correctly identified as unhashable when checking ``isinstance(obj,\n collections.Hashable``).\n\n If a class that overrides ``__eq__()`` needs to retain the\n implementation of ``__hash__()`` from a parent class, the\n interpreter must be told this explicitly by setting ``__hash__ =\n <ParentClass>.__hash__``.\n\n If a class that does not override ``__eq__()`` wishes to suppress\n hash support, it should include ``__hash__ = None`` in the class\n definition. 
A class which defines its own ``__hash__()`` that\n explicitly raises a ``TypeError`` would be incorrectly identified\n as hashable by an ``isinstance(obj, collections.Hashable)`` call.\n\n Note: By default, the ``__hash__()`` values of str, bytes and datetime\n objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also ``PYTHONHASHSEED``.\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``. When this method\n is not defined, ``__len__()`` is called, if it is defined, and the\n object is considered true if its result is nonzero. If a class\n defines neither ``__len__()`` nor ``__bool__()``, all its instances\n are considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). 
``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). 
*name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when ``dir()`` is called on the object. A sequence must be\n returned. ``dir()`` converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. 
This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. 
Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to an object instance, ``a.x`` is transformed into the\n call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a class, ``A.x`` is transformed into the call:\n ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. 
If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``int``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using ``type()``. The class body\nis executed in a new namespace and the class name is bound locally to\nthe result of ``type(name, bases, namespace)``.\n\nThe class creation process can be customised by passing the\n``metaclass`` keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. 
In\nthe following example, both ``MyClass`` and ``MySubclass`` are\ninstances of ``Meta``:\n\n class Meta(type):\n pass\n\n class MyClass(metaclass=Meta):\n pass\n\n class MySubclass(MyClass):\n pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then ``type()`` is\n used\n\n* if an explicit metaclass is given and it is *not* an instance of\n ``type()``, then it is used directly as the metaclass\n\n* if an instance of ``type()`` is given as the explicit metaclass, or\n bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. ``type(cls)``) of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with ``TypeError``.\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. 
If the metaclass has a ``__prepare__``\nattribute, it is called as ``namespace = metaclass.__prepare__(name,\nbases, **kwds)`` (where the additional keyword arguments, if any, come\nfrom the class definition).\n\nIf the metaclass has no ``__prepare__`` attribute, then the class\nnamespace is initialised as an empty ``dict()`` instance.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3000\n Introduced the ``__prepare__`` namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as ``exec(body, globals(),\nnamespace)``. The key difference from a normal call to ``exec()`` is\nthat lexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling ``metaclass(name, bases,\nnamespace, **kwds)`` (the additional keywords passed here are the same\nas those passed to ``__prepare__``).\n\nThis class object is the one that will be referenced by the zero-\nargument form of ``super()``. ``__class__`` is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either ``__class__`` or ``super``. 
This allows the zero argument\nform of ``super()`` to correctly identify the class being defined\nbased on lexical scoping, while the class or instance that was used to\nmake the current call is identified based on the first argument passed\nto the method.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also:\n\n **PEP 3135** - New super\n Describes the implicit ``__class__`` closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n``collections.OrderedDict`` to remember the order that class members\nwere defined:\n\n class OrderedClass(type):\n\n @classmethod\n def __prepare__(metacls, name, bases, **kwds):\n return collections.OrderedDict()\n\n def __new__(cls, name, bases, namespace, **kwds):\n result = type.__new__(cls, name, bases, dict(namespace))\n result.members = tuple(namespace)\n return result\n\n class A(metaclass=OrderedClass):\n def one(self): pass\n def two(self): pass\n def three(self): pass\n def four(self): pass\n\n >>> A.members\n (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s ``__prepare__()`` method which returns an\nempty ``collections.OrderedDict``. That mapping records the methods\nand attributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s ``__new__()`` method gets\ninvoked. 
That method builds the new type and it saves the ordered\ndictionary keys in an attribute called ``members``.\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n``isinstance()`` and ``issubclass()`` built-in functions.\n\nIn particular, the metaclass ``abc.ABCMeta`` implements these methods\nin order to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n ``isinstance(instance, class)``.\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n ``issubclass(subclass, class)``.\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing ``isinstance()`` and\n ``issubclass()`` behavior through ``__instancecheck__()`` and\n ``__subclasscheck__()``, with motivation for this functionality\n in the context of adding Abstract Base Classes (see the ``abc``\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``get()``,\n``clear()``, ``setdefault()``, ``pop()``, ``popitem()``, ``copy()``,\nand ``update()`` behaving similar to those for Python\'s standard\ndictionary objects. The ``collections`` module provides a\n``MutableMapping`` abstract base class to help create those methods\nfrom a base set of ``__getitem__()``, ``__setitem__()``,\n``__delitem__()``, and ``keys()``. 
Mutable sequences should provide\nmethods ``append()``, ``count()``, ``index()``, ``extend()``,\n``insert()``, ``pop()``, ``remove()``, ``reverse()`` and ``sort()``,\nlike Python standard list objects. Finally, sequence types should\nimplement addition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods ``__add__()``, ``__radd__()``,\n``__iadd__()``, ``__mul__()``, ``__rmul__()`` and ``__imul__()``\ndescribed below; they should not define other numerical operators. It\nis recommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should search the mapping\'s keys; for\nsequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``keys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn\'t define a ``__bool__()`` method and whose ``__len__()``\n method returns zero is considered to be false in a Boolean context.\n\nNote: Slicing is done exclusively with the following three methods. A\n call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with\n ``None``.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. 
If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``keys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. 
It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to 
implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``). For instance, to evaluate the expression ``x + y``, where\n *x* is an instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()``. Note that\n ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``) with reflected (swapped) operands. These functions are only\n called if the left operand does not support the corresponding\n operation and the operands are of different types. 
[2] For\n instance, to evaluate the expression ``x - y``, where *y* is an\n instance of a class that has an ``__rsub__()`` method,\n ``y.__rsub__(x)`` is called if ``x.__sub__(y)`` returns\n *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. 
If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``float()`` and ``round()``. Should return a value of\n the appropriate type.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing, or in the\n built-in ``bin()``, ``hex()`` and ``oct()`` functions). Must return\n an integer.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. 
If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. 
It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n',
'string-methods': '\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see ``str.format()``,\n*Format String Syntax* and *String Formatting*) and the other based on\nC ``printf`` style formatting that handles a narrower range of types\nand is slightly harder to use correctly, but is often faster for the\ncases it can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the ``re`` module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter ``\'\xc3\x9f\'`` is equivalent to\n ``"ss"``. Since it is already lowercase, ``lower()`` would do\n nothing to ``\'\xc3\x9f\'``; ``casefold()`` converts it to ``"ss"``.\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is ``\'utf-8\'``. 
*errors* may be given to set a different\n error handling scheme. The default for *errors* is ``\'strict\'``,\n meaning that encoding errors raise a ``UnicodeError``. Other\n possible values are ``\'ignore\'``, ``\'replace\'``,\n ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and any other name\n registered via ``codecs.register_error()``, see section *Codec Base\n Classes*. For a list of possible encodings, see section *Standard\n Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by zero or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. 
Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to ``str.format(**mapping)``, except that ``mapping`` is\n used directly and not copied to a ``dict`` . This is useful if for\n example ``mapping`` is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character\n ``c`` is alphanumeric if one of the following returns ``True``:\n ``c.isalpha()``, ``c.isdecimal()``, ``c.isdigit()``, or\n ``c.isnumeric()``.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". 
This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when ``repr()`` is\n invoked on a string. It has no bearing on the handling of strings\n written to ``sys.stdout`` or ``sys.stderr``.)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. 
Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. A ``TypeError`` will be raised if there are\n any non-string values in *iterable*, including ``bytes`` objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n ``str.translate()``.\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). 
The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified or ``-1``, then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). 
Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, ``\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()`` returns\n ``[\'ab c\', \'\', \'de fg\', \'kl\']``, while the same call with\n ``splitlines(True)`` returns ``[\'ab c\\n\', \'\\n\', \'de fg\\r\',\n \'kl\\r\\n\']``.\n\n Unlike ``split()`` when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. 
The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. Note that it is not necessarily true that\n ``s.swapcase().swapcase() == s``.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n Return a copy of the *s* where all characters have been mapped\n through the *map* which must be a dictionary of Unicode ordinals\n (integers) to Unicode ordinals, strings or ``None``. Unmapped\n characters are left untouched. Characters mapped to ``None`` are\n deleted.\n\n You can use ``str.maketrans()`` to create a translation map from\n character-to-character mappings in different formats.\n\n Note: An even more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see\n ``encodings.cp1251`` for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. 
Note that ``str.upper().isupper()`` might\n be ``False`` if ``s`` contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to ``len(s)``.\n',
'strings': '\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "R" | "U"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | stringescapeseq\n longstringitem ::= longstringchar | stringescapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n stringescapeseq ::= "\\" <any source character>\n\n bytesliteral ::= bytesprefix(shortbytes | longbytes)\n bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n shortbytesitem ::= shortbyteschar | bytesescapeseq\n longbytesitem ::= longbyteschar | bytesescapeseq\n shortbyteschar ::= <any ASCII character except "\\" or newline or the quote>\n longbyteschar ::= <any ASCII character except "\\">\n bytesescapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the ``stringprefix`` or\n``bytesprefix`` and the rest of the literal. The source character set\nis defined by the encoding declaration; it is UTF-8 if no encoding\ndeclaration is given in the source file; see section *Encoding\ndeclarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes (``\'``) or double quotes (``"``). They can also be\nenclosed in matching groups of three single or double quotes (these\nare generally referred to as *triple-quoted strings*). 
The backslash\n(``\\``) character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with ``\'b\'`` or ``\'B\'``; they\nproduce an instance of the ``bytes`` type instead of the ``str`` type.\nThey may only contain ASCII characters; bytes with a numeric value of\n128 or greater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix unicode strings with a\n``u`` prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter ``\'r\'`` or ``\'R\'``; such strings are called *raw strings* and\ntreat backslashes as literal characters. As a result, in string\nliterals, ``\'\\U\'`` and ``\'\\u\'`` escapes in raw strings are not treated\nspecially. Given that Python 2.x\'s raw unicode literals behave\ndifferently than Python 3.x\'s the ``\'ur\'`` syntax is not supported.\n\n New in version 3.3: The ``\'rb\'`` prefix of raw bytes literals has\n been added as a synonym of ``\'br\'``.\n\n New in version 3.3: Support for the unicode legacy literal\n (``u\'value\'``) was reintroduced to simplify the maintenance of dual\n Python 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Backslash and newline ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (1,3) |\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (2,3) |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\N{name}`` | Character named *name* in the | (4) |\n| | Unicode database | 
|\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | (5) |\n| | *xxxx* | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (6) |\n| | *xxxxxxxx* | |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the byte\n with the given value. In a string literal, these escapes denote a\n Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n added.\n\n5. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence. Exactly four hex digits are\n required.\n\n6. Any Unicode character can be encoded this way. Exactly eight hex\n digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw string, string quotes can be escaped with a backslash,\nbut the backslash remains in the string; for example, ``r"\\""`` is a\nvalid string literal consisting of two characters: a backslash and a\ndouble quote; ``r"\\"`` is not a valid string literal (even a raw\nstring cannot end in an odd number of backslashes). Specifically, *a\nraw string cannot end in a single backslash* (since the backslash\nwould escape the following quote character). 
Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n',
'subscriptions': '\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription,\ne.g. a list or dictionary. User-defined objects can support\nsubscription by defining a ``__getitem__()`` method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a ``__getitem__()``\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that ``x[-1]`` selects the last item of\n``x``). The resulting value must be a nonnegative integer less than\nthe number of items in the sequence, and the subscription selects the\nitem whose index is that value (counting from zero). Since the support\nfor negative indices and slicing occurs in the object\'s\n``__getitem__()`` method, subclasses overriding this method will need\nto explicitly add that support.\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n',
'truth': "\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an ``if`` or\n``while`` condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* ``None``\n\n* ``False``\n\n* zero of any numeric type, for example, ``0``, ``0.0``, ``0j``.\n\n* any empty sequence, for example, ``''``, ``()``, ``[]``.\n\n* any empty mapping, for example, ``{}``.\n\n* instances of user-defined classes, if the class defines a\n ``__bool__()`` or ``__len__()`` method, when that method returns the\n integer zero or ``bool`` value ``False``. [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn ``0`` or ``False`` for false and ``1`` or ``True`` for true,\nunless otherwise stated. (Important exception: the Boolean operations\n``or`` and ``and`` always return one of their operands.)\n",
'try': '\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the ``as`` keyword in that except clause,\nif present, and the except clause\'s suite is executed. All except\nclauses must have an executable block. 
When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using ``as target``, it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the ``sys`` module and can be access via\n``sys.exc_info()``. ``sys.exc_info()`` returns a 3-tuple consisting of\nthe exception class, the exception instance and a traceback object\n(see section *The standard type hierarchy*) identifying the point in\nthe program where the exception occurred. ``sys.exc_info()`` values\nare restored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception it is re-raised at the end of\nthe ``finally`` clause. 
If the ``finally`` clause raises another\nexception, the saved exception is set as the context of the new\nexception. If the ``finally`` clause executes a ``return`` or\n``break`` statement, the saved exception is discarded:\n\n def f():\n try:\n 1/0\n finally:\n return 42\n\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n',
'types': '\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal ``...`` or the\n built-in name ``Ellipsis``. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers (``int``)\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans (``bool``)\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. The Boolean type is a subtype of the integer\n type, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. 
Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex`` (``complex``)\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode\n codepoints. 
All the codepoints in range ``U+0000 - U+10FFFF``\n can be represented in a string. Python doesn\'t have a\n ``chr`` type, and every character in the string is\n represented as a string object with length ``1``. The built-\n in function ``ord()`` converts a character to its codepoint\n (as an integer); ``chr()`` converts an integer in range ``0 -\n 10FFFF`` to the corresponding character. ``str.encode()`` can\n be used to convert a ``str`` to ``bytes`` using the given\n encoding, and ``bytes.decode()`` can be used to achieve the\n opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like ``b\'abc\'``) and the built-in function\n ``bytes()`` can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the ``decode()``\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. 
Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type, as does the ``collections`` module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in ``frozenset()`` constructor. As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. 
The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm.ndbm`` and ``dbm.gnu`` provide\n additional examples of mapping types, as does the\n ``collections`` module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +===========================+=================================+=============+\n | ``__doc__`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +---------------------------+---------------------------------+-------------+\n | ``__name__`` | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | ``__qualname__`` | The function\'s *qualified name* | Writable |\n | | New in version 3.3. | |\n +---------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. 
| |\n +---------------------------+---------------------------------+-------------+\n | ``__defaults__`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +---------------------------+---------------------------------+-------------+\n | ``__code__`` | The code object representing | Writable |\n | | the compiled function body. | |\n +---------------------------+---------------------------------+-------------+\n | ``__globals__`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +---------------------------+---------------------------------+-------------+\n | ``__dict__`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +---------------------------+---------------------------------+-------------+\n | ``__closure__`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +---------------------------+---------------------------------+-------------+\n | ``__annotations__`` | A dict containing annotations | Writable |\n | | of parameters. The keys of the | |\n | | dict are the parameter names, | |\n | | or ``\'return\'`` for the return | |\n | | annotation, if provided. | |\n +---------------------------+---------------------------------+-------------+\n | ``__kwdefaults__`` | A dict containing defaults for | Writable |\n | | keyword-only parameters. | |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. 
Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: ``__self__`` is the class instance\n object, ``__func__`` is the function object; ``__doc__`` is the\n method\'s documentation (same as ``__func__.__doc__``);\n ``__name__`` is the method name (same as ``__func__.__name__``);\n ``__module__`` is the name of the module the method was defined\n in, or ``None`` if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its ``__self__`` attribute is the instance, and the method\n object is said to be bound. 
The new method\'s ``__func__``\n attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``__func__``\n attribute of the new instance is not the original method object\n but its ``__func__`` attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its ``__self__``\n attribute is the class itself, and its ``__func__`` attribute is\n the function object underlying the class method.\n\n When an instance method object is called, the underlying\n function (``__func__``) is called, inserting the class instance\n (``__self__``) in front of the argument list. For instance,\n when ``C`` is a class which contains a definition for a function\n ``f()``, and ``x`` is an instance of ``C``, calling ``x.f(1)``\n is equivalent to calling ``C.f(x, 1)``.\n\n When an instance method object is derived from a class method\n object, the "class instance" stored in ``__self__`` will\n actually be the class itself, so that calling either ``x.f(1)``\n or ``C.f(1)`` is equivalent to calling ``f(C,1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to instance\n method object happens each time the attribute is retrieved from\n the instance. In some cases, a fruitful optimization is to\n assign the attribute to a local variable and call that local\n variable. Also notice that this transformation only happens for\n user-defined functions; other callable objects (and all non-\n callable objects) are retrieved without transformation. 
It is\n also important to note that user-defined functions which are\n attributes of a class instance are not converted to bound\n methods; this *only* happens when the function is an attribute\n of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``iterator__next__()`` method will cause the\n function to execute until it provides a value using the\n ``yield`` statement. When the function executes a ``return``\n statement or falls off the end, a ``StopIteration`` exception is\n raised and the iterator will have reached the end of the set of\n values to be returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *alist*.\n\n Classes\n Classes are callable. These objects normally act as factories\n for new instances of themselves, but variations are possible for\n class types that override ``__new__()``. 
The arguments of the\n call are passed to ``__new__()`` and, in the typical case, to\n ``__init__()`` to initialize the new instance.\n\n Class Instances\n Instances of arbitrary classes can be made callable by defining\n a ``__call__()`` method in their class.\n\nModules\n Modules are a basic organizational unit of Python code, and are\n created by the *import system* as invoked either by the ``import``\n statement (see ``import``), or by calling functions such as\n ``importlib.import_module()`` and built-in ``__import__()``. A\n module object has a namespace implemented by a dictionary object\n (this is the dictionary referenced by the ``__globals__`` attribute\n of functions defined in the module). Attribute references are\n translated to lookups in this dictionary, e.g., ``m.x`` is\n equivalent to ``m.__dict__["x"]``. A module object does not contain\n the code object used to initialize the module (since it isn\'t\n needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. 
The\n ``__file__`` attribute may be missing for certain types of modules,\n such as C modules that are statically linked into the interpreter;\n for extension modules loaded dynamically from a shared library, it\n is the pathname of the shared library file.\n\nCustom classes\n Custom class types are typically created by class definitions (see\n section *Class definitions*). A class has a namespace implemented\n by a dictionary object. Class attribute references are translated\n to lookups in this dictionary, e.g., ``C.x`` is translated to\n ``C.__dict__["x"]`` (although there are a number of hooks which\n allow for other means of locating attributes). When the attribute\n name is not found there, the attribute search continues in the base\n classes. This search of the base classes uses the C3 method\n resolution order which behaves correctly even in the presence of\n \'diamond\' inheritance structures where there are multiple\n inheritance paths leading back to a common ancestor. Additional\n details on the C3 MRO used by Python can be found in the\n documentation accompanying the 2.3 release at\n http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a class method object, it is transformed into an instance method\n object whose ``__self__`` attributes is ``C``. When it would yield\n a static method object, it is transformed into the object wrapped\n by the static method object. 
See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its ``__dict__``.\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose ``__self__`` attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s\n ``__dict__``. If no class attribute is found, and the object\'s\n class has a ``__getattr__()`` method, that is called to satisfy the\n lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. 
If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the ``open()`` built-in function,\n and also ``os.popen()``, ``os.fdopen()``, and the ``makefile()``\n method of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects ``sys.stdin``, ``sys.stdout`` and ``sys.stderr`` are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n ``io.TextIOBase`` abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n 
earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_lasti`` gives the precise instruction (this is an index into\n the bytecode string of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_lineno`` is the current line number\n of the frame --- writing to this from within a trace function\n jumps to the given line (only for the bottom-most frame). A\n debugger can implement a Jump command (aka Set Next Statement)\n by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by ``sys.exc_info()``. 
When the program contains\n no suitable handler, the stack trace is written (nicely\n formatted) to the standard error stream; if the interpreter is\n interactive, it is also made available to the user as\n ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for ``__getitem__()``\n methods. They are also created by the built-in ``slice()``\n function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. 
When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in ``classmethod()`` constructor.\n',
'typesfunctions': '\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: ``func(argument-list)``.\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n',
'typesmapping': '\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterator*\n object. Each item in the iterable must itself be an iterator with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. 
If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to ``{"one": 1, "two": 2, "three": 3}``:\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n If a subclass of dict defines a method ``__missing__()``, if the\n key *key* is not present, the ``d[key]`` operation calls that\n method with the key *key* as argument. The ``d[key]`` operation\n then returns or raises whatever is returned or raised by the\n ``__missing__(key)`` call if the key is not present. No other\n operations or methods invoke ``__missing__()``. If\n ``__missing__()`` is not defined, ``KeyError`` is raised.\n ``__missing__()`` must be a method; it cannot be an instance\n variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... 
return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n See ``collections.Counter`` for a complete implementation\n including other methods helpful for accumulating and managing\n tallies.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for ``iter(d.keys())``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n items()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. 
If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\nSee also:\n\n ``types.MappingProxyType`` can be used to create a read-only view\n of a ``dict``.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.keys()``, ``dict.values()`` and\n``dict.items()`` are *view objects*. They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. 
Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that ``(key, value)`` pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class ``collections.abc.Set`` are available (for example, ``==``,\n``<``, or ``^``).\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n',
'typesmethods': '\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the\n``self`` argument to the argument list. Bound methods have two\nspecial read-only attributes: ``m.__self__`` is the object on which\nthe method operates, and ``m.__func__`` is the function implementing\nthe method. Calling ``m(arg-1, arg-2, ..., arg-n)`` is completely\nequivalent to calling ``m.__func__(m.__self__, arg-1, arg-2, ...,\narg-n)``.\n\nLike function objects, bound method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object (``meth.__func__``), setting method\nattributes on bound methods is disallowed. Attempting to set an\nattribute on a method results in an ``AttributeError`` being raised.\nIn order to set a method attribute, you need to explicitly set it on\nthe underlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n AttributeError: \'method\' object has no attribute \'whoami\'\n >>> c.method.__func__.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n',
'typesmodules': "\nModules\n*******\n\nThe only special operation on a module is attribute access:\n``m.name``, where *m* is a module and *name* accesses a name defined\nin *m*'s symbol table. Module attributes can be assigned to. (Note\nthat the ``import`` statement is not, strictly speaking, an operation\non a module object; ``import foo`` does not require a module object\nnamed *foo* to exist, rather it requires an (external) *definition*\nfor a module named *foo* somewhere.)\n\nA special attribute of every module is ``__dict__``. This is the\ndictionary containing the module's symbol table. Modifying this\ndictionary will actually change the module's symbol table, but direct\nassignment to the ``__dict__`` attribute is not possible (you can\nwrite ``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but\nyou can't write ``m.__dict__ = {}``). Modifying ``__dict__`` directly\nis not recommended.\n\nModules built into the interpreter are written like this: ``<module\n'sys' (built-in)>``. If loaded from a file, they are written as\n``<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>``.\n",
'typesseq': '\nSequence Types --- ``list``, ``tuple``, ``range``\n*************************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The ``collections.abc.Sequence``\nABC is provided to make it easier to correctly implement these\noperations on custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type, *n*, *i*, *j* and *k* are\nintegers and *x* is an arbitrary object that meets any type and value\nrestrictions imposed by *s*.\n\nThe ``in`` and ``not in`` operations have the same priorities as the\ncomparison operations. 
The ``+`` (concatenation) and ``*``\n(repetition) operations have the same priority as the corresponding\nnumeric operations.\n\n+----------------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+============================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+----------------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+----------------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6)(7) |\n+----------------------------+----------------------------------+------------+\n| ``s * n`` or ``n * s`` | *n* shallow copies of *s* | (2)(7) |\n| | concatenated | |\n+----------------------------+----------------------------------+------------+\n| ``s[i]`` | *i*th item of *s*, origin 0 | (3) |\n+----------------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+----------------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+----------------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+----------------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| ``s.index(x[, i[, j]])`` | index of the first occurence of | (8) |\n| | *x* in *s* (at or after index | |\n| | *i* and before index *j*) | |\n+----------------------------+----------------------------------+------------+\n| ``s.count(x)`` | 
total number of occurences of | |\n| | *x* in *s* | |\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. While the ``in`` and ``not in`` operations are used only for simple\n containment testing in the general case, some specialised sequences\n (such as ``str``, ``bytes`` and ``bytearray``) also use them for\n subsequence testing:\n\n >>> "gg" in "eggs"\n True\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. 
If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. Concatenating immutable sequences always results in a new object.\n This means that building up a sequence by repeated concatenation\n will have a quadratic runtime cost in the total sequence length.\n To get a linear runtime cost, you must switch to one of the\n alternatives below:\n\n * if concatenating ``str`` objects, you can build a list and use\n ``str.join()`` at the end or else write to a ``io.StringIO``\n instance and retrieve its value when complete\n\n * if concatenating ``bytes`` objects, you can similarly use\n ``bytes.join()`` or ``io.BytesIO``, or you can do in-place\n concatenation with a ``bytearray`` object. ``bytearray`` objects\n are mutable and have an efficient overallocation mechanism\n\n * if concatenating ``tuple`` objects, extend a ``list`` instead\n\n * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as ``range``) only support item sequences\n that follow specific patterns, and hence don\'t support sequence\n concatenation or repetition.\n\n8. ``index`` raises ``ValueError`` when *x* is not found in *s*. When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. 
Passing the\n extra arguments is roughly equivalent to using ``s[i:j].index(x)``,\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe ``hash()`` built-in.\n\nThis support allows immutable sequences, such as ``tuple`` instances,\nto be used as ``dict`` keys and stored in ``set`` and ``frozenset``\ninstances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in ``TypeError``.\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. The ``collections.abc.MutableSequence`` ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, ``bytearray`` only\naccepts integers that meet the value restriction ``0 <= x <= 255``).\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | appends *x* to the end of the | |\n| | sequence (same as | |\n| | ``s[len(s):len(s)] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.clear()`` | removes all items from ``s`` | (5) |\n| | (same as ``del s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.copy()`` | creates a shallow copy of ``s`` | (5) |\n| | (same as ``s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(t)`` | extends *s* with the contents of | |\n| | *t* (same as ``s[len(s):len(s)] | |\n| | = t``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | ``s[i:i] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | remove the first item from *s* | (3) |\n| | where ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (4) |\n| | place | 
|\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n3. ``remove`` raises ``ValueError`` when *x* is not found in *s*.\n\n4. The ``reverse()`` method modifies the sequence in place for economy\n of space when reversing a large sequence. To remind users that it\n operates by side effect, it does not return the reversed sequence.\n\n5. ``clear()`` and ``copy()`` are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as ``dict`` and ``set``)\n\n New in version 3.3: ``clear()`` and ``copy()`` methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n Lists may be constructed in several ways:\n\n * Using a pair of square brackets to denote the empty list: ``[]``\n\n * Using square brackets, separating items with commas: ``[a]``,\n ``[a, b, c]``\n\n * Using a list comprehension: ``[x for x in iterable]``\n\n * Using the type constructor: ``list()`` or ``list(iterable)``\n\n The constructor builds a list whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a list, a copy is made and\n returned, similar to ``iterable[:]``. For example, ``list(\'abc\')``\n returns ``[\'a\', \'b\', \'c\']`` and ``list( (1, 2, 3) )`` returns ``[1,\n 2, 3]``. If no argument is given, the constructor creates a new\n empty list, ``[]``.\n\n Many other operations also produce lists, including the\n ``sorted()`` built-in.\n\n Lists implement all of the *common* and *mutable* sequence\n operations. 
Lists also provide the following additional method:\n\n sort(*, key=None, reverse=None)\n\n This method sorts the list in place, using only ``<``\n comparisons between items. Exceptions are not suppressed - if\n any comparison operations fail, the entire sort operation will\n fail (and the list will likely be left in a partially modified\n state).\n\n *key* specifies a function of one argument that is used to\n extract a comparison key from each list element (for example,\n ``key=str.lower``). The key corresponding to each item in the\n list is calculated once and then used for the entire sorting\n process. The default value of ``None`` means that list items are\n sorted directly without calculating a separate key value.\n\n The ``functools.cmp_to_key()`` utility is available to convert a\n 2.x style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n This method modifies the sequence in place for economy of space\n when sorting a large sequence. To remind users that it operates\n by side effect, it does not return the sorted sequence (use\n ``sorted()`` to explicitly request a new sorted list instance).\n\n The ``sort()`` method is guaranteed to be stable. A sort is\n stable if it guarantees not to change the relative order of\n elements that compare equal --- this is helpful for sorting in\n multiple passes (for example, sort by department, then by salary\n grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. 
The C implementation of Python makes the list appear\n empty for the duration, and raises ``ValueError`` if it can\n detect that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the\n``enumerate()`` built-in). Tuples are also used for cases where an\nimmutable sequence of homogeneous data is needed (such as allowing\nstorage in a ``set`` or ``dict`` instance).\n\nclass class tuple([iterable])\n\n Tuples may be constructed in a number of ways:\n\n * Using a pair of parentheses to denote the empty tuple: ``()``\n\n * Using a trailing comma for a singleton tuple: ``a,`` or ``(a,)``\n\n * Separating items with commas: ``a, b, c`` or ``(a, b, c)``\n\n * Using the ``tuple()`` built-in: ``tuple()`` or\n ``tuple(iterable)``\n\n The constructor builds a tuple whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a tuple, it is returned\n unchanged. For example, ``tuple(\'abc\')`` returns ``(\'a\', \'b\',\n \'c\')`` and ``tuple( [1, 2, 3] )`` returns ``(1, 2, 3)``. If no\n argument is given, the constructor creates a new empty tuple,\n ``()``.\n\n Note that it is actually the comma which makes a tuple, not the\n parentheses. 
The parentheses are optional, except in the empty\n tuple case, or when they are needed to avoid syntactic ambiguity.\n For example, ``f(a, b, c)`` is a function call with three\n arguments, while ``f((a, b, c))`` is a function call with a 3-tuple\n as the sole argument.\n\n Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, ``collections.namedtuple()`` may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe ``range`` type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in ``for`` loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n The arguments to the range constructor must be integers (either\n built-in ``int`` or any object that implements the ``__index__``\n special method). If the *step* argument is omitted, it defaults to\n ``1``. If the *start* argument is omitted, it defaults to ``0``. If\n *step* is zero, ``ValueError`` is raised.\n\n For a positive *step*, the contents of a range ``r`` are determined\n by the formula ``r[i] = start + step*i`` where ``i >= 0`` and\n ``r[i] < stop``.\n\n For a negative *step*, the contents of the range are still\n determined by the formula ``r[i] = start + step*i``, but the\n constraints are ``i >= 0`` and ``r[i] > stop``.\n\n A range object will be empty if ``r[0]`` does not meet the value\n constraint. 
Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than ``sys.maxsize`` are\n permitted but some features (such as ``len()``) may raise\n ``OverflowError``.\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the ``range`` type over a regular ``list`` or\n``tuple`` is that a ``range`` object will always take the same (small)\namount of memory, no matter the size of the range it represents (as it\nonly stores the ``start``, ``stop`` and ``step`` values, calculating\nindividual items and subranges as needed).\n\nRange objects implement the ``collections.Sequence`` ABC, and provide\nfeatures such as containment tests, element index lookup, slicing and\nsupport for negative indices (see *Sequence Types --- list, tuple,\nrange*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with ``==`` and ``!=`` compares\nthem as sequences. That is, two range objects are considered equal if\nthey represent the same sequence of values. 
(Note that two range\nobjects that compare equal might have different ``start``, ``stop``\nand ``step`` attributes, for example ``range(0) == range(2, 1, 3)`` or\n``range(0, 3, 2) == range(0, 4, 2)``.)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test ``int`` objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The ``start``, ``stop`` and ``step`` attributes.\n',
'typesseq-mutable': "\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. The ``collections.abc.MutableSequence`` ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, ``bytearray`` only\naccepts integers that meet the value restriction ``0 <= x <= 255``).\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | appends *x* to the end of the | |\n| | sequence (same as | |\n| | ``s[len(s):len(s)] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.clear()`` | removes all items from ``s`` | (5) |\n| | (same as ``del s[:]``) | 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.copy()`` | creates a shallow copy of ``s`` | (5) |\n| | (same as ``s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(t)`` | extends *s* with the contents of | |\n| | *t* (same as ``s[len(s):len(s)] | |\n| | = t``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | ``s[i:i] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | remove the first item from *s* | (3) |\n| | where ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n3. ``remove`` raises ``ValueError`` when *x* is not found in *s*.\n\n4. The ``reverse()`` method modifies the sequence in place for economy\n of space when reversing a large sequence. To remind users that it\n operates by side effect, it does not return the reversed sequence.\n\n5. ``clear()`` and ``copy()`` are included for consistency with the\n interfaces of mutable containers that don't support slicing\n operations (such as ``dict`` and ``set``)\n\n New in version 3.3: ``clear()`` and ``copy()`` methods.\n",
'unary': '\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary ``-`` (minus) operator yields the negation of its numeric\nargument.\n\nThe unary ``+`` (plus) operator yields its numeric argument unchanged.\n\nThe unary ``~`` (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of ``x`` is defined as\n``-(x+1)``. It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n``TypeError`` exception is raised.\n',
'while': '\nThe ``while`` statement\n***********************\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n',
'with': '\nThe ``with`` statement\n**********************\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n',
'yield': '\nThe ``yield`` statement\n***********************\n\n yield_stmt ::= yield_expression\n\nThe ``yield`` statement is only used when defining a generator\nfunction, and is only used in the body of the generator function.\nUsing a ``yield`` statement in a function definition is sufficient to\ncause that definition to create a generator function instead of a\nnormal function.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the ``next()`` function on\nthe generator repeatedly until it raises an exception.\n\nWhen a ``yield`` statement is executed, the state of the generator is\nfrozen and the value of ``expression_list`` is returned to\n``next()``\'s caller. By "frozen" we mean that all local state is\nretained, including the current bindings of local variables, the\ninstruction pointer, and the internal evaluation stack: enough\ninformation is saved so that the next time ``next()`` is invoked, the\nfunction can proceed exactly as if the ``yield`` statement were just\nanother external call.\n\nThe ``yield`` statement is allowed in the ``try`` clause of a ``try``\n... ``finally`` construct. 
If the generator is not resumed before it\nis finalized (by reaching a zero reference count or by being garbage\ncollected), the generator-iterator\'s ``close()`` method will be\ncalled, allowing any pending ``finally`` clauses to execute.\n\nWhen ``yield from <expr>`` is used, it treats the supplied expression\nas a subiterator, producing values from it until the underlying\niterator is exhausted.\n\n Changed in version 3.3: Added ``yield from <expr>`` to delegate\n control flow to a subiterator\n\nFor full details of ``yield`` semantics, refer to the *Yield\nexpressions* section.\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the ``yield`` statement\n to Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal to enhance the API and syntax of generators, making\n them usable as simple coroutines.\n\n **PEP 0380** - Syntax for Delegating to a Subgenerator\n The proposal to introduce the ``yield_from`` syntax, making\n delegation to sub-generators easy.\n'}
|
matthewfranglen/spark
|
refs/heads/master
|
examples/src/main/python/ml/bisecting_k_means_example.py
|
31
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating bisecting k-means clustering.
Run with:
bin/spark-submit examples/src/main/python/ml/bisecting_k_means_example.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import BisectingKMeans
from pyspark.ml.evaluation import ClusteringEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    session = SparkSession \
        .builder \
        .appName("BisectingKMeansExample") \
        .getOrCreate()

    # $example on$
    # Load the sample dataset stored in libsvm format.
    training = session.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")

    # Fit a bisecting k-means model with two clusters and a fixed seed
    # so the example is reproducible.
    algo = BisectingKMeans().setK(2).setSeed(1)
    model = algo.fit(training)

    # Assign every training point to a cluster.
    predictions = model.transform(training)

    # Score the clustering with the Silhouette measure.
    evaluator = ClusteringEvaluator()
    silhouette = evaluator.evaluate(predictions)
    print("Silhouette with squared euclidean distance = " + str(silhouette))

    # Show the learned cluster centers.
    print("Cluster Centers: ")
    for center in model.clusterCenters():
        print(center)
    # $example off$

    session.stop()
|
adamchainz/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/module_utils/spam4/ham/bacon.py
|
298
|
data = 'spam4'
|
addition-it-solutions/project-all
|
refs/heads/master
|
addons/mass_mailing/models/mass_mailing.py
|
4
|
# -*- coding: utf-8 -*-
from datetime import datetime
from dateutil import relativedelta
import random
import re
from openerp import tools
from openerp import models, api, _
from openerp import SUPERUSER_ID
from openerp.exceptions import UserError
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools import ustr
from openerp.osv import osv, fields
URL_REGEX = r'(\bhref=[\'"]([^\'"]+)[\'"])'
class MassMailingTag(osv.Model):
    """Model of categories of mass mailing, i.e. marketing, newsletter, ... """
    _name = 'mail.mass_mailing.tag'
    _description = 'Mass Mailing Tag'
    _order = 'name'  # alphabetical listing by default

    _columns = {
        # Tag label; the only stored field of this model.
        'name': fields.char('Name', required=True),
    }
class MassMailingContact(osv.Model):
    """Model of a contact. This model is different from the partner model
    because it holds only some basic information: name, email. The purpose is to
    be able to deal with large contact list to email without bloating the partner
    base."""
    _name = 'mail.mass_mailing.contact'
    _inherit = 'mail.thread'
    _description = 'Mass Mailing Contact'
    _order = 'email'
    _rec_name = 'email'

    _columns = {
        'name': fields.char('Name'),
        'email': fields.char('Email', required=True),
        'create_date': fields.datetime('Create Date'),
        'list_id': fields.many2one(
            'mail.mass_mailing.list', string='Mailing List',
            ondelete='cascade', required=True,
        ),
        'opt_out': fields.boolean('Opt Out', help='The contact has chosen not to receive mails anymore from this list'),
        'unsubscription_date': fields.datetime('Unsubscription Date'),
        'message_bounce': fields.integer('Bounce', help='Counter of the number of bounced emails for this contact.'),
    }

    def _get_latest_list(self, cr, uid, context=None):
        """Return the id of the most recently created mailing list, or False.

        BUGFIX: the original signature used a mutable default argument
        (``context={}``), a dict shared across all calls; use ``None``.
        """
        lid = self.pool.get('mail.mass_mailing.list').search(cr, uid, [], limit=1, order='id desc', context=context)
        return lid and lid[0] or False

    _defaults = {
        'list_id': _get_latest_list
    }

    def on_change_opt_out(self, cr, uid, id, opt_out, context=None):
        """Stamp the unsubscription date when opting out, clear it otherwise."""
        return {'value': {
            'unsubscription_date': opt_out and fields.datetime.now() or False,
        }}

    def create(self, cr, uid, vals, context=None):
        """Keep unsubscription_date consistent with opt_out at creation."""
        if 'opt_out' in vals:
            vals['unsubscription_date'] = vals['opt_out'] and fields.datetime.now() or False
        return super(MassMailingContact, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        """Keep unsubscription_date consistent with opt_out on update."""
        if 'opt_out' in vals:
            vals['unsubscription_date'] = vals['opt_out'] and fields.datetime.now() or False
        return super(MassMailingContact, self).write(cr, uid, ids, vals, context=context)

    def get_name_email(self, name, context):
        """Split a 'Name <email>' string into (name, email).

        When only one of the two parts is found, it is reused for the other
        so neither comes back empty when some text was given.
        """
        name, email = self.pool['res.partner']._parse_partner_name(name, context=context)
        if name and not email:
            email = name
        if email and not name:
            name = email
        return name, email

    def name_create(self, cr, uid, name, context=None):
        """Quick-create a contact from a 'Name <email>' string; return its
        name_get() pair. The default mailing list applies (see _defaults)."""
        name, email = self.get_name_email(name, context=context)
        rec_id = self.create(cr, uid, {'name': name, 'email': email}, context=context)
        return self.name_get(cr, uid, [rec_id], context)[0]

    def add_to_list(self, cr, uid, name, list_id, context=None):
        """Create a contact from a 'Name <email>' string inside the given
        mailing list; return its name_get() pair."""
        name, email = self.get_name_email(name, context=context)
        rec_id = self.create(cr, uid, {'name': name, 'email': email, 'list_id': list_id}, context=context)
        return self.name_get(cr, uid, [rec_id], context)[0]

    def message_get_default_recipients(self, cr, uid, ids, context=None):
        """Mail the contact's own address directly; never target partners."""
        res = {}
        for record in self.browse(cr, uid, ids, context=context):
            res[record.id] = {'partner_ids': [], 'email_to': record.email, 'email_cc': False}
        return res

    def message_receive_bounce(self, cr, uid, ids, mail_id=None, context=None):
        """Called by ``message_process`` when a bounce email (such as Undelivered
        Mail Returned to Sender) is received for an existing thread. As contacts
        do not inherit form mail.thread, we have to define this method to be able
        to track bounces (see mail.thread for more details). """
        for obj in self.browse(cr, uid, ids, context=context):
            # Increment the per-contact bounce counter.
            self.write(cr, uid, [obj.id], {'message_bounce': obj.message_bounce + 1}, context=context)
class MassMailingList(osv.Model):
    """Model of a contact list. """
    _name = 'mail.mass_mailing.list'
    _order = 'name'
    _description = 'Mailing List'

    def _get_contact_nbr(self, cr, uid, ids, name, arg, context=None):
        # Count, per list, the contacts that did not opt out. A single
        # read_group call (grouped on list_id) covers all requested lists;
        # lists with no matching contact keep the pre-filled 0.
        result = dict.fromkeys(ids, 0)
        Contacts = self.pool.get('mail.mass_mailing.contact')
        for group in Contacts.read_group(cr, uid, [('list_id', 'in', ids), ('opt_out', '!=', True)], ['list_id'], ['list_id'], context=context):
            result[group['list_id'][0]] = group['list_id_count']
        return result

    _columns = {
        'name': fields.char('Mailing List', required=True),
        'create_date': fields.datetime('Creation Date'),
        # Computed field: number of non-opted-out contacts (see _get_contact_nbr).
        'contact_nbr': fields.function(
            _get_contact_nbr, type='integer',
            string='Number of Contacts',
        ),
        # sanitize=False: the popup HTML is trusted back-office content.
        'popup_content': fields.html("Website Popup Content", translate=True, required=True, sanitize=False),
        'popup_redirect_url': fields.char("Website Popup Redirect URL"),
    }

    def _get_default_popup_content(self, cr, uid, context=None):
        # Default HTML body for the website newsletter-subscription popup.
        return """<div class="o_popup_modal_header text-center">
<h3 class="o_popup_modal_title mt8">Odoo Presents</h3>
</div>
<div class="o_popup_message">
<font>7</font>
<strong>Business Hacks</strong>
<span> to<br/>boost your marketing</span>
</div>
<p class="o_message_paragraph">Join our Marketing newsletter and get <strong>this white paper instantly</strong></p>"""

    _defaults = {
        'popup_content': _get_default_popup_content,
    }
class MassMailingStage(osv.Model):
    """Stage for mass mailing campaigns. """
    _name = 'mail.mass_mailing.stage'
    _description = 'Mass Mailing Campaign Stage'
    _order = 'sequence'  # stages are displayed in manual sequence order

    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        # Manual ordering handle for the stage bar / kanban columns.
        'sequence': fields.integer('Sequence'),
    }

    _defaults = {
        'sequence': 0,
    }
class MassMailingCampaign(osv.Model):
    """Model of mass mailing campaigns. A campaign groups several mass
    mailings and aggregates their email statistics."""
    _name = "mail.mass_mailing.campaign"
    _description = 'Mass Mailing Campaign'
    _inherit = ['utm.mixin']
    _inherits = {'utm.campaign': 'campaign_id'}

    def _get_statistics(self, cr, uid, ids, name, arg, context=None):
        """ Compute statistics of the mass mailing campaign.

        One aggregate query over mail_mail_statistics fills the counters;
        the ratios are derived in Python afterwards.
        """
        results = {}
        cr.execute("""
            SELECT
                c.id as campaign_id,
                COUNT(s.id) AS total,
                COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent,
                COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled,
                COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed,
                COUNT(CASE WHEN s.id is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered,
                COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened,
                COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied ,
                COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced
            FROM
                mail_mail_statistics s
            RIGHT JOIN
                mail_mass_mailing_campaign c
                ON (c.id = s.mass_mailing_campaign_id)
            WHERE
                c.id IN %s
            GROUP BY
                c.id
        """, (tuple(ids), ))
        for row in cr.dictfetchall():
            results[row.pop('campaign_id')] = row
            total = row['total'] or 1  # avoid division by zero on empty campaigns
            # NOTE(review): this overwrites the SQL-computed 'delivered'
            # (not-bounced) with sent - bounced — presumably intentional, as
            # ratios should relate to actually sent emails; confirm.
            row['delivered'] = row['sent'] - row['bounced']
            row['received_ratio'] = 100.0 * row['delivered'] / total
            row['opened_ratio'] = 100.0 * row['opened'] / total
            row['replied_ratio'] = 100.0 * row['replied'] / total
            row['bounced_ratio'] = 100.0 * row['bounced'] / total
        return results

    def _get_clicks_ratio(self, cr, uid, ids, name, arg, context=None):
        """Percentage of emails of the campaign whose links were clicked."""
        res = dict.fromkeys(ids, 0)
        cr.execute("""
            SELECT COUNT(DISTINCT(stats.id)) AS nb_mails, COUNT(DISTINCT(clicks.mail_stat_id)) AS nb_clicks, stats.mass_mailing_campaign_id AS id
            FROM mail_mail_statistics AS stats
            LEFT OUTER JOIN website_links_click AS clicks ON clicks.mail_stat_id = stats.id
            WHERE stats.mass_mailing_campaign_id IN %s
            GROUP BY stats.mass_mailing_campaign_id
        """, (tuple(ids), ))
        for record in cr.dictfetchall():
            # nb_mails >= 1 by construction: a group row only exists when at
            # least one statistics record matched.
            res[record['id']] = 100 * record['nb_clicks'] / record['nb_mails']
        return res

    def _get_total_mailings(self, cr, uid, ids, field_name, arg, context=None):
        """Number of mass mailings attached to each campaign."""
        result = dict.fromkeys(ids, 0)
        for mail in self.pool['mail.mass_mailing'].read_group(cr, uid, [('mass_mailing_campaign_id', 'in', ids)], ['mass_mailing_campaign_id'], ['mass_mailing_campaign_id'], context=context):
            result[mail['mass_mailing_campaign_id'][0]] = mail['mass_mailing_campaign_id_count']
        return result

    _columns = {
        'name': fields.char('Name', required=True),
        'stage_id': fields.many2one('mail.mass_mailing.stage', 'Stage', required=True),
        'user_id': fields.many2one(
            'res.users', 'Responsible',
            required=True,
        ),
        'campaign_id': fields.many2one('utm.campaign', 'campaign_id',
                                       required=True, ondelete='cascade'),
        'tag_ids': fields.many2many(
            'mail.mass_mailing.tag', 'mail_mass_mailing_tag_rel',
            'tag_id', 'campaign_id', string='Tags'),
        'mass_mailing_ids': fields.one2many(
            'mail.mass_mailing', 'mass_mailing_campaign_id',
            'Mass Mailings',
        ),
        'unique_ab_testing': fields.boolean(
            'AB Testing',
            help='If checked, recipients will be mailed only once, allowing to send'
                 'various mailings in a single campaign to test the effectiveness'
                 'of the mailings.'),
        'color': fields.integer('Color Index'),
        'clicks_ratio': fields.function(
            _get_clicks_ratio, string="Number of clicks",
            type="integer",
        ),
        # stat fields (all served by the single _get_statistics query)
        'total': fields.function(
            _get_statistics, string='Total',
            type='integer', multi='_get_statistics'
        ),
        'scheduled': fields.function(
            _get_statistics, string='Scheduled',
            type='integer', multi='_get_statistics'
        ),
        'failed': fields.function(
            _get_statistics, string='Failed',
            type='integer', multi='_get_statistics'
        ),
        'sent': fields.function(
            _get_statistics, string='Sent Emails',
            type='integer', multi='_get_statistics'
        ),
        'delivered': fields.function(
            _get_statistics, string='Delivered',
            type='integer', multi='_get_statistics',
        ),
        'opened': fields.function(
            _get_statistics, string='Opened',
            type='integer', multi='_get_statistics',
        ),
        'replied': fields.function(
            _get_statistics, string='Replied',
            type='integer', multi='_get_statistics'
        ),
        'bounced': fields.function(
            _get_statistics, string='Bounced',
            type='integer', multi='_get_statistics'
        ),
        'received_ratio': fields.function(
            _get_statistics, string='Received Ratio',
            type='integer', multi='_get_statistics',
        ),
        'opened_ratio': fields.function(
            _get_statistics, string='Opened Ratio',
            type='integer', multi='_get_statistics',
        ),
        'replied_ratio': fields.function(
            _get_statistics, string='Replied Ratio',
            type='integer', multi='_get_statistics',
        ),
        'total_mailings': fields.function(_get_total_mailings, string='Mailings', type='integer'),
        'bounced_ratio': fields.function(
            _get_statistics, string='Bounced Ratio',
            type='integer', multi='_get_statistics',
        ),
    }

    def _get_default_stage_id(self, cr, uid, context=None):
        """First stage (by sequence order) as default, or False if none."""
        stage_ids = self.pool['mail.mass_mailing.stage'].search(cr, uid, [], limit=1, context=context)
        return stage_ids and stage_ids[0] or False

    def _get_source_id(self, cr, uid, context=None):
        """Default UTM source: the 'newsletter' record."""
        return self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'utm.utm_source_newsletter')

    def _get_medium_id(self, cr, uid, context=None):
        """Default UTM medium: the 'email' record."""
        return self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'utm.utm_medium_email')

    _defaults = {
        'user_id': lambda self, cr, uid, ctx=None: uid,
        'stage_id': lambda self, *args: self._get_default_stage_id(*args),
        'source_id': lambda self, *args: self._get_source_id(*args),
        'medium_id': lambda self, *args: self._get_medium_id(*args),
    }

    def mass_mailing_statistics_action(self, cr, uid, ids, context=None):
        """Open the email statistics action restricted to these campaigns."""
        res = self.pool['ir.actions.act_window'].for_xml_id(cr, uid, 'mass_mailing', 'action_view_mass_mailing_statistics', context=context)
        res['domain'] = [('mass_mailing_campaign_id', 'in', ids)]
        return res

    def get_recipients(self, cr, uid, ids, model=None, context=None):
        """Return the recipients of a mailing campaign. This is based on the statistics
        build for each mailing.

        :return dict: campaign id -> set of res_ids already mailed
        """
        Statistics = self.pool['mail.mail.statistics']
        res = dict.fromkeys(ids, False)
        for cid in ids:
            domain = [('mass_mailing_campaign_id', '=', cid)]
            if model:
                domain += [('model', '=', model)]
            stat_ids = Statistics.search(cr, uid, domain, context=context)
            res[cid] = set(stat.res_id for stat in Statistics.browse(cr, uid, stat_ids, context=context))
        return res

    def on_change_campaign_name(self, cr, uid, ids, name, context=None):
        """Mirror the campaign name onto the linked utm.campaign, creating
        the UTM record on the fly for new campaigns."""
        if name:
            mass_mailing_campaign = self.browse(cr, uid, ids, context=context)
            if mass_mailing_campaign.campaign_id:
                utm_campaign_id = mass_mailing_campaign.campaign_id.id
                self.pool['utm.campaign'].write(cr, uid, [utm_campaign_id], {'name': name}, context=context)
            else:
                utm_campaign_id = self.pool['utm.campaign'].create(cr, uid, {'name': name}, context=context)
            return {'value': {'campaign_id': utm_campaign_id}}

    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
        """ Override read_group to always display all states.

        BUGFIX: the synthesized empty groups used the count key
        ``state_count`` although grouping is done on ``stage_id`` — the web
        client expects ``stage_id_count``. Also forward ``lazy`` to super();
        it was silently dropped before.
        """
        if groupby and groupby[0] == "stage_id":
            # Default result structure: one empty group per stage so every
            # kanban column shows even without records.
            states_read = self.pool['mail.mass_mailing.stage'].search_read(cr, uid, [], ['name'], context=context)
            states = [(state['id'], state['name']) for state in states_read]
            read_group_all_states = [{
                '__context': {'group_by': groupby[1:]},
                '__domain': domain + [('stage_id', '=', state_value)],
                'stage_id': state_value,
                'stage_id_count': 0,
            } for state_value, state_name in states]
            # Get standard results
            read_group_res = super(MassMailingCampaign, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
            # Update standard results with default results
            result = []
            for state_value, state_name in states:
                res = filter(lambda x: x['stage_id'] == (state_value, state_name), read_group_res)
                if not res:
                    res = filter(lambda x: x['stage_id'] == state_value, read_group_all_states)
                    res[0]['stage_id'] = [state_value, state_name]
                result.append(res[0])
            return result
        else:
            return super(MassMailingCampaign, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
class MassMailing(osv.Model):
""" MassMailing models a wave of emails for a mass mailign campaign.
A mass mailing is an occurence of sending emails. """
_name = 'mail.mass_mailing'
_description = 'Mass Mailing'
# number of periods for tracking mail_mail statistics
_period_number = 6
_order = 'sent_date DESC'
# _send_trigger = 5 # Number under which mails are send directly
_inherit = ['utm.mixin']
def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, date_begin, context=None):
    """ Generic method to generate data for bar chart values using SparklineBarWidget.
    This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).

    :param obj: the target model (i.e. crm_lead)
    :param domain: the domain applied to the read_group
    :param list read_fields: the list of fields to read in the read_group
    :param str value_field: the field used to compute the value of the bar slice
    :param str groupby_field: the fields used to group
    :param date_begin: start of the window; one bar slice is pre-built per
        day for ``_period_number`` days from this date
    :return list section_result: a list of dicts: [
        {   'value': (int) bar_column_value,
            'tootip': (str) bar_column_tooltip,
        }
    ]
    """
    date_begin = date_begin.date()
    # Pre-fill one zero-valued slice per day so days without data still render.
    section_result = [{'value': 0,
                       'tooltip': ustr((date_begin + relativedelta.relativedelta(days=i)).strftime('%d %B %Y')),
                       } for i in range(0, self._period_number)]
    group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
    # The group __domain dates are formatted per the grouped field's type.
    field = obj._fields.get(groupby_field.split(':')[0])
    pattern = tools.DEFAULT_SERVER_DATE_FORMAT if field.type == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
    for group in group_obj:
        group_begin_date = datetime.strptime(group['__domain'][0][2], pattern).date()
        timedelta = relativedelta.relativedelta(group_begin_date, date_begin)
        # NOTE(review): assumes every group start date falls inside the window
        # (0 <= days < _period_number) — confirm with callers.
        section_result[timedelta.days] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field)}
    return section_result
def _get_statistics(self, cr, uid, ids, name, arg, context=None):
    """ Compute statistics of the mass mailing.

    A single aggregate query over mail_mail_statistics fills the counters
    per mailing; ratios are derived in Python.

    BUGFIX: the original SELECT emitted two columns both aliased ``failed``
    (one scheduled-with-exception, one any-exception); dictfetchall() kept
    only the last. The dead duplicate is removed, preserving the surviving
    semantics (emails whose sending raised an exception).
    """
    results = {}
    cr.execute("""
        SELECT
            m.id as mailing_id,
            COUNT(s.id) AS total,
            COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent,
            COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled,
            COUNT(CASE WHEN s.sent is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered,
            COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened,
            COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied,
            COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced,
            COUNT(CASE WHEN s.exception is not null THEN 1 ELSE null END) AS failed
        FROM
            mail_mail_statistics s
        RIGHT JOIN
            mail_mass_mailing m
            ON (m.id = s.mass_mailing_id)
        WHERE
            m.id IN %s
        GROUP BY
            m.id
    """, (tuple(ids), ))
    for row in cr.dictfetchall():
        results[row.pop('mailing_id')] = row
        total = row['total'] or 1  # avoid division by zero on empty mailings
        row['received_ratio'] = 100.0 * row['delivered'] / total
        row['opened_ratio'] = 100.0 * row['opened'] / total
        row['replied_ratio'] = 100.0 * row['replied'] / total
        row['bounced_ratio'] = 100.0 * row['bounced'] / total
    return results
def _get_mailing_model(self, cr, uid, context=None):
    """Selection list of models that can be mass-mailed: every registry
    model exposing a truthy ``_mail_mass_mailing`` label, plus the
    built-in mailing-list contacts."""
    choices = [
        (self.pool[model_name]._name, getattr(self.pool[model_name], '_mail_mass_mailing'))
        for model_name in self.pool
        if getattr(self.pool[model_name], '_mail_mass_mailing', False)
    ]
    choices.append(('mail.mass_mailing.contact', _('Mailing List')))
    return choices
def _get_clicks_ratio(self, cr, uid, ids, name, arg, context=None):
    """Percentage of this mailing's emails whose links were clicked."""
    ratios = dict.fromkeys(ids, 0)
    cr.execute("""
        SELECT COUNT(DISTINCT(stats.id)) AS nb_mails, COUNT(DISTINCT(clicks.mail_stat_id)) AS nb_clicks, stats.mass_mailing_id AS id
        FROM mail_mail_statistics AS stats
        LEFT OUTER JOIN website_links_click AS clicks ON clicks.mail_stat_id = stats.id
        WHERE stats.mass_mailing_id IN %s
        GROUP BY stats.mass_mailing_id
    """, (tuple(ids), ))
    for row in cr.dictfetchall():
        # nb_mails >= 1: a group row only exists when statistics matched.
        ratios[row['id']] = 100 * row['nb_clicks'] / row['nb_mails']
    return ratios
def _get_next_departure(self, cr, uid, ids, name, arg, context=None):
    """Moment when each mailing will actually leave the queue.

    A mailing scheduled in the past — or not scheduled at all — departs at
    the queue cron's next call; a future schedule date is kept as-is.
    """
    cron = self.pool.get('ir.model.data').xmlid_to_object(cr, uid, 'mass_mailing.ir_cron_mass_mailing_queue', context=context)
    result = {}
    for mailing in self.browse(cr, uid, ids, context=context):
        departure = cron.nextcall
        if mailing.schedule_date:
            scheduled = datetime.strptime(mailing.schedule_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
            if scheduled >= datetime.now():
                departure = mailing.schedule_date
        result[mailing.id] = departure
    return result
def _get_total(self, cr, uid, ids, name, arg, context=None):
    """Number of recipients targeted by each mailing.

    get_recipients is evaluated as superuser so record rules do not hide
    recipients from the counter.

    FIX: the original re-browsed the very same record a second time inside
    the loop (one extra cache/browse per mailing); the record already in
    hand is passed directly — same id, same uid environment.
    """
    result = {}
    for mass_mailing in self.browse(cr, uid, ids, context=context):
        result[mass_mailing.id] = len(self.get_recipients(cr, SUPERUSER_ID, mass_mailing, context=context))
    return result
# indirections for inheritance
_mailing_model = lambda self, *args, **kwargs: self._get_mailing_model(*args, **kwargs)

# BUGFIXES in this declaration block:
# - the 'failed' function field was declared twice (identical definitions);
#   the dead duplicate is removed.
# - 'bounced_ratio' passed ``String='Bouncded Ratio'`` — wrong keyword case
#   (so no label was set at all) plus a typo; fixed to ``string='Bounced Ratio'``.
_columns = {
    'name': fields.char('Subject', required=True),
    'email_from': fields.char('From', required=True),
    'create_date': fields.datetime('Creation Date'),
    'sent_date': fields.datetime('Sent Date', oldname='date', copy=False),
    'schedule_date': fields.datetime('Schedule in the Future'),
    'body_html': fields.html('Body'),
    'attachment_ids': fields.many2many(
        'ir.attachment', 'mass_mailing_ir_attachments_rel',
        'mass_mailing_id', 'attachment_id', 'Attachments'
    ),
    'keep_archives': fields.boolean('Keep Archives'),
    'mass_mailing_campaign_id': fields.many2one(
        'mail.mass_mailing.campaign', 'Mass Mailing Campaign',
        ondelete='set null',
    ),
    'clicks_ratio': fields.function(
        _get_clicks_ratio, string="Number of Clicks",
        type="integer",
    ),
    'state': fields.selection(
        [('draft', 'Draft'), ('in_queue', 'In Queue'), ('sending', 'Sending'), ('done', 'Sent')],
        string='Status', required=True, copy=False
    ),
    'color': fields.related(
        'mass_mailing_campaign_id', 'color',
        type='integer', string='Color Index',
    ),
    # mailing options
    'reply_to_mode': fields.selection(
        [('thread', 'Followers of leads/applicants'), ('email', 'Specified Email Address')],
        string='Reply-To Mode', required=True,
    ),
    'reply_to': fields.char('Reply To', help='Preferred Reply-To Address'),
    # recipients
    'mailing_model': fields.selection(_mailing_model, string='Recipients Model', required=True),
    'mailing_domain': fields.char('Domain', oldname='domain'),
    'contact_list_ids': fields.many2many(
        'mail.mass_mailing.list', 'mail_mass_mailing_list_rel',
        string='Mailing Lists',
    ),
    'contact_ab_pc': fields.integer(
        'A/B Testing percentage',
        help='Percentage of the contacts that will be mailed. Recipients will be taken randomly.'
    ),
    # statistics data (all served by the single _get_statistics query)
    'statistics_ids': fields.one2many(
        'mail.mail.statistics', 'mass_mailing_id',
        'Emails Statistics',
    ),
    'total': fields.function(
        _get_total, string='Total',
        type='integer',
    ),
    'scheduled': fields.function(
        _get_statistics, string='Scheduled',
        type='integer', multi='_get_statistics',
    ),
    'failed': fields.function(
        _get_statistics, string='Failed',
        type='integer', multi='_get_statistics',
    ),
    'sent': fields.function(
        _get_statistics, string='Sent',
        type='integer', multi='_get_statistics',
    ),
    'delivered': fields.function(
        _get_statistics, string='Delivered',
        type='integer', multi='_get_statistics',
    ),
    'opened': fields.function(
        _get_statistics, string='Opened',
        type='integer', multi='_get_statistics',
    ),
    'replied': fields.function(
        _get_statistics, string='Replied',
        type='integer', multi='_get_statistics',
    ),
    'bounced': fields.function(
        _get_statistics, string='Bounced',
        type='integer', multi='_get_statistics',
    ),
    'received_ratio': fields.function(
        _get_statistics, string='Received Ratio',
        type='integer', multi='_get_statistics',
    ),
    'opened_ratio': fields.function(
        _get_statistics, string='Opened Ratio',
        type='integer', multi='_get_statistics',
    ),
    'replied_ratio': fields.function(
        _get_statistics, string='Replied Ratio',
        type='integer', multi='_get_statistics',
    ),
    'bounced_ratio': fields.function(
        _get_statistics, string='Bounced Ratio',
        type='integer', multi='_get_statistics',
    ),
    'next_departure': fields.function(
        _get_next_departure, string='Next Departure',
        type='datetime'
    ),
}
def mass_mailing_statistics_action(self, cr, uid, ids, context=None):
    """Open the email statistics action restricted to these mailings.

    BUGFIX: the original filtered the mail.mail.statistics action with
    ``('id', 'in', <website.links.click ids>)`` — ids of a *different*
    model, so the action showed unrelated records. Filter on
    ``mass_mailing_id`` instead, mirroring the campaign-level action.
    """
    res = self.pool['ir.actions.act_window'].for_xml_id(cr, uid, 'mass_mailing', 'action_view_mass_mailing_statistics', context=context)
    res['domain'] = [('mass_mailing_id', 'in', ids)]
    return res
def default_get(self, cr, uid, fields, context=None):
    """Derive the default reply_to_mode from the mailing model: contact-like
    models get a plain email reply address, other documents collect replies
    in the record's thread."""
    defaults = super(MassMailing, self).default_get(cr, uid, fields, context=context)
    model = defaults.get('mailing_model')
    if 'reply_to_mode' in fields and 'reply_to_mode' not in defaults and model:
        if model in ('res.partner', 'mail.mass_mailing.contact'):
            defaults['reply_to_mode'] = 'email'
        else:
            defaults['reply_to_mode'] = 'thread'
    return defaults
_defaults = {
    'state': 'draft',
    # Sender and reply-to both default to the instance's catchall/default
    # from address, resolved lazily at record creation.
    'email_from': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
    'reply_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
    'mailing_model': 'mail.mass_mailing.contact',
    # 100% of recipients mailed by default (no A/B split).
    'contact_ab_pc': 100,
}
def onchange_mass_mailing_campaign_id(self, cr, uid, id, mass_mailing_campaign_ids, context=None):
    """Propagate the campaign's UTM fields to the mailing on campaign change.

    :param mass_mailing_campaign_ids: id(s) of the selected campaign
    :return: onchange result ``{'value': {...}}`` with the campaign's
        campaign/source/medium ids (empty value dict when no campaign).
    """
    if not mass_mailing_campaign_ids:
        # Fix: always return a well-formed onchange dict; the previous
        # implementation implicitly returned None here, forcing callers to
        # special-case the result.
        return {'value': {}}
    campaign = self.pool['mail.mass_mailing.campaign'].browse(
        cr, uid, mass_mailing_campaign_ids, context=context)[0]
    return {'value': {
        'campaign_id': campaign.campaign_id.id,
        'source_id': campaign.source_id.id,
        'medium_id': campaign.medium_id.id,
    }}
#------------------------------------------------------
# Technical stuff
#------------------------------------------------------
def copy_data(self, cr, uid, id, default=None, context=None):
    """Duplicate a mailing, suffixing "(copy)" to its name."""
    record = self.browse(cr, uid, id, context=context)
    overrides = dict(default or {})
    overrides['name'] = _('%s (copy)') % record.name
    return super(MassMailing, self).copy_data(cr, uid, id, overrides, context=context)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
    """ Override read_group to always display all states.

    When grouping by state, a placeholder group with count 0 is injected for
    every state that has no mailing, so the kanban columns never disappear.
    """
    if groupby and groupby[0] == "state":
        # Default result structure
        # states = self._get_state_list(cr, uid, context=context)
        states = [('draft', 'Draft'), ('in_queue', 'In Queue'), ('sending', 'Sending'), ('done', 'Sent')]
        # One empty placeholder group per state, mirroring the structure
        # returned by the standard read_group.
        read_group_all_states = [{
            '__context': {'group_by': groupby[1:]},
            '__domain': domain + [('state', '=', state_value)],
            'state': state_value,
            'state_count': 0,
        } for state_value, state_name in states]
        # Get standard results
        read_group_res = super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
        # Update standard results with default results
        result = []
        for state_value, state_name in states:
            # NOTE: Python 2 filter() returns a list here; the res[0]
            # indexing below relies on that.
            res = filter(lambda x: x['state'] == state_value, read_group_res)
            if not res:
                # No real group for this state: fall back to the placeholder.
                res = filter(lambda x: x['state'] == state_value, read_group_all_states)
            # Replace the raw state key with the (value, label) pair the web
            # client expects for selection-field group headers.
            res[0]['state'] = [state_value, state_name]
            result.append(res[0])
        return result
    else:
        return super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
def update_opt_out(self, cr, uid, mailing_id, email, res_ids, value, context=None):
    """Set the opt_out flag on the mailing's target records matching *email*.

    Does nothing when the target model has no ``opt_out`` field.
    """
    mailing = self.browse(cr, uid, mailing_id, context=context)
    target = self.pool[mailing.mailing_model]
    if 'opt_out' not in target._fields:
        return
    # Prefer the model's 'email' field, falling back to 'email_from'.
    email_field = 'email' if 'email' in target._fields else 'email_from'
    matching_ids = target.search(
        cr, uid, [('id', 'in', res_ids), (email_field, 'ilike', email)], context=context)
    target.write(cr, uid, matching_ids, {'opt_out': value}, context=context)
#------------------------------------------------------
# Views & Actions
#------------------------------------------------------
def on_change_model_and_list(self, cr, uid, ids, mailing_model, list_ids, context=None):
    """Recompute the mailing domain when the model or the contact lists change.

    :param mailing_model: technical name of the targeted model
    :param list_ids: x2many client value: a mix of plain ids and command
        triples like ``(6, 0, [ids])``
    :return: onchange dict updating ``mailing_domain``
    """
    value = {}
    if mailing_model == 'mail.mass_mailing.contact':
        mailing_list_ids = set()
        for item in list_ids:
            if isinstance(item, (list, tuple)) and len(item) == 3:
                # x2many command triple: take the id list it carries.
                mailing_list_ids |= set(item[2])
            elif not isinstance(item, (list, tuple)):
                # Plain database id (int; also long on Python 2 — this check
                # replaces the py2-only `isinstance(item, (int, long))`).
                mailing_list_ids.add(item)
        if mailing_list_ids:
            value['mailing_domain'] = "[('list_id', 'in', %s), ('opt_out', '=', False)]" % list(mailing_list_ids)
        else:
            value['mailing_domain'] = "[('list_id', '=', False)]"
    else:
        value['mailing_domain'] = False
    # Fix: removed leftover debug assignment
    # `value['body_html'] = "on_change_model_and_list"` which overwrote the
    # user's email body with a literal marker string on every onchange.
    return {'value': value}
def action_duplicate(self, cr, uid, ids, context=None):
    """Duplicate the mailings; open the last copy in edit mode.

    Returns False when no copy was created (empty ids).
    """
    last_copy = None
    for mailing_id in ids:
        last_copy = self.copy(cr, uid, mailing_id, context=context)
    if not last_copy:
        return False
    return {
        'type': 'ir.actions.act_window',
        'res_model': 'mail.mass_mailing',
        'res_id': last_copy,
        'view_type': 'form',
        'view_mode': 'form',
        'context': context,
        'flags': {'initial_mode': 'edit'},
    }
def action_test_mailing(self, cr, uid, ids, context=None):
    """Open the test-mailing wizard preconfigured for the first mailing."""
    wizard_ctx = dict(context, default_mass_mailing_id=ids[0])
    return {
        'type': 'ir.actions.act_window',
        'name': _('Test Mailing'),
        'res_model': 'mail.mass_mailing.test',
        'view_mode': 'form',
        'target': 'new',
        'context': wizard_ctx,
    }
#------------------------------------------------------
# Email Sending
#------------------------------------------------------
def get_recipients(self, cr, uid, mailing, context=None):
    """Return ids of the records targeted by this mailing.

    Applies the stored mailing domain, then, when A/B testing is enabled
    (contact_ab_pc < 100), keeps only a random sample of the configured size,
    excluding records already mailed by the campaign when unique A/B testing
    is on.
    """
    if mailing.mailing_domain:
        # NOTE(review): the stored domain string is eval'ed; it is written by
        # on_change_model_and_list / trusted backend users, not end users —
        # confirm before exposing it to untrusted input.
        domain = eval(mailing.mailing_domain)
        res_ids = self.pool[mailing.mailing_model].search(cr, uid, domain, context=context)
    else:
        res_ids = []
        domain = [('id', 'in', res_ids)]
    # randomly choose a fragment
    if mailing.contact_ab_pc < 100:
        contact_nbr = self.pool[mailing.mailing_model].search(cr, uid, domain, count=True, context=context)
        topick = int(contact_nbr / 100.0 * mailing.contact_ab_pc)
        if mailing.mass_mailing_campaign_id and mailing.mass_mailing_campaign_id.unique_ab_testing:
            # Exclude records any mailing of this campaign already reached.
            already_mailed = self.pool['mail.mass_mailing.campaign'].get_recipients(cr, uid, [mailing.mass_mailing_campaign_id.id], context=context)[mailing.mass_mailing_campaign_id.id]
        else:
            already_mailed = set([])
        remaining = set(res_ids).difference(already_mailed)
        if topick > len(remaining):
            topick = len(remaining)
        res_ids = random.sample(remaining, topick)
    return res_ids
def get_remaining_recipients(self, cr, uid, mailing, context=None):
    """Return recipient ids that have not yet received this mailing."""
    candidate_ids = self.get_recipients(cr, uid, mailing, context=context)
    stats_domain = [('model', '=', mailing.mailing_model),
                    ('res_id', 'in', candidate_ids),
                    ('mass_mailing_id', '=', mailing.id)]
    stats = self.pool['mail.mail.statistics'].search_read(
        cr, uid, stats_domain, ['res_id'], context=context)
    done_ids = set(stat['res_id'] for stat in stats)
    return list(set(candidate_ids) - done_ids)
def send_mail(self, cr, uid, ids, context=None):
    """Send each mailing to its remaining recipients via mail.compose.message.

    Raises UserError when a mailing has no recipient left; marks each mailing
    'done' once its composer has sent.
    """
    author_id = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id
    for mailing in self.browse(cr, uid, ids, context=context):
        # instantiate an email composer + send emails
        res_ids = self.get_remaining_recipients(cr, uid, mailing, context=context)
        if not res_ids:
            raise UserError(_('Please select recipients.'))
        if context:
            comp_ctx = dict(context, active_ids=res_ids)
        else:
            comp_ctx = {'active_ids': res_ids}
        # Convert links in absolute URLs before the application of the shortener
        self.write(cr, uid, [mailing.id], {'body_html': self.pool['mail.template']._replace_local_links(cr, uid, mailing.body_html, context)}, context=context)
        composer_values = {
            'author_id': author_id,
            'attachment_ids': [(4, attachment.id) for attachment in mailing.attachment_ids],
            # Body with tracked/shortened links substituted in.
            'body': self.convert_links(cr, uid, [mailing.id], context=context)[mailing.id],
            'subject': mailing.name,
            'model': mailing.mailing_model,
            'email_from': mailing.email_from,
            'record_name': False,
            'composition_mode': 'mass_mail',
            'mass_mailing_id': mailing.id,
            'mailing_list_ids': [(4, l.id) for l in mailing.contact_list_ids],
            # 'thread' reply mode keeps replies in the original documents.
            'no_auto_thread': mailing.reply_to_mode != 'thread',
        }
        if mailing.reply_to_mode == 'email':
            composer_values['reply_to'] = mailing.reply_to
        composer_id = self.pool['mail.compose.message'].create(cr, uid, composer_values, context=comp_ctx)
        # auto_commit=True: each batch is committed as it goes out, so a crash
        # mid-send does not re-send already-delivered emails.
        self.pool['mail.compose.message'].send_mail(cr, uid, [composer_id], auto_commit=True, context=comp_ctx)
        self.write(cr, uid, [mailing.id], {'state': 'done'}, context=context)
    return True
def convert_links(self, cr, uid, ids, context=None):
    """Replace links in each mailing body with tracked/shortened versions.

    :return: dict mapping mailing id -> converted HTML body
    """
    converted = {}
    for mailing in self.browse(cr, uid, ids, context=context):
        # UTM values come from the campaign when there is one, else from
        # the mailing itself.
        utm_source = mailing.mass_mailing_campaign_id if mailing.mass_mailing_campaign_id else mailing
        vals = {'mass_mailing_id': mailing.id}
        if mailing.mass_mailing_campaign_id:
            vals['mass_mailing_campaign_id'] = mailing.mass_mailing_campaign_id.id
        for fname in ('campaign_id', 'source_id', 'medium_id'):
            record = getattr(utm_source, fname)
            if record:
                vals[fname] = record.id
        body = mailing.body_html if mailing.body_html else ''
        converted[mailing.id] = self.pool['website.links'].convert_links(
            cr, uid, body, vals, blacklist=['/unsubscribe_from_list'], context=context)
    return converted
def put_in_queue(self, cr, uid, ids, context=None):
    """Queue the mailings for the sending cron and stamp the sent date."""
    queue_vals = {'sent_date': fields.datetime.now(), 'state': 'in_queue'}
    self.write(cr, uid, ids, queue_vals, context=context)
def cancel_mass_mailing(self, cr, uid, ids, context=None):
    """Pull the mailings out of the queue by resetting them to draft."""
    draft_vals = {'state': 'draft'}
    self.write(cr, uid, ids, draft_vals, context=context)
def retry_failed_mail(self, cr, uid, mass_mailing_ids, context=None):
    """Drop failed outgoing mails and their statistics, then requeue.

    Deleting the mail.mail records in state 'exception' (and their statistics
    rows) lets send_mail regenerate them on the next queue run.
    """
    mail_obj = self.pool.get('mail.mail')
    stats_obj = self.pool.get('mail.mail.statistics')
    failed_ids = mail_obj.search(
        cr, uid, [('mailing_id', 'in', mass_mailing_ids), ('state', '=', 'exception')],
        context=context)
    # mail_mail_id_int survives the unlink of the mail itself, but search for
    # the statistics rows first for clarity. Fix: context is now passed to
    # every ORM call (the statistics search and the final write dropped it).
    stat_ids = stats_obj.search(
        cr, uid, [('mail_mail_id_int', 'in', failed_ids)], context=context)
    mail_obj.unlink(cr, uid, failed_ids, context=context)
    stats_obj.unlink(cr, uid, stat_ids, context=context)
    self.write(cr, uid, mass_mailing_ids, {'state': 'in_queue'}, context=context)
def _process_mass_mailing_queue(self, cr, uid, context=None):
    """Cron entry point: send queued mailings whose schedule date has passed
    (or that have no schedule date), marking finished ones as done."""
    now = datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    mass_mailing_ids = self.search(cr, uid, [('state', 'in', ('in_queue', 'sending')), '|', ('schedule_date', '<', now), ('schedule_date', '=', False)], context=context)
    for mass_mailing_id in mass_mailing_ids:
        mass_mailing_record = self.browse(cr, uid, mass_mailing_id, context=context)
        if len(self.get_remaining_recipients(cr, uid, mass_mailing_record, context=context)) > 0:
            # Still recipients to reach: flag as sending and (re)send.
            self.write(cr, uid, [mass_mailing_id], {'state': 'sending'}, context=context)
            self.send_mail(cr, uid, [mass_mailing_id], context=context)
        else:
            # Everybody already mailed: the mailing is finished.
            self.write(cr, uid, [mass_mailing_id], {'state': 'done'}, context=context)
class MailMail(models.Model):
    # Extends outgoing emails to embed the statistics id in tracked links.
    _inherit = ['mail.mail']

    @api.model
    def send_get_mail_body(self, mail, partner=None):
        """Override to add Statistic_id in shorted urls """
        # Links to the unsubscribe controller must stay untouched, otherwise
        # rewriting would break the opt-out flow.
        links_blacklist = ['/unsubscribe_from_list']
        if mail.mailing_id and mail.body_html and mail.statistics_ids:
            for match in re.findall(URL_REGEX, mail.body_html):
                href = match[0]
                url = match[1]
                if not [s for s in links_blacklist if s in href]:
                    # Append '/m/<statistics id>' so clicks can be attributed
                    # to this specific outgoing email.
                    new_href = href.replace(url, url + '/m/' + str(mail.statistics_ids[0].id))
                    mail.body_html = mail.body_html.replace(href, new_href)
        return super(MailMail, self).send_get_mail_body(mail, partner=partner)
|
jfterpstra/bluebottle
|
refs/heads/develop
|
bluebottle/accounting/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-23 13:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the accounting app.

    Auto-generated by Django makemigrations: creates the bank account,
    bank transaction (plus category and tenant) and remote Docdata
    payment/payout tables. Do not hand-edit applied migrations.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='BankAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('account_nr', models.CharField(blank=True, max_length=35, verbose_name='account number')),
                ('account_name', models.CharField(max_length=50, verbose_name='account name')),
            ],
        ),
        migrations.CreateModel(
            name='BankTransaction',
            # Field layout mirrors the fixed-width bank export format
            # (description 1..6, filler, book code).
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sender_account', models.CharField(max_length=35, verbose_name='holder account number')),
                ('currency', models.CharField(max_length=3, verbose_name='currency')),
                ('interest_date', models.DateField(verbose_name='interest date')),
                ('credit_debit', models.CharField(choices=[(b'C', 'Credit'), (b'D', 'Debit')], db_index=True, max_length=1, verbose_name='credit/debit')),
                ('amount', models.DecimalField(decimal_places=2, max_digits=14, verbose_name='amount')),
                ('counter_account', models.CharField(max_length=35, verbose_name='recipient account')),
                ('counter_name', models.CharField(max_length=70, verbose_name='recipient name')),
                ('book_date', models.DateField(db_index=True, verbose_name='book date')),
                ('book_code', models.CharField(max_length=2, verbose_name='book code')),
                ('filler', models.CharField(blank=True, max_length=6, verbose_name='filler')),
                ('description1', models.CharField(blank=True, max_length=35, verbose_name='description 1')),
                ('description2', models.CharField(blank=True, max_length=35, verbose_name='description 2')),
                ('description3', models.CharField(blank=True, max_length=35, verbose_name='description 3')),
                ('description4', models.CharField(blank=True, max_length=35, verbose_name='description 4')),
                ('description5', models.CharField(blank=True, max_length=35, verbose_name='description 5')),
                ('description6', models.CharField(blank=True, max_length=35, verbose_name='description 6')),
                ('end_to_end_id', models.CharField(blank=True, max_length=35, verbose_name='End to end ID')),
                ('id_recipient', models.CharField(blank=True, max_length=35, verbose_name='ID recipient account')),
                ('mandate_id', models.CharField(blank=True, max_length=35, verbose_name='Mandate ID')),
                ('status', models.CharField(blank=True, choices=[(b'valid', 'Valid'), (b'unknown', 'Invalid: Unknown transaction'), (b'mismatch', 'Invalid: Amount mismatch')], help_text='Cached status assigned during matching.', max_length=30, verbose_name='status')),
                ('status_remarks', models.CharField(blank=True, help_text='Additional remarks regarding the status.', max_length=250, verbose_name='status remarks')),
            ],
            options={
                'verbose_name': 'bank transaction',
                'verbose_name_plural': 'bank transactions',
            },
        ),
        migrations.CreateModel(
            name='BankTransactionCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Name')),
            ],
            options={
                'verbose_name': 'Bank transaction category',
                'verbose_name_plural': 'Bank transaction categories',
            },
        ),
        migrations.CreateModel(
            name='BankTransactionTenant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=35, verbose_name='Tenant name')),
            ],
        ),
        migrations.CreateModel(
            name='RemoteDocdataPayment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('merchant_reference', models.CharField(db_index=True, max_length=35, verbose_name='merchant reference')),
                ('triple_deal_reference', models.CharField(db_index=True, max_length=40, verbose_name='Triple Deal reference')),
                ('payment_type', models.CharField(db_index=True, max_length=15, verbose_name='type')),
                ('amount_collected', models.DecimalField(decimal_places=2, max_digits=14, verbose_name='amount collected')),
                ('currency_amount_collected', models.CharField(max_length=3, verbose_name='currency of amount collected')),
                ('tpcd', models.DecimalField(blank=True, decimal_places=2, max_digits=14, null=True, verbose_name='TPCD')),
                ('currency_tpcd', models.CharField(blank=True, max_length=3, verbose_name='currency of TPCD')),
                ('tpci', models.DecimalField(blank=True, decimal_places=2, max_digits=14, null=True, verbose_name='TPCI')),
                ('currency_tpci', models.CharField(blank=True, max_length=3, verbose_name='currency of TPCI')),
                ('docdata_fee', models.DecimalField(decimal_places=2, max_digits=14, verbose_name='Docdata payments fee')),
                ('currency_docdata_fee', models.CharField(max_length=3, verbose_name='currency of Docdata payments fee')),
                ('status', models.CharField(blank=True, choices=[(b'valid', 'Valid'), (b'inconsistent_chargeback', 'Invalid: inconsistent chargeback'), (b'missing', 'Invalid: Missing backoffice record'), (b'mismatch', 'Invalid: Amount mismatch')], help_text='Cached status assigned during matching.', max_length=30, verbose_name='status')),
                ('status_remarks', models.CharField(blank=True, help_text='Additional remarks regarding the status.', max_length=250, verbose_name='status remarks')),
            ],
            options={
                'ordering': ('-remote_payout__payout_date',),
            },
        ),
        migrations.CreateModel(
            name='RemoteDocdataPayout',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('payout_reference', models.CharField(max_length=100, unique=True, verbose_name='Payout Reference')),
                ('payout_date', models.DateField(blank=True, db_index=True, null=True, verbose_name='Payout date')),
                ('start_date', models.DateField(blank=True, db_index=True, null=True, verbose_name='Start date')),
                ('end_date', models.DateField(blank=True, db_index=True, null=True, verbose_name='End date')),
                ('collected_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=14, null=True, verbose_name='Collected amount')),
                ('payout_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=14, null=True, verbose_name='Payout amount')),
            ],
            options={
                'ordering': ('-payout_date',),
            },
        ),
    ]
|
Markus-Goetz/CDS-Invenio-Authorlist
|
refs/heads/master
|
modules/webaccess/lib/collection_restrictions_migration_kit.py
|
35
|
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This script will migrate collection restriction rules from previous
Apache-only method (column restricted in the collection table) to
enhanced FireRole/WebAccess aware mode.
"""
import sys
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.dbquery import run_sql
from invenio.access_control_admin import acc_add_authorization, acc_add_role, \
acc_get_action_id
from invenio.access_control_firerole import compile_role_definition, serialize
from invenio.access_control_config import VIEWRESTRCOLL
# Template for the name of the WebAccess role created per Apache group.
CFG_PROPOSED_ROLE_NAME = "%s group"
# Template for the role description; filled with the collection names.
CFG_PROPOSED_ROLE_DESCRIPTION = "Group to access the following restricted collection(s): %s."
def retrieve_restricted_collection():
    """Return a dictionary with collectionname -> apache group."""
    rows = run_sql('SELECT name, restricted FROM collection WHERE restricted<>""')
    return dict(rows) if rows else {}
def get_collections_for_group(restrictions, given_group):
    """Return a list of collection names accessible by the given group.

    :param restrictions: dict mapping collection name -> apache group
    :param given_group: apache group name to filter on
    :return: list of matching collection names (dict iteration order)
    """
    # .items() instead of the Python-2-only .iteritems(): identical behaviour
    # on Python 2, and keeps the helper importable on Python 3.
    return [collection for collection, group in restrictions.items()
            if group == given_group]
def create_needed_roles(restrictions, apache_group):
    """Create a role for the corresponding apache_group.

    Returns the role name (whether newly created or pre-existing)."""
    role_name = CFG_PROPOSED_ROLE_NAME % apache_group
    role_description = CFG_PROPOSED_ROLE_DESCRIPTION % ', '.join(get_collections_for_group(restrictions, apache_group))
    role_definition_src = 'allow apache_group "%s"' % apache_group
    # Python 2 print with a trailing comma: keeps the cursor on the line so
    # the OK / "Already existed!" status is appended to the same message.
    print "Creating role '%s' ('%s') with firerole '%s'..." % (role_name, role_description, role_definition_src),
    # acc_add_role returns 0 when a role with this name already exists.
    res = acc_add_role(role_name, role_description, serialize(compile_role_definition(role_definition_src)), role_definition_src)
    if res == 0:
        print "Already existed!"
    else:
        print "OK!"
    return role_name
def migrate_restricted_collection(collection_name, role_name):
    """Migrate a single collection restriction.

    Grants the VIEWRESTRCOLL action on *collection_name* to *role_name*."""
    print "Adding authorization to role '%s' for viewing collection '%s'..." % (role_name, collection_name),
    acc_add_authorization(role_name, VIEWRESTRCOLL, collection=collection_name)
    print "OK!"
def check_viewrestrcoll_exists():
    """Security check for VIEWRESTRCOLL to exist.

    Exits the process with status 1 when the action is missing, since the
    migration cannot proceed without it."""
    res = acc_get_action_id(VIEWRESTRCOLL)
    if not res:
        print "ERROR: %s action does not exist!" % VIEWRESTRCOLL
        print "Please run first webaccessadmin -a in order to update the system"
        print "to newly added actions."
        sys.exit(1)
def migrate():
    """Core.

    Reads all legacy Apache-group restrictions, creates one role per group,
    then grants each role access to its restricted collections."""
    check_viewrestrcoll_exists()
    restrictions = retrieve_restricted_collection()
    apache_groups = set(restrictions.values())
    print "%i restrictions to migrate" % len(restrictions.keys())
    print "%i roles to create" % len(apache_groups)
    # One role per distinct apache group; remember the generated names.
    role_names = {}
    for apache_group in apache_groups:
        role_names[apache_group] = create_needed_roles(restrictions, apache_group)
    for collection_name, apache_group in restrictions.iteritems():
        migrate_restricted_collection(collection_name, role_names[apache_group])
# Allow running the migration kit directly as a script.
if __name__ == "__main__":
    migrate()
|
carolFrohlich/nipype
|
refs/heads/master
|
nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py
|
2
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..utils import ApplyInverseDeformation
def test_ApplyInverseDeformation_inputs():
    # Auto-generated by tools/checkspecs.py (see file header): checks that
    # every input trait of ApplyInverseDeformation exposes exactly this
    # metadata (SPM field paths, xor constraints, defaults). Do not hand-edit.
    input_map = dict(bounding_box=dict(field='comp{1}.inv.comp{1}.sn2def.bb',
    ),
    deformation=dict(field='comp{1}.inv.comp{1}.sn2def.matname',
    xor=[u'deformation_field'],
    ),
    deformation_field=dict(field='comp{1}.inv.comp{1}.def',
    xor=[u'deformation'],
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_files=dict(field='fnames',
    mandatory=True,
    ),
    interpolation=dict(field='interp',
    ),
    matlab_cmd=dict(),
    mfile=dict(usedefault=True,
    ),
    paths=dict(),
    target=dict(field='comp{1}.inv.space',
    ),
    use_mcr=dict(),
    use_v8struct=dict(min_ver='8',
    usedefault=True,
    ),
    voxel_sizes=dict(field='comp{1}.inv.comp{1}.sn2def.vox',
    ),
    )
    inputs = ApplyInverseDeformation.input_spec()
    # Generator-style test: yields one assertion per (trait, metadata) pair.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ApplyInverseDeformation_outputs():
    # Auto-generated companion to the inputs test: verifies the output spec
    # declares exactly one trait, out_files, with no extra metadata.
    output_map = dict(out_files=dict(),
    )
    outputs = ApplyInverseDeformation.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
m-weigand/ccd_tools
|
refs/heads/master
|
lib/lib_dd/io/io_general.py
|
1
|
import lib_dd.io.ascii as ascii
import lib_dd.io.ascii_audit as ascii_audit
def _make_list(obj):
"""make sure the provided object is a list, if not, enclose it in one
"""
if not isinstance(obj, list):
return [obj, ]
else:
return obj
def save_fit_results(data, NDobj):
    """Save results of all DD fits to files.

    Parameters
    ----------
    data: dict holding the run options, including
        data['options']['output_format']
    NDobj: a single ND object or a list of ND objects

    Raises an Exception when the configured output format is unknown.
    """
    nd_list = _make_list(NDobj)
    fmt = data['options']['output_format']
    if fmt == 'ascii':
        ascii.save_data(data, nd_list)
    elif fmt == 'ascii_audit':
        ascii_audit.save_results(data, nd_list)
    else:
        raise Exception('Output format "{0}" not recognized!'.format(fmt))
|
subutai/nupic.research
|
refs/heads/master
|
projects/whydense/cifar/cifar_tune.py
|
3
|
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
# Original Code here:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
import argparse
import configparser
import logging
import os
import sys
from pathlib import Path
import ray
import torch
from ray import tune
from ray.tune.schedulers import MedianStoppingRule
from torchvision import datasets
from projects.whydense.cifar.cifar_experiment import TinyCIFAR
# Remove annoying messages saying training is taking too long
logging.getLogger("ray.tune.util").setLevel(logging.ERROR)
def trial_name_string(trial):
    """Build a filesystem-friendly name for a ray.tune trial.

    Args:
        trial (Trial): A generated trial object.
    Returns:
        str: str(trial) with shell/path-hostile characters replaced by '_'
        and truncated to at most 86 characters.
    """
    name = str(trial)
    for ch in "{}[]() ,=":
        name = name.replace(ch, "_")
    if len(name) > 85:
        # Keep a head and a tail of the name so trials stay distinguishable.
        name = name[0:75] + "_" + name[-10:]
    return name
class CIFARTune(TinyCIFAR, tune.Trainable):
    """ray.tune trainable class for running small CIFAR models:

    - Override _setup to reset the experiment for each trial.
    - Override _train to train and evaluate each epoch
    - Override _save and _restore to serialize the model
    """

    def __init__(self, config=None, logger_creator=None):
        # Initialize both bases explicitly: tune.Trainable.__init__ will call
        # _setup(config), which relies on TinyCIFAR being initialized first.
        TinyCIFAR.__init__(self)
        tune.Trainable.__init__(self, config=config, logger_creator=logger_creator)

    def _setup(self, config):
        """Custom initialization.

        Args:
            config (dict): Hyperparameters and other configs given.
                Copy of `self.config`.
        """
        self.model_setup(config)

    def _train(self):
        """Implement train() for a single epoch.

        Returns:
            A dict that describes training progress.
        """
        ret = self.train_epoch(self._iteration)
        print("epoch", self._iteration, ":", ret)
        return ret

    def _save(self, checkpoint_dir):
        # Delegate checkpointing to TinyCIFAR's model serialization.
        return self.model_save(checkpoint_dir)

    def _restore(self, checkpoint):
        """Subclasses should override this to implement restore().

        Args:
            checkpoint (str | dict): Value as returned by `_save`.
                If a string, then it is the checkpoint path.
        """
        self.model_restore(checkpoint)

    def _stop(self):
        """Subclasses should override this for any cleanup on stop."""
        # Only log when the scheduler ended the trial before all epochs ran.
        if self._iteration < self.iterations:
            print("CIFARTune: stopping early at epoch {}".format(self._iteration))
@ray.remote
def run_experiment(config, trainable):
    """Run a single tune experiment in parallel as a "remote" function.

    :param config: The experiment configuration
    :type config: dict
    :param trainable: tune.Trainable class with your experiment
    :type trainable: :class:`ray.tune.Trainable`
    """
    # Stop criteria. Default to total number of iterations/epochs
    stop_criteria = {"training_iteration": config.get("iterations")}
    stop_criteria.update(config.get("stop", {}))

    tune.run(
        trainable,
        name=config["name"],
        local_dir=config["path"],
        stop=stop_criteria,
        config=config,
        num_samples=config.get("repetitions", 1),
        search_alg=config.get("search_alg", None),
        # Default scheduler kills trials whose noise_accuracy falls below the
        # running median, after a 20-iteration grace period.
        scheduler=config.get(
            "scheduler",
            MedianStoppingRule(
                time_attr="training_iteration",
                reward_attr="noise_accuracy",
                min_samples_required=3,
                grace_period=20,
                verbose=False,
            ),
        ),
        trial_name_creator=tune.function(trial_name_string),
        trial_executor=config.get("trial_executor", None),
        checkpoint_at_end=config.get("checkpoint_at_end", False),
        checkpoint_freq=config.get("checkpoint_freq", 0),
        upload_dir=config.get("upload_dir", None),
        sync_function=config.get("sync_function", None),
        resume=config.get("resume", False),
        reuse_actors=config.get("reuse_actors", False),
        verbose=config.get("verbose", 0),
        resources_per_trial={
            # With lots of trials, optimal seems to be 0.5, or 2 trials per GPU
            # If num trials <= num GPUs, 1.0 is better
            "cpu": 1,
            "gpu": config.get("gpu_percentage", 0.5),
        },
    )
def parse_config(config_file, experiments=None):
    """Parse configuration file optionally filtering for specific
    experiments/sections.

    :param config_file: open file object (or iterable of lines)
    :param experiments: Optional list of experiment/section names to keep
    :return: Dictionary mapping section name -> dict of parsed values
    """
    cfgparser = configparser.ConfigParser()
    cfgparser.read_file(config_file)
    params = {}
    for exp in cfgparser.sections():
        if not experiments or exp in experiments:
            # Fix: copy the defaults. cfgparser.defaults() returns the
            # parser's internal dict; updating it in place leaked one
            # section's values into every subsequent section.
            values = dict(cfgparser.defaults())
            values.update(dict(cfgparser.items(exp)))
            item = {}
            for k, v in values.items():
                try:
                    # Interpret values as Python literals (ints, lists, ...)
                    # when possible; otherwise keep the raw string.
                    # NOTE(review): eval is acceptable only because config
                    # files are trusted, developer-authored input.
                    item[k] = eval(v)
                except (NameError, SyntaxError):
                    item[k] = v
            params[exp] = item
    return params
def parse_options():
    """Parse the command-line options: config file, CPU/GPU counts, and an
    optional subset of experiments to run."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "-c",
        "--config",
        dest="config",
        type=open,
        default="tiny_experiments.cfg",
        help="your experiments config file",
    )
    parser.add_argument(
        "-n",
        "--num_cpus",
        dest="num_cpus",
        type=int,
        default=os.cpu_count() - 1,
        help="number of cpus you want to use",
    )
    parser.add_argument(
        "-g",
        "--num_gpus",
        dest="num_gpus",
        type=int,
        default=torch.cuda.device_count(),
        help="number of gpus you want to use",
    )
    parser.add_argument(
        "-e",
        "--experiment",
        action="append",
        dest="experiments",
        help="run only selected experiments, by default run "
        "all experiments in config file.",
    )
    return parser.parse_args()
if __name__ == "__main__":
    # Load and parse command line option and experiment configurations
    options = parse_options()
    configs = parse_config(options.config, options.experiments)
    if len(configs) == 0:
        print("No experiments to run!")
        sys.exit()

    # Use configuration file location as the project location.
    project_dir = os.path.dirname(options.config.name)
    project_dir = os.path.abspath(project_dir)

    # Default data dir if not specified
    data_dir = os.path.join(os.environ["HOME"], "nta", "data")
    # NOTE(review): configs maps experiment names to option dicts, so this
    # top-level .get("data_dir") only fires for a section literally named
    # "data_dir" — confirm intent; per-experiment data_dir is read below.
    data_dir = configs.get("data_dir", data_dir)

    print("Using torch version", torch.__version__)
    print("Torch device count=", torch.cuda.device_count())

    # Initialize ray cluster: attach to an existing cluster when REDIS_ADDRESS
    # is set, otherwise start a local one (single-process when num_cpus == 1).
    if "REDIS_ADDRESS" in os.environ:
        ray.init(redis_address=os.environ["REDIS_ADDRESS"], include_webui=True)
    else:
        ray.init(
            num_cpus=options.num_cpus,
            num_gpus=options.num_gpus,
            local_mode=options.num_cpus == 1,
        )
    # NOTE(review): unreachable — the same condition already exited above.
    if len(configs) == 0:
        print("No matching experiments found!")

    # Run all experiments in parallel
    results = []
    for exp in configs:
        config = configs[exp]
        config["name"] = exp
        config["num_cpus"] = options.num_cpus
        config["num_gpus"] = options.num_gpus
        print("GPU percentage=", config.get("gpu_percentage", 0.5))

        # Make sure local directories are relative to the project location
        path = config.get("path", "results")
        path = Path(path).expanduser().resolve()
        config["path"] = path

        data_dir = config.get("data_dir", data_dir)
        data_dir = Path(data_dir).expanduser().resolve()
        config["data_dir"] = data_dir

        # Pre-download dataset
        train_dataset = datasets.CIFAR10(data_dir, download=True, train=True)

        # When running multiple hyperparameter searches on different experiments,
        # ray.tune will run one experiment at the time. We use "ray.remote" to
        # run each tune experiment in parallel as a "remote" function and wait until
        # all experiments complete
        results.append(run_experiment.remote(config, CIFARTune))

    # Wait for all experiments to complete
    ray.get(results)

    ray.shutdown()
|
mcella/django
|
refs/heads/master
|
django/db/migrations/recorder.py
|
478
|
from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder(object):
    """
    Deals with storing migration records in the database.

    Because this table is actually itself used for dealing with model
    creation, it's the one thing we can't do normally via migrations.
    We manually handle table creation/schema updating (using schema backend)
    and then have a floating model to do queries with.

    If a migration is unapplied its row is removed from the table. Having
    a row in the table always means a migration is applied.
    """

    @python_2_unicode_compatible
    class Migration(models.Model):
        # Floating model bound to its own app registry (Apps()) so it never
        # participates in the project's normal model registration.
        app = models.CharField(max_length=255)
        name = models.CharField(max_length=255)
        applied = models.DateTimeField(default=now)

        class Meta:
            apps = Apps()
            app_label = "migrations"
            db_table = "django_migrations"

        def __str__(self):
            return "Migration %s for %s" % (self.name, self.app)

    def __init__(self, connection):
        # Database connection whose alias scopes all queries below.
        self.connection = connection

    @property
    def migration_qs(self):
        # Queryset routed to this recorder's database connection.
        return self.Migration.objects.using(self.connection.alias)

    def ensure_schema(self):
        """
        Ensures the table exists and has the correct schema.
        """
        # If the table's there, that's fine - we've never changed its schema
        # in the codebase.
        if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
            return
        # Make the table
        try:
            with self.connection.schema_editor() as editor:
                editor.create_model(self.Migration)
        except DatabaseError as exc:
            raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)

    def applied_migrations(self):
        """
        Returns a set of (app, name) of applied migrations.
        """
        self.ensure_schema()
        return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))

    def record_applied(self, app, name):
        """
        Records that a migration was applied.
        """
        self.ensure_schema()
        self.migration_qs.create(app=app, name=name)

    def record_unapplied(self, app, name):
        """
        Records that a migration was unapplied.
        """
        self.ensure_schema()
        self.migration_qs.filter(app=app, name=name).delete()

    def flush(self):
        """
        Deletes all migration records. Useful if you're testing migrations.
        """
        self.migration_qs.all().delete()
|
shitizadmirer/unimap.ns-3noc
|
refs/heads/master
|
bindings/python/apidefs/gcc-LP64/callbacks_list.py
|
5
|
# Generated list of ns-3 callback template instantiations needed by the
# Python bindings. Each row is [return type, arg1, ..., arg9]; unused
# argument slots are padded with 'ns3::empty'. Treat as generated data.
callback_classes = [
    ['void', 'ns3::Ptr<ns3::Packet const>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['bool', 'ns3::Ptr<ns3::Packet>', 'ns3::Address const&', 'ns3::Address const&', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Packet>', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Packet>', 'double', 'ns3::UanTxMode', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Packet>', 'ns3::UanAddress const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::PacketBurst const>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Packet>', 'ns3::Mac48Address', 'ns3::Mac48Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Mac48Address', 'ns3::Mac48Address', 'unsigned int', 'bool', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'unsigned int', 'ns3::Mac48Address', 'ns3::Mac48Address', 'ns3::dot11s::PeerLink::PeerState', 'ns3::dot11s::PeerLink::PeerState', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['unsigned int', 'ns3::Mac48Address', 'ns3::Ptr<ns3::MeshWifiInterfaceMac>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Packet>', 'ns3::WifiMacHeader const*', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['std::vector<ns3::Mac48Address, std::allocator<ns3::Mac48Address> >', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'bool', 'ns3::Ptr<ns3::Packet>', 'ns3::Mac48Address', 'ns3::Mac48Address', 'unsigned short', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::WifiMacHeader const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['bool', 'std::string', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'bool', 'unsigned long', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Mac48Address', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Mac48Address', 'unsigned char', 'bool', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::Socket::SocketErrno', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ipv4Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::ArpCache const>', 'ns3::Ipv4Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['bool', 'ns3::Ptr<ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
nickburlett/feincms
|
refs/heads/master
|
feincms/module/page/extensions/navigation.py
|
1
|
"""
Extend or modify the navigation with custom entries.
This extension allows the website administrator to select an extension
which processes, modifies or adds subnavigation entries. The bundled
``feincms_nav`` template tag knows how to collect navigation entries,
be they real Page instances or extended navigation entries.
"""
from __future__ import absolute_import, unicode_literals
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
import types
import warnings
from django.db import models
from django.utils import six
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from feincms import extensions
from feincms.utils import get_object
from feincms._internal import monkeypatch_method
class TypeRegistryMetaClass(type):
    """
    Metaclass that records every subclass on the base class.

    The first class created with this metaclass receives an empty shared
    ``types`` list; each later subclass appends itself to it, so
    ``<BaseClass>.types`` enumerates all registered subclasses.
    TODO use NavigationExtension.__subclasses__() instead?
    """
    def __init__(cls, name, bases, attrs):
        if hasattr(cls, 'types'):
            # Registry already exists on a base class: register ourselves.
            cls.types.append(cls)
        else:
            # This is the base class itself: create the shared registry.
            cls.types = []
class PagePretender(object):
    """
    Stand-in object mimicking just enough of the page API to inject fake
    entries into the navigation tree.

    Supply at least ``title``, ``url`` and ``level`` as keyword arguments
    when constructing one; add ``language`` if the translation extension
    is in use.
    """
    pk = None
    class _mptt_meta:
        # Emulate the mptt options object so the template tags treat this
        # like a real tree node.
        level_attr = 'level'
    def __init__(self, **kwargs):
        for attr, value in kwargs.items():
            setattr(self, attr, value)
    def get_absolute_url(self):
        return self.url
    def get_navigation_url(self):
        return self.get_absolute_url()
    def get_level(self):
        return self.level
    def get_children(self):
        """ overwrite this if you want nested extensions using recursetree """
        return []
    def available_translations(self):
        # A pretender has no translated siblings.
        return ()
    def get_original_translation(self, page):
        return page
    def short_title(self):
        from feincms.utils import shorten_string
        return shorten_string(self.title)
class NavigationExtension(six.with_metaclass(TypeRegistryMetaClass)):
    """
    Base class for all navigation extensions.
    The name attribute is shown to the website administrator.
    """
    # Subclasses are automatically collected on NavigationExtension.types
    # via TypeRegistryMetaClass; navigation_extension_choices() reads that
    # registry.
    name = _('navigation extension')
    def children(self, page, **kwargs):
        """
        This is the method which must be overridden in every navigation
        extension.
        It receives the page the extension is attached to, the depth up to
        which the navigation should be resolved, and the current request object
        if it is available.
        """
        raise NotImplementedError
def navigation_extension_choices():
    """Yield ``(dotted_path, human name)`` choice tuples for every
    registered navigation extension, skipping the abstract base class."""
    for ext in NavigationExtension.types:
        if ext is NavigationExtension or not issubclass(ext, NavigationExtension):
            continue
        yield ('%s.%s' % (ext.__module__, ext.__name__), ext.name)
def get_extension_class(extension):
    """Resolve *extension* (dotted path, module or class) to a class.

    When the dotted path points at a module, that module's ``Extension``
    attribute is returned; otherwise the resolved object itself is.
    """
    resolved = get_object(extension)
    if isinstance(resolved, types.ModuleType):
        resolved = getattr(resolved, 'Extension')
    return resolved
class Extension(extensions.Extension):
    # feincms page extension: adds a `navigation_extension` choice field to
    # the page model and monkeypatches an `extended_navigation()` helper
    # onto it.
    ident = 'navigation' # TODO actually use this
    # Optional list of dotted paths to navigation extension classes; when
    # left as None the (deprecated) auto-registry is used instead.
    navigation_extensions = None
    @cached_property
    def _extensions(self):
        # Mapping of dotted path -> extension class, resolved exactly once.
        if self.navigation_extensions is None:
            warnings.warn(
                'Automatic registering of navigation extensions has been'
                ' deprecated. Please inherit the extension and put a list'
                ' of dotted python paths into the navigation_extensions'
                ' class variable.', DeprecationWarning, stacklevel=3)
            return OrderedDict(
                ('%s.%s' % (ext.__module__, ext.__name__), ext)
                for ext in NavigationExtension.types
                if (
                    issubclass(ext, NavigationExtension)
                    and ext is not NavigationExtension))
        else:
            return OrderedDict(
                ('%s.%s' % (ext.__module__, ext.__name__), ext)
                for ext
                in map(get_extension_class, self.navigation_extensions))
    def handle_model(self):
        # Expose the available extensions as admin-selectable choices.
        choices = [
            (path, ext.name) for path, ext in self._extensions.items()]
        self.model.add_to_class(
            'navigation_extension',
            models.CharField(
                _('navigation extension'),
                choices=choices,
                blank=True, null=True, max_length=200,
                help_text=_(
                    'Select the module providing subpages for this page if'
                    ' you need to customize the navigation.')))
        # Capture the extension instance for the monkeypatched closure below.
        extension = self
        @monkeypatch_method(self.model)
        def extended_navigation(self, **kwargs):
            # Resolve the configured extension lazily; fall back to the
            # ordinary in-navigation children when none is set or found.
            if not self.navigation_extension:
                return self.children.in_navigation()
            cls = None
            try:
                cls = extension._extensions[self.navigation_extension]
            except KeyError:
                # Unknown path: resolve once and cache (possibly None).
                cls = get_object(self.navigation_extension, fail_silently=True)
                extension._extensions[self.navigation_extension] = cls
            if cls:
                return cls().children(self, **kwargs)
            return self.children.in_navigation()
    def handle_modeladmin(self, modeladmin):
        # Show the field in a collapsed fieldset in the admin.
        modeladmin.add_extension_options(_('Navigation extension'), {
            'fields': ('navigation_extension',),
            'classes': ('collapse',),
        })
|
muffinresearch/addons-server
|
refs/heads/master
|
apps/access/helpers.py
|
22
|
import jinja2
from jingo import register
import acl
@register.function
@jinja2.contextfunction
def check_ownership(context, object, require_owner=False,
                    require_author=False, ignore_disabled=True):
    """Template helper: delegate the ownership check for *object* to
    ``acl.check_ownership`` using the request from the template context."""
    request = context['request']
    return acl.check_ownership(request, object,
                               require_owner=require_owner,
                               require_author=require_author,
                               ignore_disabled=ignore_disabled)
@register.function
@jinja2.contextfunction
def action_allowed(context, app, action):
    """Template helper: True if the current request's user may perform
    *action* on *app* (thin wrapper around ``acl.action_allowed``)."""
    request = context['request']
    return acl.action_allowed(request, app, action)
|
Carlosmr/WhooshSearcher
|
refs/heads/master
|
searcher/wsgi.py
|
1
|
"""
WSGI config for searcher project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "searcher.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "searcher.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
# NOTE: the import is deliberately placed after DJANGO_SETTINGS_MODULE is
# set, since building the application reads the settings.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
abdulhaq-e/django-rest-framework
|
refs/heads/master
|
rest_framework/filters.py
|
9
|
"""
Provides generic filtering backends that can be used to filter the results
returned by list views.
"""
from __future__ import unicode_literals
import operator
from functools import reduce
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.template import loader
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import (
crispy_forms, distinct, django_filters, guardian, template_render
)
from rest_framework.settings import api_settings
# `FilterSet` / `filter_template` are resolved at import time according to
# which optional dependencies are installed (django-filter, and optionally
# django-crispy-forms for nicer HTML rendering of the filter controls).
if 'crispy_forms' in settings.INSTALLED_APPS and crispy_forms and django_filters:
    # If django-crispy-forms is installed, use it to get a bootstrap3 rendering
    # of the DjangoFilterBackend controls when displayed as HTML.
    from crispy_forms.helper import FormHelper
    from crispy_forms.layout import Layout, Submit
    class FilterSet(django_filters.FilterSet):
        def __init__(self, *args, **kwargs):
            super(FilterSet, self).__init__(*args, **kwargs)
            # Strip help texts and attach a crispy-forms GET helper.
            for field in self.form.fields.values():
                field.help_text = None
            layout_components = list(self.form.fields.keys()) + [
                Submit('', _('Submit'), css_class='btn-default'),
            ]
            helper = FormHelper()
            helper.form_method = 'GET'
            helper.template_pack = 'bootstrap3'
            helper.layout = Layout(*layout_components)
            self.form.helper = helper
    filter_template = 'rest_framework/filters/django_filter_crispyforms.html'
elif django_filters:
    # If django-crispy-forms is not installed, use the standard
    # 'form.as_p' rendering when DjangoFilterBackend is displayed as HTML.
    class FilterSet(django_filters.FilterSet):
        def __init__(self, *args, **kwargs):
            super(FilterSet, self).__init__(*args, **kwargs)
            for field in self.form.fields.values():
                field.help_text = None
    filter_template = 'rest_framework/filters/django_filter.html'
else:
    # django-filter is unavailable: DjangoFilterBackend cannot be used.
    FilterSet = None
    filter_template = None
class BaseFilterBackend(object):
    """
    Abstract base class for filter backends.

    Concrete backends implement `.filter_queryset()` to narrow a queryset
    for a given request/view.
    """
    def filter_queryset(self, request, queryset, view):
        """
        Return a filtered queryset.
        """
        raise NotImplementedError(".filter_queryset() must be overridden.")
class DjangoFilterBackend(BaseFilterBackend):
    """
    A filter backend that uses django-filter.
    """
    default_filter_set = FilterSet
    template = filter_template

    def __init__(self):
        assert django_filters, 'Using DjangoFilterBackend, but django-filter is not installed'

    def get_filter_class(self, view, queryset=None):
        """
        Return the django-filters `FilterSet` used to filter the queryset.
        """
        filter_class = getattr(view, 'filter_class', None)
        filter_fields = getattr(view, 'filter_fields', None)

        if filter_class:
            # An explicit FilterSet takes precedence; sanity-check that it
            # targets the same model as the queryset.
            filter_model = filter_class.Meta.model
            assert issubclass(queryset.model, filter_model), \
                'FilterSet model %s does not match queryset model %s' % \
                (filter_model, queryset.model)
            return filter_class

        if not filter_fields:
            return None

        # Build an ad-hoc FilterSet from the view's `filter_fields`.
        class AutoFilterSet(self.default_filter_set):
            class Meta:
                model = queryset.model
                fields = filter_fields
        return AutoFilterSet

    def filter_queryset(self, request, queryset, view):
        filter_class = self.get_filter_class(view, queryset)
        if not filter_class:
            return queryset
        return filter_class(request.query_params, queryset=queryset).qs

    def to_html(self, request, queryset, view):
        filter_class = self.get_filter_class(view, queryset)
        if not filter_class:
            return None
        context = {
            'filter': filter_class(request.query_params, queryset=queryset)
        }
        return template_render(loader.get_template(self.template), context)
class SearchFilter(BaseFilterBackend):
    """
    Filter backend implementing `?search=` filtering across the fields
    listed in the view's `search_fields` attribute.
    """
    # The URL query parameter used for the search.
    search_param = api_settings.SEARCH_PARAM
    template = 'rest_framework/filters/search.html'

    def get_search_terms(self, request):
        """
        Search terms are set by a ?search=... query parameter,
        and may be comma and/or whitespace delimited.
        """
        params = request.query_params.get(self.search_param, '')
        return params.replace(',', ' ').split()

    def construct_search(self, field_name):
        """
        Translate a `search_fields` entry into an ORM lookup string,
        honouring the optional '^' (istartswith), '=' (iexact),
        '@' (search) and '$' (iregex) prefixes.
        """
        # Fixed: the '$' test was a bare `if` dangling after the elif
        # chain. Behavior was unchanged only because every branch returns;
        # `elif` makes the mutually exclusive dispatch explicit.
        if field_name.startswith('^'):
            return "%s__istartswith" % field_name[1:]
        elif field_name.startswith('='):
            return "%s__iexact" % field_name[1:]
        elif field_name.startswith('@'):
            return "%s__search" % field_name[1:]
        elif field_name.startswith('$'):
            return "%s__iregex" % field_name[1:]
        else:
            return "%s__icontains" % field_name

    def filter_queryset(self, request, queryset, view):
        search_fields = getattr(view, 'search_fields', None)
        search_terms = self.get_search_terms(request)
        if not search_fields or not search_terms:
            return queryset
        orm_lookups = [
            self.construct_search(six.text_type(search_field))
            for search_field in search_fields
        ]
        base = queryset
        # Every search term must match at least one of the search fields.
        for search_term in search_terms:
            queries = [
                models.Q(**{orm_lookup: search_term})
                for orm_lookup in orm_lookups
            ]
            queryset = queryset.filter(reduce(operator.or_, queries))
        # Filtering against a many-to-many field requires us to
        # call queryset.distinct() in order to avoid duplicate items
        # in the resulting queryset.
        return distinct(queryset, base)

    def to_html(self, request, queryset, view):
        if not getattr(view, 'search_fields', None):
            return ''
        term = self.get_search_terms(request)
        term = term[0] if term else ''
        context = {
            'param': self.search_param,
            'term': term
        }
        template = loader.get_template(self.template)
        return template_render(template, context)
class OrderingFilter(BaseFilterBackend):
    # The URL query parameter used for the ordering.
    ordering_param = api_settings.ORDERING_PARAM
    # Allowed ordering fields; None means "derive from the serializer".
    ordering_fields = None
    template = 'rest_framework/filters/ordering.html'
    def get_ordering(self, request, queryset, view):
        """
        Ordering is set by a comma delimited ?ordering=... query parameter.
        The `ordering` query parameter can be overridden by setting
        the `ordering_param` value on the OrderingFilter or by
        specifying an `ORDERING_PARAM` value in the API settings.
        """
        params = request.query_params.get(self.ordering_param)
        if params:
            fields = [param.strip() for param in params.split(',')]
            ordering = self.remove_invalid_fields(queryset, fields, view)
            if ordering:
                return ordering
        # No ordering was included, or all the ordering fields were invalid
        return self.get_default_ordering(view)
    def get_default_ordering(self, view):
        # Normalize the view's `ordering` attribute to a tuple of names.
        ordering = getattr(view, 'ordering', None)
        if isinstance(ordering, six.string_types):
            return (ordering,)
        return ordering
    def get_valid_fields(self, queryset, view):
        """
        Return the list of (field, label) pairs usable for ordering.
        """
        valid_fields = getattr(view, 'ordering_fields', self.ordering_fields)
        if valid_fields is None:
            # Default to allowing filtering on serializer fields
            serializer_class = getattr(view, 'serializer_class')
            if serializer_class is None:
                msg = ("Cannot use %s on a view which does not have either a "
                       "'serializer_class' or 'ordering_fields' attribute.")
                raise ImproperlyConfigured(msg % self.__class__.__name__)
            valid_fields = [
                (field.source or field_name, field.label)
                for field_name, field in serializer_class().fields.items()
                if not getattr(field, 'write_only', False) and not field.source == '*'
            ]
        elif valid_fields == '__all__':
            # View explicitly allows filtering on any model field
            valid_fields = [
                (field.name, getattr(field, 'label', field.name.title()))
                for field in queryset.model._meta.fields
            ]
            # NOTE(review): `key.title().split('__')` produces a *list* as
            # the label, unlike the string labels built elsewhere -- looks
            # unintended; confirm before relying on the label value.
            valid_fields += [
                (key, key.title().split('__'))
                for key in queryset.query.aggregates.keys()
            ]
        else:
            # Explicit list: entries may be plain names or (field, label).
            valid_fields = [
                (item, item) if isinstance(item, six.string_types) else item
                for item in valid_fields
            ]
        return valid_fields
    def remove_invalid_fields(self, queryset, fields, view):
        # Keep only requested fields (ignoring a leading '-') that are valid.
        valid_fields = [item[0] for item in self.get_valid_fields(queryset, view)]
        return [term for term in fields if term.lstrip('-') in valid_fields]
    def filter_queryset(self, request, queryset, view):
        ordering = self.get_ordering(request, queryset, view)
        if ordering:
            return queryset.order_by(*ordering)
        return queryset
    def get_template_context(self, request, queryset, view):
        # Context for the browsable-API ordering control.
        current = self.get_ordering(request, queryset, view)
        current = None if current is None else current[0]
        options = []
        for key, label in self.get_valid_fields(queryset, view):
            options.append((key, '%s - ascending' % label))
            options.append(('-' + key, '%s - descending' % label))
        return {
            'request': request,
            'current': current,
            'param': self.ordering_param,
            'options': options,
        }
    def to_html(self, request, queryset, view):
        template = loader.get_template(self.template)
        context = self.get_template_context(request, queryset, view)
        return template_render(template, context)
class DjangoObjectPermissionsFilter(BaseFilterBackend):
    """
    A filter backend that limits results to those where the requesting user
    has read object level permissions.
    """
    # Lookup template for the object-level "view" permission.
    perm_format = '%(app_label)s.view_%(model_name)s'

    def __init__(self):
        assert guardian, 'Using DjangoObjectPermissionsFilter, but django-guardian is not installed'

    def filter_queryset(self, request, queryset, view):
        # (Removed a dead `extra = {}` store that was unconditionally
        # overwritten by both branches below.)
        user = request.user
        model_cls = queryset.model
        kwargs = {
            'app_label': model_cls._meta.app_label,
            'model_name': model_cls._meta.model_name
        }
        permission = self.perm_format % kwargs
        if guardian.VERSION >= (1, 3):
            # Maintain behavior compatibility with versions prior to 1.3
            extra = {'accept_global_perms': False}
        else:
            extra = {}
        return guardian.shortcuts.get_objects_for_user(user, permission, queryset, **extra)
|
user-none/calibre
|
refs/heads/master
|
src/calibre/utils/linux_trash.py
|
14
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
# This is a reimplementation of plat_other.py with reference to the
# freedesktop.org trash specification:
# [1] http://www.freedesktop.org/wiki/Specifications/trash-spec
# [2] http://www.ramendik.ru/docs/trashspec.html
# See also:
# [3] http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
#
# For external volumes this implementation will raise an exception if it can't
# find or create the user's trash directory.
import os, stat
import os.path as op
from datetime import datetime
from urllib import quote
FILES_DIR = 'files'
INFO_DIR = 'info'
INFO_SUFFIX = '.trashinfo'
# Default of ~/.local/share [3]
XDG_DATA_HOME = op.expanduser(os.environ.get('XDG_DATA_HOME', '~/.local/share'))
HOMETRASH = op.join(XDG_DATA_HOME, 'Trash')
uid = os.getuid()
TOPDIR_TRASH = '.Trash'
TOPDIR_FALLBACK = '.Trash-%s'%uid
def uniquote(raw):
    # Percent-encode a path for a .trashinfo record, always returning
    # unicode. Python 2 only: relies on the `unicode` builtin and
    # `urllib.quote`.
    if isinstance(raw, unicode):
        raw = raw.encode('utf-8')
    return quote(raw).decode('utf-8')
def is_parent(parent, path):
    """Return True if *path* is *parent* itself or located beneath it.

    Both arguments are resolved with realpath so symlinks don't fool the
    containment test.
    """
    path = op.realpath(path)  # In case it's a symlink
    parent = op.realpath(parent)
    if path == parent:
        return True
    # A plain startswith() would wrongly treat '/foo/bar2' as being inside
    # '/foo/bar'; require a path-separator boundary instead.
    return path.startswith(parent.rstrip(op.sep) + op.sep)
def format_date(date):
    """Render *date* in the ISO-8601 form used by .trashinfo files."""
    return "{:%Y-%m-%dT%H:%M:%S}".format(date)
def info_for(src, topdir):
    """Build the text of a .trashinfo record for *src*.

    Paths under *topdir* are stored relative to it; everything else is
    stored absolute.
    """
    # ...it MUST not include a ".."" directory, and for files not "under" that
    # directory, absolute pathnames must be used. [2]
    if topdir is not None and is_parent(topdir, src):
        src = op.relpath(src, topdir)
    else:
        src = op.abspath(src)
    lines = [
        "[Trash Info]",
        "Path=" + uniquote(src),
        "DeletionDate=" + format_date(datetime.now()),
        "",
    ]
    return "\n".join(lines)
def check_create(dir):
    """Ensure *dir* exists, creating it (and any parents) with mode 0700
    as required by the trash spec [3]."""
    if op.exists(dir):
        return
    os.makedirs(dir, 0o700)
def trash_move(src, dst, topdir=None):
    """Move *src* into the trash directory *dst*, writing the matching
    .trashinfo record; name collisions are resolved with a counter."""
    filename = op.basename(src)
    filespath = op.join(dst, FILES_DIR)
    infopath = op.join(dst, INFO_DIR)
    base_name, ext = op.splitext(filename)
    counter = 0
    destname = filename
    # Pick a destination name that is free in both files/ and info/.
    while op.exists(op.join(filespath, destname)) or op.exists(op.join(infopath, destname + INFO_SUFFIX)):
        counter += 1
        destname = '%s %s%s' % (base_name, counter, ext)
    check_create(filespath)
    check_create(infopath)
    os.rename(src, op.join(filespath, destname))
    with open(op.join(infopath, destname + INFO_SUFFIX), 'wb') as f:
        # NOTE(review): info_for() returns text written to a binary handle;
        # fine on Python 2 (str is bytes) -- this module targets Python 2.
        f.write(info_for(src, topdir))
def find_mount_point(path):
    """Walk up from *path* (symlinks resolved first) to the nearest mount
    point. "/" is always a mount point, so the walk always terminates."""
    path = op.realpath(path)  # Required to avoid infinite loop
    while not op.ismount(path):
        path, _ = op.split(path)
    return path
def find_ext_volume_global_trash(volume_root):
    """Return the per-user subdirectory of the volume's shared .Trash dir,
    or None when $volume_root/.Trash is absent, unsafe or unusable.

    Implements check (1) of the trash spec [2]: the shared trash dir must
    be a real directory (no symlink) with the sticky bit set.
    """
    trash_dir = op.join(volume_root, TOPDIR_TRASH)
    if not op.exists(trash_dir):
        return None
    mode = os.lstat(trash_dir).st_mode
    # vol/.Trash must be a directory, cannot be a symlink, and must have
    # the sticky bit set.
    if not op.isdir(trash_dir) or op.islink(trash_dir) or not (mode & stat.S_ISVTX):
        return None
    user_dir = op.join(trash_dir, str(uid))
    try:
        check_create(user_dir)
    except OSError:
        return None
    return user_dir
def find_ext_volume_fallback_trash(volume_root):
    # from [2] Trash directories (1) create a .Trash-$uid dir.
    trash_dir = op.join(volume_root, TOPDIR_FALLBACK)
    # Try to make the directory; if we can't, the OSError will propagate
    # out of send2trash.
    check_create(trash_dir)
    return trash_dir
def find_ext_volume_trash(volume_root):
    """Locate (or create) the trash directory for an external volume."""
    # Prefer the shared $vol/.Trash/$uid directory; fall back to the
    # per-user $vol/.Trash-$uid directory. The global lookup returns
    # either None or a non-empty path, so `or` is a safe fallback.
    return (find_ext_volume_global_trash(volume_root)
            or find_ext_volume_fallback_trash(volume_root))
# Pull this out so it's easy to stub (to avoid stubbing lstat itself)
def get_dev(path):
    """Device number of the filesystem entry at *path* (symlinks are not
    followed, matching os.lstat)."""
    return os.lstat(path)[stat.ST_DEV]
def send2trash(path):
    """Move *path* to the appropriate freedesktop.org trash directory.

    Raises OSError when the file is missing, not writable, or when no
    usable trash directory can be found for its volume.
    """
    if not op.exists(path):
        raise OSError("File not found: %s" % path)
    # ...should check whether the user has the necessary permissions to delete
    # it, before starting the trashing operation itself. [2]
    if not os.access(path, os.W_OK):
        raise OSError("Permission denied: %s" % path)
    # if the file to be trashed is on the same device as HOMETRASH we
    # want to move it there.
    path_dev = get_dev(path)
    # If XDG_DATA_HOME or HOMETRASH do not yet exist we need to stat the
    # home directory, and these paths will be created further on if needed.
    trash_dev = get_dev(op.expanduser('~'))
    if path_dev == trash_dev:
        topdir = XDG_DATA_HOME
        dest_trash = HOMETRASH
    else:
        # External volume: locate its mount point and a trash dir there.
        topdir = find_mount_point(path)
        trash_dev = get_dev(topdir)
        if trash_dev != path_dev:
            # Device numbers don't reconcile: refuse rather than trash to
            # the wrong volume.
            raise OSError("Couldn't find mount point for %s" % path)
        dest_trash = find_ext_volume_trash(topdir)
    trash_move(path, dest_trash, topdir)
|
shahankhatch/scikit-learn
|
refs/heads/master
|
sklearn/covariance/graph_lasso_.py
|
10
|
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
    """Evaluation of the graph-lasso objective function

    The objective is a shifted, scaled version of the normalized
    log-likelihood (its empirical mean over the samples) plus an l1
    penalty on the off-diagonal entries of the precision matrix (the
    diagonal is not penalized).
    """
    n_features = precision_.shape[0]
    cost = -2. * log_likelihood(mle, precision_)
    cost += n_features * np.log(2 * np.pi)
    off_diag_l1 = np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()
    return cost + alpha * off_diag_l1
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
    """Find the maximum alpha for which there are some non-zeros off-diagonal.

    Parameters
    ----------
    emp_cov : 2D array, (n_features, n_features)
        The sample covariance matrix

    Notes
    -----
    This results from the bound for the all the Lasso that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
    """
    # Zero the diagonal of a copy, then take the largest magnitude left.
    off_diag = np.copy(emp_cov)
    np.fill_diagonal(off_diag, 0)
    return np.max(np.abs(off_diag))
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
# ill-conditioned, and the CV loop blows. Beside, this takes
# conservative stand-point on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solver have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    alpha : positive float, default 0.01
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
    mode : {'cd', 'lars'}, default 'cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : positive float, default 1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.
    max_iter : integer, default 100
        The maximum number of iterations.
    verbose : boolean, default False
        If verbose is True, the objective function and dual gap are
        printed at each iteration.
    assume_centered : boolean, default False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    covariance_ : array-like, shape (n_features, n_features)
        Estimated covariance matrix
    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
    n_iter_ : int
        Number of iterations run.

    See Also
    --------
    graph_lasso, GraphLassoCV
    """
    def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
                 max_iter=100, verbose=False, assume_centered=False):
        self.alpha = alpha
        self.mode = mode
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.assume_centered = assume_centered
        # The base class needs this for the score method
        self.store_precision = True

    def fit(self, X, y=None):
        """Fit the GraphLasso model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate.
        y : ignored
            Present for scikit-learn estimator API consistency.

        Returns
        -------
        self : object
        """
        # Covariance does not make sense for a single feature
        X = check_array(X, ensure_min_features=2, ensure_min_samples=2)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        # graph_lasso does the actual optimization; n_iter_ is kept as a
        # convergence diagnostic.
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
            enet_tol=self.enet_tol, max_iter=self.max_iter,
            verbose=self.verbose, return_n_iter=True)
        return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
                     tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
    """l1-penalized covariance estimator along a path of decreasing alphas

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    X : 2D ndarray, shape (n_samples, n_features)
        Data from which to compute the covariance estimate.
    alphas : list of positive floats
        The list of regularization parameters, decreasing order.
    cov_init : 2D array (n_features, n_features), optional
        The initial guess for the covariance; also used to warm-start the
        estimate from one alpha to the next along the path.
    X_test : 2D array, shape (n_test_samples, n_features), optional
        Optional test matrix to measure generalisation error.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.
    max_iter : integer, optional
        The maximum number of iterations.
    verbose : integer, optional
        The higher the verbosity flag, the more information is printed
        during the fitting.

    Returns
    -------
    covariances_ : List of 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrices. An entry is np.nan when the
        solver raised a FloatingPointError for that alpha.
    precisions_ : List of 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrices. Same np.nan convention
        as for ``covariances_``.
    scores_ : List of float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
    """
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init
    covariances_ = list()
    precisions_ = list()
    scores_ = list()
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)
    for alpha in alphas:
        try:
            # Capture the errors, and move on: a non-converging alpha gets
            # np.nan placeholders and a -inf score instead of aborting
            # the whole path.
            covariance_, precision_ = graph_lasso(
                emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
                enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
            covariances_.append(covariance_)
            precisions_.append(precision_)
            if X_test is not None:
                this_score = log_likelihood(test_emp_cov, precision_)
        except FloatingPointError:
            this_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            if not np.isfinite(this_score):
                this_score = -np.inf
            scores_.append(this_score)
        if verbose == 1:
            sys.stderr.write('.')
        elif verbose > 1:
            if X_test is not None:
                print('[graph_lasso_path] alpha: %.2e, score: %.2e'
                      % (alpha, this_score))
            else:
                print('[graph_lasso_path] alpha: %.2e' % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_
class GraphLassoCV(GraphLasso):
    """Sparse inverse covariance w/ cross-validated choice of the l1 penalty

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    alphas : integer, or list positive float, optional
        If an integer is given, it fixes the number of points on the
        grids of alpha to be used. If a list is given, it gives the
        grid to be used. See the notes in the class docstring for
        more details.
    n_refinements: strictly positive integer
        The number of times the grid is refined. Not used if explicit
        values of alphas are passed.
    cv : cross-validation generator, optional
        see sklearn.cross_validation module. If None is passed, defaults to
        a 3-fold strategy
    tol: positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.
    max_iter: integer, optional
        Maximum number of iterations.
    mode: {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.
    n_jobs: int, optional
        number of jobs to run in parallel (default 1).
    verbose: boolean, optional
        If verbose is True, the objective function and duality gap are
        printed at each iteration.
    assume_centered : Boolean
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    covariance_ : numpy.ndarray, shape (n_features, n_features)
        Estimated covariance matrix.
    precision_ : numpy.ndarray, shape (n_features, n_features)
        Estimated precision matrix (inverse covariance).
    alpha_ : float
        Penalization parameter selected.
    cv_alphas_ : list of float
        All penalization parameters explored.
    `grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
        Log-likelihood score on left-out data across folds.
    n_iter_ : int
        Number of iterations run for the optimal alpha.

    See Also
    --------
    graph_lasso, GraphLasso

    Notes
    -----
    The search for the optimal penalization parameter (alpha) is done on an
    iteratively refined grid: first the cross-validated scores on a grid are
    computed, then a new refined grid is centered around the maximum, and so
    on.
    One of the challenges which is faced here is that the solvers can
    fail to converge to a well-conditioned estimate. The corresponding
    values of alpha then come out as missing values, but the optimum may
    be close to these missing values.
    """
    def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
                 enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
                 verbose=False, assume_centered=False):
        self.alphas = alphas
        self.n_refinements = n_refinements
        self.mode = mode
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.cv = cv
        self.n_jobs = n_jobs
        self.assume_centered = assume_centered
        # The base class needs this for the score method
        self.store_precision = True

    def fit(self, X, y=None):
        """Fits the GraphLasso covariance model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate

        Returns
        -------
        self : object
        """
        # Covariance does not make sense for a single feature
        X = check_array(X, ensure_min_features=2)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        cv = check_cv(self.cv, X, y, classifier=False)
        # List of (alpha, scores, covs)
        path = list()
        n_alphas = self.alphas
        inner_verbose = max(0, self.verbose - 1)
        # NOTE(review): collections.Sequence is a deprecated alias of
        # collections.abc.Sequence on Python 3.3+ (removed in 3.10) --
        # confirm the supported Python versions before modernizing.
        if isinstance(n_alphas, collections.Sequence):
            # An explicit grid was passed: use it as-is, no refinement.
            alphas = self.alphas
            n_refinements = 1
        else:
            # Build an initial log-spaced grid, from strongest to weakest
            # penalization.
            n_refinements = self.n_refinements
            alpha_1 = alpha_max(emp_cov)
            alpha_0 = 1e-2 * alpha_1
            alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
                                 n_alphas)[::-1]
        t0 = time.time()
        for i in range(n_refinements):
            with warnings.catch_warnings():
                # No need to see the convergence warnings on this grid:
                # they will always be points that will not converge
                # during the cross-validation
                warnings.simplefilter('ignore', ConvergenceWarning)
                # Compute the cross-validated loss on the current grid
                # NOTE: Warm-restarting graph_lasso_path has been tried, and
                # this did not allow to gain anything (same execution time with
                # or without).
                this_path = Parallel(
                    n_jobs=self.n_jobs,
                    verbose=self.verbose
                )(
                    delayed(graph_lasso_path)(
                        X[train], alphas=alphas,
                        X_test=X[test], mode=self.mode,
                        tol=self.tol, enet_tol=self.enet_tol,
                        max_iter=int(.1 * self.max_iter),
                        verbose=inner_verbose)
                    for train, test in cv)
            # Little danse to transform the list in what we need
            covs, _, scores = zip(*this_path)
            covs = zip(*covs)
            scores = zip(*scores)
            path.extend(zip(alphas, scores, covs))
            path = sorted(path, key=operator.itemgetter(0), reverse=True)
            # Find the maximum (avoid using built in 'max' function to
            # have a fully-reproducible selection of the smallest alpha
            # in case of equality)
            best_score = -np.inf
            last_finite_idx = 0
            for index, (alpha, scores, _) in enumerate(path):
                this_score = np.mean(scores)
                # Absurdly large scores indicate a degenerate solve; treat
                # them as missing.
                if this_score >= .1 / np.finfo(np.float64).eps:
                    this_score = np.nan
                if np.isfinite(this_score):
                    last_finite_idx = index
                if this_score >= best_score:
                    best_score = this_score
                    best_index = index
            # Refine the grid
            if best_index == 0:
                # We do not need to go back: we have chosen
                # the highest value of alpha for which there are
                # non-zero coefficients
                alpha_1 = path[0][0]
                alpha_0 = path[1][0]
            elif (best_index == last_finite_idx
                    and not best_index == len(path) - 1):
                # We have non-converged models on the upper bound of the
                # grid, we need to refine the grid there
                alpha_1 = path[best_index][0]
                alpha_0 = path[best_index + 1][0]
            elif best_index == len(path) - 1:
                alpha_1 = path[best_index][0]
                alpha_0 = 0.01 * path[best_index][0]
            else:
                alpha_1 = path[best_index - 1][0]
                alpha_0 = path[best_index + 1][0]
            if not isinstance(n_alphas, collections.Sequence):
                alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
                                     n_alphas + 2)
                alphas = alphas[1:-1]
            if self.verbose and n_refinements > 1:
                print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
                      % (i + 1, n_refinements, time.time() - t0))
        path = list(zip(*path))
        grid_scores = list(path[1])
        alphas = list(path[0])
        # Finally, compute the score with alpha = 0
        alphas.append(0)
        grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
                                           cv=cv, n_jobs=self.n_jobs,
                                           verbose=inner_verbose))
        self.grid_scores = np.array(grid_scores)
        best_alpha = alphas[best_index]
        self.alpha_ = best_alpha
        self.cv_alphas_ = alphas
        # Finally fit the model with the selected alpha
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
            enet_tol=self.enet_tol, max_iter=self.max_iter,
            verbose=inner_verbose, return_n_iter=True)
        return self
|
sameerparekh/pants
|
refs/heads/master
|
tests/python/pants_test/android/tasks/test_aapt_gen.py
|
15
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.android.tasks.aapt_gen import AaptGen
from pants_test.android.test_android_base import TestAndroidBase, distribution
class TestAaptGen(TestAndroidBase):
  """Tests for the AaptGen task (aapt-driven R.java generation).

  Fix: the deprecated unittest aliases ``assertEquals``/``assertNotEquals``
  are replaced by ``assertEqual``/``assertNotEqual`` (the aliases emit
  DeprecationWarning and were removed in Python 3.12).
  """

  @classmethod
  def task_type(cls):
    return AaptGen

  def test_aapt_gen_smoke(self):
    """The task executes cleanly against an empty context."""
    task = self.create_task(self.context())
    task.execute()

  def test_relative_genfile(self):
    """_relative_genfile maps the package name onto the R.java path."""
    with self.android_binary(package_name='org.pantsbuild.examples.hello') as binary:
      self.assertEqual(AaptGen._relative_genfile(binary),
                       os.path.join('org', 'pantsbuild', 'examples', 'hello', 'R.java'))

  def test_create_sdk_jar_deps(self):
    """Distinct target SDKs yield distinct synthetic jar libraries."""
    with distribution() as dist:
      with self.android_binary(target_name='binary1', target_sdk='18') as binary1:
        with self.android_binary(target_name='binary2', target_sdk='19') as binary2:
          self.set_options(sdk_path=dist)
          task = self.create_task(self.context())
          targets = [binary1, binary2]
          task.create_sdk_jar_deps(targets)
          self.assertNotEqual(task._jar_library_by_sdk['19'], task._jar_library_by_sdk['18'])

  def test_aapt_out_different_sdk(self):
    """Output directories differ when target SDKs differ."""
    with self.android_binary(target_name='binary1', target_sdk='18') as binary1:
      with self.android_binary(target_name='binary2', target_sdk='19') as binary2:
        task = self.create_task(self.context())
        self.assertNotEqual(task.aapt_out(binary1), task.aapt_out(binary2))

  def test_aapt_out_same_sdk(self):
    """Output directories are shared when target SDKs match."""
    with self.android_binary(target_name='binary1', target_sdk='19') as binary1:
      with self.android_binary(target_name='binary2', target_sdk='19') as binary2:
        task = self.create_task(self.context())
        self.assertEqual(task.aapt_out(binary1), task.aapt_out(binary2))

  def test_aapt_tool(self):
    """aapt_tool resolves the binary under the requested build-tools version."""
    with distribution() as dist:
      with self.android_binary() as android_binary:
        self.set_options(sdk_path=dist, build_tools_version='20.0.0')
        task = self.create_task(self.context())
        aapt_tool = task.aapt_tool(android_binary)
        self.assertEqual(os.path.basename(os.path.dirname(aapt_tool)), '20.0.0')
        self.assertEqual(os.path.basename(aapt_tool), 'aapt')

  def test_android_tool(self):
    """android_jar resolves android.jar under the requested target SDK."""
    with distribution() as dist:
      with self.android_binary() as android_binary:
        self.set_options(sdk_path=dist, target_sdk='18')
        task = self.create_task(self.context())
        android_jar = task.android_jar(android_binary)
        self.assertEqual(os.path.basename(os.path.dirname(android_jar)), 'android-18')
        self.assertEqual(os.path.basename(android_jar), 'android.jar')

  def test_render_args(self):
    """The rendered command line starts with the aapt binary."""
    with distribution() as dist:
      with self.android_resources() as resources:
        with self.android_binary(dependencies=[resources]) as binary:
          self.set_options(sdk_path=dist)
          task = self.create_task(self.context())
          rendered_args = task._render_args(binary, binary.manifest, [resources.resource_dir])
          self.assertEqual(os.path.basename(rendered_args[0]), 'aapt')

  def test_priority_order_in_render_args(self):
    """Resource dirs are passed to aapt in dependency-priority order."""
    with distribution() as dist:
      with self.android_resources(target_name='binary_resources') as res1:
        with self.android_resources(target_name='library_resources') as res2:
          with self.android_library(dependencies=[res2]) as library:
            with self.android_binary(dependencies=[res1, library]) as binary:
              self.set_options(sdk_path=dist)
              task = self.create_task(self.context())
              res_dirs = [res1.resource_dir, res2.resource_dir]
              rendered_args = task._render_args(binary, binary.manifest, res_dirs)
              args_string = ' '.join(rendered_args)
              self.assertIn('--auto-add-overlay -S {} -S '
                            '{}'.format(res1.resource_dir, res2.resource_dir), args_string)

  def test_render_args_force_ignored(self):
    """The ignored-assets pattern is forwarded verbatim to aapt."""
    with distribution() as dist:
      with self.android_resources() as resources:
        with self.android_binary(dependencies=[resources]) as binary:
          ignored = '!picasa.ini:!*~:BUILD*'
          self.set_options(sdk_path=dist, ignored_assets=ignored)
          task = self.create_task(self.context())
          rendered_args = task._render_args(binary, binary.manifest, [resources.resource_dir])
          self.assertIn(ignored, rendered_args)

  def test_create_target(self):
    """create_target produces a synthetic target derived from the library."""
    with distribution() as dist:
      with self.android_library() as library:
        with self.android_binary(dependencies=[library]) as android_binary:
          self.set_options(sdk_path=dist)
          task = self.create_task(self.context())
          targets = [android_binary]
          task.create_sdk_jar_deps(targets)
          created_target = task.create_target(android_binary, library)
          self.assertEqual(created_target.derived_from, library)
          self.assertTrue(created_target.is_synthetic)
|
BayanGroup/sentry
|
refs/heads/master
|
src/sentry/templatetags/sentry_status.py
|
23
|
from __future__ import absolute_import, print_function
from django import template
from sentry import status_checks
register = template.Library()
@register.inclusion_tag('sentry/partial/system-status.html', takes_context=True)
def show_system_status(context):
    """Render the system-status partial with the currently detected problems.

    ``status_checks.check_all()`` returns a 2-tuple; only its first element
    (the problems) is surfaced to the template.
    """
    all_problems = status_checks.check_all()[0]
    return {'problems': all_problems}
|
odejesush/tensorflow
|
refs/heads/master
|
venv/lib/python2.7/site-packages/flask/_compat.py
|
121
|
# -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
# True when running under Python 2; selects the branch below.
PY2 = sys.version_info[0] == 2

# Identity helper: used as a no-op class decorator on Python 3.
_identity = lambda x: x

if not PY2:
    # Python 3: native text is str; provide iterator-style dict helpers
    # matching the Python 2 iterkeys/itervalues/iteritems API.
    text_type = str
    string_types = (str,)
    integer_types = (int,)
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
    from io import StringIO

    def reraise(tp, value, tb=None):
        # Re-raise `value`, attaching traceback `tb` when it differs from
        # the one already on the exception.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    # On Python 3, __str__ already returns text; no wrapping needed.
    implements_to_string = _identity
else:
    # Python 2: text is unicode, and the three-argument raise form only
    # parses on Python 2, so it is hidden inside an exec'd string.
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
    from cStringIO import StringIO

    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    def implements_to_string(cls):
        # Map the class's __str__ (assumed to return text) onto
        # __unicode__, and make __str__ return UTF-8 encoded bytes.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a temporary dummy class; when a user subclasses it, the
    intermediate metaclass intercepts class creation and builds the real
    class directly with ``meta(name, bases, namespace)``, so only one level
    of metaclass machinery is ever involved.
    """
    class _Intermediate(type):
        def __new__(mcls, name, _dummy_bases, namespace):
            # Discard the dummy base and construct the real class with the
            # requested metaclass and the originally supplied bases.
            return meta(name, bases, namespace)
    return type.__new__(_Intermediate, 'temporary_class', (), {})
# Certain versions of pypy have a bug where clearing the exception stack
# breaks the __exit__ function in a very peculiar way. The second level of
# exception blocks is necessary because pypy seems to forget to check if an
# exception happened until the next bytecode instruction?
#
# Relevant PyPy bugfix commit:
# https://bitbucket.org/pypy/pypy/commits/77ecf91c635a287e88e60d8ddb0f4e9df4003301
# According to ronan on #pypy IRC, it is released in PyPy2 2.3 and later
# versions.
#
# Ubuntu 14.04 has PyPy 2.2.1, which does exhibit this bug.
BROKEN_PYPY_CTXMGR_EXIT = False
if hasattr(sys, 'pypy_version_info'):
    # Probe for the bug at import time: run a context manager that clears
    # the exception state in __exit__ and see whether the interpreter
    # mis-handles the in-flight AssertionError.
    class _Mgr(object):
        def __enter__(self):
            return self
        def __exit__(self, *args):
            if hasattr(sys, 'exc_clear'):
                # Python 3 (PyPy3) doesn't have exc_clear
                sys.exc_clear()
    try:
        try:
            with _Mgr():
                raise AssertionError()
        # On a broken PyPy this escapes as TypeError instead; on a fixed
        # interpreter the AssertionError propagates normally.
        except:
            raise
    except TypeError:
        BROKEN_PYPY_CTXMGR_EXIT = True
    except AssertionError:
        pass
|
smirnoffs/karsender
|
refs/heads/master
|
opencart/tests.py
|
1
|
# -*- coding: utf-8 -*-
import datetime
from unittest.mock import patch
from karsender import Config
from opencart.services import get_last_success_import, get_last_orders
from karsender.database import get_collection
__author__ = 'Sergey Smirnov <smirnoffs@gmail.com>'
import unittest
class MockConfig(Config):
    # NOTE(review): this subclass appears unused -- the `mock` patch below
    # overrides Config.ORDERS_COLLECTION directly. Confirm before removing.
    ORDERS_COLLECTION = 'test_collection'

# Patch object shared by setUp/tearDown: redirects orders to a test
# collection while each test runs.
mock = patch.object(Config, 'ORDERS_COLLECTION', 'test_collection')
class TestOrderExport(unittest.TestCase):
    """Tests for the opencart order-export services against a test collection."""

    def test_get_last_orders(self):
        # NOTE(review): the result is never asserted on -- the original
        # assertions are commented out, so this only verifies the call
        # does not raise. Consider restoring them.
        orders = get_last_orders()
        # self.assertIsNotNone(orders)
        # self.assertGreater(len(orders), 0)
        # self.assertTrue(all(isinstance(order, Order) for order in orders))

    def setUp(self):
        # Activate the ORDERS_COLLECTION patch and grab the (test) collection.
        mock.start()
        self.collection = get_collection()

    def test_get_last_success_import(self):
        # With an empty collection, a datetime is still returned and is
        # expected to equal the 1980-01-01 fallback.
        # NOTE(review): assertAlmostEqual on datetimes only takes its fast
        # path when the values are exactly equal; assertEqual (or a delta)
        # would be clearer.
        last_import = get_last_success_import()
        self.assertIsInstance(last_import, datetime.datetime)
        self.assertAlmostEqual(last_import, datetime.datetime(year=1980, month=1, day=1))
        success_date = datetime.datetime(year=2014, month=2, day=12, hour=12, minute=4, second=14)
        self.collection.insert_one({'success': True, 'datetime': success_date})
        self.collection.insert_one({'success': False, 'datetime': datetime.datetime.now()})
        self.assertIsInstance(last_import, datetime.datetime)
        # Only the latest *successful* import date should be returned.
        self.assertEqual(get_last_success_import(), success_date)

    def tearDown(self):
        # Drop the test collection and deactivate the patch.
        self.collection.drop()
        mock.stop()
|
unnikrishnankgs/va
|
refs/heads/master
|
venv/lib/python3.5/site-packages/tensorflow/python/util/tf_should_use.py
|
13
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Decorator that provides a warning if the wrapped object is never used."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import traceback
import types
import six # pylint: disable=unused-import
from backports import weakref # pylint: disable=g-bad-import-order
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import tf_decorator
class _RefInfoField(
    collections.namedtuple(
        '_RefInfoField', ('type_', 'repr_', 'creation_stack', 'object_used'))):
  """Bookkeeping record for one wrapped object: its type, repr, the stack
  where it was created, and whether it has been used yet."""
  pass
# Thread-safe up to int32max/2 thanks to python's GIL; and may be safe even for
# higher values in Python 3.4+. We don't expect to ever count higher than this.
# https://mail.python.org/pipermail/python-list/2005-April/342279.html
_REF_ITER = itertools.count()
# Dictionary mapping _tf_ref_id (an integer drawn from _REF_ITER, not
# id(obj)) => _RefInfoField.
_REF_INFO = {}
def _deleted(obj_id, fatal_error):
  """Finalizer callback: report if the tracked object was never used.

  Removes the bookkeeping entry for `obj_id` from _REF_INFO; if the object
  was never marked as used, logs an error (or aborts via tf_logging.fatal
  when `fatal_error` is set).
  """
  info = _REF_INFO.pop(obj_id)
  if info.object_used:
    return
  logger = tf_logging.fatal if fatal_error else tf_logging.error
  logger(
      '==================================\n'
      'Object was never used (type %s):\n%s\nIf you want to mark it as '
      'used call its "mark_used()" method.\nIt was originally created '
      'here:\n%s\n'
      '==================================' %
      (info.type_, info.repr_, info.creation_stack))
def _add_should_use_warning(x, fatal_error=False):
  """Wraps object x so that if it is never used, a warning is logged.

  Args:
    x: Python object.
    fatal_error: Python bool. If `True`, tf.logging.fatal is raised
      if the returned value is never used.

  Returns:
    An instance of `TFShouldUseWarningWrapper` which subclasses `type(x)`
    and is a very shallow wrapper for `x` which logs access into `x`.
  """
  if x is None:  # special corner case where x is None
    return x
  if hasattr(x, '_tf_ref_id'):  # this is already a TFShouldUseWarningWrapper
    return x
  def override_method(method):
    # Wrap `method` so that calling it marks the object as used before
    # delegating to the original implementation.
    def fn(self, *args, **kwargs):
      # pylint: disable=protected-access
      _REF_INFO[self._tf_ref_id] = _REF_INFO[self._tf_ref_id]._replace(
          object_used=True)
      return method(self, *args, **kwargs)
    return fn
  class TFShouldUseWarningWrapper(type(x)):
    """Wrapper for objects that keeps track of their use."""
    def __init__(self, true_self):
      # Shallow wrap: share the wrapped object's attribute dict.
      self.__dict__ = true_self.__dict__
      stack = [s.strip() for s in traceback.format_stack()]
      # Remove top three stack entries from adding the wrapper
      self.creation_stack = '\n'.join(stack[:-3])
      self._tf_ref_id = next(_REF_ITER)
      _REF_INFO[self._tf_ref_id] = _RefInfoField(
          type_=type(x),
          repr_=repr(x),
          creation_stack=stack,
          object_used=False)
      # Create a finalizer for self, which will be called when self is
      # garbage collected. Can't add self as the args because the
      # loop will break garbage collection. We keep track of
      # ourselves via python ids.
      weakref.finalize(self, _deleted, self._tf_ref_id, fatal_error)
    # Not sure why this pylint warning is being used; this is not an
    # old class form.
    # pylint: disable=super-on-old-class
    def __getattribute__(self, name):
      # Reading the ref id itself must not mark the object as used.
      if name == '_tf_ref_id':
        return super(TFShouldUseWarningWrapper, self).__getattribute__(name)
      # Any other attribute access counts as "used" (entry may already be
      # gone if the finalizer ran).
      if self._tf_ref_id in _REF_INFO:
        _REF_INFO[self._tf_ref_id] = _REF_INFO[self._tf_ref_id]._replace(
            object_used=True)
      return super(TFShouldUseWarningWrapper, self).__getattribute__(name)
    def mark_used(self, *args, **kwargs):
      # Explicit opt-out: mark used and forward to any inherited mark_used.
      _REF_INFO[self._tf_ref_id] = _REF_INFO[self._tf_ref_id]._replace(
          object_used=True)
      if hasattr(super(TFShouldUseWarningWrapper, self), 'mark_used'):
        return super(TFShouldUseWarningWrapper, self).mark_used(*args, **kwargs)
    # pylint: enable=super-on-old-class
  # Wrap every plain function on the subclass (except the bookkeeping
  # methods) so that calling it marks the object as used.
  for name in dir(TFShouldUseWarningWrapper):
    method = getattr(TFShouldUseWarningWrapper, name)
    if not isinstance(method, types.FunctionType):
      continue
    if name in ('__init__', '__getattribute__', '__del__', 'mark_used'):
      continue
    setattr(TFShouldUseWarningWrapper, name,
            functools.wraps(method)(override_method(method)))
  wrapped = TFShouldUseWarningWrapper(x)
  wrapped.__doc__ = x.__doc__  # functools.wraps fails on some objects.
  # Construction itself touched attributes; reset the used flag so the
  # caller starts from a clean "never used" state.
  ref_id = wrapped._tf_ref_id  # pylint: disable=protected-access
  _REF_INFO[ref_id] = _REF_INFO[ref_id]._replace(object_used=False)
  return wrapped
def should_use_result(fn):
  """Function wrapper that ensures the function's output is used.

  If the output is not used, a `tf.logging.error` is logged.

  An output is marked as used if any of its attributes are read, modified, or
  updated. Examples when the output is a `Tensor` include:

  - Using it in any capacity (e.g. `y = t + 0`, `sess.run(t)`)
  - Accessing a property (e.g. getting `t.name` or `t.op`).

  Note, certain behaviors cannot be tracked - for these the object may not
  be marked as used. Examples include:

  - `t != 0`. In this case, comparison is done on types / ids.
  - `isinstance(t, tf.Tensor)`. Similar to above.

  Args:
    fn: The function to wrap.

  Returns:
    The wrapped function.
  """
  def wrapped(*args, **kwargs):
    # Non-fatal variant: _add_should_use_warning defaults to error logging.
    return _add_should_use_warning(fn(*args, **kwargs))
  return tf_decorator.make_decorator(
      fn, wrapped, 'should_use_result',
      ((fn.__doc__ or '') +
       ('\n\n  '
        '**NOTE** The output of this function should be used. If it is not, '
        'a warning will be logged. To mark the output as used, '
        'call its .mark_used() method.')))
def must_use_result_or_fatal(fn):
  """Function wrapper that ensures the function's output is used.

  If the output is not used, a `tf.logging.fatal` error is raised.

  An output is marked as used if any of its attributes are read, modified, or
  updated. Examples when the output is a `Tensor` include:

  - Using it in any capacity (e.g. `y = t + 0`, `sess.run(t)`)
  - Accessing a property (e.g. getting `t.name` or `t.op`).

  Note, certain behaviors cannot be tracked - for these the object may not
  be marked as used. Examples include:

  - `t != 0`. In this case, comparison is done on types / ids.
  - `isinstance(t, tf.Tensor)`. Similar to above.

  Args:
    fn: The function to wrap.

  Returns:
    The wrapped function.
  """
  def wrapped(*args, **kwargs):
    # Fatal variant: an unused result aborts via tf_logging.fatal.
    return _add_should_use_warning(fn(*args, **kwargs), fatal_error=True)
  return tf_decorator.make_decorator(
      fn, wrapped, 'must_use_result_or_fatal',
      ((fn.__doc__ or '') +
       ('\n\n  '
        '**NOTE** The output of this function must be used. If it is not, '
        'a fatal error will be raised. To mark the output as used, '
        'call its .mark_used() method.')))
|
jasonbot/django
|
refs/heads/master
|
tests/servers/views.py
|
384
|
from django.http import HttpResponse
from .models import Person
def example_view(request):
    """Serve a fixed plain-text body; a minimal smoke-test endpoint."""
    response = HttpResponse('example view')
    return response
def model_view(request):
    """Respond with every Person's name, one per line."""
    names = (person.name for person in Person.objects.all())
    return HttpResponse('\n'.join(names))
def create_model_instance(request):
    """Persist a new Person named 'emily'; reply with an empty body."""
    Person(name='emily').save()
    return HttpResponse('')
def environ_view(request):
    """Dump the WSGI environ as one 'key: repr(value)' line per entry."""
    entries = ("%s: %r" % (key, value) for key, value in request.environ.items())
    return HttpResponse("\n".join(entries))
|
overtherain/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/django_0_96/django/utils/autoreload.py
|
48
|
# Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time
try:
import thread
except ImportError:
import dummy_thread as thread
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
import threading
except ImportError:
pass
RUN_RELOADER = True
def reloader_thread():
    """Poll the files of all loaded modules once per second.

    Exits the process with status 3 — the "please restart me" code that
    restart_with_reloader() watches for — as soon as any watched file's
    modification time changes. Loops until the module-level RUN_RELOADER
    flag is cleared.
    """
    mtimes = {}
    win = (sys.platform == "win32")
    while RUN_RELOADER:
        for filename in filter(lambda v: v, map(lambda m: getattr(m, "__file__", None), sys.modules.values())):
            # Watch the .py source rather than the compiled file. The
            # original condition compared against the literal "*.pyo",
            # which can never match a real filename, so modules loaded
            # from .pyo files were silently never watched; fixed here.
            if filename.endswith((".pyc", ".pyo")):
                filename = filename[:-1]
            if not os.path.exists(filename):
                continue  # File might be in an egg, so it can't be reloaded.
            stat = os.stat(filename)
            mtime = stat.st_mtime
            if win:
                # Kept from the original: presumably compensates for
                # unreliable mtime granularity on win32 — TODO confirm.
                mtime -= stat.st_ctime
            if filename not in mtimes:
                # First sighting: record the baseline, don't trigger.
                mtimes[filename] = mtime
                continue
            if mtime != mtimes[filename]:
                sys.exit(3)  # force reload
        time.sleep(1)
def restart_with_reloader():
    """Re-run this script in a child process until it stops asking to reload.

    Spawns the current interpreter with the same argv in a loop; exit code 3
    (emitted by reloader_thread) means "restart me". Any other exit code is
    returned to the caller.
    """
    while True:
        cmdline = [sys.executable] + sys.argv
        if sys.platform == "win32":
            # spawnve on Windows needs each argument individually quoted.
            cmdline = ['"%s"' % part for part in cmdline]
        child_environ = os.environ.copy()
        child_environ["RUN_MAIN"] = 'true'
        status = os.spawnve(os.P_WAIT, sys.executable, cmdline, child_environ)
        if status != 3:
            return status
def main(main_func, args=None, kwargs=None):
    """Run `main_func` under the autoreloader.

    The parent process (RUN_MAIN unset) keeps respawning the script via
    restart_with_reloader(); the child runs `main_func` on a background
    thread while reloader_thread() watches module files on this one.
    """
    if os.environ.get("RUN_MAIN") != "true":
        # Parent: supervise child processes; exit with the child's code.
        try:
            sys.exit(restart_with_reloader())
        except KeyboardInterrupt:
            pass
        return
    # Child: run the payload and monitor files until one changes.
    thread.start_new_thread(main_func, args or (), kwargs or {})
    try:
        reloader_thread()
    except KeyboardInterrupt:
        pass
|
harnash/sparrow
|
refs/heads/master
|
apps/results/migrations/0003_auto_20150529_1146.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
    # Auto-generated migration: redefines TestRawResult.data as a JSONField.
    # Left byte-for-byte as generated — editing applied migrations breaks
    # migration-state consistency.

    dependencies = [
        ('results', '0002_testrawresult'),
    ]

    operations = [
        migrations.AlterField(
            model_name='testrawresult',
            name='data',
            # NOTE(review): default={} is a mutable shared default; presumably
            # mirrors the model field's declaration — verify against the model.
            field=jsonfield.fields.JSONField(default={}),
        ),
    ]
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/building/player/shared_player_city_capitol_corellia_style_01.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Construct the Building object for this autogenerated template."""
    building = Building()
    building.template = "object/building/player/shared_player_city_capitol_corellia_style_01.iff"
    building.attribute_template_id = -1
    building.stfName("","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return building
|
Vrturo/FrontEndLabz
|
refs/heads/master
|
techTutorials/gulp/node_modules/node-gyp/gyp/pylib/gyp/common_test.py
|
2542
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
  """Exercises gyp.common.TopologicallySorted."""

  def test_Valid(self):
    """A DAG with exactly one valid ordering sorts to that ordering."""
    dependencies = {
        'a': ['b', 'c'],
        'b': [],
        'c': ['d'],
        'd': ['b'],
    }
    self.assertEqual(
        gyp.common.TopologicallySorted(
            dependencies.keys(), lambda node: tuple(dependencies[node])),
        ['a', 'c', 'd', 'b'])

  def test_Cycle(self):
    """A cyclic graph raises CycleError."""
    dependencies = {
        'a': ['b'],
        'b': ['c'],
        'c': ['d'],
        'd': ['a'],
    }
    self.assertRaises(
        gyp.common.CycleError, gyp.common.TopologicallySorted,
        dependencies.keys(), lambda node: tuple(dependencies[node]))
class TestGetFlavor(unittest.TestCase):
  """Exercises gyp.common.GetFlavor."""

  # Saved so tearDown can restore the real platform after each test.
  original_platform = ''

  def setUp(self):
    self.original_platform = sys.platform

  def tearDown(self):
    sys.platform = self.original_platform

  def assertFlavor(self, expected, argument, param):
    # Fake the platform, then check the flavor GetFlavor derives from it.
    sys.platform = argument
    self.assertEqual(expected, gyp.common.GetFlavor(param))

  def test_platform_default(self):
    """With no override, the flavor follows sys.platform."""
    for platform, flavor in (('freebsd9', 'freebsd'),
                             ('freebsd10', 'freebsd'),
                             ('openbsd5', 'openbsd'),
                             ('sunos5', 'solaris'),
                             ('sunos', 'solaris'),
                             ('linux2', 'linux'),
                             ('linux3', 'linux')):
      self.assertFlavor(flavor, platform, {})

  def test_param(self):
    """An explicit 'flavor' parameter overrides the platform."""
    self.assertFlavor('foobar', 'linux2', {'flavor': 'foobar'})
if __name__ == '__main__':
  # Run this test module's cases when invoked directly.
  unittest.main()
|
amitgroup/parts-net
|
refs/heads/master
|
scripts/train_parts.py
|
1
|
from __future__ import division, print_function, absolute_import
import numpy as np
import amitgroup as ag
import pnet
import pnet.data
if __name__ == '__main__':
    # Command-line driver: trains a parts-based network (pnet) on an image
    # dataset and saves the trained model to disk.
    ag.set_verbose(True)
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('size', metavar='<part size>', type=int)
    parser.add_argument('data', metavar='<data file>',
                        type=str,
                        help='Filename of data file')
    parser.add_argument('seed', type=int, default=1)
    parser.add_argument('save_file', metavar='<output file>',
                        type=str,
                        help='Filename of savable model file')
    parser.add_argument('--factor', '-f', type=float)
    parser.add_argument('--count', '-c', type=int)
    args = parser.parse_args()
    part_size = args.size
    save_file = args.save_file
    seed = args.seed
    # Load the dataset and keep the first --count samples
    # (a slice with count=None keeps everything).
    data = pnet.data.load_data(args.data)
    data = data[:args.count]
    if data.ndim == 3:
        # Grayscale input: append a trailing channel axis.
        data = data[..., np.newaxis]
    print('Using', args.count)
    # Bookkeeping lists; NOTE(review): never appended to in this script —
    # presumably leftovers from a larger experiment harness.
    unsup_training_times = []
    sup_training_times = []
    testing_times = []
    error_rates = []
    all_num_parts = []
    for training_seed in [seed]:
        layers = []
        if args.factor is not None:
            # Optional pre-resize of the input images.
            layers = [
                pnet.ResizeLayer(factor=args.factor, make_gray=False),
            ]
        # The chain below is a set of hand-toggled experiment configurations;
        # only the `if 1:` branch is live. The dead branches reference names
        # (numParts, num_orientations) that are not defined anywhere in this
        # script and would raise NameError if re-enabled unmodified.
        if 1:
            settings = dict(n_iter=10,
                            seed=0,
                            n_init=1,
                            standardize=True,
                            samples_per_image=100,
                            #max_samples=300000,
                            max_samples=200000,
                            uniform_weights=True,
                            max_covariance_samples=20000,
                            covariance_type='tied',
                            logratio_thresh=-np.inf,
                            std_thresh_frame=0,
                            channel_mode='together',
                            coding='soft',
                            normalize_globally=False,
                            min_covariance=0.0075,
                            std_thresh=0.01,
                            standardization_epsilon=10 / 255,
                            code_bkg=False,
                            whitening_epsilon=None,
                            min_count=0,
                            )
            # Live configuration: a single oriented-Gaussian parts layer.
            # NOTE(review): part_shape is hard-coded (6, 6) here; the
            # `size` CLI argument is only used by the dead branches.
            layers += [
                pnet.OrientedGaussianPartsLayer(n_parts=1000, n_orientations=1,
                                                part_shape=(6, 6),
                                                settings=settings),
            ]
        elif 0:
            # Dead branch: two-stage configuration (small parts + pooling).
            if 0:
                settings = dict(n_iter=20,
                                seed=0,
                                n_init=5,
                                standardize=True,
                                samples_per_image=50,
                                max_samples=20000,
                                uniform_weights=True,
                                #max_covariance_samples=10000,
                                covariance_type='diag',
                                logratio_thresh=-np.inf,
                                std_thresh_frame=0,
                                channel_mode='separate',
                                normalize_globally=False,
                                min_covariance=0.1,
                                std_thresh=0.01,
                                standardization_epsilon=10 / 255,
                                code_bkg=False,
                                whitening_epsilon=None,
                                )
                layers += [
                    pnet.OrientedGaussianPartsLayer(n_parts=24, n_orientations=1,
                                                    part_shape=(3, 3),
                                                    settings=settings),
                ]
            else:
                layers += [
                    pnet.KMeansPartsLayer(24, (4, 4), settings=dict(
                        seed=0,
                        n_per_image=50,
                        n_samples=20000,
                        n_init=5,
                        max_iter=300,
                        n_jobs=1,
                        random_centroids=False,
                        code_bkg=False,
                        std_thresh=0.01,
                        whitening_epsilon=0.1,
                        coding='triangle',
                    )),
                ]
            if 0:
                layers += [
                    pnet.PoolingLayer(final_shape=(14, 14), operation='sum'),
                    pnet.OrientedGaussianPartsLayer(n_parts=400,
                                                    n_orientations=1,
                                                    part_shape=(4, 4),
                                                    settings=dict(
                                                        n_iter=20,
                                                        seed=training_seed,
                                                        n_init=1,
                                                        standardize=False,
                                                        samples_per_image=100,
                                                        max_samples=10000,
                                                        max_covariance_samples=1000,
                                                        uniform_weights=True,
                                                        covariance_type='tied',
                                                        logratio_thresh=-np.inf,
                                                        std_thresh_frame=0,
                                                        channel_mode='together',
                                                        min_covariance=0.1,
                                                        std_thresh=0.01,
                                                        code_bkg=False,
                                                        whitening_epsilon=None,
                                                    )),
                ]
            elif 0:
                layers += [
                    pnet.PoolingLayer(shape=(3, 3), strides=(1, 1)),
                    pnet.OrientedPartsLayer(n_parts=500,
                                            n_orientations=1,
                                            part_shape=(6, 6),
                                            settings=dict(outer_frame=1,
                                                          seed=training_seed,
                                                          threshold=2,
                                                          samples_per_image=100,
                                                          max_samples=300000,
                                                          min_covariance=0.1,
                                                          std_thresh=0.01,
                                                          standardization_epsilon=10 / 255,
                                                          standardize=False,
                                                          min_prob=0.0005,)),
                ]
            else:
                layers += [
                    #pnet.PoolingLayer(shape=(2, 2), strides=(1, 1), settings=dict(output_dtype='float32')),
                    pnet.KMeansPartsLayer(400, (6, 6), settings=dict(
                        seed=0,
                        n_per_image=100,
                        n_samples=100000,
                        n_init=1,
                        max_iter=300,
                        n_jobs=1,
                        random_centroids=False,
                        code_bkg=False,
                        std_thresh=0.01,
                        whitening_epsilon=0.1,
                        standardize=False,
                        whiten=False,
                        coding='triangle',
                    )),
                ]
        elif 0:
            # Dead branch: oriented binary-parts layer over edge features.
            # NOTE(review): numParts and num_orientations are undefined here.
            layers += [
                pnet.OrientedPartsLayer(numParts, num_orientations, (part_size, part_size), settings=dict(outer_frame=2,
                                        em_seed=training_seed,
                                        n_iter=5,
                                        n_init=1,
                                        threshold=2,
                                        #samples_per_image=20,
                                        samples_per_image=50,
                                        max_samples=80000,
                                        #max_samples=5000,
                                        #max_samples=100000,
                                        #max_samples=2000,
                                        rotation_spreading_radius=0,
                                        min_prob=0.0005,
                                        bedges=dict(
                                            k=5,
                                            minimum_contrast=0.05,
                                            spread='orthogonal',
                                            #spread='box',
                                            radius=1,
                                            #pre_blurring=1,
                                            contrast_insensitive=False,
                                        ),
                                        )),
            ]
        elif 0:
            # Dead branch: plain Gaussian parts layer.
            layers += [
                pnet.GaussianPartsLayer(1000, (part_size, part_size), settings=dict(
                    seed=0,
                    covariance_type='tied',
                    n_per_image=20,
                    #n_samples=50000,
                    n_samples=40000,
                )),
            ]
        elif 1:
            # Dead branch (shadowed by the `if 1:` above): k-means parts.
            layers += [
                pnet.KMeansPartsLayer(400, (part_size, part_size), settings=dict(
                    seed=training_seed,
                    n_per_image=100,
                    n_samples=100000,
                    n_init=1,
                    max_iter=300,
                    n_jobs=1,
                    random_centroids=False,
                    code_bkg=False,
                    std_thresh=0.01,
                    whitening_epsilon=0.1,
                    coding='soft',
                )),
            ]
        elif 1:
            # Dead branch (shadowed): binary-tree parts over edge features.
            layers += [
                pnet.EdgeLayer(k=5, radius=1, spread='orthogonal', minimum_contrast=0.05),
                pnet.BinaryTreePartsLayer(8, (part_size, part_size), settings=dict(outer_frame=1,
                                          em_seed=training_seed,
                                          threshold=2,
                                          samples_per_image=40,
                                          max_samples=100000,
                                          train_limit=10000,
                                          min_prob=0.005,
                                          #keypoint_suppress_radius=1,
                                          min_samples_per_part=50,
                                          split_criterion='IG',
                                          min_information_gain=0.01,
                                          )),
            ]
        else:
            # Dead branch: flat parts layer over edge features.
            # NOTE(review): numParts is undefined here as well.
            layers += [
                pnet.EdgeLayer(
                    k=5,
                    minimum_contrast=0.05,
                    spread='orthogonal',
                    radius=1,
                    contrast_insensitive=False,
                ),
                pnet.PartsLayer(numParts, (6, 6), settings=dict(outer_frame=1,
                                em_seed=training_seed,
                                threshold=2,
                                samples_per_image=40,
                                max_samples=100000,
                                #max_samples=30000,
                                train_limit=10000,
                                min_prob=0.00005,
                                )),
        ]
        # Assemble, train (unsupervised: identity label function), and save.
        net = pnet.PartsNet(layers)
        print(net)
        print('Training parts')
        net.train(lambda _: _, data)
        print('Done.')
        net.save(save_file)
|
tgalal/python-axolotl
|
refs/heads/master
|
axolotl/state/storageprotos_pb2.py
|
1
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: LocalStorageProtocol.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='LocalStorageProtocol.proto',
package='textsecure',
serialized_pb=_b('\n\x1aLocalStorageProtocol.proto\x12\ntextsecure\"\xd3\x08\n\x10SessionStructure\x12\x16\n\x0esessionVersion\x18\x01 \x01(\r\x12\x1b\n\x13localIdentityPublic\x18\x02 \x01(\x0c\x12\x1c\n\x14remoteIdentityPublic\x18\x03 \x01(\x0c\x12\x0f\n\x07rootKey\x18\x04 \x01(\x0c\x12\x17\n\x0fpreviousCounter\x18\x05 \x01(\r\x12\x37\n\x0bsenderChain\x18\x06 \x01(\x0b\x32\".textsecure.SessionStructure.Chain\x12:\n\x0ereceiverChains\x18\x07 \x03(\x0b\x32\".textsecure.SessionStructure.Chain\x12K\n\x12pendingKeyExchange\x18\x08 \x01(\x0b\x32/.textsecure.SessionStructure.PendingKeyExchange\x12\x41\n\rpendingPreKey\x18\t \x01(\x0b\x32*.textsecure.SessionStructure.PendingPreKey\x12\x1c\n\x14remoteRegistrationId\x18\n \x01(\r\x12\x1b\n\x13localRegistrationId\x18\x0b \x01(\r\x12\x14\n\x0cneedsRefresh\x18\x0c \x01(\x08\x12\x14\n\x0c\x61liceBaseKey\x18\r \x01(\x0c\x1a\xb9\x02\n\x05\x43hain\x12\x18\n\x10senderRatchetKey\x18\x01 \x01(\x0c\x12\x1f\n\x17senderRatchetKeyPrivate\x18\x02 \x01(\x0c\x12=\n\x08\x63hainKey\x18\x03 \x01(\x0b\x32+.textsecure.SessionStructure.Chain.ChainKey\x12\x42\n\x0bmessageKeys\x18\x04 \x03(\x0b\x32-.textsecure.SessionStructure.Chain.MessageKey\x1a&\n\x08\x43hainKey\x12\r\n\x05index\x18\x01 \x01(\r\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x1aJ\n\nMessageKey\x12\r\n\x05index\x18\x01 \x01(\r\x12\x11\n\tcipherKey\x18\x02 \x01(\x0c\x12\x0e\n\x06macKey\x18\x03 \x01(\x0c\x12\n\n\x02iv\x18\x04 \x01(\x0c\x1a\xcd\x01\n\x12PendingKeyExchange\x12\x10\n\x08sequence\x18\x01 \x01(\r\x12\x14\n\x0clocalBaseKey\x18\x02 \x01(\x0c\x12\x1b\n\x13localBaseKeyPrivate\x18\x03 \x01(\x0c\x12\x17\n\x0flocalRatchetKey\x18\x04 \x01(\x0c\x12\x1e\n\x16localRatchetKeyPrivate\x18\x05 \x01(\x0c\x12\x18\n\x10localIdentityKey\x18\x07 \x01(\x0c\x12\x1f\n\x17localIdentityKeyPrivate\x18\x08 \x01(\x0c\x1aJ\n\rPendingPreKey\x12\x10\n\x08preKeyId\x18\x01 \x01(\r\x12\x16\n\x0esignedPreKeyId\x18\x03 \x01(\x05\x12\x0f\n\x07\x62\x61seKey\x18\x02 
\x01(\x0c\"\x7f\n\x0fRecordStructure\x12\x34\n\x0e\x63urrentSession\x18\x01 \x01(\x0b\x32\x1c.textsecure.SessionStructure\x12\x36\n\x10previousSessions\x18\x02 \x03(\x0b\x32\x1c.textsecure.SessionStructure\"J\n\x15PreKeyRecordStructure\x12\n\n\x02id\x18\x01 \x01(\r\x12\x11\n\tpublicKey\x18\x02 \x01(\x0c\x12\x12\n\nprivateKey\x18\x03 \x01(\x0c\"v\n\x1bSignedPreKeyRecordStructure\x12\n\n\x02id\x18\x01 \x01(\r\x12\x11\n\tpublicKey\x18\x02 \x01(\x0c\x12\x12\n\nprivateKey\x18\x03 \x01(\x0c\x12\x11\n\tsignature\x18\x04 \x01(\x0c\x12\x11\n\ttimestamp\x18\x05 \x01(\x06\"A\n\x18IdentityKeyPairStructure\x12\x11\n\tpublicKey\x18\x01 \x01(\x0c\x12\x12\n\nprivateKey\x18\x02 \x01(\x0c\"\xb8\x03\n\x17SenderKeyStateStructure\x12\x13\n\x0bsenderKeyId\x18\x01 \x01(\r\x12J\n\x0esenderChainKey\x18\x02 \x01(\x0b\x32\x32.textsecure.SenderKeyStateStructure.SenderChainKey\x12N\n\x10senderSigningKey\x18\x03 \x01(\x0b\x32\x34.textsecure.SenderKeyStateStructure.SenderSigningKey\x12O\n\x11senderMessageKeys\x18\x04 \x03(\x0b\x32\x34.textsecure.SenderKeyStateStructure.SenderMessageKey\x1a\x31\n\x0eSenderChainKey\x12\x11\n\titeration\x18\x01 \x01(\r\x12\x0c\n\x04seed\x18\x02 \x01(\x0c\x1a\x33\n\x10SenderMessageKey\x12\x11\n\titeration\x18\x01 \x01(\r\x12\x0c\n\x04seed\x18\x02 \x01(\x0c\x1a\x33\n\x10SenderSigningKey\x12\x0e\n\x06public\x18\x01 \x01(\x0c\x12\x0f\n\x07private\x18\x02 \x01(\x0c\"X\n\x18SenderKeyRecordStructure\x12<\n\x0fsenderKeyStates\x18\x01 \x03(\x0b\x32#.textsecure.SenderKeyStateStructureB4\n#org.whispersystems.libaxolotl.stateB\rStorageProtos')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SESSIONSTRUCTURE_CHAIN_CHAINKEY = _descriptor.Descriptor(
name='ChainKey',
full_name='textsecure.SessionStructure.Chain.ChainKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='index', full_name='textsecure.SessionStructure.Chain.ChainKey.index', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key', full_name='textsecure.SessionStructure.Chain.ChainKey.key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=752,
serialized_end=790,
)
_SESSIONSTRUCTURE_CHAIN_MESSAGEKEY = _descriptor.Descriptor(
name='MessageKey',
full_name='textsecure.SessionStructure.Chain.MessageKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='index', full_name='textsecure.SessionStructure.Chain.MessageKey.index', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cipherKey', full_name='textsecure.SessionStructure.Chain.MessageKey.cipherKey', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='macKey', full_name='textsecure.SessionStructure.Chain.MessageKey.macKey', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='iv', full_name='textsecure.SessionStructure.Chain.MessageKey.iv', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=792,
serialized_end=866,
)
_SESSIONSTRUCTURE_CHAIN = _descriptor.Descriptor(
name='Chain',
full_name='textsecure.SessionStructure.Chain',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='senderRatchetKey', full_name='textsecure.SessionStructure.Chain.senderRatchetKey', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='senderRatchetKeyPrivate', full_name='textsecure.SessionStructure.Chain.senderRatchetKeyPrivate', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chainKey', full_name='textsecure.SessionStructure.Chain.chainKey', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='messageKeys', full_name='textsecure.SessionStructure.Chain.messageKeys', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SESSIONSTRUCTURE_CHAIN_CHAINKEY, _SESSIONSTRUCTURE_CHAIN_MESSAGEKEY, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=553,
serialized_end=866,
)
_SESSIONSTRUCTURE_PENDINGKEYEXCHANGE = _descriptor.Descriptor(
name='PendingKeyExchange',
full_name='textsecure.SessionStructure.PendingKeyExchange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sequence', full_name='textsecure.SessionStructure.PendingKeyExchange.sequence', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='localBaseKey', full_name='textsecure.SessionStructure.PendingKeyExchange.localBaseKey', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='localBaseKeyPrivate', full_name='textsecure.SessionStructure.PendingKeyExchange.localBaseKeyPrivate', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='localRatchetKey', full_name='textsecure.SessionStructure.PendingKeyExchange.localRatchetKey', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='localRatchetKeyPrivate', full_name='textsecure.SessionStructure.PendingKeyExchange.localRatchetKeyPrivate', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='localIdentityKey', full_name='textsecure.SessionStructure.PendingKeyExchange.localIdentityKey', index=5,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='localIdentityKeyPrivate', full_name='textsecure.SessionStructure.PendingKeyExchange.localIdentityKeyPrivate', index=6,
number=8, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=869,
serialized_end=1074,
)
_SESSIONSTRUCTURE_PENDINGPREKEY = _descriptor.Descriptor(
name='PendingPreKey',
full_name='textsecure.SessionStructure.PendingPreKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='preKeyId', full_name='textsecure.SessionStructure.PendingPreKey.preKeyId', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='signedPreKeyId', full_name='textsecure.SessionStructure.PendingPreKey.signedPreKeyId', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='baseKey', full_name='textsecure.SessionStructure.PendingPreKey.baseKey', index=2,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1076,
serialized_end=1150,
)
_SESSIONSTRUCTURE = _descriptor.Descriptor(
name='SessionStructure',
full_name='textsecure.SessionStructure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sessionVersion', full_name='textsecure.SessionStructure.sessionVersion', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='localIdentityPublic', full_name='textsecure.SessionStructure.localIdentityPublic', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='remoteIdentityPublic', full_name='textsecure.SessionStructure.remoteIdentityPublic', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rootKey', full_name='textsecure.SessionStructure.rootKey', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='previousCounter', full_name='textsecure.SessionStructure.previousCounter', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='senderChain', full_name='textsecure.SessionStructure.senderChain', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='receiverChains', full_name='textsecure.SessionStructure.receiverChains', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pendingKeyExchange', full_name='textsecure.SessionStructure.pendingKeyExchange', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pendingPreKey', full_name='textsecure.SessionStructure.pendingPreKey', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='remoteRegistrationId', full_name='textsecure.SessionStructure.remoteRegistrationId', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='localRegistrationId', full_name='textsecure.SessionStructure.localRegistrationId', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='needsRefresh', full_name='textsecure.SessionStructure.needsRefresh', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aliceBaseKey', full_name='textsecure.SessionStructure.aliceBaseKey', index=12,
number=13, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SESSIONSTRUCTURE_CHAIN, _SESSIONSTRUCTURE_PENDINGKEYEXCHANGE, _SESSIONSTRUCTURE_PENDINGPREKEY, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=43,
serialized_end=1150,
)
_RECORDSTRUCTURE = _descriptor.Descriptor(
name='RecordStructure',
full_name='textsecure.RecordStructure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='currentSession', full_name='textsecure.RecordStructure.currentSession', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='previousSessions', full_name='textsecure.RecordStructure.previousSessions', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1152,
serialized_end=1279,
)
_PREKEYRECORDSTRUCTURE = _descriptor.Descriptor(
name='PreKeyRecordStructure',
full_name='textsecure.PreKeyRecordStructure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='textsecure.PreKeyRecordStructure.id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='publicKey', full_name='textsecure.PreKeyRecordStructure.publicKey', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='privateKey', full_name='textsecure.PreKeyRecordStructure.privateKey', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1281,
serialized_end=1355,
)
_SIGNEDPREKEYRECORDSTRUCTURE = _descriptor.Descriptor(
name='SignedPreKeyRecordStructure',
full_name='textsecure.SignedPreKeyRecordStructure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='textsecure.SignedPreKeyRecordStructure.id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='publicKey', full_name='textsecure.SignedPreKeyRecordStructure.publicKey', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='privateKey', full_name='textsecure.SignedPreKeyRecordStructure.privateKey', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='signature', full_name='textsecure.SignedPreKeyRecordStructure.signature', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timestamp', full_name='textsecure.SignedPreKeyRecordStructure.timestamp', index=4,
number=5, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1357,
serialized_end=1475,
)
_IDENTITYKEYPAIRSTRUCTURE = _descriptor.Descriptor(
name='IdentityKeyPairStructure',
full_name='textsecure.IdentityKeyPairStructure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='publicKey', full_name='textsecure.IdentityKeyPairStructure.publicKey', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='privateKey', full_name='textsecure.IdentityKeyPairStructure.privateKey', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1477,
serialized_end=1542,
)
_SENDERKEYSTATESTRUCTURE_SENDERCHAINKEY = _descriptor.Descriptor(
name='SenderChainKey',
full_name='textsecure.SenderKeyStateStructure.SenderChainKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='iteration', full_name='textsecure.SenderKeyStateStructure.SenderChainKey.iteration', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seed', full_name='textsecure.SenderKeyStateStructure.SenderChainKey.seed', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1830,
serialized_end=1879,
)
_SENDERKEYSTATESTRUCTURE_SENDERMESSAGEKEY = _descriptor.Descriptor(
name='SenderMessageKey',
full_name='textsecure.SenderKeyStateStructure.SenderMessageKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='iteration', full_name='textsecure.SenderKeyStateStructure.SenderMessageKey.iteration', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seed', full_name='textsecure.SenderKeyStateStructure.SenderMessageKey.seed', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1881,
serialized_end=1932,
)
_SENDERKEYSTATESTRUCTURE_SENDERSIGNINGKEY = _descriptor.Descriptor(
name='SenderSigningKey',
full_name='textsecure.SenderKeyStateStructure.SenderSigningKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='public', full_name='textsecure.SenderKeyStateStructure.SenderSigningKey.public', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='private', full_name='textsecure.SenderKeyStateStructure.SenderSigningKey.private', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1934,
serialized_end=1985,
)
_SENDERKEYSTATESTRUCTURE = _descriptor.Descriptor(
name='SenderKeyStateStructure',
full_name='textsecure.SenderKeyStateStructure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='senderKeyId', full_name='textsecure.SenderKeyStateStructure.senderKeyId', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='senderChainKey', full_name='textsecure.SenderKeyStateStructure.senderChainKey', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='senderSigningKey', full_name='textsecure.SenderKeyStateStructure.senderSigningKey', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='senderMessageKeys', full_name='textsecure.SenderKeyStateStructure.senderMessageKeys', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SENDERKEYSTATESTRUCTURE_SENDERCHAINKEY, _SENDERKEYSTATESTRUCTURE_SENDERMESSAGEKEY, _SENDERKEYSTATESTRUCTURE_SENDERSIGNINGKEY, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1545,
serialized_end=1985,
)
_SENDERKEYRECORDSTRUCTURE = _descriptor.Descriptor(
name='SenderKeyRecordStructure',
full_name='textsecure.SenderKeyRecordStructure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='senderKeyStates', full_name='textsecure.SenderKeyRecordStructure.senderKeyStates', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=1987,
serialized_end=2075,
)
_SESSIONSTRUCTURE_CHAIN_CHAINKEY.containing_type = _SESSIONSTRUCTURE_CHAIN
_SESSIONSTRUCTURE_CHAIN_MESSAGEKEY.containing_type = _SESSIONSTRUCTURE_CHAIN
_SESSIONSTRUCTURE_CHAIN.fields_by_name['chainKey'].message_type = _SESSIONSTRUCTURE_CHAIN_CHAINKEY
_SESSIONSTRUCTURE_CHAIN.fields_by_name['messageKeys'].message_type = _SESSIONSTRUCTURE_CHAIN_MESSAGEKEY
_SESSIONSTRUCTURE_CHAIN.containing_type = _SESSIONSTRUCTURE
_SESSIONSTRUCTURE_PENDINGKEYEXCHANGE.containing_type = _SESSIONSTRUCTURE
_SESSIONSTRUCTURE_PENDINGPREKEY.containing_type = _SESSIONSTRUCTURE
_SESSIONSTRUCTURE.fields_by_name['senderChain'].message_type = _SESSIONSTRUCTURE_CHAIN
_SESSIONSTRUCTURE.fields_by_name['receiverChains'].message_type = _SESSIONSTRUCTURE_CHAIN
_SESSIONSTRUCTURE.fields_by_name['pendingKeyExchange'].message_type = _SESSIONSTRUCTURE_PENDINGKEYEXCHANGE
_SESSIONSTRUCTURE.fields_by_name['pendingPreKey'].message_type = _SESSIONSTRUCTURE_PENDINGPREKEY
_RECORDSTRUCTURE.fields_by_name['currentSession'].message_type = _SESSIONSTRUCTURE
_RECORDSTRUCTURE.fields_by_name['previousSessions'].message_type = _SESSIONSTRUCTURE
_SENDERKEYSTATESTRUCTURE_SENDERCHAINKEY.containing_type = _SENDERKEYSTATESTRUCTURE
_SENDERKEYSTATESTRUCTURE_SENDERMESSAGEKEY.containing_type = _SENDERKEYSTATESTRUCTURE
_SENDERKEYSTATESTRUCTURE_SENDERSIGNINGKEY.containing_type = _SENDERKEYSTATESTRUCTURE
_SENDERKEYSTATESTRUCTURE.fields_by_name['senderChainKey'].message_type = _SENDERKEYSTATESTRUCTURE_SENDERCHAINKEY
_SENDERKEYSTATESTRUCTURE.fields_by_name['senderSigningKey'].message_type = _SENDERKEYSTATESTRUCTURE_SENDERSIGNINGKEY
_SENDERKEYSTATESTRUCTURE.fields_by_name['senderMessageKeys'].message_type = _SENDERKEYSTATESTRUCTURE_SENDERMESSAGEKEY
_SENDERKEYRECORDSTRUCTURE.fields_by_name['senderKeyStates'].message_type = _SENDERKEYSTATESTRUCTURE
DESCRIPTOR.message_types_by_name['SessionStructure'] = _SESSIONSTRUCTURE
DESCRIPTOR.message_types_by_name['RecordStructure'] = _RECORDSTRUCTURE
DESCRIPTOR.message_types_by_name['PreKeyRecordStructure'] = _PREKEYRECORDSTRUCTURE
DESCRIPTOR.message_types_by_name['SignedPreKeyRecordStructure'] = _SIGNEDPREKEYRECORDSTRUCTURE
DESCRIPTOR.message_types_by_name['IdentityKeyPairStructure'] = _IDENTITYKEYPAIRSTRUCTURE
DESCRIPTOR.message_types_by_name['SenderKeyStateStructure'] = _SENDERKEYSTATESTRUCTURE
DESCRIPTOR.message_types_by_name['SenderKeyRecordStructure'] = _SENDERKEYRECORDSTRUCTURE
SessionStructure = _reflection.GeneratedProtocolMessageType('SessionStructure', (_message.Message,), dict(
Chain = _reflection.GeneratedProtocolMessageType('Chain', (_message.Message,), dict(
ChainKey = _reflection.GeneratedProtocolMessageType('ChainKey', (_message.Message,), dict(
DESCRIPTOR = _SESSIONSTRUCTURE_CHAIN_CHAINKEY,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SessionStructure.Chain.ChainKey)
))
,
MessageKey = _reflection.GeneratedProtocolMessageType('MessageKey', (_message.Message,), dict(
DESCRIPTOR = _SESSIONSTRUCTURE_CHAIN_MESSAGEKEY,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SessionStructure.Chain.MessageKey)
))
,
DESCRIPTOR = _SESSIONSTRUCTURE_CHAIN,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SessionStructure.Chain)
))
,
PendingKeyExchange = _reflection.GeneratedProtocolMessageType('PendingKeyExchange', (_message.Message,), dict(
DESCRIPTOR = _SESSIONSTRUCTURE_PENDINGKEYEXCHANGE,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SessionStructure.PendingKeyExchange)
))
,
PendingPreKey = _reflection.GeneratedProtocolMessageType('PendingPreKey', (_message.Message,), dict(
DESCRIPTOR = _SESSIONSTRUCTURE_PENDINGPREKEY,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SessionStructure.PendingPreKey)
))
,
DESCRIPTOR = _SESSIONSTRUCTURE,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SessionStructure)
))
_sym_db.RegisterMessage(SessionStructure)
_sym_db.RegisterMessage(SessionStructure.Chain)
_sym_db.RegisterMessage(SessionStructure.Chain.ChainKey)
_sym_db.RegisterMessage(SessionStructure.Chain.MessageKey)
_sym_db.RegisterMessage(SessionStructure.PendingKeyExchange)
_sym_db.RegisterMessage(SessionStructure.PendingPreKey)
RecordStructure = _reflection.GeneratedProtocolMessageType('RecordStructure', (_message.Message,), dict(
DESCRIPTOR = _RECORDSTRUCTURE,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.RecordStructure)
))
_sym_db.RegisterMessage(RecordStructure)
PreKeyRecordStructure = _reflection.GeneratedProtocolMessageType('PreKeyRecordStructure', (_message.Message,), dict(
DESCRIPTOR = _PREKEYRECORDSTRUCTURE,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.PreKeyRecordStructure)
))
_sym_db.RegisterMessage(PreKeyRecordStructure)
SignedPreKeyRecordStructure = _reflection.GeneratedProtocolMessageType('SignedPreKeyRecordStructure', (_message.Message,), dict(
DESCRIPTOR = _SIGNEDPREKEYRECORDSTRUCTURE,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SignedPreKeyRecordStructure)
))
_sym_db.RegisterMessage(SignedPreKeyRecordStructure)
IdentityKeyPairStructure = _reflection.GeneratedProtocolMessageType('IdentityKeyPairStructure', (_message.Message,), dict(
DESCRIPTOR = _IDENTITYKEYPAIRSTRUCTURE,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.IdentityKeyPairStructure)
))
_sym_db.RegisterMessage(IdentityKeyPairStructure)
SenderKeyStateStructure = _reflection.GeneratedProtocolMessageType('SenderKeyStateStructure', (_message.Message,), dict(
SenderChainKey = _reflection.GeneratedProtocolMessageType('SenderChainKey', (_message.Message,), dict(
DESCRIPTOR = _SENDERKEYSTATESTRUCTURE_SENDERCHAINKEY,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SenderKeyStateStructure.SenderChainKey)
))
,
SenderMessageKey = _reflection.GeneratedProtocolMessageType('SenderMessageKey', (_message.Message,), dict(
DESCRIPTOR = _SENDERKEYSTATESTRUCTURE_SENDERMESSAGEKEY,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SenderKeyStateStructure.SenderMessageKey)
))
,
SenderSigningKey = _reflection.GeneratedProtocolMessageType('SenderSigningKey', (_message.Message,), dict(
DESCRIPTOR = _SENDERKEYSTATESTRUCTURE_SENDERSIGNINGKEY,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SenderKeyStateStructure.SenderSigningKey)
))
,
DESCRIPTOR = _SENDERKEYSTATESTRUCTURE,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SenderKeyStateStructure)
))
_sym_db.RegisterMessage(SenderKeyStateStructure)
_sym_db.RegisterMessage(SenderKeyStateStructure.SenderChainKey)
_sym_db.RegisterMessage(SenderKeyStateStructure.SenderMessageKey)
_sym_db.RegisterMessage(SenderKeyStateStructure.SenderSigningKey)
SenderKeyRecordStructure = _reflection.GeneratedProtocolMessageType('SenderKeyRecordStructure', (_message.Message,), dict(
DESCRIPTOR = _SENDERKEYRECORDSTRUCTURE,
__module__ = 'LocalStorageProtocol_pb2'
# @@protoc_insertion_point(class_scope:textsecure.SenderKeyRecordStructure)
))
_sym_db.RegisterMessage(SenderKeyRecordStructure)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n#org.whispersystems.libaxolotl.stateB\rStorageProtos'))
# @@protoc_insertion_point(module_scope)
|
dbdd4us/compose
|
refs/heads/master
|
tests/unit/config/sort_services_test.py
|
19
|
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from compose.config.errors import DependencyError
from compose.config.sort_services import sort_service_dicts
from compose.config.types import VolumeFromSpec
class TestSortService(object):
    """Unit tests for ``compose.config.sort_services.sort_service_dicts``.

    Each test wires a list of service dicts together via ``links``,
    ``volumes_from``, ``network_mode`` or ``depends_on`` and checks either
    that every dependency is ordered before its dependents, or that a
    ``DependencyError`` is raised for cyclic / self-referential graphs.
    """
    def test_sort_service_dicts_1(self):
        # Simple link: 'web' depends on 'redis'; 'grunt' is unrelated.
        services = [
            {
                'links': ['redis'],
                'name': 'web'
            },
            {
                'name': 'grunt'
            },
            {
                'name': 'redis'
            }
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'grunt'
        assert sorted_services[1]['name'] == 'redis'
        assert sorted_services[2]['name'] == 'web'
    def test_sort_service_dicts_2(self):
        # Chained links: web -> {redis, postgres}, postgres -> redis.
        services = [
            {
                'links': ['redis', 'postgres'],
                'name': 'web'
            },
            {
                'name': 'postgres',
                'links': ['redis']
            },
            {
                'name': 'redis'
            }
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'redis'
        assert sorted_services[1]['name'] == 'postgres'
        assert sorted_services[2]['name'] == 'web'
    def test_sort_service_dicts_3(self):
        # Three-level link chain: grandparent -> parent -> child.
        services = [
            {
                'name': 'child'
            },
            {
                'name': 'parent',
                'links': ['child']
            },
            {
                'links': ['parent'],
                'name': 'grandparent'
            },
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'child'
        assert sorted_services[1]['name'] == 'parent'
        assert sorted_services[2]['name'] == 'grandparent'
    def test_sort_service_dicts_4(self):
        # volumes_from also creates a dependency edge (parent -> child).
        services = [
            {
                'name': 'child'
            },
            {
                'name': 'parent',
                'volumes_from': [VolumeFromSpec('child', 'rw', 'service')]
            },
            {
                'links': ['parent'],
                'name': 'grandparent'
            },
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'child'
        assert sorted_services[1]['name'] == 'parent'
        assert sorted_services[2]['name'] == 'grandparent'
    def test_sort_service_dicts_5(self):
        # network_mode "service:<name>" creates a dependency edge too.
        services = [
            {
                'links': ['parent'],
                'name': 'grandparent'
            },
            {
                'name': 'parent',
                'network_mode': 'service:child'
            },
            {
                'name': 'child'
            }
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'child'
        assert sorted_services[1]['name'] == 'parent'
        assert sorted_services[2]['name'] == 'grandparent'
    def test_sort_service_dicts_6(self):
        # Read-only ('ro') volumes_from behaves the same as read-write.
        services = [
            {
                'links': ['parent'],
                'name': 'grandparent'
            },
            {
                'name': 'parent',
                'volumes_from': [VolumeFromSpec('child', 'ro', 'service')]
            },
            {
                'name': 'child'
            }
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'child'
        assert sorted_services[1]['name'] == 'parent'
        assert sorted_services[2]['name'] == 'grandparent'
    def test_sort_service_dicts_7(self):
        # Mixed edge types in one chain: network_mode, links, volumes_from.
        services = [
            {
                'network_mode': 'service:three',
                'name': 'four'
            },
            {
                'links': ['two'],
                'name': 'three'
            },
            {
                'name': 'two',
                'volumes_from': [VolumeFromSpec('one', 'rw', 'service')]
            },
            {
                'name': 'one'
            }
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 4
        assert sorted_services[0]['name'] == 'one'
        assert sorted_services[1]['name'] == 'two'
        assert sorted_services[2]['name'] == 'three'
        assert sorted_services[3]['name'] == 'four'
    def test_sort_service_dicts_circular_imports(self):
        # Two-node cycle web <-> redis must raise, naming both services.
        services = [
            {
                'links': ['redis'],
                'name': 'web'
            },
            {
                'name': 'redis',
                'links': ['web']
            },
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'redis' in exc.exconly()
        assert 'web' in exc.exconly()
    def test_sort_service_dicts_circular_imports_2(self):
        # A cycle plus an unrelated acyclic node ('postgres') still raises.
        services = [
            {
                'links': ['postgres', 'redis'],
                'name': 'web'
            },
            {
                'name': 'redis',
                'links': ['web']
            },
            {
                'name': 'postgres'
            }
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'redis' in exc.exconly()
        assert 'web' in exc.exconly()
    def test_sort_service_dicts_circular_imports_3(self):
        # Three-node cycle: a -> b -> c -> a.
        services = [
            {
                'links': ['b'],
                'name': 'a'
            },
            {
                'name': 'b',
                'links': ['c']
            },
            {
                'name': 'c',
                'links': ['a']
            }
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'a' in exc.exconly()
        assert 'b' in exc.exconly()
    def test_sort_service_dicts_self_imports(self):
        # A service linking to itself is a (degenerate) cycle.
        services = [
            {
                'links': ['web'],
                'name': 'web'
            },
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'web' in exc.exconly()
    def test_sort_service_dicts_depends_on_self(self):
        # depends_on on itself produces a dedicated error message.
        services = [
            {
                'depends_on': ['web'],
                'name': 'web'
            },
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'A service can not depend on itself: web' in exc.exconly()
|
vrenaville/hr
|
refs/heads/8.0
|
__unported__/hr_emergency_contact/__init__.py
|
25
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2011 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import hr
|
liuhb/hdfs-gress
|
refs/heads/master
|
src/main/python/sample-python.py
|
2
|
#!/usr/bin/python
# Route an ingested local file to a date-partitioned HDFS destination.
# Reads one local file path from standard input, extracts the YYYYMMDD date
# embedded in the filename, and prints the HDFS target path on stdout.
import sys, os, re
# read the local file from standard input; strip the trailing newline that
# readline() keeps, so it does not leak into the constructed HDFS path
input_file = sys.stdin.readline().strip()
# extract the filename from the file
filename = os.path.basename(input_file)
# extract the date from the filename
match = re.search(r'([0-9]{4})([0-9]{2})([0-9]{2})', filename)
if match is None:
    # fail loudly with a useful message instead of a bare AttributeError
    sys.stderr.write("no YYYYMMDD date found in filename: %r\n" % filename)
    sys.exit(1)
year = match.group(1)
mon = match.group(2)
day = match.group(3)
# construct our destination HDFS file
hdfs_dest = "hdfs:/data/%s/%s/%s/%s" % (year, mon, day, filename)
# write it to standard output with no trailing newline (the original used
# ``print hdfs_dest,``); sys.stdout.write works on both Python 2 and 3
sys.stdout.write(hdfs_dest)
|
rleadbetter/Sick-Beard
|
refs/heads/anime
|
lib/hachoir_parser/common/win32_lang_id.py
|
186
|
"""
Windows 2000 - List of Locale IDs and Language Groups
Original data table:
http://www.microsoft.com/globaldev/reference/win2k/setup/lcid.mspx
"""
LANGUAGE_ID = {
0x0436: u"Afrikaans",
0x041c: u"Albanian",
0x0401: u"Arabic Saudi Arabia",
0x0801: u"Arabic Iraq",
0x0c01: u"Arabic Egypt",
0x1001: u"Arabic Libya",
0x1401: u"Arabic Algeria",
0x1801: u"Arabic Morocco",
0x1c01: u"Arabic Tunisia",
0x2001: u"Arabic Oman",
0x2401: u"Arabic Yemen",
0x2801: u"Arabic Syria",
0x2c01: u"Arabic Jordan",
0x3001: u"Arabic Lebanon",
0x3401: u"Arabic Kuwait",
0x3801: u"Arabic UAE",
0x3c01: u"Arabic Bahrain",
0x4001: u"Arabic Qatar",
0x042b: u"Armenian",
0x042c: u"Azeri Latin",
0x082c: u"Azeri Cyrillic",
0x042d: u"Basque",
0x0423: u"Belarusian",
0x0402: u"Bulgarian",
0x0403: u"Catalan",
0x0404: u"Chinese Taiwan",
0x0804: u"Chinese PRC",
0x0c04: u"Chinese Hong Kong",
0x1004: u"Chinese Singapore",
0x1404: u"Chinese Macau",
0x041a: u"Croatian",
0x0405: u"Czech",
0x0406: u"Danish",
0x0413: u"Dutch Standard",
0x0813: u"Dutch Belgian",
0x0409: u"English United States",
0x0809: u"English United Kingdom",
0x0c09: u"English Australian",
0x1009: u"English Canadian",
0x1409: u"English New Zealand",
0x1809: u"English Irish",
0x1c09: u"English South Africa",
0x2009: u"English Jamaica",
0x2409: u"English Caribbean",
0x2809: u"English Belize",
0x2c09: u"English Trinidad",
0x3009: u"English Zimbabwe",
0x3409: u"English Philippines",
0x0425: u"Estonian",
0x0438: u"Faeroese",
0x0429: u"Farsi",
0x040b: u"Finnish",
0x040c: u"French Standard",
0x080c: u"French Belgian",
0x0c0c: u"French Canadian",
0x100c: u"French Swiss",
0x140c: u"French Luxembourg",
0x180c: u"French Monaco",
0x0437: u"Georgian",
0x0407: u"German Standard",
0x0807: u"German Swiss",
0x0c07: u"German Austrian",
0x1007: u"German Luxembourg",
0x1407: u"German Liechtenstein",
0x0408: u"Greek",
0x040d: u"Hebrew",
0x0439: u"Hindi",
0x040e: u"Hungarian",
0x040f: u"Icelandic",
0x0421: u"Indonesian",
0x0410: u"Italian Standard",
0x0810: u"Italian Swiss",
0x0411: u"Japanese",
0x043f: u"Kazakh",
0x0457: u"Konkani",
0x0412: u"Korean",
0x0426: u"Latvian",
0x0427: u"Lithuanian",
0x042f: u"Macedonian",
0x043e: u"Malay Malaysia",
0x083e: u"Malay Brunei Darussalam",
0x044e: u"Marathi",
0x0414: u"Norwegian Bokmal",
0x0814: u"Norwegian Nynorsk",
0x0415: u"Polish",
0x0416: u"Portuguese Brazilian",
0x0816: u"Portuguese Standard",
0x0418: u"Romanian",
0x0419: u"Russian",
0x044f: u"Sanskrit",
0x081a: u"Serbian Latin",
0x0c1a: u"Serbian Cyrillic",
0x041b: u"Slovak",
0x0424: u"Slovenian",
0x040a: u"Spanish Traditional Sort",
0x080a: u"Spanish Mexican",
0x0c0a: u"Spanish Modern Sort",
0x100a: u"Spanish Guatemala",
0x140a: u"Spanish Costa Rica",
0x180a: u"Spanish Panama",
0x1c0a: u"Spanish Dominican Republic",
0x200a: u"Spanish Venezuela",
0x240a: u"Spanish Colombia",
0x280a: u"Spanish Peru",
0x2c0a: u"Spanish Argentina",
0x300a: u"Spanish Ecuador",
0x340a: u"Spanish Chile",
0x380a: u"Spanish Uruguay",
0x3c0a: u"Spanish Paraguay",
0x400a: u"Spanish Bolivia",
0x440a: u"Spanish El Salvador",
0x480a: u"Spanish Honduras",
0x4c0a: u"Spanish Nicaragua",
0x500a: u"Spanish Puerto Rico",
0x0441: u"Swahili",
0x041d: u"Swedish",
0x081d: u"Swedish Finland",
0x0449: u"Tamil",
0x0444: u"Tatar",
0x041e: u"Thai",
0x041f: u"Turkish",
0x0422: u"Ukrainian",
0x0420: u"Urdu",
0x0443: u"Uzbek Latin",
0x0843: u"Uzbek Cyrillic",
0x042a: u"Vietnamese",
}
|
FHannes/intellij-community
|
refs/heads/master
|
python/testData/debug/test_step_over_yield.py
|
29
|
def generator2():  # inner generator the step-over must NOT descend into
    for i in range(4):
        yield i
def generator():  # outer generator; the debugger stops in here
    a = 42 #breakpoint
    yield from generator2()  # step-over target: delegated sub-generator
    return a
sum = 0  # NOTE: shadows builtin sum(); intentional fixture code, do not fix
for i in generator():
    sum += i
print("The end")  # sentinel output checked by the debugger test
|
Nikoala/CouchPotatoServer
|
refs/heads/develop
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ringtv.py
|
22
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class RingTVIE(InfoExtractor):
    """Extractor for video and news pages on ringtv.craveonline.com."""

    _VALID_URL = r'(?:http://)?(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
    _TEST = {
        "url": "http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30",
        "file": "857645.mp4",
        "md5": "d25945f5df41cdca2d2587165ac28720",
        "info_dict": {
            "title": 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
            "description": 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
        }
    }

    def _real_extract(self, url):
        """Build the info dict for the video behind *url*."""
        url_match = re.match(self._VALID_URL, url)
        # The slug begins with the numeric id, e.g. "310833-luis-collazo-...".
        video_id = url_match.group('id').split('-')[0]
        page = self._download_webpage(url, video_id)
        if url_match.group('type') == 'news':
            # News pages embed a springboard iframe carrying the real
            # numeric video id; prefer it over the slug-derived one.
            video_id = self._search_regex(
                r'''(?x)<iframe[^>]+src="http://cms\.springboardplatform\.com/
                embed_iframe/[0-9]+/video/([0-9]+)/''',
                page, 'real video ID')
        return {
            'id': video_id,
            'url': "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4" % video_id,
            'title': self._og_search_title(page),
            'thumbnail': "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg" % video_id,
            'description': self._html_search_regex(
                r'addthis:description="([^"]+)"',
                page, 'description', fatal=False),
        }
|
sofmonk/aima-python
|
refs/heads/branch
|
tests/test_utils.py
|
1
|
import pytest
from utils import * # noqa
def test_removeall_list():
    # removeall drops every occurrence of the item from a list.
    assert removeall(4, []) == []
    assert removeall(4, [1, 2, 3, 4]) == [1, 2, 3]
    assert removeall(4, [4, 1, 4, 2, 3, 4, 4]) == [1, 2, 3]
def test_removeall_string():
    # The same function works character-wise on strings.
    assert removeall('s', '') == ''
    assert removeall('s', 'This is a test. Was a test.') == 'Thi i a tet. Wa a tet.'
def test_unique():
    # unique removes duplicates (order of first occurrence preserved here).
    assert unique([1, 2, 3, 2, 1]) == [1, 2, 3]
    assert unique([1, 5, 6, 7, 6, 5]) == [1, 5, 6, 7]
def test_product():
    # product multiplies all elements of the sequence.
    assert product([1, 2, 3, 4]) == 24
    assert product(list(range(1, 11))) == 3628800
def test_first():
    # first returns the first element (optionally a default when empty),
    # and works on lazy generators, returning None when exhausted.
    assert first('word') == 'w'
    assert first('') is None
    assert first('', 'empty') == 'empty'
    assert first(range(10)) == 0
    assert first(x for x in range(10) if x > 3) == 4
    assert first(x for x in range(10) if x > 100) is None
def test_is_in():
    # is_in tests membership by identity (is), not equality (==).
    e = []
    assert is_in(e, [1, e, 3]) is True
    assert is_in(e, [1, [], 3]) is False
def test_argminmax():
    # argmin/argmax return the *element* optimizing key, not the index.
    assert argmin([-2, 1], key=abs) == 1
    assert argmax([-2, 1], key=abs) == -2
    assert argmax(['one', 'to', 'three'], key=len) == 'three'
def test_histogram():
    # histogram returns (value, count) pairs; the optional second argument
    # selects sort-by-count, the third applies a bin function first.
    assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1]) == [(1, 2), (2, 3),
                                                         (4, 2), (5, 1),
                                                         (7, 1), (9, 1)]
    assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1], 0, lambda x: x*x) == [(1, 2), (4, 3),
                                                                           (16, 2), (25, 1),
                                                                           (49, 1), (81, 1)]
    assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1], 1) == [(2, 3), (4, 2),
                                                            (1, 2), (9, 1),
                                                            (7, 1), (5, 1)]
def test_dotproduct():
    assert dotproduct([1, 2, 3], [1000, 100, 10]) == 1230
def test_element_wise_product():
    # Element-wise (Hadamard) product of two equal-length vectors.
    assert element_wise_product([1, 2, 5], [7, 10, 0]) == [7, 20, 0]
    assert element_wise_product([1, 6, 3, 0], [9, 12, 0, 0]) == [9, 72, 0, 0]
def test_matrix_multiplication():
    # matrix_multiplication accepts two or more factor matrices.
    assert matrix_multiplication([[1, 2, 3],
                                  [2, 3, 4]],
                                 [[3, 4],
                                  [1, 2],
                                  [1, 0]]) == [[8, 8], [13, 14]]
    assert matrix_multiplication([[1, 2, 3],
                                  [2, 3, 4]],
                                 [[3, 4, 8, 1],
                                  [1, 2, 5, 0],
                                  [1, 0, 0, 3]],
                                 [[1, 2],
                                  [3, 4],
                                  [5, 6],
                                  [1, 2]]) == [[132, 176], [224, 296]]
def test_vector_to_diagonal():
    assert vector_to_diagonal([1, 2, 3]) == [[1, 0, 0], [0, 2, 0], [0, 0, 3]]
    assert vector_to_diagonal([0, 3, 6]) == [[0, 0, 0], [0, 3, 0], [0, 0, 6]]
def test_vector_add():
    # Component-wise addition of tuples.
    assert vector_add((0, 1), (8, 9)) == (8, 10)
def test_scalar_vector_product():
    assert scalar_vector_product(2, [1, 2, 3]) == [2, 4, 6]
def test_scalar_matrix_product():
    # rounder is used so float results compare reliably.
    assert rounder(scalar_matrix_product(-5, [[1, 2], [3, 4], [0, 6]])) == [[-5, -10], [-15, -20],
                                                                            [0, -30]]
    assert rounder(scalar_matrix_product(0.2, [[1, 2], [2, 3]])) == [[0.2, 0.4], [0.4, 0.6]]
def test_inverse_matrix():
    assert rounder(inverse_matrix([[1, 0], [0, 1]])) == [[1, 0], [0, 1]]
    assert rounder(inverse_matrix([[2, 1], [4, 3]])) == [[1.5, -0.5], [-2.0, 1.0]]
    assert rounder(inverse_matrix([[4, 7], [2, 6]])) == [[0.6, -0.7], [-0.2, 0.4]]
def test_rounder():
    # rounder rounds to 4 decimals, recursing into nested sequences.
    assert rounder(5.3330000300330) == 5.3330
    assert rounder(10.234566) == 10.2346
    assert rounder([1.234566, 0.555555, 6.010101]) == [1.2346, 0.5556, 6.0101]
    assert rounder([[1.234566, 0.555555, 6.010101],
                    [10.505050, 12.121212, 6.030303]]) == [[1.2346, 0.5556, 6.0101],
                                                           [10.5051, 12.1212, 6.0303]]
def test_num_or_str():
    # num_or_str parses a number when possible, else returns stripped text.
    assert num_or_str('42') == 42
    assert num_or_str(' 42x ') == '42x'
def test_normalize():
    # normalize scales values so they sum to 1.
    assert normalize([1, 2, 1]) == [0.25, 0.5, 0.25]
def test_clip():
    assert [clip(x, 0, 1) for x in [-1, 0.5, 10]] == [0, 0.5, 1]
def test_sigmoid():
    # Logistic function checked at 0 and +/-1 via float-tolerant isclose.
    assert isclose(0.5, sigmoid(0))
    assert isclose(0.7310585786300049, sigmoid(1))
    assert isclose(0.2689414213699951, sigmoid(-1))
def test_step():
    # Heaviside step: 1 for inputs >= 0, else 0.
    assert step(1) == step(0.5) == 1
    assert step(0) == 1
    assert step(-1) == step(-0.5) == 0
def test_Expr():
A, B, C = symbols('A, B, C')
assert symbols('A, B, C') == (Symbol('A'), Symbol('B'), Symbol('C'))
assert A.op == repr(A) == 'A'
assert arity(A) == 0 and A.args == ()
b = Expr('+', A, 1)
assert arity(b) == 2 and b.op == '+' and b.args == (A, 1)
u = Expr('-', b)
assert arity(u) == 1 and u.op == '-' and u.args == (b,)
assert (b ** u) == (b ** u)
assert (b ** u) != (u ** b)
assert A + b * C ** 2 == A + (b * (C ** 2))
ex = C + 1 / (A % 1)
assert list(subexpressions(ex)) == [(C + (1 / (A % 1))), C, (1 / (A % 1)), 1, (A % 1), A, 1]
assert A in subexpressions(ex)
assert B not in subexpressions(ex)
def test_expr():
    """expr() parses strings into the same Expr trees built programmatically."""
    P, Q, x, y, z, GP = symbols('P, Q, x, y, z, GP')
    assert (expr(y + 2 * x)
            == expr('y + 2 * x')
            == Expr('+', y, Expr('*', 2, x)))
    # Logical connectives parse into binary Expr nodes.
    assert expr('P & Q ==> P') == Expr('==>', P & Q, P)
    assert expr('P & Q <=> Q & P') == Expr('<=>', (P & Q), (Q & P))
    # & binds tighter than |.
    assert expr('P(x) | P(y) & Q(z)') == (P(x) | (P(y) & Q(z)))
    # x is grandparent of z if x is parent of y and y is parent of z:
    assert (expr('GP(x, z) <== P(x, y) & P(y, z)')
            == Expr('<==', GP(x, z), P(x, y) & P(y, z)))
# Allow running this test module directly (delegates to pytest).
if __name__ == '__main__':
    pytest.main()
|
inveniosoftware/invenio-circulation
|
refs/heads/master
|
invenio_circulation/mappings/v7/__init__.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
# Copyright (C) 2019 RERO.
#
# Invenio-Circulation is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module for the circulation of ES mappings."""
|
sam-m888/gprime
|
refs/heads/master
|
gprime/filters/rules/repository/__init__.py
|
1
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for GRAMPS.
"""
from ._allrepos import AllRepos
from ._hasidof import HasIdOf
from ._regexpidof import RegExpIdOf
from ._hasnoteregexp import HasNoteRegexp
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hasreferencecountof import HasReferenceCountOf
from ._repoprivate import RepoPrivate
from ._matchesfilter import MatchesFilter
from ._hasrepo import HasRepo
from ._changedsince import ChangedSince
from ._matchesnamesubstringof import MatchesNameSubstringOf
from ._hastag import HasTag
# Rules offered in the repository filter editor.
# NOTE(review): HasNoteMatchingSubstringOf and HasRepo are imported above but
# absent from this list -- confirm whether the omission is intentional.
editor_rule_list = [
    AllRepos,
    HasIdOf,
    RegExpIdOf,
    HasNoteRegexp,
    HasReferenceCountOf,
    RepoPrivate,
    MatchesFilter,
    ChangedSince,
    MatchesNameSubstringOf,
    HasTag
]
|
cdrooom/odoo
|
refs/heads/master
|
addons/website_google_map/__openerp__.py
|
354
|
# Odoo/OpenERP addon manifest: evaluated by the module loader, not imported.
{
    'name': 'Website Google Map',
    'category': 'Hidden',  # not shown in the apps menu
    'summary': '',
    'version': '1.0',
    'description': """
OpenERP Website Google Map
==========================
""",
    'author': 'OpenERP SA',
    'depends': ['base_geolocalize', 'website_partner', 'crm_partner_assign'],
    'data': [
        'views/google_map.xml',
    ],
    'installable': True,
    'auto_install': False,  # must be installed explicitly
}
|
4bic/grano
|
refs/heads/master
|
grano/alembic/versions/4d7168864daa_log_entry_models.py
|
5
|
"""log entry models
Revision ID: 4d7168864daa
Revises: 32a25698e4bb
Create Date: 2014-04-06 21:42:28.317694
"""
# revision identifiers, used by Alembic.
revision = '4d7168864daa'
down_revision = '32a25698e4bb'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the grano_log_entry table (one row per pipeline log record)."""
    op.create_table(
        'grano_log_entry',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        # Optional link to the owning pipeline.
        sa.Column('pipeline_id', sa.Integer(), nullable=True),
        sa.Column('level', sa.Unicode(), nullable=True),
        sa.Column('message', sa.Unicode(), nullable=True),
        sa.Column('error', sa.Unicode(), nullable=True),
        sa.Column('details', sa.Unicode(), nullable=True),
        sa.ForeignKeyConstraint(['pipeline_id'], ['grano_pipeline.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
def downgrade():
    """Drop the grano_log_entry table, reversing upgrade()."""
    op.drop_table('grano_log_entry')
|
our-city-app/oca-backend
|
refs/heads/master
|
src/solutions/common/models/properties.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.ext import db
from mcfw.consts import MISSING
from mcfw.properties import unicode_property, typed_property, long_property, bool_property
from mcfw.serialization import s_long, s_unicode, ds_long, ds_unicode, get_list_serializer, get_list_deserializer, \
s_bool, ds_bool
from rogerthat.models import App
from rogerthat.to import TO
from rogerthat.to.service import UserDetailsTO
from solutions.common.consts import UNIT_PIECE
try:
import cStringIO as StringIO
except ImportError:
import StringIO
def serialize_solution_user(stream, u):
    """Write a SolutionUserTO (or None) to *stream* in the versioned wire format.

    Layout: version (long), presence flag (bool), then name, email,
    avatar_url, language, app_id as unicode. Field order must match
    deserialize_solution_user exactly.
    """
    s_long(stream, 3)  # version
    if u is None:
        s_bool(stream, False)  # presence flag: no user follows
    else:
        s_bool(stream, True)
        s_unicode(stream, u.name)
        s_unicode(stream, u.email)
        s_unicode(stream, u.avatar_url)
        s_unicode(stream, u.language)
        # An unset (MISSING) app_id is stored as None.
        s_unicode(stream, u.app_id if u.app_id is not MISSING else None)
def deserialize_solution_user(stream):
    """Read a SolutionUserTO (or None) written by serialize_solution_user.

    Handles historic stream versions: the presence flag exists from
    version 3, app_id from version 2 (older streams default it to the
    Rogerthat app id).
    """
    version = ds_long(stream)  # version
    if version > 2:
        # Version 3+ writes an explicit presence flag before the fields.
        if not ds_bool(stream):
            return None
    u = SolutionUserTO()
    u.name = ds_unicode(stream)
    u.email = ds_unicode(stream)
    u.avatar_url = ds_unicode(stream)
    u.language = ds_unicode(stream)
    if version > 1:
        u.app_id = ds_unicode(stream)
    else:
        # Pre-version-2 streams did not store an app id.
        u.app_id = App.APP_ID_ROGERTHAT
    return u
class SolutionUserTO(TO):
    """Transfer object describing an app user attached to a solution."""
    name = unicode_property('1')
    email = unicode_property('2')
    avatar_url = unicode_property('3')
    language = unicode_property('4')
    app_id = unicode_property('5')

    @staticmethod
    def fromTO(to):
        """Copy the user fields of another TO into a new SolutionUserTO."""
        u = SolutionUserTO()
        u.name = to.name
        u.email = to.email
        u.avatar_url = to.avatar_url
        u.language = to.language
        u.app_id = to.app_id
        return u

    def to_user_details(self):
        """Convert this object to the rogerthat UserDetailsTO representation."""
        return UserDetailsTO.create(self.email, self.name, self.language, self.avatar_url, self.app_id)
class SolutionUserProperty(db.UnindexedProperty):
    """Datastore property storing a SolutionUserTO as a serialized blob."""
    data_type = SolutionUserTO

    def get_value_for_datastore(self, model_instance):
        """Serialize the TO into a db.Blob for storage; falsy values become None."""
        super_value = super(SolutionUserProperty, self).get_value_for_datastore(model_instance)
        if not super_value:
            return None
        stream = StringIO.StringIO()
        serialize_solution_user(stream, super_value)
        return db.Blob(stream.getvalue())

    def make_value_from_datastore(self, value):
        """Deserialize the stored blob back into a SolutionUserTO."""
        if value is None:
            return None
        return deserialize_solution_user(StringIO.StringIO(value))
class MenuItemTO(TO):
    """Transfer object for one menu item."""
    # Bit flags for the 'visible_in' field.
    VISIBLE_IN_MENU = 1
    VISIBLE_IN_ORDER = 2
    name = unicode_property('1')
    # Price scaled by 100 (cents) -- see convert_price_to_long below.
    price = long_property('2')
    description = unicode_property('3')
    visible_in = long_property('4')  # bitwise OR of the VISIBLE_IN_* flags
    # ordering unit and step
    unit = long_property('5')
    step = long_property('6')
    image_id = long_property('7')
    qr_url = unicode_property('8')
    has_price = bool_property('9')
    id = unicode_property('10')
    # custom unit can be used for pricing without linking to (ordering) unit
    custom_unit = long_property('11')
class MenuCategoryTO(TO):
    """Transfer object grouping menu items under a named category."""
    name = unicode_property('1')
    items = typed_property('2', MenuItemTO, True)  # list of MenuItemTO
    index = long_property('3')  # sort order of the category
    predescription = unicode_property('4')
    postdescription = unicode_property('5')
    id = unicode_property('6')
def _serialize_item(stream, item):
    """Write one MenuItemTO to *stream*; order must match _deserialize_item."""
    s_unicode(stream, item.name)
    s_long(stream, item.price)
    s_unicode(stream, item.description)
    s_long(stream, item.visible_in)
    s_long(stream, item.unit)
    s_long(stream, item.step)
    # -1 is the wire sentinel for "no image".
    s_long(stream, -1 if item.image_id in (None, MISSING) else item.image_id)
    s_unicode(stream, None if item.qr_url is MISSING else item.qr_url)
    # Legacy items without an explicit flag: priced iff price is positive.
    s_bool(stream, item.price > 0 if item.has_price is MISSING else item.has_price)
    s_unicode(stream, item.id)
    s_long(stream, item.custom_unit)
def convert_price_to_long(str_price):
    """Convert a human-entered price string to an integer amount in cents.

    Accepts both '.' and ',' as the decimal separator (e.g. '2,50' -> 250).

    Bug fix: the previous implementation truncated the scaled float, so
    '1.15' became 114 because 100 * float('1.15') == 114.999...; rounding
    to the nearest integer yields the expected amount.
    """
    return int(round(100 * float(str_price.replace(',', '.'))))
def _deserialize_item(stream, version):
    """Read one MenuItemTO written by _serialize_item.

    *version* is the stream version; fields missing in older versions are
    filled with backwards-compatible defaults.
    """
    item = MenuItemTO()
    item.name = ds_unicode(stream)
    # Until version 5 the price was stored as a string.
    item.price = convert_price_to_long(ds_unicode(stream)) if version < 5 else ds_long(stream)
    item.description = ds_unicode(stream)
    item.visible_in = ds_long(stream) if version > 2 else MenuItemTO.VISIBLE_IN_MENU | MenuItemTO.VISIBLE_IN_ORDER
    item.unit = ds_long(stream) if version > 3 else UNIT_PIECE
    item.step = ds_long(stream) if version > 3 else 1
    if version < 6:
        item.image_id = None
    else:
        item.image_id = ds_long(stream)
        # -1 is the "no image" sentinel written by _serialize_item.
        if item.image_id == -1:
            item.image_id = None
    item.qr_url = None if version < 7 else ds_unicode(stream)
    item.has_price = item.price > 0 if version < 8 else ds_bool(stream)
    item.id = ds_unicode(stream) if version > 8 else None
    # defaults to ordering unit
    item.custom_unit = ds_long(stream) if version > 9 else item.unit
    return item
# List (de)serializers for menu items; the deserializer is version-aware.
_serialize_item_list = get_list_serializer(_serialize_item)
_deserialize_item_list = get_list_deserializer(_deserialize_item, True)
def _serialize_category(stream, c):
    """Write one MenuCategoryTO to *stream*; order must match _deserialize_category."""
    s_unicode(stream, c.name)
    _serialize_item_list(stream, c.items)
    s_long(stream, c.index)
    s_unicode(stream, c.predescription)
    s_unicode(stream, c.postdescription)
    s_unicode(stream, c.id)
def _deserialize_category(stream, version):
    """Read one MenuCategoryTO written by _serialize_category.

    Fields absent from older stream versions default to None.
    """
    c = MenuCategoryTO()
    c.name = ds_unicode(stream)
    c.items = _deserialize_item_list(stream, version)
    c.index = ds_long(stream)
    c.predescription = ds_unicode(stream) if version > 1 else None
    c.postdescription = ds_unicode(stream) if version > 1 else None
    c.id = ds_unicode(stream) if version > 8 else None
    return c
# List (de)serializers for menu categories; the deserializer is version-aware.
_serialize_category_list = get_list_serializer(_serialize_category)
_deserialize_category_list = get_list_deserializer(_deserialize_category, True)
class ActivatedModuleTO(TO):
    """Transfer object recording when a solution module was activated."""
    name = unicode_property('1', default=None)
    timestamp = long_property('2', default=0)
def _serialize_activated_module(stream, m):
    """Write one ActivatedModuleTO (name, then timestamp)."""
    s_unicode(stream, m.name)
    s_long(stream, m.timestamp)
def _deserialize_activated_module(stream, version):
    """Read one ActivatedModuleTO; *version* is accepted but currently unused."""
    m = ActivatedModuleTO()
    m.name = ds_unicode(stream)
    m.timestamp = ds_long(stream)
    return m
|
kerel-fs/skylines
|
refs/heads/master
|
tests/schemas/schemas/test_club.py
|
1
|
import pytest
from marshmallow import ValidationError
from skylines.schemas import ClubSchema
def test_deserialization_fails_for_empty_name():
    """An empty club name is rejected with a validation error."""
    with pytest.raises(ValidationError) as excinfo:
        ClubSchema(only=('name',)).load({'name': ''})
    messages = excinfo.value.messages
    assert 'name' in messages
    assert 'Must not be empty.' in messages.get('name')
def test_deserialization_fails_for_spaced_name():
    """A whitespace-only club name is rejected like an empty one."""
    with pytest.raises(ValidationError) as excinfo:
        ClubSchema(only=('name',)).load({'name': ' '})
    messages = excinfo.value.messages
    assert 'name' in messages
    assert 'Must not be empty.' in messages.get('name')
def test_deserialization_passes_for_valid_name():
    """Leading/trailing whitespace is stripped from a valid name."""
    schema = ClubSchema(only=('name',))
    result = schema.load({'name': ' foo '})
    assert result.data['name'] == 'foo'
def test_serialization_passes_for_invalid_website():
    """dump() performs no validation, so an invalid URL passes through unchanged."""
    dumped = ClubSchema().dump({'website': 'foobar'})
    assert dumped.data['website'] == 'foobar'
|
benlangmuir/swift
|
refs/heads/master
|
utils/android/adb_test_runner/main.py
|
65
|
# main.py - Push executables and run them on an Android device -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# lit tests assume a single program can be invoked to execute Swift code and
# make expectations upon the output. This program is a wrapper that has the
# same API, but runs the Swift program on an Android device instead of on the
# host.
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import sys
from adb.commands import execute_on_device
def _usage(program_name):
return 'usage: {} [executable_path] [executable_arguments]'.format(
program_name)
def _help(program_name):
    """Return the full help text (usage plus positional-argument docs)."""
    sections = [
        _usage(program_name),
        '',
        'positional arguments:',
        '\texecutable_path\t\tThe path to a local executable that is to '
        'be run on a connected Android device.',
        '\texecutable_arguments\tAdditional arguments that are to be '
        'given to the executable when it is run on the device.',
        '',
    ]
    return '\n'.join(sections)
def main(args=sys.argv):
    """
    The main entry point for adb_test_runner.
    Parse arguments and kick off the script. Return zero to indicate success,
    a non-zero integer otherwise.
    """
    # We don't use argparse, because we need to be able to pass
    # --arbitrary -params --like=this to the executable we're running
    # on device.
    # NOTE(review): args defaults to sys.argv itself, so pop() below mutates
    # sys.argv in place -- harmless for a one-shot script entry point.
    program_name = os.path.basename(args.pop(0))
    if len(args) == 1 and args[0] in ['-h', '--help']:
        print(_help(program_name))
        return 0
    try:
        # IndexError can only come from args[0] when no arguments remain.
        executable_path, executable_arguments = args[0], args[1:]
    except IndexError:
        print(_usage(program_name))
        print('{}: error: argument "executable_path" is required'.format(
            program_name))
        return 1
    # Push the executable to the device via adb and run it there.
    return execute_on_device(executable_path, executable_arguments)
|
agogear/python-1
|
refs/heads/master
|
Jimmy66/0009/0009.py
|
34
|
#!/bin/env python
# -*- coding: utf-8 -*-
# It is unclear whether "links" here means <a> tags or every href value; this script takes the latter.
#导入模块
import re
import urllib2
# Read a local file.
def file_read(filename):
    """Read a local HTML file via a file:// URL and return its raw contents.

    urllib2 is used instead of plain open() to sidestep the encoding issues
    the original author ran into when opening the HTML directly.

    Bug fix: the *filename* argument was previously ignored and the file name
    was hard-coded; the URL is now built from the argument. The existing
    caller passes the same file name, so its behavior is unchanged.
    """
    req = urllib2.Request("file:./" + filename)
    response = urllib2.urlopen(req)
    return response.read()
# Find the links in the HTML and return them as a list.
def link_find(html):
    """Return every http(s) URL that appears in an href="..." attribute."""
    href_pattern = re.compile(r'href="(http[s]?:[^"]+)"')
    return href_pattern.findall(html)
# Main function: display the list of links.
def main():
    """Print each href link found in the bundled HTML file, one per line.

    Fix: uses the function form print(...) instead of the Python-2-only
    'print x' statement; with a single argument the output is identical on
    Python 2 and the script becomes forward-portable.
    """
    html = file_read('Yixiaohan show-me-the-code.html')
    for link in link_find(html):
        print(link)
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
tristan0x/hpcbench
|
refs/heads/master
|
hpcbench/toolbox/spack.py
|
1
|
from subprocess import check_call, check_output
from .process import find_executable
class SpackCmd:
    """Thin convenience wrapper around the ``spack`` command-line tool."""

    @property
    def spack(self):
        """Locate the spack executable (not required to exist)."""
        return find_executable('spack', required=False)

    def install(self, *args):
        """Run ``spack install`` for the given specs with verbose logging."""
        self.cmd('install', '--show-log-on-error', '--verbose', *args)

    def install_dir(self, *args):
        """Return the installation directory of an installed spec."""
        raw = self.cmd('location', '--install-dir', *args, output=True)
        return raw.strip().decode('utf-8')

    def cmd(self, *args, **kwargs):
        """Invoke spack; with output=True return captured stdout, else just check."""
        argv = [self.spack] + list(args)
        runner = check_output if kwargs.get('output', False) else check_call
        return runner(argv)
|
GinnyN/Team-Fortress-RPG-Generators
|
refs/heads/master
|
build/lib/django/contrib/sites/managers.py
|
491
|
from django.conf import settings
from django.db import models
from django.db.models.fields import FieldDoesNotExist
class CurrentSiteManager(models.Manager):
    "Use this to limit objects to those associated with the current site."

    def __init__(self, field_name=None):
        super(CurrentSiteManager, self).__init__()
        # Name of the FK/M2M field that points at Site; autodetected when None.
        self.__field_name = field_name
        # Validation is deferred until the first queryset is built, because
        # the model class may not be fully set up at manager-creation time.
        self.__is_validated = False

    def _validate_field_name(self):
        """Resolve and type-check the site field on self.model.

        Raises ValueError when the (explicit or autodetected) field does not
        exist, and TypeError when it is not a ForeignKey/ManyToManyField.
        """
        field_names = self.model._meta.get_all_field_names()
        # If a custom name is provided, make sure the field exists on the model
        if self.__field_name is not None and self.__field_name not in field_names:
            raise ValueError("%s couldn't find a field named %s in %s." % \
                (self.__class__.__name__, self.__field_name, self.model._meta.object_name))
        # Otherwise, see if there is a field called either 'site' or 'sites'
        else:
            for potential_name in ['site', 'sites']:
                if potential_name in field_names:
                    self.__field_name = potential_name
                    self.__is_validated = True
                    break
        # Now do a type check on the field (FK or M2M only)
        try:
            field = self.model._meta.get_field(self.__field_name)
            if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
                raise TypeError("%s must be a ForeignKey or ManyToManyField." % self.__field_name)
        except FieldDoesNotExist:
            raise ValueError("%s couldn't find a field named %s in %s." % \
                (self.__class__.__name__, self.__field_name, self.model._meta.object_name))
        self.__is_validated = True

    def get_query_set(self):
        # Validate lazily on first use (see __init__).
        if not self.__is_validated:
            self._validate_field_name()
        # Filter by the current site's id from settings.SITE_ID.
        return super(CurrentSiteManager, self).get_query_set().filter(**{self.__field_name + '__id__exact': settings.SITE_ID})
|
nikolas/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/contrib/gis/geos/error.py
|
641
|
"""
This module houses the GEOS exceptions, specifically, GEOSException and
GEOSGeometryIndexError.
"""
class GEOSException(Exception):
    """Base exception for all GEOS-related errors."""
class GEOSIndexError(GEOSException, KeyError):
    """Raised when an invalid index is encountered.

    Carries ``silent_variable_failure = True`` so that Django's template
    engine falls through to the next lookup type instead of propagating the
    error (fixes ticket #4740).
    """
    # "If, during the method lookup, a method raises an exception, the exception
    # will be propagated, unless the exception has an attribute
    # `silent_variable_failure` whose value is True." -- Django template docs.
    silent_variable_failure = True
|
h3llrais3r/Auto-Subliminal
|
refs/heads/master
|
lib/markdown/extensions/smart_strong.py
|
8
|
'''
Smart_Strong Extension for Python-Markdown
==========================================
This extention adds smarter handling of double underscores within words.
See <https://Python-Markdown.github.io/extensions/smart_strong>
for documentation.
Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import SimpleTagPattern
# '__strong__' only when the double underscores are not embedded inside a word.
SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\2(?!\w)'
# '**strong**' with no word-boundary restriction.
STRONG_RE = r'(\*{2})(.+?)\2'
class SmartEmphasisExtension(Extension):
    """Markdown extension adding smarter handling of '__' strong emphasis."""

    def extendMarkdown(self, md, md_globals):
        """Restrict 'strong' to '**' and register a word-aware '__' pattern."""
        star_strong = SimpleTagPattern(STRONG_RE, 'strong')
        smart_strong = SimpleTagPattern(SMART_STRONG_RE, 'strong')
        md.inlinePatterns['strong'] = star_strong
        md.inlinePatterns.add('strong2', smart_strong, '>emphasis2')
def makeExtension(*args, **kwargs):
    """Entry point used by markdown.Markdown(extensions=...)."""
    return SmartEmphasisExtension(*args, **kwargs)
|
RoyalTS/econ-project-templates
|
refs/heads/python
|
.mywaflib/waflib/Tools/intltool.py
|
3
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
Support for translation tools such as msgfmt and intltool
Usage::
def configure(conf):
conf.load('gnu_dirs intltool')
def build(bld):
# process the .po files into .gmo files, and install them in LOCALEDIR
bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")
# process an input file, substituting the translations from the po dir
bld(
features = "intltool_in",
podir = "../po",
style = "desktop",
flags = ["-u"],
source = 'kupfer.desktop.in',
install_path = "${DATADIR}/applications",
)
Usage of the :py:mod:`waflib.Tools.gnu_dirs` is recommended, but not obligatory.
"""
import os, re
from waflib import Configure, Context, TaskGen, Task, Utils, Runner, Options, Build, Logs
import waflib.Tools.ccroot
from waflib.TaskGen import feature, before_method, taskgen_method
from waflib.Logs import error
from waflib.Configure import conf
# Map of intltool-merge "style" names to the corresponding command-line flag.
_style_flags = {
    'ba': '-b',
    'desktop': '-d',
    'keys': '-k',
    'quoted': '--quoted-style',
    'quotedxml': '--quotedxml-style',
    'rfc822deb': '-r',
    'schemas': '-s',
    'xml': '-x',
}
@taskgen_method
def ensure_localedir(self):
    """
    Expand LOCALEDIR from DATAROOTDIR/locale if possible, or fallback to PREFIX/share/locale
    """
    # use the tool gnu_dirs to provide options to define this
    if self.env.LOCALEDIR:
        return
    dataroot = self.env.DATAROOTDIR
    if dataroot:
        self.env.LOCALEDIR = os.path.join(dataroot, 'locale')
    else:
        self.env.LOCALEDIR = os.path.join(self.env.PREFIX, 'share', 'locale')
@before_method('process_source')
@feature('intltool_in')
def apply_intltool_in_f(self):
    """
    Create tasks to translate files by intltool-merge::
        def build(bld):
            bld(
                features = "intltool_in",
                podir = "../po",
                style = "desktop",
                flags = ["-u"],
                source = 'kupfer.desktop.in',
                install_path = "${DATADIR}/applications",
            )
    :param podir: location of the .po files
    :type podir: string
    :param source: source files to process
    :type source: list of string
    :param style: the intltool-merge mode of operation, can be one of the following values:
      ``ba``, ``desktop``, ``keys``, ``quoted``, ``quotedxml``, ``rfc822deb``, ``schemas`` and ``xml``.
      See the ``intltool-merge`` man page for more information about supported modes of operation.
    :type style: string
    :param flags: compilation flags ("-quc" by default)
    :type flags: list of string
    :param install_path: installation path
    :type install_path: string
    """
    # This generator creates its tasks itself; drop the default source handling.
    try: self.meths.remove('process_source')
    except ValueError: pass
    self.ensure_localedir()
    podir = getattr(self, 'podir', '.')
    podirnode = self.path.find_dir(podir)
    if not podirnode:
        error("could not find the podir %r" % podir)
        return
    # intltool-merge keeps a translation cache; place it in the build dir.
    cache = getattr(self, 'intlcache', '.intlcache')
    self.env.INTLCACHE = [os.path.join(str(self.path.get_bld()), podir, cache)]
    self.env.INTLPODIR = podirnode.bldpath()
    self.env.append_value('INTLFLAGS', getattr(self, 'flags', self.env.INTLFLAGS_DEFAULT))
    if '-c' in self.env.INTLFLAGS:
        # The cache flag is injected via INTLCACHE; a user-supplied -c would duplicate it.
        self.bld.fatal('Redundant -c flag in intltool task %r' % self)
    style = getattr(self, 'style', None)
    if style:
        try:
            style_flag = _style_flags[style]
        except KeyError:
            self.bld.fatal('intltool_in style "%s" is not valid' % style)
        self.env.append_unique('INTLFLAGS', [style_flag])
    for i in self.to_list(self.source):
        node = self.path.find_resource(i)
        # The output drops the trailing extension (foo.desktop.in -> foo.desktop).
        task = self.create_task('intltool', node, node.change_ext(''))
        inst = getattr(self, 'install_path', None)
        if inst:
            self.bld.install_files(inst, task.outputs)
@feature('intltool_po')
def apply_intltool_po(self):
    """
    Create tasks to process po files::
        def build(bld):
            bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")
    The relevant task generator arguments are:
    :param podir: directory of the .po files
    :type podir: string
    :param appname: name of the application
    :type appname: string
    :param install_path: installation directory
    :type install_path: string
    The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process.
    """
    # This generator creates its tasks itself; drop the default source handling.
    try: self.meths.remove('process_source')
    except ValueError: pass
    self.ensure_localedir()
    appname = getattr(self, 'appname', getattr(Context.g_module, Context.APPNAME, 'set_your_app_name'))
    podir = getattr(self, 'podir', '.')
    inst = getattr(self, 'install_path', '${LOCALEDIR}')
    linguas = self.path.find_node(os.path.join(podir, 'LINGUAS'))
    if linguas:
        # scan LINGUAS file for locales to process
        file = open(linguas.abspath())
        langs = []
        for line in file.readlines():
            # ignore lines containing comments
            if not line.startswith('#'):
                langs += line.split()
        file.close()
        re_linguas = re.compile('[-a-zA-Z_@.]+')
        for lang in langs:
            # Make sure that we only process lines which contain locales
            if re_linguas.match(lang):
                # Compile <locale>.po into <locale>.mo.
                node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po'))
                task = self.create_task('po', node, node.change_ext('.mo'))
                if inst:
                    filename = task.outputs[0].name
                    (langname, ext) = os.path.splitext(filename)
                    # Install as <inst>/<locale>/LC_MESSAGES/<appname>.mo.
                    inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
                    self.bld.install_as(inst_file, task.outputs[0], chmod=getattr(self, 'chmod', Utils.O644), env=task.env)
    else:
        Logs.pprint('RED', "Error no LINGUAS file found in po directory")
class po(Task.Task):
    """
    Compile .po files into .gmo files
    """
    # msgfmt writes the target from the source catalog.
    run_str = '${MSGFMT} -o ${TGT} ${SRC}'
    color = 'BLUE'
class intltool(Task.Task):
    """
    Let intltool-merge translate an input file
    """
    # Flags, cache location and po dir are set up by apply_intltool_in_f.
    run_str = '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE_ST:INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}'
    color = 'BLUE'
@conf
def find_msgfmt(conf):
    """Detect the program *msgfmt* and set ``conf.env.MSGFMT``."""
    conf.find_program('msgfmt', var='MSGFMT')
@conf
def find_intltool_merge(conf):
    """Detect *intltool-merge* (a perl script) and set ``conf.env.INTLTOOL``."""
    if not conf.env.PERL:
        # intltool-merge is interpreted by perl; locate the interpreter first.
        conf.find_program('perl', var='PERL')
    conf.env.INTLCACHE_ST = '--cache=%s'
    conf.env.INTLFLAGS_DEFAULT = ['-q', '-u']
    conf.find_program('intltool-merge', interpreter='PERL', var='INTLTOOL')
def configure(conf):
    """
    Detect the program *msgfmt* and set *conf.env.MSGFMT*.
    Detect the program *intltool-merge* and set *conf.env.INTLTOOL*.
    It is possible to set INTLTOOL in the environment, but it must not have spaces in it::
        $ INTLTOOL="/path/to/the program/intltool" waf configure
    If a C/C++ compiler is present, execute a compilation test to find the header *locale.h*.
    """
    conf.find_msgfmt()
    conf.find_intltool_merge()
    # Only probe for locale.h when a compiler has been configured.
    if conf.env.CC or conf.env.CXX:
        conf.check(header_name='locale.h')
|
Plain-Andy-legacy/android_external_chromium_org
|
refs/heads/lp-5.1r1
|
build/android/pylib/base/test_dispatcher_unittest.py
|
33
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for test_dispatcher.py."""
# pylint: disable=R0201
# pylint: disable=W0212
import os
import sys
import unittest
# Make the pylib package importable when running this file directly.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             os.pardir, os.pardir))
# Mock out android_commands.GetAttachedDevices().
from pylib import android_commands
android_commands.GetAttachedDevices = lambda: ['0', '1']
from pylib import constants
from pylib.base import base_test_result
from pylib.base import test_dispatcher
from pylib.utils import watchdog_timer
class TestException(Exception):
    """Raised by MockRunnerException to simulate a crashing test runner."""
class MockRunner(object):
    """A mock TestRunner: every test passes; SetUp/TearDown are counted."""

    def __init__(self, device='0', shard_index=0):
        self.device_serial = device
        self.shard_index = shard_index
        self.setups = 0
        self.teardowns = 0

    def RunTest(self, test):
        run_results = base_test_result.TestRunResults()
        passed = base_test_result.BaseTestResult(
            test, base_test_result.ResultType.PASS)
        run_results.AddResult(passed)
        return (run_results, None)

    def SetUp(self):
        self.setups += 1

    def TearDown(self):
        self.teardowns += 1
class MockRunnerFail(MockRunner):
    """Mock runner whose tests always fail (and are flagged for retry)."""

    def RunTest(self, test):
        run_results = base_test_result.TestRunResults()
        failure = base_test_result.BaseTestResult(
            test, base_test_result.ResultType.FAIL)
        run_results.AddResult(failure)
        return (run_results, test)
class MockRunnerFailTwice(MockRunner):
    """Mock runner that fails the first two runs, then passes."""

    def __init__(self, device='0', shard_index=0):
        super(MockRunnerFailTwice, self).__init__(device, shard_index)
        self._fails = 0

    def RunTest(self, test):
        self._fails += 1
        if self._fails <= 2:
            outcome, retry = base_test_result.ResultType.FAIL, test
        else:
            outcome, retry = base_test_result.ResultType.PASS, None
        run_results = base_test_result.TestRunResults()
        run_results.AddResult(base_test_result.BaseTestResult(test, outcome))
        return (run_results, retry)
class MockRunnerException(MockRunner):
    """Mock runner whose RunTest always raises TestException."""

    def RunTest(self, test):
        raise TestException
class TestFunctions(unittest.TestCase):
    """Tests test_dispatcher._RunTestsFromQueue."""

    @staticmethod
    def _RunTests(mock_runner, tests):
        """Drain *tests* through _RunTestsFromQueue and merge all results."""
        results = []
        tests = test_dispatcher._TestCollection(
            [test_dispatcher._Test(t) for t in tests])
        # Final argument 2: failed tests are re-queued up to two retries.
        test_dispatcher._RunTestsFromQueue(mock_runner, tests, results,
                                           watchdog_timer.WatchdogTimer(None), 2)
        run_results = base_test_result.TestRunResults()
        for r in results:
            run_results.AddTestRunResults(r)
        return run_results

    def testRunTestsFromQueue(self):
        """All tests pass with a healthy runner."""
        results = TestFunctions._RunTests(MockRunner(), ['a', 'b'])
        self.assertEqual(len(results.GetPass()), 2)
        self.assertEqual(len(results.GetNotPass()), 0)

    def testRunTestsFromQueueRetry(self):
        """A persistently failing runner exhausts retries and reports FAIL."""
        results = TestFunctions._RunTests(MockRunnerFail(), ['a', 'b'])
        self.assertEqual(len(results.GetPass()), 0)
        self.assertEqual(len(results.GetFail()), 2)

    def testRunTestsFromQueueFailTwice(self):
        """Tests failing twice and then passing are retried to success."""
        results = TestFunctions._RunTests(MockRunnerFailTwice(), ['a', 'b'])
        self.assertEqual(len(results.GetPass()), 2)
        self.assertEqual(len(results.GetNotPass()), 0)

    def testSetUp(self):
        """_SetUp creates one runner and calls its SetUp exactly once."""
        runners = []
        counter = test_dispatcher._ThreadSafeCounter()
        test_dispatcher._SetUp(MockRunner, '0', runners, counter)
        self.assertEqual(len(runners), 1)
        self.assertEqual(runners[0].setups, 1)

    def testThreadSafeCounter(self):
        """GetAndIncrement returns consecutive values starting at 0."""
        counter = test_dispatcher._ThreadSafeCounter()
        for i in xrange(5):
            self.assertEqual(counter.GetAndIncrement(), i)

    def testApplyMaxPerRun(self):
        """Colon-joined test groups are split into chunks of at most 2."""
        self.assertEqual(
            ['A:B', 'C:D', 'E', 'F:G', 'H:I'],
            test_dispatcher.ApplyMaxPerRun(['A:B', 'C:D:E', 'F:G:H:I'], 2))
class TestThreadGroupFunctions(unittest.TestCase):
    """Tests test_dispatcher._RunAllTests and test_dispatcher._CreateRunners."""

    def setUp(self):
        self.tests = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        # A single collection shared by all runners (sharding semantics).
        shared_test_collection = test_dispatcher._TestCollection(
            [test_dispatcher._Test(t) for t in self.tests])
        self.test_collection_factory = lambda: shared_test_collection

    def testCreate(self):
        """_CreateRunners sets up one runner per device with distinct shards."""
        runners = test_dispatcher._CreateRunners(MockRunner, ['0', '1'])
        for runner in runners:
            self.assertEqual(runner.setups, 1)
        self.assertEqual(set([r.device_serial for r in runners]),
                         set(['0', '1']))
        self.assertEqual(set([r.shard_index for r in runners]),
                         set([0, 1]))

    def testRun(self):
        """Every queued test passes and the exit code is 0."""
        runners = [MockRunner('0'), MockRunner('1')]
        results, exit_code = test_dispatcher._RunAllTests(
            runners, self.test_collection_factory, 0)
        self.assertEqual(len(results.GetPass()), len(self.tests))
        self.assertEqual(exit_code, 0)

    def testTearDown(self):
        """_TearDownRunners calls TearDown exactly once per runner."""
        runners = [MockRunner('0'), MockRunner('1')]
        test_dispatcher._TearDownRunners(runners)
        for runner in runners:
            self.assertEqual(runner.teardowns, 1)

    def testRetry(self):
        """Persistent failures produce FAIL results and the error exit code."""
        runners = test_dispatcher._CreateRunners(MockRunnerFail, ['0', '1'])
        results, exit_code = test_dispatcher._RunAllTests(
            runners, self.test_collection_factory, 0)
        self.assertEqual(len(results.GetFail()), len(self.tests))
        self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)

    def testReraise(self):
        """Exceptions raised inside a runner propagate out of _RunAllTests."""
        runners = test_dispatcher._CreateRunners(MockRunnerException, ['0', '1'])
        with self.assertRaises(TestException):
            test_dispatcher._RunAllTests(runners, self.test_collection_factory, 0)
class TestShard(unittest.TestCase):
    """Tests test_dispatcher.RunTests with sharding."""

    @staticmethod
    def _RunShard(runner_factory):
        """Run three tests sharded across two devices."""
        return test_dispatcher.RunTests(
            ['a', 'b', 'c'], runner_factory, ['0', '1'], shard=True)

    def testShard(self):
        """Sharded mode runs each test exactly once overall."""
        results, exit_code = TestShard._RunShard(MockRunner)
        self.assertEqual(len(results.GetPass()), 3)
        self.assertEqual(exit_code, 0)

    def testFailing(self):
        """All-failing runners yield three FAILs and the error exit code."""
        results, exit_code = TestShard._RunShard(MockRunnerFail)
        self.assertEqual(len(results.GetPass()), 0)
        self.assertEqual(len(results.GetFail()), 3)
        self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)

    def testNoTests(self):
        """An empty test list produces no results and the error exit code."""
        results, exit_code = test_dispatcher.RunTests(
            [], MockRunner, ['0', '1'], shard=True)
        self.assertEqual(len(results.GetAll()), 0)
        self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
class TestReplicate(unittest.TestCase):
    """Tests test_dispatcher.RunTests with replication."""

    @staticmethod
    def _RunReplicate(runner_factory):
        """Run three tests replicated on each of two devices."""
        return test_dispatcher.RunTests(
            ['a', 'b', 'c'], runner_factory, ['0', '1'], shard=False)

    def testReplicate(self):
        """Replicated mode runs every test on every device."""
        results, exit_code = TestReplicate._RunReplicate(MockRunner)
        # We expect 6 results since each test should have been run on every device
        self.assertEqual(len(results.GetPass()), 6)
        self.assertEqual(exit_code, 0)

    def testFailing(self):
        """All-failing runners yield six FAILs (3 tests x 2 devices)."""
        results, exit_code = TestReplicate._RunReplicate(MockRunnerFail)
        self.assertEqual(len(results.GetPass()), 0)
        self.assertEqual(len(results.GetFail()), 6)
        self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)

    def testNoTests(self):
        """An empty test list produces no results and the error exit code."""
        results, exit_code = test_dispatcher.RunTests(
            [], MockRunner, ['0', '1'], shard=False)
        self.assertEqual(len(results.GetAll()), 0)
        self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
CiscoSystems/neutron
|
refs/heads/master
|
neutron/agent/linux/ip_link_support.py
|
5
|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from neutron.agent.linux import utils
from neutron.common import exceptions as n_exc
from neutron.i18n import _LE
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class IpLinkSupportError(n_exc.NeutronException):
    """Base exception for ip link capability-detection errors."""
    pass
class UnsupportedIpLinkCommand(IpLinkSupportError):
    """Raised when the ``ip link`` command itself cannot be executed."""
    message = _("ip link command is not supported: %(reason)s")
class InvalidIpLinkCapability(IpLinkSupportError):
    """Raised when a required ip link VF capability is not available."""
    message = _("ip link capability %(capability)s is not supported")
class IpLinkConstants(object):
    """Capability keywords as they appear in ``ip link`` help output."""
    IP_LINK_CAPABILITY_STATE = "state"
    IP_LINK_CAPABILITY_VLAN = "vlan"
    IP_LINK_CAPABILITY_RATE = "rate"
    IP_LINK_CAPABILITY_SPOOFCHK = "spoofchk"
    IP_LINK_SUB_CAPABILITY_QOS = "qos"
class IpLinkSupport(object):
    """Detects VF capability support by parsing ``ip link help`` output.

    The kernel's ``ip`` utility documents the supported ``vf NUM``
    sub-options in its help text; these helpers extract that block and
    probe it for specific capabilities.
    """

    # NOTE: raw strings so the regex backslashes (e.g. ``\[``) are not
    # interpreted as string escape sequences — non-raw invalid escapes are
    # deprecated and raise warnings on modern Python.
    VF_BLOCK_REGEX = r"\[ vf NUM(?P<vf_block>.*) \] \]"
    CAPABILITY_REGEX = r"\[ %s (.*)"
    SUB_CAPABILITY_REGEX = r"\[ %(cap)s (.*) \[ %(subcap)s (.*)"

    @classmethod
    def get_vf_mgmt_section(cls, root_helper=None):
        """Parse ip link help output and return the ``vf NUM`` block.

        :param root_helper: root permission helper
        :returns: the contents of the vf block, or None when no vf
                  section is present in the help output
        """
        output = cls._get_ip_link_output(root_helper)
        vf_block_pattern = re.search(cls.VF_BLOCK_REGEX,
                                     output,
                                     re.DOTALL | re.MULTILINE)
        if vf_block_pattern:
            return vf_block_pattern.group("vf_block")

    @classmethod
    def vf_mgmt_capability_supported(cls, vf_section, capability,
                                     subcapability=None):
        """Validate vf capability support.

        Checks if the given vf capability (and sub capability,
        if given) is supported.

        :param vf_section: vf NUM block content (may be None/empty)
        :param capability: for example: vlan, rate, spoofchk, state
        :param subcapability: for example: qos
        :returns: True when the capability appears in the section
        """
        if not vf_section:
            return False
        if subcapability:
            regex = cls.SUB_CAPABILITY_REGEX % {"cap": capability,
                                                "subcap": subcapability}
        else:
            regex = cls.CAPABILITY_REGEX % capability
        pattern_match = re.search(regex, vf_section,
                                  re.DOTALL | re.MULTILINE)
        return pattern_match is not None

    @classmethod
    def _get_ip_link_output(cls, root_helper):
        """Get the output of the ``ip link help`` command.

        Note: ip link help returns an error and writes its output to
        stderr, so we get the output from there. However, if this issue
        is ever solved and the command writes to stdout, we will get
        the output from there too.

        :raises UnsupportedIpLinkCommand: if executing ``ip`` fails
        """
        try:
            ip_cmd = ['ip', 'link', 'help']
            _stdout, _stderr = utils.execute(
                ip_cmd, root_helper,
                check_exit_code=False,
                return_stderr=True,
                log_fail_as_error=False)
        except Exception as e:
            LOG.exception(_LE("Failed executing ip command"))
            raise UnsupportedIpLinkCommand(reason=e)
        return _stdout or _stderr
|
yeming233/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/stacks/tables.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import urlresolvers
from django.http import Http404
from django.template.defaultfilters import title
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.utils import filters
from heatclient import exc
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.stacks import mappings
class LaunchStack(tables.LinkAction):
    """Table action linking to the stack launch (template selection) form."""
    name = "launch"
    verbose_name = _("Launch Stack")
    url = "horizon:project:stacks:select_template"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("orchestration", "stacks:validate_template"),
                    ("orchestration", "stacks:create"),)
class PreviewStack(tables.LinkAction):
    """Table action linking to the stack preview form."""
    name = "preview"
    verbose_name = _("Preview Stack")
    url = "horizon:project:stacks:preview_template"
    classes = ("ajax-modal",)
    icon = "eye"
    policy_rules = (("orchestration", "stacks:validate_template"),
                    ("orchestration", "stacks:preview"),)
class CheckStack(tables.BatchAction):
    """Batch action that triggers heat's "check" on the selected stacks."""
    name = "check"
    verbose_name = _("Check Stack")
    policy_rules = (("orchestration", "actions:action"),)
    icon = "check-square"

    @staticmethod
    def action_present(count):
        # Label shown before/while the action runs.
        return ungettext_lazy(
            u"Check Stack",
            u"Check Stacks",
            count
        )

    @staticmethod
    def action_past(count):
        # Label shown after the action has completed.
        return ungettext_lazy(
            u"Checked Stack",
            u"Checked Stacks",
            count
        )

    def action(self, request, stack_id):
        # NOTE(review): unlike SuspendStack/ResumeStack below, failures are
        # not caught here — presumably left to the BatchAction machinery;
        # confirm this asymmetry is intended.
        api.heat.action_check(request, stack_id)
class SuspendStack(tables.BatchAction):
    """Batch action that suspends the selected stacks."""
    name = "suspend"
    verbose_name = _("Suspend Stack")
    policy_rules = (("orchestration", "actions:action"),)
    icon = "pause"

    @staticmethod
    def action_present(count):
        # Label shown before/while the action runs.
        return ungettext_lazy(
            u"Suspend Stack",
            u"Suspend Stacks",
            count
        )

    @staticmethod
    def action_past(count):
        # Label shown after the action has completed.
        return ungettext_lazy(
            u"Suspended Stack",
            u"Suspended Stacks",
            count
        )

    def action(self, request, stack_id):
        """Suspend one stack, reporting failures to the UI."""
        try:
            api.heat.action_suspend(request, stack_id)
        except Exception:
            msg = _('Failed to suspend stack.')
            exceptions.handle(request, msg)
class ResumeStack(tables.BatchAction):
    """Batch action that resumes the selected (suspended) stacks."""
    name = "resume"
    verbose_name = _("Resume Stack")
    policy_rules = (("orchestration", "actions:action"),)
    icon = "play"

    @staticmethod
    def action_present(count):
        # Label shown before/while the action runs.
        return ungettext_lazy(
            u"Resume Stack",
            u"Resume Stacks",
            count
        )

    @staticmethod
    def action_past(count):
        # Label shown after the action has completed.
        return ungettext_lazy(
            u"Resumed Stack",
            u"Resumed Stacks",
            count
        )

    def action(self, request, stack_id):
        """Resume one stack, reporting failures to the UI."""
        try:
            api.heat.action_resume(request, stack_id)
        except Exception:
            msg = _('Failed to resume stack.')
            exceptions.handle(request, msg)
class ChangeStackTemplate(tables.LinkAction):
    """Row action linking to the change-template (stack update) form."""
    name = "edit"
    verbose_name = _("Change Stack Template")
    url = "horizon:project:stacks:change_template"
    classes = ("ajax-modal",)
    icon = "pencil"

    def get_link_url(self, stack):
        # The named URL takes the stack id as its single argument.
        return urlresolvers.reverse(self.url, args=[stack.id])
class DeleteStack(tables.DeleteAction):
    """Delete action for stacks; disabled once a stack is fully deleted."""

    @staticmethod
    def action_present(count):
        # Label shown before/while the action runs.
        return ungettext_lazy(
            u"Delete Stack",
            u"Delete Stacks",
            count
        )

    @staticmethod
    def action_past(count):
        # Label shown after the action has completed.
        return ungettext_lazy(
            u"Deleted Stack",
            u"Deleted Stacks",
            count
        )

    policy_rules = (("orchestration", "stacks:delete"),)

    def delete(self, request, stack_id):
        """Request deletion of one stack, reporting failures to the UI."""
        try:
            api.heat.stack_delete(request, stack_id)
        except Exception:
            msg = _('Failed to delete stack.')
            exceptions.handle(request, msg)

    def allowed(self, request, stack):
        # Hide the action for already-deleted rows; table-level invocations
        # (stack is None) are always allowed.
        if stack is not None:
            return stack.stack_status != 'DELETE_COMPLETE'
        return True
class StacksUpdateRow(tables.Row):
    """Ajax-updated row that polls an individual stack for status changes."""
    ajax = True

    def can_be_selected(self, datum):
        # Fully-deleted stacks cannot be acted upon.
        return datum.stack_status != 'DELETE_COMPLETE'

    def get_data(self, request, stack_id):
        """Fetch fresh data for one stack row (called by the ajax poller)."""
        try:
            stack = api.heat.stack_get(request, stack_id)
            if stack.stack_status == 'DELETE_COMPLETE':
                # returning 404 to the ajax call removes the
                # row from the table on the ui
                raise Http404
            return stack
        except Http404:
            raise
        except Exception as e:
            # Surface the error to the user, then let the caller see it too.
            messages.error(request, e)
            raise
class StacksFilterAction(tables.FilterAction):
    """Server-side filter for the stacks table (by name, id or status)."""
    filter_type = 'server'
    filter_choices = (('name', _('Stack Name ='), True, _('Case-sensitive')),
                      ('id', _('Stack ID ='), True),
                      ('status', _('Status ='), True))
class StacksTable(tables.DataTable):
    """Table listing heat stacks with ajax status polling and actions."""

    # (status suffix, is_complete) pairs driving the hidden polling column.
    STATUS_CHOICES = (
        ("Complete", True),
        ("Failed", False),
    )
    # Translated display labels for every heat stack status value.
    STACK_STATUS_DISPLAY_CHOICES = (
        ("init_in_progress", pgettext_lazy("current status of stack",
                                           u"Init In Progress")),
        ("init_complete", pgettext_lazy("current status of stack",
                                        u"Init Complete")),
        ("init_failed", pgettext_lazy("current status of stack",
                                      u"Init Failed")),
        ("create_in_progress", pgettext_lazy("current status of stack",
                                             u"Create In Progress")),
        ("create_complete", pgettext_lazy("current status of stack",
                                          u"Create Complete")),
        ("create_failed", pgettext_lazy("current status of stack",
                                        u"Create Failed")),
        ("delete_in_progress", pgettext_lazy("current status of stack",
                                             u"Delete In Progress")),
        ("delete_complete", pgettext_lazy("current status of stack",
                                          u"Delete Complete")),
        ("delete_failed", pgettext_lazy("current status of stack",
                                        u"Delete Failed")),
        ("update_in_progress", pgettext_lazy("current status of stack",
                                             u"Update In Progress")),
        ("update_complete", pgettext_lazy("current status of stack",
                                          u"Update Complete")),
        ("update_failed", pgettext_lazy("current status of stack",
                                        u"Update Failed")),
        ("rollback_in_progress", pgettext_lazy("current status of stack",
                                               u"Rollback In Progress")),
        ("rollback_complete", pgettext_lazy("current status of stack",
                                            u"Rollback Complete")),
        ("rollback_failed", pgettext_lazy("current status of stack",
                                          u"Rollback Failed")),
        ("suspend_in_progress", pgettext_lazy("current status of stack",
                                              u"Suspend In Progress")),
        ("suspend_complete", pgettext_lazy("current status of stack",
                                           u"Suspend Complete")),
        ("suspend_failed", pgettext_lazy("current status of stack",
                                         u"Suspend Failed")),
        ("resume_in_progress", pgettext_lazy("current status of stack",
                                             u"Resume In Progress")),
        ("resume_complete", pgettext_lazy("current status of stack",
                                          u"Resume Complete")),
        ("resume_failed", pgettext_lazy("current status of stack",
                                        u"Resume Failed")),
        ("adopt_in_progress", pgettext_lazy("current status of stack",
                                            u"Adopt In Progress")),
        ("adopt_complete", pgettext_lazy("current status of stack",
                                         u"Adopt Complete")),
        ("adopt_failed", pgettext_lazy("current status of stack",
                                       u"Adopt Failed")),
        ("snapshot_in_progress", pgettext_lazy("current status of stack",
                                               u"Snapshot In Progress")),
        ("snapshot_complete", pgettext_lazy("current status of stack",
                                            u"Snapshot Complete")),
        ("snapshot_failed", pgettext_lazy("current status of stack",
                                          u"Snapshot Failed")),
        ("check_in_progress", pgettext_lazy("current status of stack",
                                            u"Check In Progress")),
        ("check_complete", pgettext_lazy("current status of stack",
                                         u"Check Complete")),
        ("check_failed", pgettext_lazy("current status of stack",
                                       u"Check Failed")),
    )
    name = tables.Column("stack_name",
                         verbose_name=_("Stack Name"),
                         link="horizon:project:stacks:detail",)
    created = tables.Column("creation_time",
                            verbose_name=_("Created"),
                            filters=(filters.parse_isotime,
                                     filters.timesince_or_never))
    updated = tables.Column("updated_time",
                            verbose_name=_("Updated"),
                            filters=(filters.parse_isotime,
                                     filters.timesince_or_never))
    # Hidden column used only to drive the ajax row-update machinery.
    status = tables.Column("status",
                           hidden=True,
                           status=True,
                           status_choices=STATUS_CHOICES)
    stack_status = tables.Column("stack_status",
                                 verbose_name=_("Status"),
                                 display_choices=STACK_STATUS_DISPLAY_CHOICES)

    def get_object_display(self, stack):
        # Human-readable identifier used in confirmation messages.
        return stack.stack_name

    class Meta(object):
        name = "stacks"
        verbose_name = _("Stacks")
        pagination_param = 'stack_marker'
        status_columns = ["status", ]
        row_class = StacksUpdateRow
        table_actions_menu = (CheckStack,
                              SuspendStack,
                              ResumeStack,)
        table_actions = (LaunchStack,
                         PreviewStack,
                         DeleteStack,
                         StacksFilterAction,)
        row_actions = (CheckStack,
                       SuspendStack,
                       ResumeStack,
                       ChangeStackTemplate,
                       DeleteStack,)
def get_resource_url(obj):
    """Return the detail URL for a stack resource.

    Returns None when the "resource" is actually the stack itself
    (its physical resource id equals the stack id), so no link is shown.
    """
    if obj.physical_resource_id != obj.stack_id:
        return urlresolvers.reverse('horizon:project:stacks:resource',
                                    args=(obj.stack_id, obj.resource_name))
    return None
class EventsTable(tables.DataTable):
    """Read-only table of a stack's events."""
    logical_resource = tables.Column('resource_name',
                                     verbose_name=_("Stack Resource"),
                                     link=get_resource_url)
    physical_resource = tables.Column('physical_resource_id',
                                      verbose_name=_("Resource"))
    timestamp = tables.Column('event_time',
                              verbose_name=_("Time Since Event"),
                              filters=(filters.parse_isotime,
                                       filters.timesince_or_never))
    # Title-case the raw status and replace underscores for display.
    status = tables.Column("resource_status",
                           filters=(title, filters.replace_underscores),
                           verbose_name=_("Status"),)
    statusreason = tables.Column("resource_status_reason",
                                 verbose_name=_("Status Reason"),)

    class Meta(object):
        name = "events"
        verbose_name = _("Stack Events")
class ResourcesUpdateRow(tables.Row):
    """Ajax-updated row that polls a single stack resource."""
    ajax = True

    def get_data(self, request, resource_name):
        """Fetch fresh data for one resource row (called by the ajax poller)."""
        try:
            # The parent table stores the stack (see ResourcesTable.__init__).
            stack = self.table.stack
            stack_identifier = '%s/%s' % (stack.stack_name, stack.id)
            return api.heat.resource_get(
                request, stack_identifier, resource_name)
        except exc.HTTPNotFound:
            # returning 404 to the ajax call removes the
            # row from the table on the ui
            raise Http404
        except Exception as e:
            # NOTE(review): the error is reported but not re-raised here
            # (unlike StacksUpdateRow), so the row keeps stale data —
            # confirm this asymmetry is intended.
            messages.error(request, e)
class ResourcesTable(tables.DataTable):
    """Table of a single stack's resources."""

    class StatusColumn(tables.Column):
        # Strip the action prefix (text up to and including the first "_")
        # so only the completion part of the status drives row polling.
        def get_raw_data(self, datum):
            return datum.resource_status.partition("_")[2]

    # (status suffix, is_complete) pairs driving the hidden polling column.
    STATUS_CHOICES = (
        ("Complete", True),
        ("Failed", False),
    )
    # Reuse the stack status labels for resource statuses.
    STATUS_DISPLAY_CHOICES = StacksTable.STACK_STATUS_DISPLAY_CHOICES

    logical_resource = tables.Column('resource_name',
                                     verbose_name=_("Stack Resource"),
                                     link=get_resource_url)
    physical_resource = tables.Column('physical_resource_id',
                                      verbose_name=_("Resource"),
                                      link=mappings.resource_to_url)
    resource_type = tables.Column("resource_type",
                                  verbose_name=_("Stack Resource Type"),)
    updated_time = tables.Column('updated_time',
                                 verbose_name=_("Date Updated"),
                                 filters=(filters.parse_isotime,
                                          filters.timesince_or_never))
    status = tables.Column("resource_status",
                           verbose_name=_("Status"),
                           display_choices=STATUS_DISPLAY_CHOICES)
    statusreason = tables.Column("resource_status_reason",
                                 verbose_name=_("Status Reason"),)
    # Hidden column used only to drive the ajax row-update machinery.
    status_hidden = StatusColumn("status",
                                 hidden=True,
                                 status=True,
                                 status_choices=STATUS_CHOICES)

    def __init__(self, request, data=None,
                 needs_form_wrapper=None, **kwargs):
        super(ResourcesTable, self).__init__(
            request, data, needs_form_wrapper, **kwargs)
        # Keep the parent stack around; ResourcesUpdateRow uses it to build
        # the stack identifier for ajax row refreshes.
        self.stack = kwargs['stack']

    def get_object_id(self, datum):
        # Resource names identify rows within a stack.
        return datum.resource_name

    class Meta(object):
        name = "resources"
        verbose_name = _("Stack Resources")
        status_columns = ["status_hidden", ]
        row_class = ResourcesUpdateRow
|
rmcgibbo/scipy
|
refs/heads/master
|
scipy/optimize/tests/test_lbfgsb_hessinv.py
|
100
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, TestCase, run_module_suite, assert_allclose
import scipy.linalg
from scipy.optimize import minimize
def test_1():
    """hess_inv matvec agrees with its dense form on a 1-D quartic."""
    def fun_and_grad(x):
        # Objective x**4 with its analytic gradient 4*x**3.
        return x**4, 4*x**3

    for tol in (1e-8, 1e-12, 1e-20):
        for m in range(20, 35):
            res = minimize(fun=fun_and_grad, jac=True, method='L-BFGS-B',
                           x0=20, options={'gtol': tol, 'maxcor': m})
            via_matvec = res.hess_inv(np.array([1])).reshape(1, 1)
            via_dense = res.hess_inv.todense()
            assert_allclose(via_matvec, via_dense)
def test_2():
    """Dense hess_inv matches its matvec columns and the BFGS estimate."""
    H0 = [[3, 0], [1, 2]]

    def quad(x):
        # Quadratic form x^T inv(H0) x, whose inverse Hessian relates to H0.
        return np.dot(x, np.dot(scipy.linalg.inv(H0), x))

    res_lbfgs = minimize(fun=quad, method='L-BFGS-B', x0=[10, 20])
    res_bfgs = minimize(fun=quad, method='BFGS', x0=[10, 20])

    dense = res_lbfgs.hess_inv.todense()
    columns = np.vstack((
        res_lbfgs.hess_inv(np.array([1, 0])),
        res_lbfgs.hess_inv(np.array([0, 1]))))

    # A (2, 1) input must give the same result as a flat (2,) input.
    assert_allclose(
        res_lbfgs.hess_inv(np.array([1, 0]).reshape(2, 1)).reshape(-1),
        res_lbfgs.hess_inv(np.array([1, 0])))
    assert_allclose(dense, columns)
    assert_allclose(dense, res_bfgs.hess_inv, rtol=1e-2, atol=0.03)
# Allow running this test module directly.
if __name__ == "__main__":
    run_module_suite()
|
frewsxcv/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py
|
235
|
def test_quick(setup):
    """Placeholder test that only exercises the ``setup`` fixture.

    # NOTE(review): ``setup`` is presumably a costly session-scoped
    # fixture defined in a conftest — confirm there.
    """
    pass
|
frankiecjunle/yunblog
|
refs/heads/master
|
venv/lib/python2.7/site-packages/requests/packages/charade/euctwfreq.py
|
3132
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Typical distribution ratio (~25% of the ideal ratio computed above).
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75

# Char to FreqOrder table ,
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
|
cleech/linux
|
refs/heads/master
|
scripts/gdb/linux/proc.py
|
313
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# Kernel proc information reader
#
# Copyright (c) 2016 Linaro Ltd
#
# Authors:
# Kieran Bingham <kieran.bingham@linaro.org>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import constants
from linux import utils
from linux import tasks
from linux import lists
from struct import *
class LxCmdLine(gdb.Command):
    """Print the kernel command line (equivalent to
    `cat /proc/cmdline` on a running target)."""

    def __init__(self):
        super(LxCmdLine, self).__init__("lx-cmdline", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # saved_command_line holds the boot command line as a C string.
        cmdline = gdb.parse_and_eval("saved_command_line").string()
        gdb.write("{}\n".format(cmdline))

LxCmdLine()
class LxVersion(gdb.Command):
    """Print the kernel version banner (equivalent to
    `cat /proc/version` on a running target)."""

    def __init__(self):
        super(LxVersion, self).__init__("lx-version", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # linux_banner already contains a trailing newline, so none is added.
        banner = gdb.parse_and_eval("(char *)linux_banner")
        gdb.write(banner.string())

LxVersion()
# Resource Structure Printers
# /proc/iomem
# /proc/ioports
def get_resources(resource, depth):
    """Yield (resource, depth) pairs for *resource*, each of its siblings,
    and, recursively, all of their children (children at depth + 1).

    Traversal is pre-order: a node is yielded before its child subtree,
    and the subtree before the node's next sibling.
    """
    node = resource
    while node:
        yield node, depth
        child = node['child']
        if child:
            for pair in get_resources(child, depth + 1):
                yield pair
        node = node['sibling']
def show_lx_resources(resource_str):
    """Print the resource tree named by *resource_str* in /proc/iomem style:
    indented `start-end : name` lines, two spaces per nesting level."""
    root = gdb.parse_and_eval(resource_str)
    # Narrow (16-bit) address spaces print 4 hex digits, otherwise 8.
    width = 8 if root['end'] >= 0x10000 else 4
    # The root node itself is not listed; walk from its first child.
    for res, depth in get_resources(root['child'], 0):
        line = ("{indent}{start:0{w}x}-{end:0{w}x} : {name}\n"
                .format(indent=" " * depth * 2,
                        start=int(res['start']),
                        end=int(res['end']),
                        w=width,
                        name=res['name'].string()))
        gdb.write(line)
class LxIOMem(gdb.Command):
    """Identify the IO memory resource locations defined by the kernel
    (equivalent to `cat /proc/iomem` on a running target)."""

    def __init__(self):
        super(LxIOMem, self).__init__("lx-iomem", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # Delegate to the shared resource-tree printer.
        return show_lx_resources("iomem_resource")

LxIOMem()
class LxIOPorts(gdb.Command):
    """Identify the IO port resource locations defined by the kernel
    (equivalent to `cat /proc/ioports` on a running target)."""

    def __init__(self):
        super(LxIOPorts, self).__init__("lx-ioports", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # Delegate to the shared resource-tree printer.
        return show_lx_resources("ioport_resource")

LxIOPorts()
# Mount namespace viewer
# /proc/mounts
def info_opts(lst, opt):
    """Return the concatenation of the option strings in the mapping *lst*
    whose flag-bit keys are set in the bitmask *opt*."""
    return "".join(text for flag, text in lst.items() if opt & flag)
# super_block->s_flags bits and the option text each contributes to the
# options column of the lx-mounts output.
FS_INFO = {constants.LX_SB_SYNCHRONOUS: ",sync",
           constants.LX_SB_MANDLOCK: ",mand",
           constants.LX_SB_DIRSYNC: ",dirsync",
           constants.LX_SB_NOATIME: ",noatime",
           constants.LX_SB_NODIRATIME: ",nodiratime"}
# Per-mount (vfsmount->mnt_flags) bits and their option text, reported
# after the superblock options.
MNT_INFO = {constants.LX_MNT_NOSUID: ",nosuid",
            constants.LX_MNT_NODEV: ",nodev",
            constants.LX_MNT_NOEXEC: ",noexec",
            constants.LX_MNT_NOATIME: ",noatime",
            constants.LX_MNT_NODIRATIME: ",nodiratime",
            constants.LX_MNT_RELATIME: ",relatime"}
# Lazily-resolved gdb type objects for `struct mount`; the pointer type is
# what list_for_each_entry() needs to cast mnt_list entries.
mount_type = utils.CachedType("struct mount")
mount_ptr_type = mount_type.get_type().pointer()
class LxMounts(gdb.Command):
    """Report the VFS mounts of the current process namespace.

    Equivalent to `cat /proc/mounts` on a running target.
    An integer value can be supplied to display the mount
    values of that process namespace.
    """

    def __init__(self):
        super(LxMounts, self).__init__("lx-mounts", gdb.COMMAND_DATA)

    # Equivalent to proc_namespace.c:show_vfsmnt
    # However, that has the ability to call into s_op functions
    # whereas we cannot and must make do with the information we can obtain.
    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        if len(argv) >= 1:
            try:
                pid = int(argv[0])
            except ValueError:
                # int() raises ValueError on malformed input; the previous
                # `except gdb.error` could never fire, so a bad argument
                # escaped as a raw traceback instead of this message.
                raise gdb.GdbError("Provide a PID as integer value")
        else:
            # Default to PID 1 (init), whose namespace is the usual one.
            pid = 1
        task = tasks.get_task_by_pid(pid)
        if not task:
            raise gdb.GdbError("Couldn't find a process with PID {}"
                               .format(pid))
        namespace = task['nsproxy']['mnt_ns']
        if not namespace:
            raise gdb.GdbError("No namespace for current process")
        gdb.write("{:^18} {:^15} {:>9} {} {} options\n".format(
                  "mount", "super_block", "devname", "pathname", "fstype"))
        for vfs in lists.list_for_each_entry(namespace['list'],
                                             mount_ptr_type, "mnt_list"):
            devname = vfs['mnt_devname'].string()
            devname = devname if devname else "none"
            # Rebuild the mount path by walking mnt_parent links up to the
            # root mount (which is its own parent).
            pathname = ""
            parent = vfs
            while True:
                mntpoint = parent['mnt_mountpoint']
                pathname = utils.dentry_name(mntpoint) + pathname
                if (parent == parent['mnt_parent']):
                    break
                parent = parent['mnt_parent']
            if (pathname == ""):
                pathname = "/"
            superblock = vfs['mnt']['mnt_sb']
            fstype = superblock['s_type']['name'].string()
            s_flags = int(superblock['s_flags'])
            m_flags = int(vfs['mnt']['mnt_flags'])
            rd = "ro" if (s_flags & constants.LX_SB_RDONLY) else "rw"
            # Trailing "0 0" mimics the dump/pass fields of /proc/mounts.
            gdb.write("{} {} {} {} {} {}{}{} 0 0\n".format(
                vfs.format_string(), superblock.format_string(), devname,
                pathname, fstype, rd, info_opts(FS_INFO, s_flags),
                info_opts(MNT_INFO, m_flags)))

LxMounts()
class LxFdtDump(gdb.Command):
    """Output Flattened Device Tree header and dump FDT blob to the filename
       specified as the command argument. Equivalent to
       'cat /proc/fdt > fdtdump.dtb' on a running target"""

    def __init__(self):
        super(LxFdtDump, self).__init__("lx-fdtdump", gdb.COMMAND_DATA,
                                        gdb.COMPLETE_FILENAME)

    def fdthdr_to_cpu(self, fdt_header):
        """Return the 7 header words of *fdt_header* as host integers.

        The values are re-packed big-endian (the FDT on-disk byte order)
        and unpacked with the target's endianness, so little-endian targets
        get the required byte swap.
        """
        fdt_header_be = ">IIIIIII"
        fdt_header_le = "<IIIIIII"
        if utils.get_target_endianness() == 1:
            output_fmt = fdt_header_le
        else:
            output_fmt = fdt_header_be
        return unpack(output_fmt, pack(fdt_header_be,
                                       fdt_header['magic'],
                                       fdt_header['totalsize'],
                                       fdt_header['off_dt_struct'],
                                       fdt_header['off_dt_strings'],
                                       fdt_header['off_mem_rsvmap'],
                                       fdt_header['version'],
                                       fdt_header['last_comp_version']))

    def invoke(self, arg, from_tty):
        if not constants.LX_CONFIG_OF:
            raise gdb.GdbError("Kernel not compiled with CONFIG_OF\n")
        if len(arg) == 0:
            filename = "fdtdump.dtb"
        else:
            filename = arg
        py_fdt_header_ptr = gdb.parse_and_eval(
            "(const struct fdt_header *) initial_boot_params")
        py_fdt_header = py_fdt_header_ptr.dereference()
        fdt_header = self.fdthdr_to_cpu(py_fdt_header)
        # Sanity-check the magic before trusting totalsize below.
        if fdt_header[0] != constants.LX_OF_DT_HEADER:
            raise gdb.GdbError("No flattened device tree magic found\n")
        gdb.write("fdt_magic: 0x{:02X}\n".format(fdt_header[0]))
        gdb.write("fdt_totalsize: 0x{:02X}\n".format(fdt_header[1]))
        gdb.write("off_dt_struct: 0x{:02X}\n".format(fdt_header[2]))
        gdb.write("off_dt_strings: 0x{:02X}\n".format(fdt_header[3]))
        gdb.write("off_mem_rsvmap: 0x{:02X}\n".format(fdt_header[4]))
        gdb.write("version: {}\n".format(fdt_header[5]))
        gdb.write("last_comp_version: {}\n".format(fdt_header[6]))
        inf = gdb.inferiors()[0]
        # fdt_header[1] is totalsize: read the whole blob from target memory.
        fdt_buf = utils.read_memoryview(inf, py_fdt_header_ptr,
                                        fdt_header[1]).tobytes()
        try:
            # open()/write() raise OSError, never gdb.error: the previous
            # `except gdb.error` could not catch a failed open. The `with`
            # also guarantees the file is closed on a write failure.
            with open(filename, 'wb') as f:
                f.write(fdt_buf)
        except OSError:
            raise gdb.GdbError("Could not open file to dump fdt")
        gdb.write("Dumped fdt blob to " + filename + "\n")

LxFdtDump()
|
mindw/shapely
|
refs/heads/xy
|
shapely/geometry/collection.py
|
19
|
"""Multi-part collections of geometries
"""
from ctypes import c_void_p
from shapely.geos import lgeos
from shapely.geometry.base import BaseGeometry
from shapely.geometry.base import BaseMultipartGeometry
from shapely.geometry.base import HeterogeneousGeometrySequence
from shapely.geometry.base import geos_geom_from_py
class GeometryCollection(BaseMultipartGeometry):
    """A heterogeneous collection of geometries

    Attributes
    ----------
    geoms : sequence
        A sequence of Shapely geometry instances
    """

    def __init__(self, geoms=None):
        """
        Parameters
        ----------
        geoms : list
            A list of shapely geometry instances, which may be heterogeneous.

        Example
        -------
        Create a GeometryCollection with a Point and a LineString

        >>> p = Point(51, -1)
        >>> l = LineString([(52, -1), (49, 2)])
        >>> gc = GeometryCollection([p, l])
        """
        BaseMultipartGeometry.__init__(self)
        # A falsy geoms (None or empty) leaves the collection empty: no GEOS
        # geometry is created and is_empty stays True.
        if geoms:
            self._geom, self._ndim = geos_geometrycollection_from_py(geoms)

    @property
    def __geo_interface__(self):
        """GeoJSON-style mapping of this collection and its members."""
        geometries = [geom.__geo_interface__ for geom in self.geoms]
        return dict(type='GeometryCollection', geometries=geometries)

    @property
    def geoms(self):
        # An empty collection has no underlying GEOS sequence to wrap.
        if self.is_empty:
            return []
        return HeterogeneousGeometrySequence(self)
def geos_geometrycollection_from_py(ob):
    """Create a GEOS GeometryCollection from a sequence of geometries.

    Returns a (GEOS geometry pointer, ndim) tuple; ndim is 3 if any member
    has a z coordinate, else 2.
    """
    count = len(ob)
    ndim = 2
    handles = (c_void_p * count)()
    for i, member in enumerate(ob):
        assert isinstance(member, BaseGeometry)
        if member.has_z:
            ndim = 3
        geom, _ = geos_geom_from_py(member)
        handles[i] = geom
    # 7 is the GEOS type id for GEOMETRYCOLLECTION.
    return (lgeos.GEOSGeom_createCollection(7, handles, count), ndim)
# Test runner
def _test():
    """Run this module's doctests."""
    import doctest
    doctest.testmod()
if __name__ == "__main__":
    _test()
|
erikrose/pip
|
refs/heads/develop
|
pip/_vendor/html5lib/treeadapters/sax.py
|
1835
|
from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
# Prefix -> namespace map for foreign (SVG/MathML) attributes, built from
# html5lib's adjustForeignAttributes table; entries without a prefix are
# skipped since SAX prefix mappings require one.
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
    if prefix is not None:
        prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
    """Call SAX-like content handler based on treewalker walker.

    Emits startDocument/endDocument, the module's foreign-attribute prefix
    mappings, and element/character events for each walker token. Doctype
    tokens are skipped and comments are dropped.
    """
    handler.startDocument()
    for prefix, namespace in prefix_mapping.items():
        handler.startPrefixMapping(prefix, namespace)

    for token in walker:
        # Renamed from `type`, which shadowed the builtin.
        token_type = token["type"]
        if token_type == "Doctype":
            continue
        elif token_type in ("StartTag", "EmptyTag"):
            attrs = AttributesNSImpl(token["data"],
                                     unadjustForeignAttributes)
            handler.startElementNS((token["namespace"], token["name"]),
                                   token["name"],
                                   attrs)
            # An empty tag closes immediately after opening.
            if token_type == "EmptyTag":
                handler.endElementNS((token["namespace"], token["name"]),
                                     token["name"])
        elif token_type == "EndTag":
            handler.endElementNS((token["namespace"], token["name"]),
                                 token["name"])
        elif token_type in ("Characters", "SpaceCharacters"):
            handler.characters(token["data"])
        elif token_type == "Comment":
            # Comments are deliberately not forwarded to the handler.
            pass
        else:
            assert False, "Unknown token type"

    for prefix, namespace in prefix_mapping.items():
        handler.endPrefixMapping(prefix)
    handler.endDocument()
|
vgrem/SharePointOnline-REST-Python-Client
|
refs/heads/master
|
tests/test_sharepoint_comminication_site.py
|
1
|
import uuid
from unittest import TestCase
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.portal.SPSiteCreationRequest import SPSiteCreationRequest
from office365.sharepoint.portal.SPSiteManager import SPSiteManager
from office365.sharepoint.portal.SiteStatus import SiteStatus
from settings import settings
from tests.sharepoint_case import SPTestCase
def load_current_user(ctx):
    """Load and return the current web user from the client context *ctx*."""
    user = ctx.web.currentUser
    ctx.load(user)
    ctx.execute_query()
    return user
class TestCommunicationSite(TestCase):
    """Live-service tests for SharePoint communication-site management.

    NOTE(review): the tests are order-dependent (create -> status -> delete)
    and share state via the class attribute below; they rely on the runner
    executing them in name order (test1/test2/test3).
    """
    # Response of the create call, shared across the three ordered tests.
    site_response = None
    @classmethod
    def setUpClass(cls):
        # Authenticate once with user credentials and build the client
        # context and site manager used by every test.
        super(TestCommunicationSite, cls).setUpClass()
        ctx_auth = AuthenticationContext(url=settings['url'])
        ctx_auth.acquire_token_for_user(username=settings['user_credentials']['username'],
                                        password=settings['user_credentials']['password'])
        cls.client = ClientContext(settings['url'], ctx_auth)
        cls.site_manager = SPSiteManager(cls.client)
    def test1_create_site(self):
        """Create a communication site under a random URL and keep the response."""
        current_user = load_current_user(self.client)
        # Random hex suffix avoids collisions with earlier test runs.
        site_url = "{0}sites/{1}".format(settings["url"], uuid.uuid4().hex)
        request = SPSiteCreationRequest("CommSite123", site_url, current_user.properties['UserPrincipalName'])
        response = self.site_manager.create(request)
        self.client.execute_query()
        self.assertIsNotNone(response.SiteStatus)
        self.__class__.site_response = response
    def test2_get_site_status(self):
        """The site created in test1 should report a non-error status."""
        response = self.site_manager.get_status(self.__class__.site_response.SiteUrl)
        self.client.execute_query()
        self.assertIsNotNone(response.SiteStatus)
        self.assertTrue(response.SiteStatus != SiteStatus.Error)
    def test3_delete_site(self):
        """Clean up: delete the site created in test1."""
        self.site_manager.delete(self.__class__.site_response.SiteId)
        self.client.execute_query()
|
jaysonmuyot/Final-project
|
refs/heads/master
|
tailbone/files/__init__.py
|
34
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# shared resources and global variables
from tailbone import AppError
from tailbone import PREFIX
from tailbone import as_json
from tailbone import BreakError
from tailbone import config
from tailbone import DEBUG
from tailbone import restful
import re
import urllib
import webapp2
# Image support (serving URLs for uploaded images) is optional: it needs PIL
# and the App Engine images API. Degrade gracefully when they are missing.
HAS_PIL = True
try:
    import PIL
    from google.appengine.api.images import delete_serving_url
    from google.appengine.api.images import get_serving_url_async
except ImportError:
    # Only a missing module should disable image support; the previous bare
    # `except:` would also have swallowed unrelated errors (even SystemExit).
    HAS_PIL = False

from google.appengine.ext.ndb import blobstore
from google.appengine.ext import blobstore as bs
from google.appengine.ext.webapp import blobstore_handlers

# Content types for which a serving (resizing) URL can be generated.
re_image = re.compile(r"image/(png|jpeg|jpg|webp|gif|bmp|tiff|ico)", re.IGNORECASE)
class BlobInfo(blobstore.BlobInfo):
    """BlobInfo whose dict serialization also carries the blob key as "Id"."""

    def to_dict(self, *args, **kwargs):
        info = super(BlobInfo, self).to_dict(*args, **kwargs)
        info["Id"] = str(self.key())
        return info
def blob_info_to_dict(blob_info):
    """Serialize a blobstore BlobInfo into a plain dict for JSON output."""
    d = {prop: getattr(blob_info, prop)
         for prop in ("content_type", "creation", "filename", "size")}
    key = blob_info.key()
    # Only images get a serving URL, and only when PIL support is available.
    if HAS_PIL and re_image.match(blob_info.content_type):
        d["image_url"] = get_serving_url_async(key)
    d["Id"] = str(key)
    return d
class FilesHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """REST handler for blobstore files: query, create-upload-url, fetch,
    delete. Note: Python 2 code (urllib.unquote)."""
    @as_json
    def get(self, key):
        if key == "":  # query
            # Listing all files is admin-only.
            if not config.is_current_user_admin():
                raise AppError("User must be administrator.")
            return restful.query(self, BlobInfo)
        elif key == "create":
            # Hand back a one-shot blobstore upload URL for the client to POST to.
            return {
                "upload_url": blobstore.create_upload_url("/api/files/upload")
            }
        key = str(urllib.unquote(key))
        blob_info = bs.BlobInfo.get(key)
        if blob_info:
            self.send_blob(blob_info)
            # BreakError stops as_json from wrapping the raw blob response.
            raise BreakError
        else:
            self.error(404)
            return {"error": "File not found with key " + key}
    @as_json
    def post(self, _):
        # Uploads must go through the blobstore upload URL, not this handler.
        raise AppError("You must make a GET call to /api/files/create to get a POST url.")
    @as_json
    def put(self, _):
        raise AppError("PUT is not supported for the files api.")
    @as_json
    def delete(self, key):
        # Deletion is admin-only.
        if not config.is_current_user_admin():
            raise AppError("User must be administrator.")
        key = blobstore.BlobKey(str(urllib.unquote(key)))
        blob_info = BlobInfo.get(key)
        if blob_info:
            blob_info.delete()
            # Also drop the cached image serving URL, if one was created.
            if HAS_PIL and re_image.match(blob_info.content_type):
                delete_serving_url(key)
            return {}
        else:
            self.error(404)
            return {"error": "File not found with key " + key}
class FilesUploadHandler(blobstore_handlers.BlobstoreUploadHandler):
    """Receives blobstore upload callbacks and echoes metadata per upload."""

    @as_json
    def post(self):
        results = []
        for upload in self.get_uploads():
            results.append(blob_info_to_dict(upload))
        return results
# Route upload callbacks to the dedicated blobstore handler; everything else
# under files/ goes to the REST handler (key may be empty, "create", or a
# blob key).
app = webapp2.WSGIApplication([
    (r"{}files/upload".format(PREFIX), FilesUploadHandler),
    (r"{}files/?(.*)".format(PREFIX), FilesHandler),
], debug=DEBUG)
|
invisiblek/android_kernel_oneplus_msm8974
|
refs/heads/cm-12.0
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    # Class-level sentinel; main() replaces it with the real "Root (Nowhere)"
    # node before parsing begins.
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        # func:   function name
        # time:   timestamp string, or None for synthetic nodes (ROOT, or
        #         callers whose own entry event was never seen)
        # parent: parent node; defaults to the shared ROOT
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        on a parent, then create it as new child of root
        @return: A reference to the parent.
        """
        # Walk up the ancestor chain looking for a node named `func`.
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            # Caller not found among ancestors: attach it under ROOT with no
            # timestamp, since its own entry event was never recorded.
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Render this node, then recurse into the children, extending the
        # ASCII branch prefix. For the last child the continuation bar is
        # blanked out so the drawing closes cleanly.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)
        i = 0
        if lastChild:
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                " |", True)
            i += 1
        return s
class BrokenLineException(Exception):
    """Signals a truncated trace line.

    The final line of a trace captured through a broken pipe is usually
    incomplete; raising this lets the caller stop and ignore it.
    """
class CommentLineException(Exception):
    """Signals a comment line (as found at the top of the trace file).

    Callers catch this and simply skip the line.
    """
def parseLine(line):
    """Parse one function-tracer line into (calltime, callee, caller).

    Expected shape: "<task-pid> [<cpu>] <time>: <callee> <-<caller>".

    @raise CommentLineException: the line is a comment (leading '#').
    @raise BrokenLineException: the line does not match the expected format
        (typically the truncated final line of a broken pipe).
    """
    line = line.strip()
    if line.startswith("#"):
        raise CommentLineException
    # Raw string instead of doubled backslashes -- same pattern, less
    # error-prone: skip up to the closing ']', then capture time, callee
    # and caller.
    m = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", line)
    if m is None:
        raise BrokenLineException
    return (m.group(1), m.group(2), m.group(3))
def main():
    """Read a raw function trace on stdin and build/print the call tree."""
    # The virtual root adopts every call whose caller was never seen.
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Truncated line (broken pipe): stop processing entirely.
            break
        except CommentLineException:
            # Header/comment lines are skipped.
            continue
        # Reposition at the caller's node (creating it under ROOT if it is
        # unknown), then descend into the callee.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    print CallTree.ROOT
# Script entry point: trace on stdin, drawn tree on stdout.
if __name__ == "__main__":
    main()
|
vrv/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/dataframe/transforms/batch.py
|
82
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batches `Series` objects. For internal use, not part of the public API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.training import input as input_ops
class AbstractBatchTransform(transform.TensorFlowTransform):
    """Abstract parent class for batching Transforms.

    Holds the shared batching parameters (batch size, thread count, queue
    capacity) and the output-name bookkeeping; subclasses implement the
    actual `_apply_transform`.
    """

    def __init__(self,
                 batch_size,
                 output_names,
                 num_threads=1,
                 queue_capacity=None):
        # queue_capacity defaults to ten full batches when not specified.
        super(AbstractBatchTransform, self).__init__()
        self._batch_size = batch_size
        self._output_name_list = output_names
        self._num_threads = num_threads
        self._queue_capacity = (self.batch_size * 10 if queue_capacity is None
                                else queue_capacity)

    @transform.parameter
    def batch_size(self):
        return self._batch_size

    @transform.parameter
    def num_threads(self):
        return self._num_threads

    @transform.parameter
    def queue_capacity(self):
        return self._queue_capacity

    @property
    def input_valency(self):
        # One input Series is expected per output name.
        return len(self.output_names)

    @property
    def _output_names(self):
        return self._output_name_list
class Batch(AbstractBatchTransform):
    """Batches Columns to specified size.

    Note that dimension 0 is assumed to correspond to "example number" so
    `Batch` does not prepend an additional dimension to incoming `Series`.
    For example, if a `Tensor` in `transform_input` has shape [x, y], the
    corresponding output will have shape [batch_size, y].
    """

    @property
    def name(self):
        return "Batch"

    def _apply_transform(self, transform_input, **kwargs):
        result = input_ops.batch(transform_input,
                                 batch_size=self.batch_size,
                                 num_threads=self.num_threads,
                                 capacity=self.queue_capacity,
                                 enqueue_many=True)
        # TODO(jamieas): batch will soon return a list regardless of the number
        # of enqueued tensors. Remove this normalization once that change is
        # in place.
        result = result if isinstance(result, (tuple, list)) else (result,)
        # pylint: disable=not-callable
        return self.return_type(*result)
class ShuffleBatch(AbstractBatchTransform):
    """Creates shuffled batches from `Series` containing a single row.

    Note that dimension 0 is assumed to correspond to "example number" so
    `ShuffleBatch` does not prepend an additional dimension to incoming
    `Series`. For example, if a `Tensor` in `transform_input` has shape
    [x, y], the corresponding output will have shape [batch_size, y].
    """

    @property
    def name(self):
        return "ShuffleBatch"

    def __init__(self,
                 batch_size,
                 output_names,
                 num_threads=1,
                 queue_capacity=None,
                 min_after_dequeue=None,
                 seed=None):
        super(ShuffleBatch, self).__init__(batch_size, output_names,
                                           num_threads, queue_capacity)
        # Default the shuffle buffer floor to a quarter of the queue capacity.
        if min_after_dequeue is None:
            min_after_dequeue = self.queue_capacity / 4
        self._min_after_dequeue = int(min_after_dequeue)
        self._seed = seed

    @transform.parameter
    def min_after_dequeue(self):
        return self._min_after_dequeue

    @transform.parameter
    def seed(self):
        return self._seed

    def _apply_transform(self, transform_input, **kwargs):
        result = input_ops.shuffle_batch(transform_input,
                                         batch_size=self.batch_size,
                                         capacity=self.queue_capacity,
                                         min_after_dequeue=self.min_after_dequeue,
                                         num_threads=self.num_threads,
                                         seed=self.seed,
                                         enqueue_many=True)
        # TODO(jamieas): batch will soon return a list regardless of the number
        # of enqueued tensors. Remove this normalization once that change is
        # in place.
        result = result if isinstance(result, (tuple, list)) else (result,)
        # pylint: disable=not-callable
        return self.return_type(*result)
|
stanford-mast/nn_dataflow
|
refs/heads/master
|
nn_dataflow/tests/loop_blocking_test/test_loop_blocking_solver.py
|
1
|
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from nn_dataflow.core import DataCategoryEnum as de
from nn_dataflow.core import loop_blocking_solver
from nn_dataflow.core import MemHierEnum as me
from nn_dataflow.core import Option
from . import TestLoopBlockingFixture
class TestLoopBlockingSolver(TestLoopBlockingFixture):
    ''' Tests for loop_blocking_solver module. '''

    def setUp(self):
        super(TestLoopBlockingSolver, self).setUp()

        # Bypass solver for each reside data category.
        # optkeys_bypsol[dce] names an Option in which every data category is
        # gbuf-bypassed except `dce`, which is allowed to reside in gbuf.
        self.optkeys_bypsol = ['BYPSOL_{}'.format(dce) for dce in range(de.NUM)]
        for reside_dce in range(de.NUM):
            opt_dict = self.options['BYPSOL']._asdict()
            byp = [True] * de.NUM
            byp[reside_dce] = False
            opt_dict['sw_gbuf_bypass'] = tuple(byp)
            self.options[self.optkeys_bypsol[reside_dce]] = Option(**opt_dict)

    def test_reside_sol(self):
        ''' Data reside solution. '''
        # Every scheme the solver generates must keep exactly the chosen
        # category in gbuf and bypass all the others.
        for reside_dce in range(de.NUM):
            optkey = self.optkeys_bypsol[reside_dce]
            for bl_ts, bl_ords \
                    in loop_blocking_solver.gen_loopblocking_gbuf_reside(
                        self.nld['BASE'], self.resource['BASE'],
                        self.options[optkey]):
                lbs = self._lbs(bl_ts, bl_ords, optkey=optkey)
                self.assertTrue(lbs.stored_in_gbuf[reside_dce])
                self.assertFalse(any(lbs.stored_in_gbuf[dce]
                                     for dce in range(de.NUM)
                                     if dce != reside_dce))

    def test_reside_sol_opt(self, rsrckey='BASE', wlkey='BASE'):
        ''' Data reside solution optimal. '''

        def _cost(lbs):
            # Rank schemes lexicographically by (DRAM accesses, GBUF accesses).
            access = lbs.get_access()
            return [int(sum(access[me.DRAM])), int(sum(access[me.GBUF]))]

        # Best exhaustive cost per reside category (None = nothing resides).
        min_sch_dict = {}
        # Best solver cost per reside category.
        sol_sch_dict = {}

        # Among all schemes that bypass all non-reside data categories.
        for bl_ts, bl_ords in self._gen_loopblocking_all(wlkey=wlkey):
            lbs = self._lbs(bl_ts, bl_ords, wlkey=wlkey, rsrckey=rsrckey,
                            optkey='BYP')
            if not lbs.is_valid():
                continue
            all_reside_dce = [dce for dce in range(de.NUM)
                              if lbs.stored_in_gbuf[dce]]
            # Only look at the cases with one or none reside data category.
            if not all_reside_dce:
                min_sch = min_sch_dict.get(None, None)
                if not min_sch or _cost(lbs) < min_sch:
                    min_sch_dict[None] = _cost(lbs)
            elif len(all_reside_dce) == 1:
                dce, = all_reside_dce
                min_sch = min_sch_dict.get(dce, None)
                if not min_sch or _cost(lbs) < min_sch:
                    min_sch_dict[dce] = _cost(lbs)

        # Solve each reside data category.
        for reside_dce in range(de.NUM):
            optkey = self.optkeys_bypsol[reside_dce]
            for bl_ts, bl_ords \
                    in loop_blocking_solver.gen_loopblocking_gbuf_reside(
                        self.nld[wlkey], self.resource[rsrckey],
                        self.options[optkey]):
                # NOTE(review): the scheme is re-evaluated with the generic
                # 'BYP' options rather than `optkey`, presumably so costs are
                # directly comparable with the exhaustive baseline above --
                # confirm against loop_blocking_solver's intent.
                lbs = self._lbs(bl_ts, bl_ords, wlkey=wlkey, rsrckey=rsrckey,
                                optkey='BYP')
                self.assertTrue(lbs.is_valid())
                self.assertFalse(any(lbs.stored_in_gbuf[dce]
                                     for dce in range(de.NUM)
                                     if dce != reside_dce))
                # The category may end up not residing at all; key that case
                # under None, matching the baseline bookkeeping.
                true_reside_dce = reside_dce \
                        if lbs.stored_in_gbuf[reside_dce] else None
                sol_sch = sol_sch_dict.get(true_reside_dce, None)
                if not sol_sch or _cost(lbs) < sol_sch:
                    sol_sch_dict[true_reside_dce] = _cost(lbs)

        # The solver's per-category optima must be a subset of the exhaustive
        # optima, and must include the overall best scheme.
        self.assertTrue(sol_sch_dict.items() <= min_sch_dict.items(),
                        'test_reside_sol_opt: wlkey {} rsrckey {}: '
                        'solutions do not cover all optimal ones. '
                        'sol {} opt {}.'
                        .format(wlkey, rsrckey, sol_sch_dict, min_sch_dict))
        self.assertListEqual(
            min(sol_sch_dict.values()), min(min_sch_dict.values()),
            'test_reside_sol_opt: wlkey {} rsrckey {}: '
            'solutions do not cover the optimal one. sol {} opt {}.'
            .format(wlkey, rsrckey, sol_sch_dict, min_sch_dict))

    def test_reside_sol_opt_resource(self):
        ''' Data reside solution optimal with different resources. '''
        for rsrckey in ['LG', 'SM']:
            self.test_reside_sol_opt(rsrckey=rsrckey)

    def test_reside_sol_opt_pool(self):
        ''' Data reside solution optimal with PoolingLayer. '''
        with self.assertRaisesRegex(ValueError, 'loop_blocking_solver: .*'):
            self.test_reside_sol_opt(wlkey='POOL')

    def test_reside_sol_opt_zero(self):
        ''' Data reside solution optimal with zero size. '''
        for wlkey in ['ZERO_FIL', 'ZERO_IFM']:
            self.test_reside_sol_opt(wlkey=wlkey)

    def test_reside_sol_cnt(self):
        ''' Data reside solution count. '''
        # The per-category solution sets must partition the unconstrained
        # solution set: their union equals it and their sizes sum to its size
        # (i.e., they are pairwise disjoint).
        all_set = set(loop_blocking_solver.gen_loopblocking_gbuf_reside(
            self.nld['BASE'], self.resource['BASE'], self.options['BYPSOL']))
        union_set = set()
        reside_set_list = []
        for reside_dce in range(de.NUM):
            optkey = self.optkeys_bypsol[reside_dce]
            s = set(loop_blocking_solver.gen_loopblocking_gbuf_reside(
                self.nld['BASE'], self.resource['BASE'], self.options[optkey]))
            reside_set_list.append(s)
            union_set |= s
        self.assertSetEqual(all_set, union_set)
        self.assertEqual(len(union_set), sum(len(s) for s in reside_set_list))
|
openxc/openxc-python
|
refs/heads/master
|
openxc/sources/bluetooth.py
|
1
|
"""A Bluetooth data source."""
import logging
from openxc.controllers.base import Controller
from .socket import SocketDataSource
from .base import DataSourceError
LOG = logging.getLogger(__name__)
try:
import bluetooth
except ImportError:
LOG.debug("pybluez library not installed, can't use bluetooth interface")
bluetooth = None
class BluetoothVehicleInterface(SocketDataSource, Controller):
    """A data source reading from a bluetooth device.
    """
    # VI devices advertise names like "OpenXC-VI-<serial>".
    OPENXC_DEVICE_NAME_PREFIX = "OpenXC-VI-"

    def __init__(self, address=None, **kwargs):
        """Initialize a connection to the bluetooth device.

        Raises:
            DataSourceError if the bluetooth device cannot be opened.
        """
        super(BluetoothVehicleInterface, self).__init__(**kwargs)
        self.address = address

        if bluetooth is None:
            raise DataSourceError("pybluez library is not available")

        # Keep scanning until an OpenXC VI is discovered (scan sets
        # self.address), then connect.
        while self.address is None:
            self.scan_for_bluetooth_device()
        self.connect()

    def connect(self):
        # TODO push this to a background connecting thread so the constructor
        # can return
        port = 1
        connected = False
        # Retry until the RFCOMM connection succeeds; a fresh socket is
        # created for each attempt.
        while not connected:
            self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            try:
                self.socket.connect((self.address, port))
            except IOError as e:
                # BUG FIX: the original passed `e` as an extra positional
                # argument alongside an already-%-interpolated message, which
                # makes the logging module fail to format the record (the
                # warning was never emitted). Let logging interpolate instead.
                LOG.warn("Unable to connect to %s: %s", self.address, e)
            else:
                LOG.info("Opened bluetooth device at %s", port)
                connected = True

    def scan_for_bluetooth_device(self):
        """Scan nearby devices and remember the first OpenXC VI found.

        Sets self.address to the discovered device's address, or leaves it
        None if no VI is in range.
        """
        nearby_devices = bluetooth.discover_devices()

        self.address = None
        device_name = None
        for address in nearby_devices:
            device_name = bluetooth.lookup_name(address)
            if (device_name is not None and
                    device_name.startswith(self.OPENXC_DEVICE_NAME_PREFIX)):
                self.address = address
                break

        if self.address is not None:
            LOG.info("Discovered OpenXC VI %s (%s)" % (device_name, self.address))
        else:
            LOG.info("No OpenXC VI devices discovered")
|
FNCS/ns-3.22
|
refs/heads/master
|
src/visualizer/visualizer/plugins/olsr.py
|
182
|
import gtk
import ns.core
import ns.network
import ns.internet
import ns.olsr
from visualizer.base import InformationWindow
class ShowOlsrRoutingTable(InformationWindow):
    """Dialog window listing the OLSR routing table of a single node."""

    # Column indices of the gtk.ListStore below.
    (
        COLUMN_DESTINATION,
        COLUMN_NEXT_HOP,
        COLUMN_INTERFACE,
        COLUMN_NUM_HOPS,
    ) = range(4)

    def __init__(self, visualizer, node_index):
        # Build the dialog and its tree view, then register with the
        # visualizer so update() is called on each refresh.
        InformationWindow.__init__(self)
        self.win = gtk.Dialog(parent=visualizer.window,
                              flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
                              buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
        self.win.set_default_size(gtk.gdk.screen_width()/2, gtk.gdk.screen_height()/2)
        self.win.connect("response", self._response_cb)
        self.win.set_title("OLSR routing table for node %i" % node_index)
        self.visualizer = visualizer
        self.node_index = node_index

        # Columns: destination, next hop, interface name, hop count.
        self.table_model = gtk.ListStore(str, str, str, int)

        treeview = gtk.TreeView(self.table_model)
        treeview.show()
        sw = gtk.ScrolledWindow()
        sw.set_properties(hscrollbar_policy=gtk.POLICY_AUTOMATIC,
                          vscrollbar_policy=gtk.POLICY_AUTOMATIC)
        sw.show()
        sw.add(treeview)
        self.win.vbox.add(sw)

        # Dest.
        column = gtk.TreeViewColumn('Destination', gtk.CellRendererText(),
                                    text=self.COLUMN_DESTINATION)
        treeview.append_column(column)

        # Next hop
        column = gtk.TreeViewColumn('Next hop', gtk.CellRendererText(),
                                    text=self.COLUMN_NEXT_HOP)
        treeview.append_column(column)

        # Interface
        column = gtk.TreeViewColumn('Interface', gtk.CellRendererText(),
                                    text=self.COLUMN_INTERFACE)
        treeview.append_column(column)

        # Num. Hops
        column = gtk.TreeViewColumn('Num. Hops', gtk.CellRendererText(),
                                    text=self.COLUMN_NUM_HOPS)
        treeview.append_column(column)

        self.visualizer.add_information_window(self)
        self.win.show()

    def _response_cb(self, win, response):
        # Any dialog response closes the window and unregisters it.
        self.win.destroy()
        self.visualizer.remove_information_window(self)

    def update(self):
        # Refresh the table model from the node's current OLSR routes.
        node = ns.network.NodeList.GetNode(self.node_index)
        olsr = node.GetObject(ns.olsr.olsr.RoutingProtocol.GetTypeId())
        ipv4 = node.GetObject(ns.internet.Ipv4.GetTypeId())
        if olsr is None:
            # Node does not run OLSR; nothing to display.
            return
        self.table_model.clear()
        for route in olsr.GetRoutingTableEntries():
            tree_iter = self.table_model.append()
            netdevice = ipv4.GetNetDevice(route.interface)
            if netdevice is None:
                interface_name = 'lo'
            else:
                # Prefer the user-assigned device name; fall back to the
                # numeric interface index.
                interface_name = ns.core.Names.FindName(netdevice)
                if not interface_name:
                    interface_name = "(interface %i)" % route.interface
            self.table_model.set(tree_iter,
                                 self.COLUMN_DESTINATION, str(route.destAddr),
                                 self.COLUMN_NEXT_HOP, str(route.nextAddr),
                                 self.COLUMN_INTERFACE, interface_name,
                                 self.COLUMN_NUM_HOPS, route.distance)
def populate_node_menu(viz, node, menu):
    # Add a "Show OLSR Routing Table" entry to a node's context menu, but
    # only for nodes that actually run the OLSR routing protocol.
    ns3_node = ns.network.NodeList.GetNode(node.node_index)
    olsr = ns3_node.GetObject(ns.olsr.olsr.RoutingProtocol.GetTypeId())
    if olsr is None:
        print "No OLSR"
        return

    menu_item = gtk.MenuItem("Show OLSR Routing Table")
    menu_item.show()

    def _show_ipv4_routing_table(dummy_menu_item):
        # Opens the dialog; it registers itself with the visualizer.
        ShowOlsrRoutingTable(viz, node.node_index)

    menu_item.connect("activate", _show_ipv4_routing_table)
    menu.add(menu_item)
def register(viz):
    # Plugin entry point: hook our menu population into the visualizer.
    viz.connect("populate-node-menu", populate_node_menu)
|
jonfoster/pyxb2
|
refs/heads/master
|
pyxb/binding/basis.py
|
3
|
# -*- coding: utf-8 -*-
# Copyright 2009-2013, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains support classes from which schema-specific bindings
inherit, and that describe the content models of those schema."""
import logging
import pyxb
import xml.dom
import pyxb.utils.domutils as domutils
import pyxb.utils.utility as utility
import types
import pyxb.namespace
import collections
from pyxb.namespace.builtin import XMLSchema_instance as XSI
_log = logging.getLogger(__name__)
class _TypeBinding_mixin (utility.Locatable_mixin):
# Private member holding the validation configuration that applies to the
# class or instance. Can't really make it private with __ prefix because
# that would screw up parent classes. You end users---stay away from
# this.
_validationConfig_ = pyxb.GlobalValidationConfig
@classmethod
def _SetValidationConfig (cls, validation_config):
"""Set the validation configuration for this class."""
cls._validationConfig_ = validation_config
@classmethod
def _GetValidationConfig (cls):
"""The L{pyxb.ValidationConfig} instance that applies to this class.
By default this will reference L{pyxb.GlobalValidationConfig}."""
return cls._validationConfig_
def _setValidationConfig (self, validation_config):
"""Set the validation configuration for this instance."""
self._validationConfig_ = validation_config
def __getValidationConfig (self):
"""The L{pyxb.ValidationConfig} instance that applies to this instance.
By default this will reference the class value from
L{_GetValidationConfig}, which defaults to
L{pyxb.GlobalValidationConfig}."""
return self._validationConfig_
# This is how you should be accessing this value.
_validationConfig = property(__getValidationConfig)
@classmethod
def _PerformValidation (cls):
"""Determine whether the content model should be validated for this class.
In the absence of context, this returns C{True} iff both binding and
document validation are in force.
@deprecated: use L{_GetValidationConfig} and check specific requirements."""
# Bypass the property since this is a class method
vo = cls._validationConfig_
return vo.forBinding and vo.forDocument
def _performValidation (self):
"""Determine whether the content model should be validated for this
instance.
In the absence of context, this returns C{True} iff both binding and
document validation are in force.
@deprecated: use L{_validationConfig} and check specific requirements."""
vo = self._validationConfig
return vo.forBinding and vo.forDocument
_ExpandedName = None
"""The expanded name of the component."""
_XSDLocation = None
"""Where the definition can be found in the originating schema."""
_ReservedSymbols = set([ 'validateBinding', 'toDOM', 'toxml', 'Factory', 'property' ])
if pyxb._CorruptionDetectionEnabled:
def __setattr__ (self, name, value):
if name in self._ReservedSymbols:
raise pyxb.ReservedNameError(self, name)
return super(_TypeBinding_mixin, self).__setattr__(name, value)
_PyXBFactoryKeywords = ( '_dom_node', '_fallback_namespace', '_from_xml',
'_apply_whitespace_facet', '_validate_constraints',
'_require_value', '_nil', '_element', '_apply_attributes',
'_convert_string_values', '_location' )
"""Keywords that are interpreted by __new__ or __init__ in one or more
classes in the PyXB type hierarchy. All these keywords must be removed
before invoking base Python __init__ or __new__."""
# While simple type definitions cannot be abstract, they can appear in
# many places where complex types can, so we want it to be legal to test
# for abstractness without checking whether the object is a complex type.
_Abstract = False
def _namespaceContext (self):
"""Return a L{namespace context <pyxb.binding.NamespaceContext>}
associated with the binding instance.
This will return C{None} unless something has provided a context to
the instance. Context is provided when instances are generated by the
DOM and SAX-based translators."""
return self.__namespaceContext
def _setNamespaceContext (self, namespace_context):
"""Associate a L{namespace context <pyxb.binding.NamespaceContext>}
with the binding instance."""
self.__namespaceContext = namespace_context
return self
__namespaceContext = None
def _setElement (self, elt):
"""Associate an element binding with the instance.
Since the value of a binding instance reflects only its content, an
associated element is necessary to generate an XML document or DOM
tree.
@param elt: the L{pyxb.binding.basis.element} instance associated with
the value. This may be C{None} when disassociating a value from a
specific element."""
import pyxb.binding.content
assert (elt is None) or isinstance(elt, element)
self.__element = elt
return self
def _element (self):
"""Return a L{pyxb.binding.basis.element} associated with the binding
instance.
This will return C{None} unless an element has been associated.
Constructing a binding instance using the element instance will add
this association.
"""
return self.__element
__element = None
__xsiNil = None
def _isNil (self):
"""Indicate whether this instance is U{nil
<http://www.w3.org/TR/xmlschema-1/#xsi_nil>}.
The value is set by the DOM and SAX parsers when building an instance
from a DOM element with U{xsi:nil
<http://www.w3.org/TR/xmlschema-1/#xsi_nil>} set to C{true}.
@return: C{None} if the element used to create the instance is not
U{nillable<http://www.w3.org/TR/xmlschema-1/#nillable>}.
If it is nillable, returns C{True} or C{False} depending on
whether the instance itself is U{nil<http://www.w3.org/TR/xmlschema-1/#xsi_nil>}.
"""
return self.__xsiNil
def _setIsNil (self, nil=True):
"""Set the xsi:nil property of the instance.
@param nil: C{True} if the value of xsi:nil should be C{true},
C{False} if the value of xsi:nil should be C{false}.
@raise pyxb.NoNillableSupportError: the instance is not associated
with an element that is L{nillable
<pyxb.binding.basis.element.nillable>}.
"""
if self.__xsiNil is None:
raise pyxb.NoNillableSupportError(self)
self.__xsiNil = not not nil
if self.__xsiNil:
# The element must be empty, so also remove all element content.
# Attribute values are left unchanged.
self._resetContent(reset_elements=True)
def _resetContent (self, reset_elements=False):
"""Reset the content of an element value.
This is not a public method.
For simple types, this does nothing. For complex types, this clears the
L{content<complexTypeDefinition.content>} array, removing all
non-element content from the instance. It optionally also removes all
element content.
@param reset_elements: If C{False} (default) only the content array is
cleared, which has the effect of removing any preference for element
order when generating a document. If C{True}, the element content
stored within the binding is also cleared, leaving it with no content
at all.
@note: This is not the same thing as L{complexTypeDefinition.reset},
which unconditionally resets attributes and element and non-element
content.
"""
pass
__constructedWithValue = False
def __checkNilCtor (self, args):
self.__constructedWithValue = (0 < len(args))
if self.__xsiNil:
if self.__constructedWithValue:
raise pyxb.ContentInNilInstanceError(self, args[0])
else:
# Types that descend from string will, when constructed from an
# element with empty content, appear to have no constructor value,
# while in fact an empty string should have been passed.
if issubclass(type(self), basestring):
self.__constructedWithValue = True
def _constructedWithValue (self):
return self.__constructedWithValue
# Flag used to control whether we print a warning when creating a complex
# type instance that does not have an associated element. Not sure yet
# whether that'll be common practice or common error.
__WarnedUnassociatedElement = False
def __init__ (self, *args, **kw):
# Strip keyword not used above this level.
element = kw.pop('_element', None)
is_nil = kw.pop('_nil', False)
super(_TypeBinding_mixin, self).__init__(*args, **kw)
if (element is None) or element.nillable():
self.__xsiNil = is_nil
if element is not None:
self._setElement(element)
self.__checkNilCtor(args)
@classmethod
def _PreFactory_vx (cls, args, kw):
"""Method invoked upon entry to the Factory method.
This method is entitled to modify the keywords array. It can also
return a state value which is passed to _postFactory_vx."""
return None
def _postFactory_vx (cls, state):
"""Method invoked prior to leaving the Factory method.
This is an instance method, and is given the state that was returned
by _PreFactory_vx."""
return None
@classmethod
def Factory (cls, *args, **kw):
"""Provide a common mechanism to create new instances of this type.
The class constructor won't do, because you can't create
instances of union types.
This method may be overridden in subclasses (like STD_union). Pre-
and post-creation actions can be customized on a per-class instance by
overriding the L{_PreFactory_vx} and L{_postFactory_vx} methods.
@keyword _dom_node: If provided, the value must be a DOM node, the
content of which will be used to set the value of the instance.
@keyword _location: An optional instance of
L{pyxb.utils.utility.Location} showing the origin the binding. If
C{None}, a value from C{_dom_node} is used if available.
@keyword _from_xml: If C{True}, the input must be either a DOM node or
a unicode string comprising a lexical representation of a value. This
is a further control on C{_apply_whitespace_facet} and arises from
cases where the lexical and value representations cannot be
distinguished by type. The default value is C{True} iff C{_dom_node}
is not C{None}.
@keyword _apply_whitespace_facet: If C{True} and this is a
simpleTypeDefinition with a whiteSpace facet, the first argument will
be normalized in accordance with that facet prior to invoking the
parent constructor. The value is always C{True} if text content is
extracted from a C{_dom_node}, and otherwise defaults to the defaulted
value of C{_from_xml}.
@keyword _validate_constraints: If C{True}, any constructed value is
checked against constraints applied to the union as well as the member
type.
@keyword _require_value: If C{False} (default), it is permitted to
create a value without an initial value. If C{True} and no initial
value was provided, causes L{pyxb.SimpleContentAbsentError} to be raised.
Only applies to simpleTypeDefinition instances; this is used when
creating values from DOM nodes.
"""
# Invoke _PreFactory_vx for the superseding class, which is where
# customizations will be found.
dom_node = kw.get('_dom_node')
location = kw.get('_location')
if (location is None) and isinstance(dom_node, utility.Locatable_mixin):
location = dom_node._location()
kw.setdefault('_from_xml', dom_node is not None)
used_cls = cls._SupersedingClass()
state = used_cls._PreFactory_vx(args, kw)
rv = cls._DynamicCreate(*args, **kw)
rv._postFactory_vx(state)
if (rv._location is None) and (location is not None):
rv._setLocation(location)
return rv
def _substitutesFor (self, element):
if (element is None) or (self._element() is None):
return False
return self._element().substitutesFor(element)
@classmethod
def _IsUrType (cls):
"""Return C{True} iff this is the ur-type.
The only ur-type is {http://www.w3.org/2001/XMLSchema}anyType. The
implementation of this method is overridden for
L{pyxb.binding.datatypes.anyType}."""
return False
@classmethod
def _RequireXSIType (cls, value_type):
if cls._IsUrType():
# Require xsi:type if value refines xs:anyType
return value_type != cls
return cls._Abstract and value_type != cls._SupersedingClass()
@classmethod
def _CompatibleValue (cls, value, **kw):
    """Return a variant of the value that is compatible with this type.

    Compatibility is defined relative to the type definition associated
    with the element.  The value C{None} is always compatible.  If
    C{value} has a Python type (e.g., C{int}) that is a superclass of the
    required L{_TypeBinding_mixin} class (e.g., C{xs:byte}), C{value} is
    used as a constructor parameter to return a new instance of the
    required type.  Note that constraining facets are applied here if
    necessary (e.g., although a Python C{int} with value C{500} is
    type-compatible with C{xs:byte}, it is outside the value space, and
    compatibility will fail).

    @keyword _convert_string_values: If C{True} (default) and the incoming value is
    a string, an attempt will be made to form a compatible value by using
    the string as a constructor argument to the this class.  This flag is
    set to C{False} when testing automaton transitions.

    @raise pyxb.SimpleTypeValueError: if the value is not both
    type-consistent and value-consistent with the element's type.
    """
    convert_string_values = kw.get('_convert_string_values', True)
    # None is always None
    if value is None:
        return None
    # Already an instance?
    if isinstance(value, cls):
        # @todo: Consider whether we should change the associated _element
        # of this value.  (**Consider** it, don't just do it.)
        return value
    value_type = type(value)
    # All string-based PyXB binding types use unicode, not str
    if str == value_type:
        value_type = unicode
    # See if we got passed a Python value which needs to be "downcasted"
    # to the _TypeBinding_mixin version.
    if issubclass(cls, value_type):
        return cls(value)
    # See if we have a numeric type that needs to be cast across the
    # numeric hierarchy.  int to long is the *only* conversion we accept.
    if isinstance(value, int) and issubclass(cls, long):
        return cls(value)
    # Same, but for boolean, which Python won't let us subclass
    if isinstance(value, bool) and issubclass(cls, pyxb.binding.datatypes.boolean):
        return cls(value)
    # See if we have convert_string_values on, and have a string type that
    # somebody understands.
    if convert_string_values and (unicode == value_type):
        return cls(value)
    # Maybe this is a union?
    if issubclass(cls, STD_union):
        for mt in cls._MemberTypes:
            try:
                return mt._CompatibleValue(value, **kw)
            except Exception:
                # Incompatibility with one member does not rule out the
                # others; try the next.  (Narrowed from a bare "except:",
                # which also swallowed KeyboardInterrupt and SystemExit.)
                pass
    # Any type is compatible with the corresponding ur-type
    if (pyxb.binding.datatypes.anySimpleType == cls) and issubclass(value_type, simpleTypeDefinition):
        return value
    if pyxb.binding.datatypes.anyType == cls:
        if not isinstance(value, _TypeBinding_mixin):
            _log.info('Created %s instance from value of type %s', cls._ExpandedName, type(value))
            value = cls(value)
        return value
    # Is this the wrapper class that indicates we should create a binding
    # from arguments?
    if isinstance(value, pyxb.BIND):
        return value.createInstance(cls.Factory, **kw)
    # Does the class have simple content which we can convert?
    if cls._IsSimpleTypeContent():
        # NB: byte(34.2) will create a value, but it may not be one we
        # want to accept, so only do this if the output is equal to the
        # input.
        rv = cls.Factory(value)
        if isinstance(rv, simpleTypeDefinition) and (rv == value):
            return rv
        if isinstance(rv, complexTypeDefinition) and (rv.value() == value):
            return rv
    # There may be other things that can be converted to the desired type,
    # but we can't tell that from the type hierarchy.  Too many of those
    # things result in an undesirable loss of information: for example,
    # when an all model supports both numeric and string transitions, the
    # candidate is a number, and the string transition is tested first.
    raise pyxb.SimpleTypeValueError(cls, value)
@classmethod
def _IsSimpleTypeContent (cls):
    """Return True iff the content of this binding object is a simple type.

    This is true only for descendents of simpleTypeDefinition and instances
    of complexTypeDefinition that have simple type content; those classes
    override this.  Reaching this implementation is a programming error."""
    raise pyxb.LogicError('Failed to override _TypeBinding_mixin._IsSimpleTypeContent')
# If the type supports wildcard attributes, this describes their
# constraints. (If it doesn't, this should remain None.) Supporting
# classes should override this value.
_AttributeWildcard = None
_AttributeMap = { }
"""Map from expanded names to AttributeUse instances. Non-empty only in
L{complexTypeDefinition} subclasses."""
@classmethod
def __AttributesFromDOM (cls, node):
    """Collect the attributes of C{node}, excluding xmlns and xsi entries.

    Returns a map from expanded attribute name to the attribute's lexical
    value."""
    attribute_settings = { }
    attrs = node.attributes
    for index in range(0, attrs.length):
        attr = attrs.item(index)
        # NB: Specifically do not consider attr's NamespaceContext, since
        # attributes do not accept a default namespace.
        attr_en = pyxb.namespace.ExpandedName(attr)
        # Ignore xmlns and xsi attributes; we've already handled those
        if attr_en.namespace() in ( pyxb.namespace.XMLNamespaces, XSI ):
            continue
        attribute_settings[attr_en] = attr.value
    return attribute_settings
def _setAttributesFromKeywordsAndDOM (self, kw, dom_node):
    """Invoke self._setAttribute based on node attributes and keywords.

    Though attributes can only legally appear in complexTypeDefinition
    instances, delayed conditional validation requires caching them in
    simpleTypeDefinition.

    @param kw: keywords passed to the constructor.  This map is mutated by
    the call: keywords corresponding to recognized attributes are removed.

    @param dom_node: an xml.dom Node instance, possibly C{None}
    """
    attribute_settings = { }
    # Start with DOM-supplied values; explicit keywords override them below.
    if dom_node is not None:
        attribute_settings.update(self.__AttributesFromDOM(dom_node))
    # Extract keywords whose names match declared attribute identifiers.
    for au in self._AttributeMap.itervalues():
        keyword_value = kw.pop(au.id(), None)
        if keyword_value is not None:
            attribute_settings[au.name()] = keyword_value
    for (attr_en, value_lex) in attribute_settings.iteritems():
        self._setAttribute(attr_en, value_lex)
def toDOM (self, bds=None, parent=None, element_name=None):
    """Convert this instance to a DOM node.

    The name of the top-level element is either the name of the L{element}
    instance associated with this instance, or the XML name of the type of
    this instance.

    @param bds: Support for customizing the generated document
    @type bds: L{pyxb.utils.domutils.BindingDOMSupport}
    @param parent: If C{None}, a standalone document is created;
    otherwise, the created element is a child of the given element.
    @type parent: C{xml.dom.Element} or C{None}
    @param element_name: Optional name for the created element; a plain
    string is qualified in C{bds}'s default namespace.  When absent, the
    name of the associated element declaration (if any) is used.
    @raise pyxb.UnboundElementError: no element name could be determined.
    @rtype: C{xml.dom.Document}
    """
    if bds is None:
        bds = domutils.BindingDOMSupport()
    need_xsi_type = bds.requireXSIType()
    if isinstance(element_name, (str, unicode)):
        # Lexical name: qualify it in the support object's default namespace.
        element_name = pyxb.namespace.ExpandedName(bds.defaultNamespace(), element_name)
    if (element_name is None) and (self._element() is not None):
        element_binding = self._element()
        element_name = element_binding.name()
        # The element's declared type may force an explicit xsi:type even
        # when the document-level policy does not require it.
        need_xsi_type = need_xsi_type or element_binding.typeDefinition()._RequireXSIType(type(self))
    if element_name is None:
        raise pyxb.UnboundElementError(self)
    element = bds.createChildElement(element_name, parent)
    if need_xsi_type:
        # Emit xsi:type with the prefix-qualified name of this value's type.
        val_type_qname = self._ExpandedName.localName()
        tns_prefix = bds.namespacePrefix(self._ExpandedName.namespace())
        if tns_prefix is not None:
            val_type_qname = '%s:%s' % (tns_prefix, val_type_qname)
        bds.addAttribute(element, XSI.type, val_type_qname)
    self._toDOM_csc(bds, element)
    bds.finalize()
    return bds.document()
def toxml (self, encoding=None, bds=None, root_only=False):
    """Shorthand to get the object as an XML document.

    If you want to set the default namespace, pass in a pre-configured
    C{bds}.

    @param encoding: The encoding to be used.  See
    @C{xml.dom.Node.toxml()} for a description of why you should always
    pass @C{'utf-8'} here.  Because this method follows the contract of
    the corresponding C{xml.dom.Node} method, it does not automatically
    get the default PyXB output encoding.

    @param bds: Optional L{pyxb.utils.domutils.BindingDOMSupport} instance
    to use for creation. If not provided (default), a new generic one is
    created.

    @param root_only: If C{True}, serialize only the document element
    rather than the whole document.
    """
    node = self.toDOM(bds)
    if root_only:
        node = node.documentElement
    return node.toxml(encoding)
def _toDOM_csc (self, dom_support, parent):
    # Chained-super-call step for DOM serialization: emit xsi:nil when this
    # instance is nil, then continue along the _toDOM_csc chain.  The getattr
    # fallback is a no-op returning dom_support when no superclass in the MRO
    # provides the next link.
    assert parent is not None
    if self.__xsiNil:
        dom_support.addAttribute(parent, XSI.nil, 'true')
    return getattr(super(_TypeBinding_mixin, self), '_toDOM_csc', lambda *_args,**_kw: dom_support)(dom_support, parent)
def _validateBinding_vx (self):
    """Override in subclasses for type-specific validation of instance
    content.

    @return: C{True} if the instance validates
    @raise pyxb.BatchContentValidationError: complex content does not match model
    @raise pyxb.SimpleTypeValueError: simple content fails to satisfy constraints
    """
    diagnostic = '%s._validateBinding_vx' % (type(self).__name__,)
    raise NotImplementedError(diagnostic)
def validateBinding (self):
    """Check whether the binding content matches its content model.

    @return: C{True} if validation succeeds.
    @raise pyxb.BatchContentValidationError: complex content does not match model
    @raise pyxb.SimpleTypeValueError: attribute or simple content fails to satisfy constraints
    """
    if not self._performValidation():
        # Validation disabled for this instance: vacuously succeed.
        return True
    self._validateBinding_vx()
    return True
def _finalizeContentModel (self):
    """Inform content model that all additions have been provided.

    This is used to resolve any pending non-determinism when the content
    of an element is provided through a DOM assignment or through
    positional arguments in a constructor."""
    # Base implementation has no pending state; subclasses override as needed.
    return self
def _postDOMValidate (self):
    """Validate the binding, then return the instance for chaining."""
    self.validateBinding()
    return self
@classmethod
def _Name (cls):
    """Return the best descriptive name for the type of the instance.

    This is intended to be a human-readable value used in diagnostics, and
    is the expanded name if the type has one, or the Python type name if
    it does not."""
    en = cls._ExpandedName
    if en is None:
        return unicode(cls)
    return unicode(en)
def _diagnosticName (self):
    """The best name available for this instance in diagnostics.

    If the instance is associated with an element, it is the element name;
    otherwise it is the best name for the type of the instance per L{_Name}."""
    elt = self.__element
    if elt is None:
        return self._Name()
    return unicode(elt.name())
class _DynamicCreate_mixin (pyxb.cscRoot):
    """Helper to allow overriding the implementation class.

    Generally we'll want to augment the generated bindings by subclassing
    them, and adding functionality to the subclass.  This mix-in provides a
    way to communicate the existence of the superseding subclass back to the
    binding infrastructure, so that when it creates an instance it uses the
    subclass rather than the unaugmented binding class.

    When a raw generated binding is subclassed, L{_SetSupersedingClass} should be
    invoked on the raw class passing in the superseding subclass.  E.g.::

      class mywsdl (raw.wsdl):
        pass
      raw.wsdl._SetSupersedingClass(mywsdl)
    """

    @classmethod
    def __SupersedingClassAttribute (cls):
        # Class-private attribute name (mangled per concrete class) that
        # records the superseding class.
        return '_%s__SupersedingClass' % (cls.__name__,)

    @classmethod
    def __AlternativeConstructorAttribute (cls):
        # Class-private attribute name (mangled per concrete class) that
        # records the alternative constructor.
        return '_%s__AlternativeConstructor' % (cls.__name__,)

    @classmethod
    def _SupersedingClass (cls):
        """Return the class stored in the class reference attribute."""
        return getattr(cls, cls.__SupersedingClassAttribute(), cls)

    @classmethod
    def _AlternativeConstructor (cls):
        """Return the constructor stored in the class reference attribute."""
        rv = getattr(cls, cls.__AlternativeConstructorAttribute(), None)
        if isinstance(rv, tuple):
            # Stored wrapped in a 1-tuple to avoid implicit method binding;
            # unwrap before returning (see _SetAlternativeConstructor).
            rv = rv[0]
        return rv

    @classmethod
    def _SetSupersedingClass (cls, superseding):
        """Set the class reference attribute.

        @param superseding: A Python class that is a subclass of this class,
        or C{None} to discard any previously recorded superseding class.
        """
        assert (superseding is None) or issubclass(superseding, cls)
        if superseding is None:
            # A class __dict__ is a read-only proxy with no pop(); remove the
            # attribute via delattr, tolerating its absence.
            try:
                delattr(cls, cls.__SupersedingClassAttribute())
            except AttributeError:
                pass
        else:
            setattr(cls, cls.__SupersedingClassAttribute(), superseding)
        return superseding

    @classmethod
    def _SetAlternativeConstructor (cls, alternative_constructor):
        """Record (or, with C{None}, discard) an alternative constructor for the class."""
        attr = cls.__AlternativeConstructorAttribute()
        if alternative_constructor is None:
            # See _SetSupersedingClass: class __dict__ has no pop().
            try:
                delattr(cls, attr)
            except AttributeError:
                pass
        else:
            # Need to use a tuple as the value: if you use an invokable, this
            # ends up converting it from a function to an unbound method,
            # which is not what we want.
            setattr(cls, attr, (alternative_constructor,))
        assert cls._AlternativeConstructor() == alternative_constructor
        return alternative_constructor

    @classmethod
    def _DynamicCreate (cls, *args, **kw):
        """Invoke the constructor for this class or the one that supersedes it."""
        ctor = cls._AlternativeConstructor()
        if ctor is None:
            ctor = cls._SupersedingClass()
        try:
            return ctor(*args, **kw)
        except TypeError:
            raise pyxb.SimpleTypeValueError(ctor, args)
class simpleTypeDefinition (_TypeBinding_mixin, utility._DeconflictSymbols_mixin, _DynamicCreate_mixin):
    """L{simpleTypeDefinition} is a base class that is part of the
    hierarchy of any class that represents the Python datatype for a
    L{SimpleTypeDefinition<pyxb.xmlschema.structures.SimpleTypeDefinition>}.

    @note: This class, or a descendent of it, must be the first class
    in the method resolution order when a subclass has multiple
    parents.  Otherwise, constructor keyword arguments may not be
    removed before passing them on to Python classes that do not
    accept them.
    """

    # A map from leaf classes in the facets module to instance of
    # those classes that constrain or otherwise affect the datatype.
    # Note that each descendent of simpleTypeDefinition has its own map.
    __FacetMap = {}

    _ReservedSymbols = _TypeBinding_mixin._ReservedSymbols.union(set([ 'XsdLiteral', 'xsdLiteral',
                            'XsdSuperType', 'XsdPythonType', 'XsdConstraintsOK',
                            'xsdConstraintsOK', 'XsdValueLength', 'xsdValueLength',
                            'PythonLiteral', 'pythonLiteral',
                            'SimpleTypeDefinition' ]))
    """Symbols that remain the responsibility of this class.  Any
    public symbols in generated binding subclasses are deconflicted
    by providing an alternative name in the subclass.  (There
    currently are no public symbols in generated SimpleTypeDefinition
    bindings.)"""

    # Determine the name of the class-private facet map.  For the base class
    # this should produce the same attribute name as Python's privatization
    # scheme.
    __FacetMapAttributeNameMap = { }
    @classmethod
    def __FacetMapAttributeName (cls):
        """Return the name of the class-private attribute that holds C{cls}'s
        facet map, computing and caching it on first use.

        For this base class the generated name matches Python's own mangling
        of C{__FacetMap}; for subclasses the namespace URI and class name are
        both encoded so that a class in one namespace extending a same-named
        class in another namespace gets a distinct map attribute."""
        nm = cls.__FacetMapAttributeNameMap.get(cls)
        if nm is None:
            nm = cls.__name__
            if nm.endswith('_'):
                # Avoid a trailing underscore interfering with name mangling.
                nm += '1'
            if cls == simpleTypeDefinition:
                nm = '_%s__FacetMap' % (nm,)
            else:
                # It is not uncommon for a class in one namespace to extend a class of
                # the same name in a different namespace, so encode the namespace URI
                # in the attribute name (if it is part of a namespace).
                ns_uri = ''
                try:
                    ns_uri = cls._ExpandedName.namespaceURI()
                except Exception:
                    pass
                nm = '_' + utility.MakeIdentifier('%s_%s_FacetMap' % (ns_uri, nm))
            cls.__FacetMapAttributeNameMap[cls] = nm
        return nm

    @classmethod
    def _FacetMap (cls):
        """Return a reference to the facet map for this datatype.

        The facet map is a map from leaf facet classes to instances of those
        classes that constrain or otherwise apply to the lexical or value
        space of the datatype.  Classes may inherit their facet map from their
        superclass, or may create a new class instance if the class adds a new
        constraint type.

        @raise AttributeError: if the facet map has not been defined"""
        return getattr(cls, cls.__FacetMapAttributeName())

    @classmethod
    def _InitializeFacetMap (cls, *args):
        """Initialize the facet map for this datatype.

        This must be called exactly once, after all facets belonging to the
        datatype have been created.

        @raise pyxb.LogicError: if called multiple times (on the same class)
        @raise pyxb.LogicError: if called when a parent class facet map has not been initialized
        :return: the facet map"""
        fm = None
        try:
            fm = cls._FacetMap()
        except AttributeError:
            pass
        if fm is not None:
            raise pyxb.LogicError('%s facet map initialized multiple times: %s' % (cls.__name__, cls.__FacetMapAttributeName()))

        # Search up the type hierarchy to find the nearest ancestor that has a
        # facet map.  This gets a bit tricky: if we hit the ceiling early
        # because the PSTD hierarchy re-based itself on a new Python type, we
        # have to jump to the XsdSuperType.
        source_class = cls
        while fm is None:
            # Assume we're staying in this hierarchy.  Include source_class in
            # the candidates, since we might have jumped to it.
            for super_class in source_class.mro():
                assert super_class is not None
                if (super_class == simpleTypeDefinition): # and (source_class.XsdSuperType() is not None):
                    break
                if issubclass(super_class, simpleTypeDefinition):
                    try:
                        fm = super_class._FacetMap()
                        break
                    except AttributeError:
                        pass
            if fm is None:
                try:
                    source_class = source_class.XsdSuperType()
                except AttributeError:
                    source_class = None
                if source_class is None:
                    fm = { }
        # Defensive: the loop above cannot exit with fm None, but keep the
        # diagnostic in case the search logic changes.
        if fm is None:
            raise pyxb.LogicError('%s is not a child of simpleTypeDefinition' % (cls.__name__,))
        fm = fm.copy()
        for facet in args:
            fm[type(facet)] = facet
        setattr(cls, cls.__FacetMapAttributeName(), fm)
        return fm

    @classmethod
    def _ConvertArguments_vx (cls, args, kw):
        """Extension point for subclass-specific argument conversion; the
        default is the identity transform on C{args}."""
        return args

    @classmethod
    def _ConvertArguments (cls, args, kw):
        """Pre-process the arguments.

        This is used before invoking the parent constructor.  One application
        is to apply the whitespace facet processing; if such a request is in
        the keywords, it is removed so it does not propagate to the
        superclass.  Another application is to convert the arguments from a
        string to a list.  Binding-specific applications are performed in the
        overloaded L{_ConvertArguments_vx} method."""
        dom_node = kw.pop('_dom_node', None)
        from_xml = kw.get('_from_xml', dom_node is not None)
        if dom_node is not None:
            text_content = domutils.ExtractTextContent(dom_node)
            if text_content is not None:
                # Reuse the already-extracted content rather than walking the
                # node a second time.
                args = (text_content,) + args
                kw['_apply_whitespace_facet'] = True
        apply_whitespace_facet = kw.pop('_apply_whitespace_facet', from_xml)
        if (0 < len(args)) and isinstance(args[0], types.StringTypes) and apply_whitespace_facet:
            cf_whitespace = getattr(cls, '_CF_whiteSpace', None)
            if cf_whitespace is not None:
                norm_str = unicode(cf_whitespace.normalizeString(args[0]))
                args = (norm_str,) + args[1:]
        kw['_from_xml'] = from_xml
        return cls._ConvertArguments_vx(args, kw)

    # Must override new, because new gets invoked before init, and usually
    # doesn't accept keywords.  In case it does (e.g., datetime.datetime),
    # only remove the ones that would normally be interpreted by this class.
    # Do the same argument conversion as is done in init.  Trap errors and
    # convert them to BadTypeValue errors.
    #
    # Note: We explicitly do not validate constraints here.  That's
    # done in the normal constructor; here, we might be in the process
    # of building a value that eventually will be legal, but isn't
    # yet.
    def __new__ (cls, *args, **kw):
        # PyXBFactoryKeywords
        kw.pop('_validate_constraints', None)
        kw.pop('_require_value', None)
        kw.pop('_element', None)
        kw.pop('_fallback_namespace', None)
        kw.pop('_apply_attributes', None)
        kw.pop('_nil', None)
        # ConvertArguments will remove _element and _apply_whitespace_facet
        dom_node = kw.get('_dom_node')
        args = cls._ConvertArguments(args, kw)
        kw.pop('_from_xml', dom_node is not None)
        kw.pop('_location', None)
        assert issubclass(cls, _TypeBinding_mixin)
        try:
            return super(simpleTypeDefinition, cls).__new__(cls, *args, **kw)
        except ValueError:
            raise pyxb.SimpleTypeValueError(cls, args)
        except OverflowError:
            raise pyxb.SimpleTypeValueError(cls, args)

    # Validate the constraints after invoking the parent constructor,
    # unless told not to.
    def __init__ (self, *args, **kw):
        """Initialize a newly created STD instance.

        Usually there is one positional argument, which is a value that can be
        converted to the underlying Python type.

        @keyword _validate_constraints: If True (default if validation is
        enabled), the newly constructed value is checked against its
        constraining facets.
        @type _validate_constraints: C{bool}

        @keyword _apply_attributes: If C{True} (default), any attributes
        present in the keywords or DOM node are applied.  Normally presence of
        such an attribute should produce an error; when creating simple
        content for a complex type we need the DOM node, but do not want to
        apply the attributes, so we bypass the application.
        """
        # PyXBFactoryKeywords
        validate_constraints = kw.pop('_validate_constraints', self._validationConfig.forBinding)
        require_value = kw.pop('_require_value', False)
        # Save DOM node so we can pull attributes off it
        dom_node = kw.get('_dom_node')
        location = kw.get('_location')
        if (location is None) and isinstance(dom_node, utility.Locatable_mixin):
            location = dom_node._location()
        apply_attributes = kw.pop('_apply_attributes', True)
        # _ConvertArguments handles _dom_node and _apply_whitespace_facet
        # TypeBinding_mixin handles _nil and _element
        args = self._ConvertArguments(args, kw)
        try:
            super(simpleTypeDefinition, self).__init__(*args, **kw)
        except OverflowError:
            raise pyxb.SimpleTypeValueError(type(self), args)
        if apply_attributes and (dom_node is not None):
            self._setAttributesFromKeywordsAndDOM(kw, dom_node)
        if require_value and (not self._constructedWithValue()):
            if location is None:
                location = self._location()
            raise pyxb.SimpleContentAbsentError(self, location)
        if validate_constraints and not kw.pop('_nil', False):
            self.xsdConstraintsOK(location)

    # The class attribute name used to store the reference to the STD
    # component instance must be unique to the class, not to this base class.
    # Otherwise we mistakenly believe we've already associated a STD instance
    # with a class (e.g., xsd:normalizedString) when in fact it's associated
    # with the superclass (e.g., xsd:string)
    @classmethod
    def __STDAttrName (cls):
        # Mangled, per-class attribute name for the associated STD component.
        return '_%s__SimpleTypeDefinition' % (cls.__name__,)

    @classmethod
    def _SimpleTypeDefinition (cls, std):
        """Set the L{pyxb.xmlschema.structures.SimpleTypeDefinition} instance
        associated with this binding."""
        attr_name = cls.__STDAttrName()
        if hasattr(cls, attr_name):
            old_value = getattr(cls, attr_name)
            if old_value != std:
                raise pyxb.LogicError('%s: Attempt to override existing STD %s with %s' % (cls, old_value.name(), std.name()))
        setattr(cls, attr_name, std)

    @classmethod
    def SimpleTypeDefinition (cls):
        """Return the SimpleTypeDefinition instance for the given
        class.

        This should only be invoked when generating bindings.  An STD must
        have been associated with the class using L{_SimpleTypeDefinition}."""
        attr_name = cls.__STDAttrName()
        assert hasattr(cls, attr_name)
        return getattr(cls, attr_name)

    @classmethod
    def XsdLiteral (cls, value):
        """Convert from a python value to a string usable in an XML
        document.

        This should be implemented in the subclass."""
        raise pyxb.LogicError('%s does not implement XsdLiteral' % (cls,))

    def xsdLiteral (self):
        """Return text suitable for representing the value of this
        instance in an XML document.

        The base class implementation delegates to the object class's
        XsdLiteral method."""
        if self._isNil():
            return ''
        return self.XsdLiteral(self)

    @classmethod
    def XsdSuperType (cls):
        """Find the nearest parent class in the PST hierarchy.

        The value for anySimpleType is None; for all others, it's a
        primitive or derived PST descendent (including anySimpleType)."""
        for sc in cls.mro():
            if sc == cls:
                continue
            if simpleTypeDefinition == sc:
                # If we hit the PST base, this is a primitive type or
                # otherwise directly descends from a Python type; return
                # the recorded XSD supertype.
                return cls._XsdBaseType
            if issubclass(sc, simpleTypeDefinition):
                return sc
        raise pyxb.LogicError('No supertype found for %s' % (cls,))

    @classmethod
    def _XsdConstraintsPreCheck_vb (cls, value):
        """Pre-extended class method to verify other things before
        checking constraints.

        This is used for list types, to verify that the values in the
        list are acceptable, and for token descendents, to check the
        lexical/value space conformance of the input.
        """
        super_fn = getattr(super(simpleTypeDefinition, cls), '_XsdConstraintsPreCheck_vb', lambda *a,**kw: value)
        return super_fn(value)

    # Cache of pre-computed sequences of class facets in the order required
    # for constraint validation
    __ClassFacetSequence = { }

    @classmethod
    def XsdConstraintsOK (cls, value, location=None):
        """Validate the given value against the constraints on this class.

        @raise pyxb.SimpleTypeValueError: if any constraint is violated.
        """
        value = cls._XsdConstraintsPreCheck_vb(value)

        facet_values = cls.__ClassFacetSequence.get(cls)
        if facet_values is None:
            # Constraints for simple type definitions are inherited.  Check them
            # from least derived to most derived.
            classes = [ _x for _x in cls.mro() if issubclass(_x, simpleTypeDefinition) ]
            classes.reverse()
            cache_result = True
            facet_values = []
            for clazz in classes:
                # When setting up the datatypes, if we attempt to validate
                # something before the facets have been initialized (e.g., a
                # nonNegativeInteger used as a length facet for the parent
                # integer datatype), just ignore that for now.  Don't cache
                # the value, though, since a subsequent check after
                # initialization should succeed.
                try:
                    clazz_facets = clazz._FacetMap().values()
                except AttributeError:
                    cache_result = False
                    clazz_facets = []
                for v in clazz_facets:
                    if not (v in facet_values):
                        facet_values.append(v)
            if cache_result:
                cls.__ClassFacetSequence[cls] = facet_values
        for f in facet_values:
            if not f.validateConstraint(value):
                raise pyxb.SimpleFacetValueError(cls, value, f, location)
        return value

    def xsdConstraintsOK (self, location=None):
        """Validate the value of this instance against its constraints."""
        return self.XsdConstraintsOK(self, location)

    def _validateBinding_vx (self):
        # Simple-type validation: a nil value is trivially valid; otherwise
        # delegate to the class-level value check.
        if not self._isNil():
            self._checkValidValue()
        return True

    @classmethod
    def XsdValueLength (cls, value):
        """Return the length of the given value.

        The length is calculated by a subclass implementation of
        _XsdValueLength_vx in accordance with
        http://www.w3.org/TR/xmlschema-2/#rf-length.

        The return value is a non-negative integer, or C{None} if length
        constraints should be considered trivially satisfied (as with
        QName and NOTATION).

        @raise pyxb.LogicError: the provided value is not an instance of cls.
        @raise pyxb.LogicError: an attempt is made to calculate a length for
        an instance of a type that does not support length calculations.
        """
        assert isinstance(value, cls)
        if not hasattr(cls, '_XsdValueLength_vx'):
            raise pyxb.LogicError('Class %s does not support length validation' % (cls.__name__,))
        return cls._XsdValueLength_vx(value)

    def xsdValueLength (self):
        """Return the length of this instance within its value space.

        See XsdValueLength."""
        return self.XsdValueLength(self)

    @classmethod
    def PythonLiteral (cls, value):
        """Return a string which can be embedded into Python source to
        represent the given value as an instance of this class."""
        class_name = cls.__name__
        return '%s(%s)' % (class_name, repr(value))

    def pythonLiteral (self):
        """Return a string which can be embedded into Python source to
        represent the value of this instance."""
        return self.PythonLiteral(self)

    def _toDOM_csc (self, dom_support, parent):
        # Serialize simple content as a text child, then continue along the
        # _toDOM_csc chain (no-op fallback when no superclass provides one).
        assert parent is not None
        dom_support.appendTextChild(self.xsdLiteral(), parent)
        return getattr(super(simpleTypeDefinition, self), '_toDOM_csc', lambda *_args,**_kw: dom_support)(dom_support, parent)

    @classmethod
    def _IsSimpleTypeContent (cls):
        """STDs have simple type content."""
        return True

    @classmethod
    def _IsValidValue (cls, value):
        """Return C{True} iff C{value} passes L{_CheckValidValue} without
        raising a PyXB exception."""
        try:
            cls._CheckValidValue(value)
            return True
        except pyxb.PyXBException:
            pass
        return False

    @classmethod
    def _CheckValidValue (cls, value):
        """NB: Invoking this on a value that is a list will, if necessary,
        replace the members of the list with new values that are of the
        correct item type.  This is permitted because only with lists is it
        possible to bypass the normal content validation (by invoking
        append/extend on the list instance)."""
        if value is None:
            raise pyxb.SimpleTypeValueError(cls, value)
        value_class = cls
        if issubclass(cls, STD_list):
            if not isinstance(value, collections.Iterable):
                raise pyxb.SimpleTypeValueError(cls, value)
            for v in value:
                if not cls._ItemType._IsValidValue(v):
                    raise pyxb.SimpleListValueError(cls, v)
        else:
            if issubclass(cls, STD_union):
                # For unions, identify which member type the value belongs to.
                value_class = None
                for mt in cls._MemberTypes:
                    if mt._IsValidValue(value):
                        value_class = mt
                        break
                if value_class is None:
                    raise pyxb.SimpleUnionValueError(cls, value)
            if not isinstance(value, value_class):
                raise pyxb.SimpleTypeValueError(cls, value)
        value_class.XsdConstraintsOK(value)

    def _checkValidValue (self):
        """Raise the appropriate PyXB exception if this instance's value is invalid."""
        self._CheckValidValue(self)

    def _isValidValue (self):
        """Return C{True} iff this instance's value is valid.

        (Previously the result of L{_IsValidValue} was computed but dropped,
        so this method always returned C{None}.)"""
        return self._IsValidValue(self)

    def _setAttribute (self, attr_en, value_lex):
        # Simple types have no attributes, but the parsing infrastructure
        # might invoke this to delegate responsibility for notifying the user
        # of the failure.
        raise pyxb.AttributeOnSimpleTypeError(self, attr_en, value_lex)

    @classmethod
    def _description (cls, name_only=False, user_documentation=True):
        """Return a string describing this restriction, optionally with its
        schema documentation appended."""
        name = cls._Name()
        if name_only:
            return name
        desc = [ name, ' restriction of ', cls.XsdSuperType()._description(name_only=True) ]
        if user_documentation and (cls._Documentation is not None):
            desc.extend(["\n", cls._Documentation])
        return ''.join(desc)
class STD_union (simpleTypeDefinition):
    """Base class for union datatypes.

    This class descends only from simpleTypeDefinition.  A pyxb.LogicError is
    raised if an attempt is made to construct an instance of a subclass of
    STD_union.  Values consistent with the member types are constructed using
    the Factory class method.  Values are validated using the _ValidatedMember
    class method.

    Subclasses must provide a class variable _MemberTypes which is a
    tuple of legal members of the union."""

    _MemberTypes = None
    """A list of classes which are permitted as values of the union."""

    # Ick: If we don't declare this here, this class's map doesn't get
    # initialized.  Alternative is to not descend from simpleTypeDefinition.
    # @todo Ensure that pattern and enumeration are valid constraints
    __FacetMap = {}

    @classmethod
    def Factory (cls, *args, **kw):
        """Given a value, attempt to create an instance of some member of this
        union.  The first instance which can be legally created is returned.

        @keyword _validate_constraints: If C{True} (default if validation is
        enabled), any constructed value is checked against constraints applied
        to the union as well as the member type.

        @raise pyxb.SimpleTypeValueError: no member type will permit creation of
        an instance from the parameters in C{args} and C{kw}.
        """
        used_cls = cls._SupersedingClass()
        state = used_cls._PreFactory_vx(args, kw)

        rv = None
        # NB: get, not pop: preserve it for the member type invocations
        validate_constraints = kw.get('_validate_constraints', cls._GetValidationConfig().forBinding)
        assert isinstance(validate_constraints, bool)
        if 0 < len(args):
            arg = args[0]
            try:
                rv = cls._ValidatedMember(arg)
            except pyxb.SimpleTypeValueError:
                pass
        if rv is None:
            # Force validation of member-type construction so the value ends
            # up associated with the correct member type.
            kw['_validate_constraints'] = True
            for mt in cls._MemberTypes:
                try:
                    rv = mt.Factory(*args, **kw)
                    break
                except pyxb.SimpleTypeValueError:
                    pass
                except (ValueError, OverflowError):
                    pass
                except Exception:
                    # Last-resort guard: an incompatible member may fail in
                    # arbitrary ways; continue with the remaining members.
                    # (Narrowed from a bare "except:", which also swallowed
                    # KeyboardInterrupt and SystemExit.)
                    pass
        location = None
        if kw is not None:
            location = kw.get('_location')
        if rv is not None:
            if validate_constraints:
                cls.XsdConstraintsOK(rv, location)
            rv._postFactory_vx(state)
            return rv
        # The constructor may take any number of arguments, so pass the whole thing.
        # Should we also provide the keywords?
        raise pyxb.SimpleUnionValueError(cls, args, location)

    @classmethod
    def _ValidatedMember (cls, value):
        """Validate the given value as a potential union member.

        @raise pyxb.SimpleTypeValueError: the value is not an instance of a
        member type."""
        if not isinstance(value, cls._MemberTypes):
            for mt in cls._MemberTypes:
                try:
                    # Force validation so we get the correct type, otherwise
                    # first member will be accepted.
                    value = mt.Factory(value, _validate_constraints=True)
                    return value
                except (TypeError, pyxb.SimpleTypeValueError):
                    pass
            raise pyxb.SimpleUnionValueError(cls, value)
        return value

    def __new__ (self, *args, **kw):
        # Python invokes __new__ with the class as the first argument, so
        # "self" here is the class being constructed; self.__class__ would
        # name the metaclass.  Use __name__ directly so the diagnostic names
        # the union class.
        raise pyxb.LogicError('%s: cannot construct instances of union' % (self.__name__,))

    def __init__ (self, *args, **kw):
        # Unreachable through normal construction (__new__ raises), but kept
        # as a safety net for subclasses that override __new__.
        raise pyxb.LogicError('%s: cannot construct instances of union' % (self.__class__.__name__,))

    @classmethod
    def _description (cls, name_only=False, user_documentation=True):
        """Return a string describing this union and, unless C{name_only}, its members."""
        name = cls._Name()
        if name_only:
            return name
        desc = [ name, ', union of ']
        desc.append(', '.join([ _td._description(name_only=True) for _td in cls._MemberTypes ]))
        return ''.join(desc)

    @classmethod
    def XsdLiteral (cls, value):
        """Convert from a binding value to a string usable in an XML document."""
        return cls._ValidatedMember(value).xsdLiteral()
class STD_list (simpleTypeDefinition, types.ListType):
    """Base class for collection datatypes.

    This class descends from the Python list type, and incorporates
    simpleTypeDefinition.  Subclasses must define a class variable _ItemType
    which is a reference to the class of which members must be instances."""

    _ItemType = None
    """A reference to the binding class for items within this list."""

    # Ick: If we don't declare this here, this class's map doesn't get
    # initialized.  Alternative is to not descend from simpleTypeDefinition.
    __FacetMap = {}

    @classmethod
    def _ValidatedItem (cls, value, kw=None):
        """Verify that the given value is permitted as an item of this list.

        This may convert the value to the proper type, if it is
        compatible but not an instance of the item type.  Returns the
        value that should be used as the item, or raises an exception
        if the value cannot be converted.

        @param kw: optional dictionary of standard constructor keywords used
        when exceptions must be built.  In particular, C{_location} may be
        useful.
        """
        if isinstance(value, cls._ItemType):
            pass
        elif issubclass(cls._ItemType, STD_union):
            # Union item types perform their own member selection/conversion.
            value = cls._ItemType._ValidatedMember(value)
        else:
            try:
                value = cls._ItemType(value)
            except (pyxb.SimpleTypeValueError, TypeError):
                location = None
                if kw is not None:
                    location = kw.get('_location')
                raise pyxb.SimpleListValueError(cls, value, location)
        return value

    @classmethod
    def _ConvertArguments_vx (cls, args, kw):
        # If the first argument is a string, split it on spaces and use the
        # resulting list of tokens.
        if 0 < len(args):
            arg1 = args[0]
            if isinstance(arg1, types.StringTypes):
                args = (arg1.split(),) + args[1:]
                arg1 = args[0]
            if isinstance(arg1, collections.Iterable):
                # Validate/convert each member of the provided sequence.
                new_arg1 = [ cls._ValidatedItem(_v, kw) for _v in arg1 ]
                args = (new_arg1,) + args[1:]
        # Chain to any superclass hook; default is identity on args.
        super_fn = getattr(super(STD_list, cls), '_ConvertArguments_vx', lambda *a,**kw: args)
        return super_fn(args, kw)

    @classmethod
    def _XsdValueLength_vx (cls, value):
        # For list types the XSD length facets count items, not characters.
        return len(value)

    @classmethod
    def XsdLiteral (cls, value):
        """Convert from a binding value to a string usable in an XML document."""
        return ' '.join([ cls._ItemType.XsdLiteral(_v) for _v in value ])

    @classmethod
    def _description (cls, name_only=False, user_documentation=True):
        name = cls._Name()
        if name_only:
            return name
        desc = [ name, ', list of ', cls._ItemType._description(name_only=True) ]
        return ''.join(desc)

    # Convert a single value to the required type, if not already an instance
    @classmethod
    def __ConvertOne (cls, v):
        return cls._ValidatedItem(v)

    # Convert a sequence of values to the required type, if not already instances
    def __convertMany (self, values):
        return [ self._ValidatedItem(_v) for _v in values ]

    def __setitem__ (self, key, value):
        if isinstance(key, slice):
            super(STD_list, self).__setitem__(key, self.__convertMany(value))
        else:
            super(STD_list, self).__setitem__(key, self._ValidatedItem(value))

    #!python3>! required for python2 since deriving from built-in type
    def __setslice__ (self, start, end, values):
        super(STD_list, self).__setslice__(start, end, self.__convertMany(values))
    #!python3<!

    def __contains__ (self, item):
        return super(STD_list, self).__contains__(self._ValidatedItem(item))

    # Standard mutable sequence methods, per Python Library Reference "Mutable Sequence Types"

    def append (self, x):
        super(STD_list, self).append(self._ValidatedItem(x))

    def extend (self, x, _from_xml=False):
        # _from_xml is accepted but unused here; presumably present for
        # signature compatibility with other binding extend() methods --
        # TODO confirm against complexTypeDefinition.extend.
        super(STD_list, self).extend(self.__convertMany(x))

    def count (self, x):
        return super(STD_list, self).count(self._ValidatedItem(x))

    def index (self, x, *args):
        return super(STD_list, self).index(self._ValidatedItem(x), *args)

    def insert (self, i, x):
        super(STD_list, self).insert(i, self._ValidatedItem(x))

    def remove (self, x):
        super(STD_list, self).remove(self._ValidatedItem(x))
class element (utility._DeconflictSymbols_mixin, _DynamicCreate_mixin):
    """Class that represents a schema element within a binding.

    This gets a little confusing.  Within a schema, the
    L{pyxb.xmlschema.structures.ElementDeclaration} type represents an
    U{element
    declaration<http://www.w3.org/TR/xmlschema-1/#cElement_Declarations>}.
    Those declarations may be global (have a name that is visible in the
    namespace), or local (have a name that is visible only within a complex
    type definition).  Further, local (but not global) declarations may have a
    reference to a global declaration (which might be in a different
    namespace).

    Within a PyXB binding, the element declarations from the original complex
    type definition that have the same
    U{QName<http://www.w3.org/TR/1999/REC-xml-names-19990114/#dt-qname>}
    (after deconflicting the
    U{LocalPart<http://www.w3.org/TR/1999/REC-xml-names-19990114/#NT-LocalPart>})
    are associated with an attribute in the class for the complex type.  Each
    of these attributes is defined via a
    L{pyxb.binding.content.ElementDeclaration} which provides the mechanism by
    which the binding holds values associated with that element.

    Furthermore, in the FAC-based content model each schema element
    declaration is associated with an
    L{ElementUse<pyxb.binding.content.ElementUse>} instance to locate the
    point in the schema where content came from.  Instances that refer to the
    same schema element declaration share the same underlying
    L{pyxb.binding.content.ElementDeclaration}.

    This element isn't any of those elements.  This element is the type used
    for an attribute which associates the name of a element with data required
    to represent it, all within a particular scope (a module for global scope,
    the binding class for a complex type definition for local scope).  From
    the perspective of a PyXB user they look almost like a class, in that you
    can call them to create instances of the underlying complex type.

    Global and local elements are represented by instances of this class.
    """

    def name (self):
        """The expanded name of the element within its scope."""
        return self.__name
    __name = None

    def typeDefinition (self):
        """The L{_TypeBinding_mixin} subclass for values of this element."""
        return self.__typeDefinition._SupersedingClass()
    __typeDefinition = None

    def xsdLocation (self):
        """The L{pyxb.utils.utility.Location} where the element appears in the schema."""
        return self.__xsdLocation
    __xsdLocation = None

    def scope (self):
        """The scope of the element.  This is either C{None}, representing a
        top-level element, or an instance of C{complexTypeDefinition} for
        local elements."""
        return self.__scope
    __scope = None

    def nillable (self):
        """Indicate whether values matching this element can have U{nil
        <http://www.w3.org/TR/xmlschema-1/#xsi_nil>} set."""
        return self.__nillable
    __nillable = False

    def abstract (self):
        """Indicate whether this element is abstract (must use substitution
        group members for matches)."""
        return self.__abstract
    __abstract = False

    def documentation (self):
        """Contents of any documentation annotation in the definition."""
        return self.__documentation
    __documentation = None

    def defaultValue (self):
        """The default value of the element.

        C{None} if the element has no default value.

        @note: A non-C{None} value is always an instance of a simple type,
        even if the element has complex content."""
        return self.__defaultValue
    __defaultValue = None

    def fixed (self):
        """C{True} if the element content cannot be changed"""
        return self.__fixed
    __fixed = False

    def substitutionGroup (self):
        """The L{element} instance to whose substitution group this element
        belongs.  C{None} if this element is not part of a substitution
        group."""
        return self.__substitutionGroup
    def _setSubstitutionGroup (self, substitution_group):
        # Joining a substitution group also replaces the no-op substitutesFor
        # stub with the real implementation, on this instance only.
        self.__substitutionGroup = substitution_group
        if substitution_group is not None:
            self.substitutesFor = self._real_substitutesFor
        return self
    __substitutionGroup = None

    def findSubstituendDecl (self, ctd_class):
        # Locate the ElementDeclaration in ctd_class matching this element's
        # name, falling back through the substitution group chain.
        ed = ctd_class._ElementMap.get(self.name())
        if ed is not None:
            return ed
        if self.substitutionGroup() is None:
            return None
        return self.substitutionGroup().findSubstituendDecl(ctd_class)

    def _real_substitutesFor (self, other):
        """Determine whether an instance of this element can substitute for the other element.

        See U{Substitution Group OK<http://www.w3.org/TR/xmlschema-1/#cos-equiv-derived-ok-rec>}.

        @todo: Do something about blocking constraints.  This ignores them, as
        does everything leading to this point.
        """
        if self.substitutionGroup() is None:
            return False
        if other is None:
            return False
        assert isinstance(other, element)
        # On the first call, other is likely to be the local element.  We need
        # the global one.
        if other.scope() is not None:
            other = other.name().elementBinding()
            if other is None:
                return False
            assert other.scope() is None
        # Do both these refer to the same (top-level) element?
        if self.name().elementBinding() == other:
            return True
        # Recurse up the substitution group chain.
        return (self.substitutionGroup() == other) or self.substitutionGroup().substitutesFor(other)

    def substitutesFor (self, other):
        """Stub replaced by _real_substitutesFor when element supports substitution groups."""
        return False

    def memberElement (self, name):
        """Return a reference to the element instance used for the given name
        within this element.

        The type for this element must be a complex type definition."""
        return self.typeDefinition()._UseForTag(name).elementBinding()

    def __init__ (self, name, type_definition, scope=None, nillable=False, abstract=False, unicode_default=None, fixed=False, substitution_group=None, documentation=None, location=None):
        """Create a new element binding.
        """
        assert isinstance(name, pyxb.namespace.ExpandedName)
        self.__name = name
        self.__typeDefinition = type_definition
        self.__scope = scope
        self.__nillable = nillable
        self.__abstract = abstract
        if unicode_default is not None:
            # Override default None.  If this is a complex type with simple
            # content, use the underlying simple type value.
            self.__defaultValue = self.__typeDefinition.Factory(unicode_default, _from_xml=True)
            if isinstance(self.__defaultValue, complexTypeDefinition):
                self.__defaultValue = self.__defaultValue.value()
        self.__fixed = fixed
        self.__substitutionGroup = substitution_group
        self.__documentation = documentation
        self.__xsdLocation = location
        super(element, self).__init__()

    def __call__ (self, *args, **kw):
        """Invoke the Factory method on the type associated with this element.

        @keyword _dom_node: This keyword is removed.  If present, it must be C{None}.

        @note: Other keywords are passed to L{_TypeBinding_mixin.Factory}.

        @raise pyxb.AbstractElementError: This element is abstract and no DOM
        node was provided.
        """
        dom_node = kw.pop('_dom_node', None)
        assert dom_node is None, 'Cannot pass DOM node directly to element constructor; use createFromDOM'
        if '_element' in kw:
            raise pyxb.LogicError('Cannot set _element in element-based instance creation')
        kw['_element'] = self
        # Can't create instances of abstract elements.
        if self.abstract():
            location = kw.get('_location')
            # NOTE(review): dom_node is asserted to be None above, so this
            # fallback appears unreachable; presumably retained from an
            # earlier signature -- confirm before removing.
            if (location is None) and isinstance(dom_node, utility.Locatable_mixin):
                location = dom_node._location()
            raise pyxb.AbstractElementError(self, location, args)
        if self.__defaultValue is not None:
            if 0 == len(args):
                # No initial value; use the default
                args = [ self.__defaultValue ]
            elif self.__fixed:
                # Validate that the value is consistent with the fixed value
                if 1 < len(args):
                    raise ValueError(*args)
                args = [ self.compatibleValue(args[0], **kw) ]
        rv = self.typeDefinition().Factory(*args, **kw)
        rv._setElement(self)
        return rv

    def compatibleValue (self, value, **kw):
        """Return a variant of the value that is compatible with this element.

        This mostly defers to L{_TypeBinding_mixin._CompatibleValue}.

        @raise pyxb.SimpleTypeValueError: if the value is not both
        type-consistent and value-consistent with the element's type.
        """
        # None is always None, unless there's a default.
        if value is None:
            return self.__defaultValue
        is_plural = kw.pop('is_plural', False)
        if is_plural:
            if not isinstance(value, collections.Iterable):
                raise pyxb.SimplePluralValueError(self.typeDefinition(), value)
            return [ self.compatibleValue(_v) for _v in value ]
        if self.__fixed and (value != self.__defaultValue):
            raise pyxb.ElementChangeError(self, value)
        # A value already bound to a substituting element is accepted as-is.
        if isinstance(value, _TypeBinding_mixin) and (value._element() is not None) and value._element().substitutesFor(self):
            return value
        if self.abstract():
            location = None
            if isinstance(value, utility.Locatable_mixin):
                location = value._location()
            raise pyxb.AbstractElementError(self, location, value)
        return self.typeDefinition()._CompatibleValue(value, **kw)

    @classmethod
    def CreateDOMBinding (cls, node, element_binding, **kw):
        """Create a binding from a DOM node.

        @param node: The DOM node

        @param element_binding: An instance of L{element} that would normally
        be used to determine the type of the binding.  The actual type of
        object returned is determined by the type definition associated with
        the C{element_binding} and the value of any U{xsi:type
        <http://www.w3.org/TR/xmlschema-1/#xsi_type>} attribute found in
        C{node}, modulated by
        L{XSI._InterpretTypeAttribute<pyxb.namespace.builtin._XMLSchema_instance._InterpretTypeAttribute>}.

        @keyword _fallback_namespace: The namespace to use as the namespace for
        the node, if the node name is unqualified.  This should be an absent
        namespace.

        @return: A binding for the DOM node.

        @raises pyxb.UnrecognizedDOMRootNodeError: if no underlying element or
        type for the node can be identified.
        """
        if xml.dom.Node.ELEMENT_NODE != node.nodeType:
            raise ValueError('node is not an element')
        fallback_namespace = kw.get('_fallback_namespace')
        # Record the element to be associated with the created binding
        # instance.
        if '_element' in kw:
            raise pyxb.LogicError('Cannot set _element in element-based instance creation')
        type_class = None
        if element_binding is not None:
            # Can't create instances of abstract elements.  @todo: Is there any
            # way this could be legal given an xsi:type attribute?  I'm pretty
            # sure "no"...
            if element_binding.abstract():
                location = kw.get('location')
                if (location is None) and isinstance(node, utility.Locatable_mixin):
                    location = node._location()
                raise pyxb.AbstractElementError(element_binding, location, node)
            kw['_element'] = element_binding
            type_class = element_binding.typeDefinition()
        # Get the namespace context for the value being created.  If none is
        # associated, one will be created.  Do not make assumptions about the
        # namespace context; if the user cared, she should have assigned a
        # context before calling this.
        ns_ctx = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(node)
        (did_replace, type_class) = XSI._InterpretTypeAttribute(XSI.type.getAttribute(node), ns_ctx, fallback_namespace, type_class)
        if type_class is None:
            raise pyxb.UnrecognizedDOMRootNodeError(node)
        # Pass xsi:nil on to the constructor regardless of whether the element
        # is nillable.  Another sop to SOAP-encoding WSDL fans who don't
        # bother to provide valid schema for their message content.
        is_nil = XSI.nil.getAttribute(node)
        if is_nil is not None:
            kw['_nil'] = pyxb.binding.datatypes.boolean(is_nil)
        rv = type_class.Factory(_dom_node=node, **kw)
        assert rv._element() == element_binding
        rv._setNamespaceContext(pyxb.namespace.resolution.NamespaceContext.GetNodeContext(node))
        return rv._postDOMValidate()
    # element

    @classmethod
    def AnyCreateFromDOM (cls, node, fallback_namespace):
        """Create an instance of an element from a DOM node.

        This method does minimal processing of C{node} and delegates to
        L{CreateDOMBinding}.

        @param node: An C{xml.dom.Node} representing a root element.  If the
        node is a document, that document's root node will be substituted.
        The name of the node is extracted as the name of the element to be
        created, and the node and the name are passed to L{CreateDOMBinding}.

        @param fallback_namespace: The value to pass as C{_fallback_namespace}
        to L{CreateDOMBinding}

        @return: As with L{CreateDOMBinding}"""
        if xml.dom.Node.DOCUMENT_NODE == node.nodeType:
            node = node.documentElement
        expanded_name = pyxb.namespace.ExpandedName(node, fallback_namespace=fallback_namespace)
        return cls.CreateDOMBinding(node, expanded_name.elementBinding(), _fallback_namespace=fallback_namespace)

    def elementForName (self, name):
        """Return the element that should be used if this element binding is
        permitted and an element with the given name is encountered.

        Normally, the incoming name matches the name of this binding, and
        C{self} is returned.  If the incoming name is different, it is
        expected to be the name of a global element which is within this
        element's substitution group.  In that case, the binding corresponding
        to the named element is return.

        @return: An instance of L{element}, or C{None} if no element with the
        given name can be found.
        """
        # Name match means OK.
        if self.name() == name:
            return self
        # No name match means only hope is a substitution group, for which the
        # element must be top-level.
        top_elt = self.name().elementBinding()
        if top_elt is None:
            return None
        # Members of the substitution group must also be top-level.  NB: If
        # named_elt == top_elt, then the adoptName call below improperly
        # associated the global namespace with a local element of the same
        # name; cf. test-namespace-uu:testBad.
        elt_en = top_elt.name().adoptName(name)
        assert 'elementBinding' in elt_en.namespace()._categoryMap(), 'No element bindings in %s' % (elt_en.namespace(),)
        named_elt = elt_en.elementBinding()
        if (named_elt is None) or (named_elt == top_elt):
            return None
        if named_elt.substitutesFor(top_elt):
            return named_elt
        return None

    def createFromDOM (self, node, fallback_namespace=None, **kw):
        """Create an instance of this element using a DOM node as the source
        of its content.

        This method does minimal processing of C{node} and delegates to
        L{_createFromDOM}.

        @param node: An C{xml.dom.Node} representing a root element.  If the
        node is a document, that document's root node will be substituted.
        The name of the node is extracted as the name of the element to be
        created, and the node and the name are passed to L{_createFromDOM}

        @keyword fallback_namespace: Used as default for
        C{_fallback_namespace} in call to L{_createFromDOM}

        @note: Keyword parameters are passed to L{CreateDOMBinding}.

        @return: As with L{_createFromDOM}
        """
        if xml.dom.Node.DOCUMENT_NODE == node.nodeType:
            node = node.documentElement
        if fallback_namespace is not None:
            kw.setdefault('_fallback_namespace', fallback_namespace)
        expanded_name = pyxb.namespace.ExpandedName(node, fallback_namespace=fallback_namespace)
        return self._createFromDOM(node, expanded_name, **kw)

    def _createFromDOM (self, node, expanded_name, **kw):
        """Create an instance from a DOM node given the name of an element.

        This method does minimal processing of C{node} and C{expanded_name}
        and delegates to L{CreateDOMBinding}.

        @param node: An C{xml.dom.Node} representing a root element.  If the
        node is a document, that document's root node will be substituted.
        The value is passed to L{CreateDOMBinding}.

        @param expanded_name: The expanded name of the element to be used for
        content.  This is passed to L{elementForName} to obtain the binding
        that is passed to L{CreateDOMBinding}, superseding any identification
        that might be inferred from C{node}.  If no name is available, use
        L{createFromDOM}.

        @note: Keyword parameters are passed to L{CreateDOMBinding}.

        @return: As with L{CreateDOMBinding}.
        """
        if xml.dom.Node.DOCUMENT_NODE == node.nodeType:
            node = node.documentElement
        return element.CreateDOMBinding(node, self.elementForName(expanded_name), **kw)

    def __str__ (self):
        return 'Element %s' % (self.name(),)

    def _description (self, name_only=False, user_documentation=True):
        name = unicode(self.name())
        if name_only:
            return name
        desc = [ name, ' (', self.typeDefinition()._description(name_only=True), ')' ]
        if self.scope() is not None:
            desc.extend([', local to ', self.scope()._description(name_only=True) ])
        if self.nillable():
            desc.append(', nillable')
        if self.substitutionGroup() is not None:
            desc.extend([', substitutes for ', self.substitutionGroup()._description(name_only=True) ])
        if user_documentation and (self.documentation() is not None):
            desc.extend(["\n", self.documentation() ])
        return u''.join(desc)
class enumeration_mixin (pyxb.cscRoot):
    """Marker in case we need to know that a PST has an enumeration constraint facet."""

    @classmethod
    def values (cls):
        """Return a list of values that the enumeration can take."""
        facet = cls._CF_enumeration
        return facet.values()

    @classmethod
    def itervalues (cls):
        """Return a generator for the values that the enumeration can take."""
        facet = cls._CF_enumeration
        return facet.itervalues()

    @classmethod
    def items (cls):
        """Return the associated L{pyxb.binding.facet._EnumerationElement} instances."""
        facet = cls._CF_enumeration
        return facet.items()

    @classmethod
    def iteritems (cls):
        """Generate the associated L{pyxb.binding.facet._EnumerationElement} instances."""
        facet = cls._CF_enumeration
        return facet.iteritems()
class _Content (object):
"""Base for any wrapper added to L{complexTypeDefinition.orderedContent}."""
def __getValue (self):
"""The value of the content.
This is a unicode string for L{NonElementContent}, and (ideally) an
instance of L{_TypeBinding_mixin} for L{ElementContent}."""
return self.__value
__value = None
value = property(__getValue)
@classmethod
def ContentIterator (cls, input):
"""Return an iterator that filters and maps a sequence of L{_Content}
instances.
The returned iterator will filter out sequence members that are not
instances of the class from which the iterator was created. Further,
only the L{value} field of the sequence member is returned.
Thus the catenated text of the non-element content of an instance can
be obtained with::
text = u''.join(NonElementContent.ContentIterator(instance.orderedContent()))
See also L{pyxb.NonElementContent}
"""
class _Iterator:
def __init__ (self, input):
self.__input = iter(input)
def __iter__ (self):
return self
def next (self):
while True:
content = self.__input.next()
if isinstance(content, cls):
return content.value
return _Iterator(input)
def __init__ (self, value):
self.__value = value
class ElementContent (_Content):
    """Marking wrapper for element content.

    The value should be translated into XML and made a child of its parent."""

    __elementDeclaration = None

    def __getElementDeclaration (self):
        """The L{pyxb.binding.content.ElementDeclaration} associated with the element content.
        This may be C{None} if the value is a wildcard."""
        return self.__elementDeclaration
    elementDeclaration = property(__getElementDeclaration)

    def __init__ (self, value, element_declaration=None, instance=None, tag=None):
        """Create a wrapper associating a value with its element declaration.

        Normally the element declaration is determined by consulting the
        content model when creating a binding instance.  When manipulating the
        preferred content list that declaration may be inconvenient to
        obtain; in that case provide the C{instance} in which the content
        appears immediately, along with the C{tag} used for the Python
        attribute that holds the element.

        @param value: the value of the element.  Should be an instance of
        L{_TypeBinding_mixin}, but for simple types might be a Python native
        type.

        @keyword element_declaration: The
        L{pyxb.binding.content.ElementDeclaration} associated with the element
        value.  Should be C{None} if the element matches wildcard content.

        @keyword instance: Alternative path to providing C{element_declaration}

        @keyword tag: Alternative path to providing C{element_declaration}
        """
        import pyxb.binding.content
        super(ElementContent, self).__init__(value)
        if instance is not None:
            # Derive the declaration from the owning instance and tag.
            if not isinstance(instance, complexTypeDefinition):
                raise pyxb.UsageError('Unable to determine element declaration')
            element_declaration = instance._UseForTag(tag)
        assert (element_declaration is None) or isinstance(element_declaration, pyxb.binding.content.ElementDeclaration)
        self.__elementDeclaration = element_declaration
class NonElementContent (_Content):
    """Marking wrapper for non-element content.

    The value will be unicode text, and should be appended as character
    data."""
    def __init__ (self, value):
        # Coerce the payload to a unicode string at construction time.
        text = unicode(value)
        super(NonElementContent, self).__init__(text)
class complexTypeDefinition (_TypeBinding_mixin, utility._DeconflictSymbols_mixin, _DynamicCreate_mixin):
"""Base for any Python class that serves as the binding for an
XMLSchema complexType.
Subclasses should define a class-level _AttributeMap variable which maps
from the unicode tag of an attribute to the AttributeUse instance that
defines it. Similarly, subclasses should define an _ElementMap variable.
"""
_CT_EMPTY = 'EMPTY' #<<< No content
_CT_SIMPLE = 'SIMPLE' #<<< Simple (character) content
_CT_MIXED = 'MIXED' #<<< Children may be elements or other (e.g., character) content
_CT_ELEMENT_ONLY = 'ELEMENT_ONLY' #<<< Expect only element content.
_ContentTypeTag = None
_TypeDefinition = None
"""Subclass of simpleTypeDefinition that corresponds to the type content.
Only valid if _ContentTypeTag is _CT_SIMPLE"""
# A value that indicates whether the content model for this type supports
# wildcard elements. Supporting classes should override this value.
_HasWildcardElement = False
# Map from expanded names to ElementDeclaration instances
_ElementMap = { }
"""Map from expanded names to ElementDeclaration instances."""
# Per-instance map from tags to attribute values for wildcard attributes.
# Value is C{None} if the type does not support wildcard attributes.
__wildcardAttributeMap = None
    def wildcardAttributeMap (self):
        """Obtain access to wildcard attributes.

        The return value is C{None} if this type does not support wildcard
        attributes.  If wildcard attributes are allowed, the return value is a
        map from QNames to the unicode string value of the corresponding
        attribute.

        @todo: The map keys should be namespace extended names rather than
        QNames, as the in-scope namespace may not be readily available to the
        user.
        """
        return self.__wildcardAttributeMap
# Per-instance list of DOM nodes interpreted as wildcard elements.
# Value is None if the type does not support wildcard elements.
__wildcardElements = None
    def wildcardElements (self):
        """Obtain access to wildcard elements.

        The return value is C{None} if the content model for this type does not
        support wildcard elements.  If wildcard elements are allowed, the
        return value is a list of values corresponding to conformant
        unrecognized elements, in the order in which they were encountered.
        If the containing binding was created from an XML document and enough
        information was present to determine the binding of the member
        element, the value is a binding instance.  Otherwise, the value is the
        original DOM Element node.
        """
        return self.__wildcardElements
    def __init__ (self, *args, **kw):
        """Create a new instance of this binding.

        Arguments are used as transition values along the content model.
        Keywords are passed to the constructor of any simple content, or used
        to initialize attribute and element values whose L{id
        <content.ElementDeclaration.id>} (not L{name <content.ElementDeclaration.name>})
        matches the keyword.

        @keyword _dom_node: The node to use as the source of binding content.
        @type _dom_node: C{xml.dom.Element}

        @keyword _location: An optional instance of
        L{pyxb.utils.utility.Location} showing the origin the binding.  If
        C{None}, a value from C{_dom_node} is used if available.

        @keyword _from_xml: See L{_TypeBinding_mixin.Factory}

        @keyword _finalize_content_model: If C{True} the constructor invokes
        L{_TypeBinding_mixin._finalizeContentModel} prior to return.  The
        value defaults to C{False} when content is assigned through keyword
        parameters (bypassing the content model) or neither a C{_dom_node} nor
        positional element parameters have been provided, and to C{True} in
        all other cases.
        """
        fallback_namespace = kw.pop('_fallback_namespace', None)
        is_nil = False
        dom_node = kw.pop('_dom_node', None)
        location = kw.pop('_location', None)
        # Absent an explicit flag, content sourced from a DOM node counts
        # as having come from XML.
        from_xml = kw.pop('_from_xml', dom_node is not None)
        do_finalize_content_model = kw.pop('_finalize_content_model', None)
        if dom_node is not None:
            if (location is None) and isinstance(dom_node, pyxb.utils.utility.Locatable_mixin):
                location = dom_node._location()
            if xml.dom.Node.DOCUMENT_NODE == dom_node.nodeType:
                dom_node = dom_node.documentElement
            #kw['_validate_constraints'] = False
            # xsi:nil from the document is forwarded to the superclass
            # constructor via the _nil keyword.
            is_nil = XSI.nil.getAttribute(dom_node)
            if is_nil is not None:
                is_nil = kw['_nil'] = pyxb.binding.datatypes.boolean(is_nil)
        if location is not None:
            self._setLocation(location)
        # Per-instance wildcard storage exists only when the type declares
        # support for wildcard attributes/elements.
        if self._AttributeWildcard is not None:
            self.__wildcardAttributeMap = { }
        if self._HasWildcardElement:
            self.__wildcardElements = []
        if self._Abstract:
            raise pyxb.AbstractInstantiationError(type(self), location, dom_node)
        super(complexTypeDefinition, self).__init__(**kw)
        self.reset()
        self._setAttributesFromKeywordsAndDOM(kw, dom_node)
        # Element values supplied by keyword bypass the content model.
        did_set_kw_elt = False
        for fu in self._ElementMap.itervalues():
            iv = kw.pop(fu.id(), None)
            if iv is not None:
                did_set_kw_elt = True
                fu.set(self, iv)
        if do_finalize_content_model is None:
            do_finalize_content_model = not did_set_kw_elt
        if kw and kw.pop('_strict_keywords', True):
            # Discard the standard factory keywords before deciding whether
            # anything unrecognized remains.
            [ kw.pop(_fkw, None) for _fkw in self._PyXBFactoryKeywords ]
            if kw:
                raise pyxb.UnprocessedKeywordContentError(self, kw)
        if 0 < len(args):
            if did_set_kw_elt:
                raise pyxb.UsageError('Cannot mix keyword and positional args for element initialization')
            self.extend(args, _from_xml=from_xml, _location=location)
        elif self._CT_SIMPLE == self._ContentTypeTag:
            value = self._TypeDefinition.Factory(_require_value=not self._isNil(), _dom_node=dom_node, _location=location, _nil=self._isNil(), _apply_attributes=False, *args)
            if value._constructedWithValue():
                self.append(value)
        elif dom_node is not None:
            self.extend(dom_node.childNodes[:], fallback_namespace)
        else:
            # No content provided at all; nothing to finalize.
            do_finalize_content_model = False
        if do_finalize_content_model:
            self._finalizeContentModel()
# Specify the symbols to be reserved for all CTDs.
_ReservedSymbols = _TypeBinding_mixin._ReservedSymbols.union(set([ 'wildcardElements', 'wildcardAttributeMap',
'xsdConstraintsOK', 'content', 'orderedContent', 'append', 'extend', 'value', 'reset' ]))
# None, or a reference to a pyxb.utils.fac.Automaton instance that defines
# the content model for the type.
_Automaton = None
@classmethod
def _AddElement (cls, element):
"""Method used by generated code to associate the element binding with a use in this type.
This is necessary because all complex type classes appear in the
module prior to any of the element instances (which reference type
classes), so the association must be formed after the element
instances are available."""
return cls._UseForTag(element.name())._setElementBinding(element)
@classmethod
def _UseForTag (cls, tag, raise_if_fail=True):
"""Return the ElementDeclaration object corresponding to the element name.
@param tag: The L{ExpandedName} of an element in the class."""
try:
rv = cls._ElementMap[tag]
except KeyError:
if raise_if_fail:
raise
rv = None
return rv
def __childrenForDOM (self):
"""Generate a list of children in the order in which they should be
added to the parent when creating a DOM representation of this
object.
@note: This is only used when L{pyxb.RequireValidWhenGenerating} has
disabled validation. Consequently, it may not generate valid XML.
"""
order = []
for ed in self._ElementMap.itervalues():
value = ed.value(self)
if value is None:
continue
if isinstance(value, list) and ed.isPlural():
order.extend([ ElementContent(_v, ed) for _v in value ])
continue
order.append(ElementContent(value, ed))
return order
def _validatedChildren (self):
"""Provide the child elements and non-element content in an order
consistent with the content model.
Returns a sequence of tuples representing a valid path through the
content model where each transition corresponds to one of the member
element instances within this instance. The tuple is a pair
comprising the L{content.ElementDeclaration} instance and the value for the
transition.
If the content of the instance does not validate against the content
model, an exception is raised.
@return: C{None} or a list as described above.
"""
if self._ContentTypeTag in (self._CT_EMPTY, self._CT_SIMPLE):
return []
self._resetAutomaton()
return self.__automatonConfiguration.sequencedChildren()
def _symbolSet (self):
"""Return a map from L{content.ElementDeclaration} instances to a list of
values associated with that use.
This is used as the set of symbols available for transitions when
validating content against a model. Note that the original
L{content.ElementUse} that may have validated the assignment of the
symbol to the content is no longer available, which may result in a
different order being generated by the content model. Preservation of
the original order mitigates this risk.
The value C{None} is used to provide the wildcard members, if any.
If an element use has no associated values, it must not appear in the
returned map.
@raise pyxb.SimpleTypeValueError: when unable to convert element
content to the binding declaration type.
"""
rv = { }
for eu in self._ElementMap.itervalues():
value = eu.value(self)
if value is None:
continue
converter = eu.elementBinding().compatibleValue
if eu.isPlural():
if 0 < len(value):
rv[eu] = [ converter(_v) for _v in value ]
else:
rv[eu] = [ converter(value)]
wce = self.__wildcardElements
if (wce is not None) and (0 < len(wce)):
rv[None] = wce[:]
return rv
def _validateAttributes (self):
for au in self._AttributeMap.itervalues():
au.validate(self)
    def _validateBinding_vx (self):
        """Validate this instance: nil-ness, content order, member values,
        then attributes.
        @raise pyxb.ContentInNilInstanceError: the instance is nil but holds content
        @raise pyxb.SimpleContentAbsentError: simple content is required but unset
        @return: C{True} on success."""
        if self._isNil():
            # A nil instance must carry no content at all.
            if (self._IsSimpleTypeContent() and (self.__content is not None)) or self.__content:
                raise pyxb.ContentInNilInstanceError(self, self.__content)
            return True
        if self._IsSimpleTypeContent() and (self.__content is None):
            raise pyxb.SimpleContentAbsentError(self, self._location())
        # Raises if the children cannot be ordered per the content model.
        order = self._validatedChildren()
        for content in order:
            if isinstance (content, NonElementContent):
                continue
            if isinstance(content.value, _TypeBinding_mixin):
                content.value.validateBinding()
            elif content.elementDeclaration is not None:
                # Value is not a binding instance; the best we can do is warn.
                _log.warning('Cannot validate value %s in field %s', content.value, content.elementDeclaration.id())
        self._validateAttributes()
        return True
def _setAttribute (self, attr_en, value_lex):
au = self._AttributeMap.get(attr_en)
if au is None:
if self._AttributeWildcard is None:
raise pyxb.UnrecognizedAttributeError(type(self), attr_en, self)
self.__wildcardAttributeMap[attr_en] = value_lex
else:
au.set(self, value_lex, from_xml=True)
return au
def xsdConstraintsOK (self, location=None):
"""Validate the content against the simple type.
@return: C{True} if the content validates against its type.
@raise pyxb.NotSimpleContentError: this type does not have simple content.
@raise pyxb.SimpleContentAbsentError: the content of this type has not been set
"""
# @todo: type check
if self._CT_SIMPLE != self._ContentTypeTag:
raise pyxb.NotSimpleContentError(self)
if self._isNil():
return True
if self.__content is None:
if location is None:
location = self._location()
raise pyxb.SimpleContentAbsentError(self, location)
return self.value().xsdConstraintsOK(location)
# __content is used in two ways: when complex content is used, it is as
# documented in L{orderedContent}. When simple content is used, it is as
# documented in L{value}.
__content = None
def orderedContent (self):
"""Return the element and non-element content of the instance in order.
This must be a complex type with complex content. The return value is
a list of the element and non-element content in a preferred order.
The returned list contains L{element<ElementContent>} and
L{non-element<NonElementContent>} content in the order which it was
added to the instance. This may have been through parsing a document,
constructing an instance using positional arguments, invoking the
L{append} or L{extend} methods, or assigning directly to an instance
attribute associated with an element binding.
@note: Be aware that assigning directly to an element attribute does not
remove any previous value for the element from the content list.
@note: Be aware that element values directly appended to an instance
attribute with list type (viz., that corresponds to an element that
allows more than one occurrence) will not appear in the ordered
content list.
The order in the list may influence the generation of documents
depending on L{pyxb.ValidationConfig} values that apply to an
instance. Non-element content is emitted immediately prior to the
following element in this list. Any trailing non-element content is
emitted after the last element in the content. The list should
include all element content. Element content in this list that is not
present within an element member of the binding instance may result in
an error, or may be ignored.
@note: The returned value is mutable, allowing the caller to change
the order to be used.
@raise pyxb.NotComplexContentError: this is not a complex type with mixed or element-only content
"""
if self._ContentTypeTag in (self._CT_EMPTY, self._CT_SIMPLE):
raise pyxb.NotComplexContentError(self)
return self.__content
@classmethod
def __WarnOnContent (cls):
if cls.__NeedWarnOnContent:
import traceback
cls.__NeedWarnOnContent = False
_log.warning('Deprecated complexTypeDefinition method "content" invoked\nPlease use "orderedContent"\n%s', ''.join(traceback.format_stack()[:-2]))
pass
__NeedWarnOnContent = True
def content (self):
"""Legacy interface for ordered content.
This version does not accurately distinguish non-element content from
element content that happens to have unicode type.
@deprecated: use L{orderedContent}."""
self.__WarnOnContent()
if self._ContentTypeTag in (self._CT_EMPTY, self._CT_SIMPLE):
raise pyxb.NotComplexContentError(self)
return [ _v.value for _v in self.__content ]
def value (self):
"""Return the value of the element.
This must be a complex type with simple content. The returned value
is expected to be an instance of some L{simpleTypeDefinition} class.
@raise pyxb.NotSimpleContentError: this is not a complex type with simple content
"""
if self._CT_SIMPLE != self._ContentTypeTag:
raise pyxb.NotSimpleContentError(self)
return self.__content
def _resetContent (self, reset_elements=False):
if reset_elements:
for eu in self._ElementMap.itervalues():
eu.reset(self)
nv = None
if self._ContentTypeTag in (self._CT_MIXED, self._CT_ELEMENT_ONLY):
nv = []
return self.__setContent(nv)
__automatonConfiguration = None
def _resetAutomaton (self):
if self._Automaton is not None:
if self.__automatonConfiguration is None:
import pyxb.binding.content
self.__automatonConfiguration = pyxb.binding.content.AutomatonConfiguration(self)
self.__automatonConfiguration.reset()
return self.__automatonConfiguration
def _automatonConfiguration (self):
"""For whitebox testing use only"""
return self.__automatonConfiguration
def reset (self):
"""Reset the instance.
This resets all element and attribute fields, and discards any
recorded content. It resets the content model automaton to its
initial state.
@see: Manipulate the return value of L{orderedContent} if your intent is
to influence the generation of documents from the binding instance
without changing its (element) content.
"""
self._resetContent(reset_elements=True)
for au in self._AttributeMap.itervalues():
au.reset(self)
self._resetAutomaton()
return self
@classmethod
def _ElementBindingDeclForName (cls, element_name):
"""Determine what the given name means as an element in this type.
Normally, C{element_name} identifies an element definition within this
type. If so, the returned C{element_decl} identifies that definition,
and the C{element_binding} is extracted from that use.
It may also be that the C{element_name} does not appear as an element
definition, but that it identifies a global element. In that case,
the returned C{element_binding} identifies the global element. If,
further, that element is a member of a substitution group which does
have an element definition in this class, then the returned
C{element_decl} identifies that definition.
If a non-C{None} C{element_decl} is returned, there will be an
associated C{element_binding}. However, it is possible to return a
non-C{None} C{element_binding}, but C{None} as the C{element_decl}. In
that case, the C{element_binding} can be used to create a binding
instance, but the content model will have to treat it as a wildcard.
@param element_name: The name of the element in this type, either an
expanded name or a local name if the element has an absent namespace.
@return: C{( element_binding, element_decl )}
"""
element_decl = cls._ElementMap.get(element_name)
element_binding = None
if element_decl is None:
try:
element_binding = element_name.elementBinding()
except pyxb.NamespaceError:
pass
if element_binding is not None:
element_decl = element_binding.findSubstituendDecl(cls)
else:
element_binding = element_decl.elementBinding()
return (element_binding, element_decl)
    def append (self, value, **kw):
        """Add the value to the instance.
        The value should be a DOM node or other value that is or can be
        converted to a binding instance, or a string if the instance allows
        mixed content. The value must be permitted by the content model.
        Recognized keywords: C{_element_decl}, C{_maybe_element},
        C{_location}, C{_fallback_namespace}, C{_require_validation},
        C{_from_xml} -- all internal plumbing supplied by the parser layers.
        @raise pyxb.ContentValidationError: the value is not permitted at the current
        state of the content model.
        """
        # @todo: Allow caller to provide default element use; it's available
        # in saxer.
        element_decl = kw.get('_element_decl', None)
        maybe_element = kw.get('_maybe_element', True)
        location = kw.get('_location', None)
        if self._isNil():
            raise pyxb.ContentInNilInstanceError(self, value, location)
        fallback_namespace = kw.get('_fallback_namespace', None)
        require_validation = kw.get('_require_validation', self._validationConfig.forBinding)
        from_xml = kw.get('_from_xml', False)
        element_binding = None
        if element_decl is not None:
            from pyxb.binding import content
            assert isinstance(element_decl, content.ElementDeclaration)
            element_binding = element_decl.elementBinding()
            assert element_binding is not None
        # Convert the value if it's XML and we recognize it.
        if isinstance(value, xml.dom.Node):
            from_xml = True
            assert maybe_element
            assert element_binding is None
            node = value
            require_validation = pyxb.GlobalValidationConfig.forBinding
            if xml.dom.Node.COMMENT_NODE == node.nodeType:
                # @todo: Note that we're allowing comments inside the bodies
                # of simple content elements, which isn't really Hoyle.
                return self
            if node.nodeType in (xml.dom.Node.TEXT_NODE, xml.dom.Node.CDATA_SECTION_NODE):
                # Text nodes become character data, never element content.
                value = node.data
                maybe_element = False
            else:
                # Do type conversion here
                assert xml.dom.Node.ELEMENT_NODE == node.nodeType
                expanded_name = pyxb.namespace.ExpandedName(node, fallback_namespace=fallback_namespace)
                (element_binding, element_decl) = self._ElementBindingDeclForName(expanded_name)
                if element_binding is not None:
                    # If we have an element binding, we need to use it because
                    # it knows how to resolve substitution groups.
                    value = element_binding._createFromDOM(node, expanded_name, _fallback_namespace=fallback_namespace)
                else:
                    # If we don't have an element binding, we might still be
                    # able to convert this if it has an xsi:type attribute
                    # that names a valid type.
                    xsi_type = XSI.type.getAttribute(node)
                    try_create = False
                    if xsi_type is not None:
                        ns_ctx = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(node)
                        (try_create, type_class) = XSI._InterpretTypeAttribute(xsi_type, ns_ctx, fallback_namespace, None)
                    if try_create:
                        value = element.CreateDOMBinding(node, None, _fallback_namespace=fallback_namespace)
                    else:
                        _log.warning('Unable to convert DOM node %s at %s to binding', expanded_name, getattr(node, 'location', '[UNAVAILABLE]'))
        # Whitespace-only character data in an element-only or empty type is
        # silently dropped.
        if (not maybe_element) and isinstance(value, basestring) and (self._ContentTypeTag in (self._CT_EMPTY, self._CT_ELEMENT_ONLY)):
            if (0 == len(value.strip())) and not self._isNil():
                return self
        if maybe_element and (self.__automatonConfiguration is not None):
            # Allows element content.
            if not require_validation:
                if element_decl is not None:
                    element_decl.setOrAppend(self, value)
                    return self
                if self.__wildcardElements is not None:
                    self._appendWildcardElement(value)
                    return self
                raise pyxb.StructuralBadDocumentError(container=self, content=value)
            # Attempt to place the value based on the content model
            num_cand = self.__automatonConfiguration.step(value, element_decl)
            if 1 <= num_cand:
                # Resolution was successful (possibly non-deterministic)
                return self
        # We couldn't place this thing. If it's element content, it has
        # to be placed. Is it element content?
        #
        # If it's bound to an element, it's element content.
        #
        # Complex type instance? Element content.
        #
        # Uninterpretable DOM nodes or binding wrappers? Element content.
        #
        # Simple type definition? Well, if the thing we're trying to fill
        # accepts mixed content or has simple content, technically we
        # could convert the value to text and use it. So that's not
        # element content.
        if ((element_binding is not None)
            or isinstance(value, (xml.dom.Node, complexTypeDefinition, pyxb.BIND))
            or (isinstance(value, simpleTypeDefinition) and not (self._IsSimpleTypeContent() or self._IsMixed()))):
            # Element content. If it has an automaton we can provide more
            # information. If it doesn't, it must consume text and we should
            # use a different exception.
            if self.__automatonConfiguration:
                raise pyxb.UnrecognizedContentError(self, self.__automatonConfiguration, value, location)
            raise pyxb.NonElementValidationError(value, location)
        # We have something that doesn't seem to be an element. Are we
        # expecting simple content?
        if self._IsSimpleTypeContent():
            if self.__content is not None:
                raise pyxb.ExtraSimpleContentError(self, value)
            if not self._isNil():
                if not isinstance(value, self._TypeDefinition):
                    value = self._TypeDefinition.Factory(value, _from_xml=from_xml)
                self.__setContent(value)
                if require_validation:
                    # NB: This only validates the value, not any associated
                    # attributes, which is correct to be parallel to complex
                    # content validation.
                    self.xsdConstraintsOK(location)
            return self
        # Do we allow non-element content?
        if not self._IsMixed():
            raise pyxb.MixedContentError(self, value, location)
        # It's character information.
        self._addContent(NonElementContent(value))
        return self
def _appendWildcardElement (self, value):
self._addContent(ElementContent(value, None))
self.__wildcardElements.append(value)
def extend (self, value_list, _fallback_namespace=None, _from_xml=False, _location=None):
"""Invoke L{append} for each value in the list, in turn."""
[ self.append(_v, _fallback_namespace=_fallback_namespace, _from_xml=_from_xml, _location=_location) for _v in value_list ]
return self
def __setContent (self, value):
self.__content = value
return self.__content
def _addContent (self, wrapped_value):
# This assert is inadequate in the case of plural/non-plural elements with an STD_list base type.
# Trust that validation elsewhere was done correctly.
#assert self._IsMixed() or (not self._performValidation()) or isinstance(child, _TypeBinding_mixin) or isinstance(child, types.StringTypes), 'Unrecognized child %s type %s' % (child, type(child))
assert not (self._ContentTypeTag in (self._CT_EMPTY, self._CT_SIMPLE))
assert isinstance(wrapped_value, _Content)
self.__content.append(wrapped_value)
if isinstance(wrapped_value, ElementContent):
value = wrapped_value.value
ed = wrapped_value.elementDeclaration
if isinstance(value, _TypeBinding_mixin) and (ed is not None) and (value._element() is None):
assert isinstance(ed.elementBinding(), element)
value._setElement(ed.elementBinding())
@classmethod
def _IsMixed (cls):
return (cls._CT_MIXED == cls._ContentTypeTag)
def _finalizeContentModel (self):
# Override parent implementation.
if self.__automatonConfiguration:
self.__automatonConfiguration.resolveNondeterminism()
def _postDOMValidate (self):
# It's probably finalized already, but just in case...
self._finalizeContentModel()
if self._validationConfig.forBinding:
# @todo isNil should verify that no content is present.
if (not self._isNil()) and (self.__automatonConfiguration is not None):
if not self.__automatonConfiguration.isAccepting():
if self._IsSimpleTypeContent():
raise pyxb.SimpleContentAbsentError(self, self._location())
self.__automatonConfiguration.diagnoseIncompleteContent()
self._validateAttributes()
return self
def _setDOMFromAttributes (self, dom_support, element):
"""Add any appropriate attributes from this instance into the DOM element."""
for au in self._AttributeMap.itervalues():
if pyxb.GlobalValidationConfig.forDocument:
au.validate(self)
au.addDOMAttribute(dom_support, self, element)
return element
def _toDOM_csc (self, dom_support, parent):
"""Create a DOM element with the given tag holding the content of this instance."""
element = parent
self._setDOMFromAttributes(dom_support, element)
if self._isNil():
pass
elif self._CT_EMPTY == self._ContentTypeTag:
pass
elif self._CT_SIMPLE == self._ContentTypeTag:
if self.__content is None:
raise pyxb.SimpleContentAbsentError(self, self._location())
dom_support.appendTextChild(self.value().xsdLiteral(), element)
else:
if pyxb.GlobalValidationConfig.forDocument:
order = self._validatedChildren()
else:
order = self.__childrenForDOM()
for content in order:
assert content.value != self
if isinstance(content, NonElementContent):
dom_support.appendTextChild(content.value, element)
continue
if content.elementDeclaration is None:
if isinstance(content.value, xml.dom.Node):
dom_support.appendChild(content.value, element)
else:
content.value.toDOM(dom_support, parent)
else:
content.elementDeclaration.toDOM(dom_support, parent, content.value)
mixed_content = self.orderedContent()
for mc in mixed_content:
pass
return getattr(super(complexTypeDefinition, self), '_toDOM_csc', lambda *_args,**_kw: dom_support)(dom_support, parent)
@classmethod
def _IsSimpleTypeContent (cls):
"""CTDs with simple content are simple; other CTDs are not."""
return cls._CT_SIMPLE == cls._ContentTypeTag
@classmethod
def _description (cls, name_only=False, user_documentation=True):
name = cls._Name()
if name_only:
return name
desc = [ name ]
if cls._CT_EMPTY == cls._ContentTypeTag:
desc.append(', empty content')
elif cls._CT_SIMPLE == cls._ContentTypeTag:
desc.extend([', simple content type ', cls._TypeDefinition._description(name_only=True)])
else:
if cls._CT_MIXED == cls._ContentTypeTag:
desc.append(', mixed content')
else:
assert cls._CT_ELEMENT_ONLY == cls._ContentTypeTag
desc.append(', element-only content')
if (0 < len(cls._AttributeMap)) or (cls._AttributeWildcard is not None):
desc.append("\nAttributes:\n ")
desc.append("\n ".join([ _au._description(user_documentation=False) for _au in cls._AttributeMap.itervalues() ]))
if cls._AttributeWildcard is not None:
desc.append("\n Wildcard attribute(s)")
if (0 < len(cls._ElementMap)) or cls._HasWildcardElement:
desc.append("\nElements:\n ")
desc.append("\n ".join([ _eu._description(user_documentation=False) for _eu in cls._ElementMap.itervalues() ]))
if cls._HasWildcardElement:
desc.append("\n Wildcard element(s)")
return ''.join(desc)
## Local Variables:
## fill-column:78
## End:
|
webmedic/booker
|
refs/heads/master
|
src/gdata/tlslite/utils/Python_AES.py
|
48
|
"""Pure-Python AES implementation."""
from .cryptomath import *
from .AES import *
from .rijndael import rijndael
def new(key, mode, IV):
    """Construct a pure-Python AES cipher object for the given key, mode,
    and initialization vector."""
    cipher = Python_AES(key, mode, IV)
    return cipher
class Python_AES(AES):
    """CBC-mode AES built on the pure-Python rijndael implementation.

    Inputs to encrypt/decrypt must be a whole number of 16-byte blocks;
    the stored IV is advanced after each call so successive calls chain.
    """
    def __init__(self, key, mode, IV):
        AES.__init__(self, key, mode, IV, "python")
        self.rijndael = rijndael(key, 16)
        self.IV = IV

    def encrypt(self, plaintext):
        """CBC-encrypt plaintext (length must be a multiple of 16)."""
        AES.encrypt(self, plaintext)
        plaintextBytes = stringToBytes(plaintext)
        chainBytes = stringToBytes(self.IV)
        # CBC Mode: For each block...
        # NOTE: use floor division; the original '/' breaks range() under
        # Python 3 or "from __future__ import division" (float result).
        for x in range(len(plaintextBytes) // 16):
            # XOR with the chaining block
            blockBytes = plaintextBytes[x*16 : (x*16)+16]
            for y in range(16):
                blockBytes[y] ^= chainBytes[y]
            blockString = bytesToString(blockBytes)
            # Encrypt it
            encryptedBytes = stringToBytes(self.rijndael.encrypt(blockString))
            # Overwrite the input with the output
            for y in range(16):
                plaintextBytes[(x*16)+y] = encryptedBytes[y]
            # Set the next chaining block
            chainBytes = encryptedBytes
        self.IV = bytesToString(chainBytes)
        return bytesToString(plaintextBytes)

    def decrypt(self, ciphertext):
        """CBC-decrypt ciphertext (length must be a multiple of 16)."""
        AES.decrypt(self, ciphertext)
        ciphertextBytes = stringToBytes(ciphertext)
        chainBytes = stringToBytes(self.IV)
        # CBC Mode: For each block...
        for x in range(len(ciphertextBytes) // 16):
            # Decrypt it
            blockBytes = ciphertextBytes[x*16 : (x*16)+16]
            blockString = bytesToString(blockBytes)
            decryptedBytes = stringToBytes(self.rijndael.decrypt(blockString))
            # XOR with the chaining block and overwrite the input with output
            for y in range(16):
                decryptedBytes[y] ^= chainBytes[y]
                ciphertextBytes[(x*16)+y] = decryptedBytes[y]
            # Set the next chaining block (the pre-decryption ciphertext copy)
            chainBytes = blockBytes
        self.IV = bytesToString(chainBytes)
        return bytesToString(ciphertextBytes)
|
vicky2135/lucious
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/django/test/utils.py
|
28
|
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.signals import request_started
from django.db import reset_queries
from django.db.models.options import Options
from django.http import request
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils import six
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
# Public API of this module.
__all__ = (
    'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
    'modify_settings', 'override_settings',
    'requires_tz_support',
    'setup_test_environment', 'teardown_test_environment',
)
# True when the platform can switch time zones via time.tzset() (POSIX only).
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
    """Wrap a value so equality comparisons tolerate rounding error.

    Two values compare equal when their difference rounds to zero at
    ``places`` decimal places.
    """
    def __init__(self, val, places=7):
        self.val = val
        self.places = places

    def __repr__(self):
        return repr(self.val)

    def __eq__(self, other):
        if self.val == other:
            return True
        delta = abs(self.val - other)
        return round(delta, self.places) == 0
class ContextList(list):
    """A wrapper that provides direct key access to context items contained
    in a list of context objects.
    """
    def __getitem__(self, key):
        if not isinstance(key, six.string_types):
            return super(ContextList, self).__getitem__(key)
        # String keys: search each subcontext in order.
        for subcontext in self:
            if key in subcontext:
                return subcontext[key]
        raise KeyError(key)

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        return True

    def keys(self):
        """
        Flattened keys of subcontexts.
        """
        flattened = set()
        for subcontext in self:
            for ctx_dict in subcontext:
                flattened |= set(ctx_dict.keys())
        return flattened
def instrumented_test_render(self, context):
    """
    An instrumented Template render method that emits the template_rendered
    signal so the test-system Client can intercept renders.
    """
    template_rendered.send(sender=self, template=self, context=context)
    rendered = self.nodelist.render(context)
    return rendered
def setup_test_environment():
    """
    Perform global pre-test setup, such as installing the instrumented template
    renderer and setting the email backend to the locmem email backend.
    """
    Template._original_render = Template._render
    Template._render = instrumented_test_render
    # Storing previous values in the settings module itself is problematic.
    # Store them in arbitrary (but related) modules instead. See #20636.
    mail._original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
    request._original_allowed_hosts = settings.ALLOWED_HOSTS
    settings.ALLOWED_HOSTS = ['*']
    # Sent messages are collected here instead of being delivered.
    mail.outbox = []
    # Deactivate any active translation so tests start from the default locale.
    deactivate()
def teardown_test_environment():
    """
    Perform any global post-test teardown, such as restoring the original
    template renderer and restoring the email sending functions.
    """
    Template._render = Template._original_render
    del Template._original_render
    settings.EMAIL_BACKEND = mail._original_email_backend
    del mail._original_email_backend
    settings.ALLOWED_HOSTS = request._original_allowed_hosts
    del request._original_allowed_hosts
    # Drop the collected messages; setup_test_environment() recreates outbox.
    del mail.outbox
def get_runner(settings, test_runner_class=None):
    """Import and return the test-runner class named by the dotted path
    ``test_runner_class`` (default: ``settings.TEST_RUNNER``)."""
    if not test_runner_class:
        test_runner_class = settings.TEST_RUNNER
    parts = test_runner_class.split('.')
    # Allow for Python 2.5 relative paths
    module_name = '.'.join(parts[:-1]) if len(parts) > 1 else '.'
    module = __import__(module_name, {}, {}, force_str(parts[-1]))
    return getattr(module, parts[-1])
class TestContextDecorator(object):
    """
    A base class usable either as a context manager during tests, or as a
    decorator on a test function or unittest.TestCase subclass, to apply
    temporary alterations.

    `attr_name`: attribute assigned the return value of enable() if used as
    a class decorator.

    `kwarg_name`: keyword argument passing the return value of enable() if
    used as a function decorator.
    """
    def __init__(self, attr_name=None, kwarg_name=None):
        self.attr_name = attr_name
        self.kwarg_name = kwarg_name

    def enable(self):
        raise NotImplementedError

    def disable(self):
        raise NotImplementedError

    def __enter__(self):
        return self.enable()

    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()

    def decorate_class(self, cls):
        if not issubclass(cls, TestCase):
            raise TypeError('Can only decorate subclasses of unittest.TestCase')
        original_setUp = cls.setUp
        original_tearDown = cls.tearDown

        def setUp(inner_self):
            context = self.enable()
            if self.attr_name:
                setattr(inner_self, self.attr_name, context)
            original_setUp(inner_self)

        def tearDown(inner_self):
            original_tearDown(inner_self)
            self.disable()

        cls.setUp = setUp
        cls.tearDown = tearDown
        return cls

    def decorate_callable(self, func):
        @wraps(func, assigned=available_attrs(func))
        def inner(*args, **kwargs):
            with self as context:
                if self.kwarg_name:
                    kwargs[self.kwarg_name] = context
                return func(*args, **kwargs)
        return inner

    def __call__(self, decorated):
        if isinstance(decorated, type):
            return self.decorate_class(decorated)
        if callable(decorated):
            return self.decorate_callable(decorated)
        raise TypeError('Cannot decorate object of type %s' % type(decorated))
class override_settings(TestContextDecorator):
    """
    Acts as either a decorator or a context manager. If it's a decorator it
    takes a function and returns a wrapped function. If it's a contextmanager
    it's used with the ``with`` statement. In either event entering/exiting
    are called before and after, respectively, the function/block is executed.
    """
    def __init__(self, **kwargs):
        self.options = kwargs
        super(override_settings, self).__init__()

    def enable(self):
        # Keep this code at the beginning to leave the settings unchanged
        # in case it raises an exception because INSTALLED_APPS is invalid.
        if 'INSTALLED_APPS' in self.options:
            try:
                apps.set_installed_apps(self.options['INSTALLED_APPS'])
            except Exception:
                apps.unset_installed_apps()
                raise
        override = UserSettingsHolder(settings._wrapped)
        for name, value in self.options.items():
            setattr(override, name, value)
        self.wrapped = settings._wrapped
        settings._wrapped = override
        for name, value in self.options.items():
            setting_changed.send(
                sender=settings._wrapped.__class__,
                setting=name, value=value, enter=True,
            )

    def disable(self):
        if 'INSTALLED_APPS' in self.options:
            apps.unset_installed_apps()
        settings._wrapped = self.wrapped
        del self.wrapped
        for name in self.options:
            current = getattr(settings, name, None)
            setting_changed.send(
                sender=settings._wrapped.__class__,
                setting=name, value=current, enter=False,
            )

    def save_options(self, test_func):
        if test_func._overridden_settings is None:
            test_func._overridden_settings = self.options
        else:
            # Duplicate dict to prevent subclasses from altering their parent.
            test_func._overridden_settings = dict(
                test_func._overridden_settings, **self.options)

    def decorate_class(self, cls):
        from django.test import SimpleTestCase
        if not issubclass(cls, SimpleTestCase):
            raise ValueError(
                "Only subclasses of Django SimpleTestCase can be decorated "
                "with override_settings")
        self.save_options(cls)
        return cls
class modify_settings(override_settings):
    """
    Like override_settings, but makes it possible to append, prepend or remove
    items instead of redefining the entire list.
    """
    def __init__(self, *args, **kwargs):
        if args:
            # Hack used when instantiating from SimpleTestCase.setUpClass.
            assert not kwargs
            self.operations = args[0]
        else:
            assert not args
            self.operations = list(kwargs.items())
        # NOTE: super(override_settings, ...) is deliberate -- it skips
        # override_settings.__init__ (which would set self.options from
        # kwargs); self.options is computed later, in enable().
        super(override_settings, self).__init__()
    def save_options(self, test_func):
        if test_func._modified_settings is None:
            test_func._modified_settings = self.operations
        else:
            # Duplicate list to prevent subclasses from altering their parent.
            test_func._modified_settings = list(
                test_func._modified_settings) + self.operations
    def enable(self):
        # Build self.options from the requested list operations, then let
        # override_settings.enable() apply them.
        self.options = {}
        for name, operations in self.operations:
            try:
                # When called from SimpleTestCase.setUpClass, values may be
                # overridden several times; cumulate changes.
                value = self.options[name]
            except KeyError:
                value = list(getattr(settings, name, []))
            for action, items in operations.items():
                # items may be a single value or an iterable.
                if isinstance(items, six.string_types):
                    items = [items]
                if action == 'append':
                    value = value + [item for item in items if item not in value]
                elif action == 'prepend':
                    value = [item for item in items if item not in value] + value
                elif action == 'remove':
                    value = [item for item in value if item not in items]
                else:
                    raise ValueError("Unsupported action: %s" % action)
            self.options[name] = value
        super(modify_settings, self).enable()
class override_system_checks(TestContextDecorator):
    """
    Acts as a decorator. Overrides list of registered system checks.
    Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
    you also need to exclude its system checks.
    """
    def __init__(self, new_checks, deployment_checks=None):
        from django.core.checks.registry import registry
        self.registry = registry
        self.new_checks = new_checks
        self.deployment_checks = deployment_checks
        super(override_system_checks, self).__init__()

    def enable(self):
        registry = self.registry
        self.old_checks = registry.registered_checks
        registry.registered_checks = self.new_checks
        self.old_deployment_checks = registry.deployment_checks
        if self.deployment_checks is not None:
            registry.deployment_checks = self.deployment_checks

    def disable(self):
        self.registry.registered_checks = self.old_checks
        self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
    """Tries to do a 'xml-comparison' of want and got. Plain string
    comparison doesn't always work because, for example, attribute
    ordering should not be important. Comment nodes are not considered in the
    comparison. Leading and trailing whitespace is ignored on both chunks.
    Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')

    def norm_whitespace(v):
        return _norm_whitespace_re.sub(' ', v)

    def child_text(element):
        return ''.join(
            c.data for c in element.childNodes if c.nodeType == Node.TEXT_NODE
        )

    def children(element):
        return [
            c for c in element.childNodes if c.nodeType == Node.ELEMENT_NODE
        ]

    def norm_child_text(element):
        return norm_whitespace(child_text(element))

    def attrs_dict(element):
        return dict(element.attributes.items())

    def check_element(want_element, got_element):
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        return all(
            check_element(w, g) for w, g in zip(want_children, got_children)
        )

    def first_node(document):
        # Skip comments so a leading comment doesn't shadow the root element.
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node

    want, got = strip_quotes(want, got)
    want = want.strip().replace('\\n', '\n')
    got = got.strip().replace('\\n', '\n')
    # If the string is not a complete xml document, we may need to add a
    # root element. This allow us to compare fragments, like "<foo/><bar/>"
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got
    # Parse the want and got strings, and compare the parsings.
    return check_element(first_node(parseString(want)), first_node(parseString(got)))
def strip_quotes(want, got):
    """
    Strip surrounding quotes from the doctest output values *want* and *got*.

    Quotes (or a ``u`` prefix plus quotes) are removed only when *both*
    strings are quoted in the same way, so an unquoted value never gets
    compared against an accidentally unwrapped one.

    >>> strip_quotes("'foo'", "'foo'")
    ('foo', 'foo')
    >>> strip_quotes('u"foo"', 'u"foo"')
    ('foo', 'foo')
    >>> strip_quotes("'foo'", 'foo')
    ("'foo'", 'foo')
    """
    def is_quoted_string(s):
        # Matches 'foo' or "foo" (after trimming surrounding whitespace).
        s = s.strip()
        return len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'")

    def is_quoted_unicode(s):
        # Matches u'foo' or u"foo" (Python 2 unicode repr).
        s = s.strip()
        return len(s) >= 3 and s[0] == 'u' and s[1] == s[-1] and s[1] in ('"', "'")

    if is_quoted_string(want) and is_quoted_string(got):
        want = want.strip()[1:-1]
        got = got.strip()[1:-1]
    elif is_quoted_unicode(want) and is_quoted_unicode(got):
        want = want.strip()[2:-1]
        got = got.strip()[2:-1]
    return want, got
def str_prefix(s):
    """Interpolate the native string-literal prefix into *s*: replaces
    ``%(_)s`` with '' on Python 3 and 'u' on Python 2."""
    prefix = '' if six.PY3 else 'u'
    return s % {'_': prefix}
class CaptureQueriesContext(object):
    """
    Context manager that captures queries executed by the specified connection.

    The captured queries are exposed through the ``captured_queries``
    property, and the context object itself supports iteration, indexing
    and ``len()`` over them.
    """
    def __init__(self, connection):
        # Database connection wrapper whose queries_log is inspected.
        self.connection = connection
    def __iter__(self):
        return iter(self.captured_queries)
    def __getitem__(self, index):
        return self.captured_queries[index]
    def __len__(self):
        return len(self.captured_queries)
    @property
    def captured_queries(self):
        # Slice of connection.queries recorded since __enter__;
        # final_queries is None while still inside the block, so the
        # slice stays open-ended until __exit__ runs.
        return self.connection.queries[self.initial_queries:self.final_queries]
    def __enter__(self):
        # Force query logging on, remembering the previous setting so it
        # can be restored in __exit__.
        self.force_debug_cursor = self.connection.force_debug_cursor
        self.connection.force_debug_cursor = True
        self.initial_queries = len(self.connection.queries_log)
        self.final_queries = None
        # NOTE(review): presumably reset_queries would clear the query log
        # on request_started mid-capture; disconnecting prevents that.
        request_started.disconnect(reset_queries)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.force_debug_cursor = self.force_debug_cursor
        request_started.connect(reset_queries)
        if exc_type is not None:
            # On error, leave final_queries unset and let the exception
            # propagate (returning None does not suppress it).
            return
        self.final_queries = len(self.connection.queries_log)
class ignore_warnings(TestContextDecorator):
    """
    Decorator/context manager that silences warnings while active.

    Keyword arguments are forwarded to the warnings filter: when 'message'
    or 'module' is supplied, ``warnings.filterwarnings`` is used (those
    keywords only exist there); otherwise ``warnings.simplefilter``.
    """
    def __init__(self, **kwargs):
        self.ignore_kwargs = kwargs
        needs_full_filter = 'message' in kwargs or 'module' in kwargs
        self.filter_func = (
            warnings.filterwarnings if needs_full_filter
            else warnings.simplefilter
        )
        super(ignore_warnings, self).__init__()

    def enable(self):
        # Save the current filter state; restored by disable().
        self.catch_warnings = warnings.catch_warnings()
        self.catch_warnings.__enter__()
        self.filter_func('ignore', **self.ignore_kwargs)

    def disable(self):
        self.catch_warnings.__exit__(*sys.exc_info())
@contextmanager
def patch_logger(logger_name, log_level, log_kwargs=False):
    """
    Temporarily replace one logging method of *logger_name* (e.g. 'info')
    with a stub that records every call, yielding the list of recorded
    messages. Each message is %-formatted with its positional args; when
    *log_kwargs* is true, entries are ``(message, kwargs)`` tuples instead.
    """
    recorded = []

    def fake_log(msg, *args, **kwargs):
        rendered = msg % args
        recorded.append((rendered, kwargs) if log_kwargs else rendered)

    target_logger = logging.getLogger(logger_name)
    original_method = getattr(target_logger, log_level)
    setattr(target_logger, log_level, fake_log)
    try:
        yield recorded
    finally:
        # Always restore the real logging method, even on error.
        setattr(target_logger, log_level, original_method)
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
# NOTE(review): TZ_SUPPORT is defined elsewhere in this module — presumably
# based on hasattr(time, 'tzset'); confirm against the module's imports.
requires_tz_support = skipUnless(
    TZ_SUPPORT,
    "This test relies on the ability to run a program in an arbitrary "
    "time zone, but your operating system isn't able to do that."
)
@contextmanager
def extend_sys_path(*paths):
    """Temporarily append *paths* to sys.path, restoring the original
    path list on exit."""
    original_path = sys.path[:]
    sys.path.extend(paths)
    try:
        yield
    finally:
        sys.path = original_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
    """Run the wrapped block with *lru_cache_object* emptied both on entry
    and on exit, so cached state never leaks across tests."""
    lru_cache_object.cache_clear()
    try:
        yield
    finally:
        lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
    """Temporarily replace the sys stream *stream_name* (e.g. "stdout")
    with a StringIO and yield it; the real stream is restored on exit.
    This helper and the ``captured_std*`` wrappers are copied from
    CPython's ``test.support`` module."""
    original_stream = getattr(sys, stream_name)
    setattr(sys, stream_name, six.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, original_stream)
def captured_stdout():
    """Capture everything written to sys.stdout:

        with captured_stdout() as stdout:
            print("hello")
        self.assertEqual(stdout.getvalue(), "hello\n")
    """
    return captured_output("stdout")
def captured_stderr():
    """Capture everything written to sys.stderr:

        with captured_stderr() as stderr:
            print("hello", file=sys.stderr)
        self.assertEqual(stderr.getvalue(), "hello\n")
    """
    return captured_output("stderr")
def captured_stdin():
    """Replace sys.stdin so test code can feed it canned input:

        with captured_stdin() as stdin:
            stdin.write('hello\n')
            stdin.seek(0)
            # call test code that consumes from sys.stdin
            captured = input()
        self.assertEqual(captured, "hello")
    """
    return captured_output("stdin")
def reset_warning_registry():
    """
    Clear the ``__warningregistry__`` of every imported module.

    Required in some tests because of a Python bug (fixed in 3.4.2) that
    prevents warnings.simplefilter("always") from always making warnings
    appear: http://bugs.python.org/issue4180
    """
    registry_attr = "__warningregistry__"
    for module in sys.modules.values():
        # Some sys.modules entries may be None; hasattr handles that.
        if hasattr(module, registry_attr):
            getattr(module, registry_attr).clear()
@contextmanager
def freeze_time(t):
    """
    Freeze time.time() at the value *t* for the duration of the block by
    monkeypatching the ``time`` module's ``time`` function. Code that did
    ``from time import time`` holds its own reference and is unaffected.
    Internal helper for Django's test suite, not a public API.
    """
    original_time_fn = time.time
    time.time = lambda: t
    try:
        yield
    finally:
        time.time = original_time_fn
def require_jinja2(test_func):
    """
    Skip *test_func* when Jinja2 isn't installed; otherwise run it with a
    Jinja2 template backend configured alongside the regular Django one.
    """
    test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
    templates = [{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
    }, {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'APP_DIRS': True,
        'OPTIONS': {'keep_trailing_newline': True},
    }]
    return override_settings(TEMPLATES=templates)(test_func)
class override_script_prefix(TestContextDecorator):
    """
    Decorator or context manager to temporarily override the script prefix.
    """
    def __init__(self, prefix):
        # URL script prefix to install while active.
        self.prefix = prefix
        super(override_script_prefix, self).__init__()
    def enable(self):
        # Remember the current prefix so disable() can restore it.
        self.old_prefix = get_script_prefix()
        set_script_prefix(self.prefix)
    def disable(self):
        set_script_prefix(self.old_prefix)
class LoggingCaptureMixin(object):
    """
    TestCase mixin that redirects the 'django' logger's first handler into
    a StringIO for the duration of each test; the captured text is
    available on the ``logger_output`` attribute.
    """
    def setUp(self):
        self.logger = logging.getLogger('django')
        self.logger_output = six.StringIO()
        # Keep the handler's real stream so tearDown() can restore it.
        self.old_stream = self.logger.handlers[0].stream
        self.logger.handlers[0].stream = self.logger_output

    def tearDown(self):
        self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
    """
    Decorator/context manager that registers models defined in its scope
    into a fresh, isolated app registry instead of the global one.

    Positional arguments name the installed apps the isolated registry
    should contain. Two optional keyword arguments are accepted:

    `attr_name`: attribute assigned the isolated registry if used as a
        class decorator.
    `kwarg_name`: keyword argument passing the isolated registry if used
        as a function decorator.
    """
    def __init__(self, *installed_apps, **kwargs):
        self.installed_apps = installed_apps
        super(isolate_apps, self).__init__(**kwargs)

    def enable(self):
        # Stash the global registry and install a fresh one as the default
        # registry used when new model classes are declared.
        self.old_apps = Options.default_apps
        isolated_registry = Apps(self.installed_apps)
        Options.default_apps = isolated_registry
        return isolated_registry

    def disable(self):
        Options.default_apps = self.old_apps
def tag(*tags):
    """Attach *tags* to a test class or method as its ``tags`` set."""
    def decorator(obj):
        obj.tags = set(tags)
        return obj
    return decorator
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.