| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
| pferreir/indico-plugins | refs/heads/master | importer/indico_importer/util.py | 2 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
def convert_dt_tuple(dt_tuple):
    split_datetime = dt_tuple[0].split('T')
    if len(split_datetime) > 1:
        return {'date': split_datetime[0],
                'time': split_datetime[1]}
    else:
        return {'date': split_datetime[0],
                'time': '00:00'}
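# A minimal usage sketch (hypothetical inputs; assumes the tuple's first
# element is an ISO-8601-style timestamp, which is what the importer backends
# appear to pass in):
#
#   >>> convert_dt_tuple(('2015-06-01T14:30',))
#   {'date': '2015-06-01', 'time': '14:30'}
#   >>> convert_dt_tuple(('2015-06-01',))
#   {'date': '2015-06-01', 'time': '00:00'}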
|
| dcroc16/skunk_works | refs/heads/master | google_appengine/lib/django-1.5/django/template/response.py | 221 |
from django.http import HttpResponse
from django.template import loader, Context, RequestContext
from django.utils import six
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
mimetype=None):
        # It would seem obvious to call these next two members 'template' and
        # 'context', but those names are reserved as part of the test Client
        # API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self._post_render_callbacks = []
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super(SimpleTemplateResponse, self).__init__('', content_type, status,
mimetype)
        # _is_rendered tracks whether the template and context have been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""Pickling support function.
Ensures that the object can't be pickled before it has been
rendered, and that the pickled state only includes rendered
data, not the data used to construct the response.
"""
obj_dict = super(SimpleTemplateResponse, self).__getstate__()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"Accepts a template object, path-to-template or list of paths"
if isinstance(template, (list, tuple)):
return loader.select_template(template)
elif isinstance(template, six.string_types):
return loader.get_template(template)
else:
return template
def resolve_context(self, context):
"""Converts context data into a full Context object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
else:
return Context(context)
@property
def rendered_content(self):
"""Returns the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
content = template.render(context)
return content
def add_post_render_callback(self, callback):
"""Adds a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Renders (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Returns the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be iterated over.')
return super(SimpleTemplateResponse, self).__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be accessed.')
return super(SimpleTemplateResponse, self).content
@content.setter
def content(self, value):
"""Sets the content for the response
"""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + \
['_request', '_current_app']
def __init__(self, request, template, context=None, content_type=None,
status=None, mimetype=None, current_app=None):
# self.request gets over-written by django.test.client.Client - and
# unlike context_data and template_name the _request should not
# be considered part of the public API.
self._request = request
        # As a convenience we'll allow callers to provide current_app without
        # having to create the RequestContext directly
self._current_app = current_app
super(TemplateResponse, self).__init__(
template, context, content_type, status, mimetype)
def resolve_context(self, context):
"""Convert context data into a full RequestContext object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
return RequestContext(self._request, context, current_app=self._current_app)
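# A minimal usage sketch (hypothetical view and template name; rendering is
# deferred until middleware or the caller invokes render()):
#
#   def my_view(request):
#       response = TemplateResponse(request, 'hello.html', {'name': 'World'})
#       response.add_post_render_callback(lambda r: r)  # runs after baking
#       return response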
|
| sestrella/ansible | refs/heads/devel | lib/ansible/modules/network/fortios/fortios_system_arp_table.py | 13 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_arp_table
short_description: Configure ARP table in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and arp_table category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
            - Ensures that the FortiGate certificate is verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_arp_table:
description:
- Configure ARP table.
default: null
type: dict
suboptions:
id:
description:
- Unique integer ID of the entry.
required: true
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
ip:
description:
- IP address.
type: str
mac:
description:
- MAC address.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure ARP table.
fortios_system_arp_table:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_arp_table:
id: "3"
interface: "<your_own_value> (source system.interface.name)"
ip: "<your_own_value>"
mac: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_arp_table_data(json):
option_list = ['id', 'interface', 'ip',
'mac']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
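# A quick illustration (hypothetical payload): keys outside option_list are
# dropped, as are None values.
#
#   >>> filter_system_arp_table_data({'id': 3, 'ip': '10.0.0.1', 'vdom': 'root'})
#   {'id': 3, 'ip': '10.0.0.1'}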
def underscore_to_hyphen(data):
    if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
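# A quick illustration (hypothetical payload): keys are rewritten recursively,
# in dicts and in dicts nested inside lists; values are left untouched.
#
#   >>> underscore_to_hyphen({'arp_table': [{'interface_name': 'port1'}]})
#   {'arp-table': [{'interface-name': 'port1'}]}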
def system_arp_table(data, fos):
vdom = data['vdom']
state = data['state']
system_arp_table_data = data['system_arp_table']
filtered_data = underscore_to_hyphen(filter_system_arp_table_data(system_arp_table_data))
if state == "present":
return fos.set('system',
'arp-table',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system',
'arp-table',
mkey=filtered_data['id'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_arp_table']:
resp = system_arp_table(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_arp_table": {
"required": False, "type": "dict", "default": None,
"options": {
"id": {"required": True, "type": "int"},
"interface": {"required": False, "type": "str"},
"ip": {"required": False, "type": "str"},
"mac": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
| HAShihab/fathmm | refs/heads/master | cgi-bin/fathmm.py | 1 |
#!/usr/bin/python -u
import re
import math
import argparse
import ConfigParser
import MySQLdb
import MySQLdb.cursors
#
def map_position(domain, substitution):
    """
    Map the sequence position of a substitution onto the corresponding HMM
    position via the domain alignment; return None if the position falls
    outside the domain or maps to an insertion.
    """
if int(substitution[1:-1]) < int(domain['seq_begin']) or \
int(substitution[1:-1]) > int(domain['seq_end']):
return None
x = int(domain['seq_begin']) - 1
y = int(domain['hmm_begin']) - 1
for residue in list(domain['align']):
if residue.isupper() or residue.islower():
# upper-case (match)/lower-case (insertion) characters correspond
# to a sequence residue
x += 1
if residue.isupper() or residue == "-":
# upper-case (match)/gap (deletion) characters correspond to
# a HMM position
y += 1
if x == int(substitution[1:-1]):
if residue.isupper():
return str(y)
# unmapped residue (i.e. insertion)
return None
return None
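# A worked sketch (hypothetical domain record): with seq_begin=10, seq_end=12,
# hmm_begin=1 and align="AB-c", sequence residues 10 and 11 map to HMM
# positions 1 and 2, so map_position(domain, "A11G") returns "2", while
# position 12 falls on the lower-case insertion 'c' and returns None.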
#
def fetch_phenotype_prediction(Facade, Phenotype):
    """
    Fetch domain-phenotype associations for the most informative HMM in the
    facade; return a '|'-joined string of descriptions (or an empty string).
    """
Phenotypes = ""
if not Arg.phenotypes:
return Phenotypes
for x in sorted(Facade, key=lambda x:x['information'], reverse=True):
# use the most informative HMM
if x['accession']:
dbCursor.execute("select * from PHENOTYPES where accession='" + x['accession'] + "' and type='" + Phenotype + "' and origin=1 order by score")
Phenotypes = "|".join([ x['description'] for x in dbCursor.fetchall() ])
if Phenotypes:
break
return Phenotypes
#
def Process(Protein, Substitution, Weights=None, Cutoff=None, Phenotype=None):
    """
    Derive a prediction record for a single protein/substitution pair,
    preferring (weighted) domain-based predictions over sequence conservation;
    return a dict of output fields, or None if no prediction is available.
    """
Processed = {
'Prediction' : "",
'Score': "",
'Phenotype': "",
'HMM': "",
'Description': "",
'Position': "",
'W': "",
'M': "",
'D': "",
'O': "",
'Warning': "",
}
# fetch pre-computed sequence record
dbCursor.execute("select a.* from SEQUENCE a, PROTEIN b where a.id=b.id and b.name='" + Protein + "'")
Sequence = dbCursor.fetchone()
if not Sequence:
Processed['Warning'] = "No Sequence Record Found"
return Processed
# authenticate protein/substitution
Warning = None
if not Warning and not re.compile("^[ARNDCEQGHILKMFPSTWYV]\d+[ARNDCEQGHILKMFPSTWYV]$", re.IGNORECASE).match(Substitution):
Warning = "Invalid Substitution Format"
if not Warning and int(Substitution[1:-1]) > len(Sequence['sequence']):
Warning = "Invalid Substitution Position"
if not Warning and not Substitution[0] == Sequence['sequence'][int(Substitution[1:-1]) - 1]:
Warning = "Inconsistent Wild-Type Residue (Expected '" + Sequence['sequence'][int(Substitution[1:-1]) - 1] + "')"
if not Warning and Substitution[0] == Substitution[-1]:
Warning = "Synonymous Mutation"
if Warning:
Processed['Warning'] = Warning
return Processed
# fetch pre-computed domain assignment(s)
dbCursor.execute("select * from DOMAINS where id='" + str(Sequence['id']) + "' and " + Substitution[1:-1] + " between seq_begin and seq_end order by score")
Domain = dbCursor.fetchall()
Facade = []
for x in Domain:
residue = map_position(x, Substitution)
if residue:
dbCursor.execute("select a.*, b.* from PROBABILITIES a, LIBRARY b where a.id=b.id and a.id='" + str(x['hmm']) + "' and a.position='" + residue + "'")
Prob = dbCursor.fetchone()
if Prob:
Facade.append(Prob)
#
# fetch phenotype association(s)
if Phenotype:
Processed['Phenotype'] = fetch_phenotype_prediction(Facade, Phenotype)
# derive/return a prediction ...
if not Weights or Weights == "UNWEIGHTED":
# append non-domain-based/sequence conservation to the Facade
dbCursor.execute("select a.*, b.* from PROBABILITIES a, LIBRARY b where a.id=b.id and a.id='" + str(Sequence['id']) + "' and a.position='" + Substitution[1:-1] + "'")
Prob = dbCursor.fetchone()
if Prob:
Facade.append(Prob)
for x in sorted(Facade, key=lambda x:x['information'], reverse=True):
try:
Processed['HMM'] = x['id']
Processed['Description'] = x['description']
Processed['Position'] = x['position']
Processed['W'] = x[Substitution[0]]
Processed['M'] = x[Substitution[-1]]
Processed['D'] = ""
Processed['O'] = ""
Processed['Score'] = "%.2f" % math.log((Processed['M'] / (1.0 - Processed['M'])) / (Processed['W'] / (1.0 - Processed['W'])), 2)
Processed['Prediction'] = ""
if Cutoff:
if float(Processed['Score']) <= Cutoff: Processed['Prediction'] = "DAMAGING"
if float(Processed['Score']) > Cutoff: Processed['Prediction'] = "TOLERATED"
return Processed
except Exception, e:
# skip the rare occasion(s) when probabilities are zero
pass
else:
# prioritize domain-based predictions
for x in sorted(Facade, key=lambda x:x['information'], reverse=True):
try:
dbCursor.execute("select * from WEIGHTS where id='" + x['id'] + "' and type='" + Weights + "'")
w = \
dbCursor.fetchone()
if w:
Processed['HMM'] = x['id']
Processed['Description'] = x['description']
Processed['Position'] = x['position']
Processed['W'] = x[Substitution[0]]
Processed['M'] = x[Substitution[-1]]
Processed['D'] = w['disease'] + 1.0
Processed['O'] = w['other'] + 1.0
Processed['Score'] = "%.2f" % math.log(((1.0 - Processed['W']) * Processed['O']) / ((1.0 - Processed['M']) * Processed['D']), 2)
Processed['Prediction'] = ""
if Cutoff:
if Weights == "INHERITED":
if float(Processed['Score']) <= Cutoff: Processed['Prediction'] = "DAMAGING"
if float(Processed['Score']) > Cutoff: Processed['Prediction'] = "TOLERATED"
if Weights == "CANCER":
if float(Processed['Score']) <= Cutoff: Processed['Prediction'] = "CANCER"
if float(Processed['Score']) > Cutoff: Processed['Prediction'] = "PASSENGER/OTHER"
return Processed
except Exception, e:
# skip the rare occasion(s) when probabilities are zero
pass
# no domain-based prediction, use a non-domain-based/sequence conservation prediction
dbCursor.execute("select a.*, b.* from PROBABILITIES a, LIBRARY b where a.id=b.id and a.id='" + str(Sequence['id']) + "' and a.position='" + Substitution[1:-1] + "'")
Facade = dbCursor.fetchone()
if Facade:
try:
dbCursor.execute("select * from WEIGHTS where id='" + Facade['id'] + "' and type='" + Weights + "'")
w = \
dbCursor.fetchone()
if w:
Processed['HMM'] = Facade['id']
Processed['Description'] = Facade['description']
Processed['Position'] = Facade['position']
Processed['W'] = Facade[Substitution[0]]
Processed['M'] = Facade[Substitution[-1]]
Processed['D'] = w['disease'] + 1.0
Processed['O'] = w['other'] + 1.0
Processed['Score'] = "%.2f" % math.log(((1.0 - Processed['W']) * Processed['O']) / ((1.0 - Processed['M']) * Processed['D']), 2)
Processed['Prediction'] = ""
if Cutoff:
if Weights == "INHERITED":
if float(Processed['Score']) <= Cutoff: Processed['Prediction'] = "DAMAGING"
if float(Processed['Score']) > Cutoff: Processed['Prediction'] = "TOLERATED"
if Weights == "CANCER":
if float(Processed['Score']) <= Cutoff: Processed['Prediction'] = "CANCER"
if float(Processed['Score']) > Cutoff: Processed['Prediction'] = "PASSENGER/OTHER"
return Processed
except Exception, e:
# skip the rare occasion(s) when probabilities are zero
raise
return None
#
if __name__ == '__main__':
#
Config = ConfigParser.ConfigParser()
Config.read("./config.ini")
dbCursor = MySQLdb.connect(
host = str(Config.get("DATABASE", "HOST")),
port = int(Config.get("DATABASE", "PORT")),
user = str(Config.get("DATABASE", "USER")),
passwd = str(Config.get("DATABASE", "PASSWD")),
db = str(Config.get("DATABASE", "DB")),
compress = 1
).cursor(MySQLdb.cursors.DictCursor)
# parse program argument(s)
parser = argparse.ArgumentParser(
description = 'Functional Analysis through Hidden Markov Models',
add_help = False
)
parser.add_argument(
"-h",
"--help",
action = "help",
help = argparse.SUPPRESS
)
group = \
parser.add_argument_group("Required")
group.add_argument(
'fi',
metavar = '<F1>',
type = argparse.FileType("r"),
help = 'a file containing the mutation data to process'
)
group.add_argument(
'fo',
metavar = '<F2>',
type = argparse.FileType("w"),
help = 'where predictions/phenotype-associations will be written'
)
group = \
parser.add_argument_group("Options")
group.add_argument(
'-w',
dest = 'weights',
metavar = "<S>",
default = "INHERITED",
help = "use pathogenicity weights <S> when returning predictions"
)
group.add_argument(
'-t',
dest = 'threshold',
metavar = "<N>",
default = None,
type = float,
help = "use prediction threshold <N> when returning predictions"
)
group.add_argument(
'-p',
dest = 'phenotypes',
metavar = "<S>",
default = "",
help = "use phenotype ontology <S> when returning domain-phenotype associations"
); Arg = parser.parse_args()
if Arg.weights:
# authenticate prediction weights
dbCursor.execute("select distinct type from WEIGHTS")
if not Arg.weights.upper() in [ x['type'] for x in dbCursor.fetchall() ] + [ "UNWEIGHTED" ]:
parser.error("argument -w: invalid option: '{0}'".format(Arg.weights))
if Arg.threshold == None:
        # initialize prediction threshold
if Arg.weights.upper() == "UNWEIGHTED": Arg.threshold = -3.00
if Arg.weights.upper() == "INHERITED": Arg.threshold = -1.50
if Arg.weights.upper() == "CANCER": Arg.threshold = -0.75
if Arg.phenotypes:
# authenticate prediction phenotype
dbCursor.execute("select distinct type from PHENOTYPES")
if not Arg.phenotypes.upper() in [ x['type'] for x in dbCursor.fetchall() ]:
parser.error("argument -p: invalid option: '{0}'".format(Arg.phenotypes))
##
Arg.fo.write("\t".join([ "#",
"dbSNP ID",
"Protein ID",
"Substitution",
"Prediction",
"Score",
"Domain-Phenotype Association",
"Warning",
"HMM ID",
"HMM Description",
"HMM Pos.",
"HMM Prob. W.",
"HMM Prob. M.",
"HMM Weights D.",
"HMM Weights O."
]) + "\n")
idx = 1
for record in Arg.fi:
record = record.strip()
if record and not record.startswith("#"):
try:
if re.compile("^rs\d+$", re.IGNORECASE).match(record):
dbCursor.execute("select distinct * from VARIANTS where id='" + record + "'")
dbRecords = dbCursor.fetchall()
if not dbRecords:
Arg.fo.write(
"\t".join([ str(idx),
record,
"",
"",
"",
"",
"",
"No dbSNP Mapping(s)",
"",
"",
"",
"",
"",
"",
""
]) + "\n"
); idx += 1; continue
for x in dbRecords:
dbSNP = x['id']
Protein = x['protein']
Substitution = x['substitution']
Prediction = \
Process(Protein, Substitution, Weights=Arg.weights.upper(), Cutoff=Arg.threshold, Phenotype=Arg.phenotypes.upper())
if not Prediction:
Arg.fo.write(
"\t".join([ str(idx),
dbSNP,
Protein,
Substitution,
"",
"",
"",
"No Prediction Available",
"",
"",
"",
"",
"",
"",
""
]) + "\n"
); idx += 1; continue
Arg.fo.write(
"\t".join([ str(idx),
dbSNP,
Protein,
Substitution,
str(Prediction['Prediction']),
str(Prediction['Score']),
str(Prediction['Phenotype']),
str(Prediction['Warning']),
str(Prediction['HMM']),
str(Prediction['Description']),
str(Prediction['Position']),
str(Prediction['W']),
str(Prediction['M']),
str(Prediction['D']),
str(Prediction['O'])
]) + "\n"
); idx += 1; continue
else:
dbSNP = ""
Protein = record.upper().split()[0]
for Substitution in [ x.strip() for x in record.upper().split()[1].split(",") ]:
Prediction = \
Process(Protein, Substitution, Weights=Arg.weights.upper(), Cutoff=Arg.threshold, Phenotype=Arg.phenotypes.upper())
if not Prediction:
Arg.fo.write(
"\t".join([ str(idx),
dbSNP,
Protein,
Substitution,
"",
"",
"",
"No Prediction Available",
"",
"",
"",
"",
"",
"",
""
]) + "\n"
); idx += 1; continue
Arg.fo.write(
"\t".join([ str(idx),
dbSNP,
Protein,
Substitution,
str(Prediction['Prediction']),
str(Prediction['Score']),
str(Prediction['Phenotype']),
str(Prediction['Warning']),
str(Prediction['HMM']),
str(Prediction['Description']),
str(Prediction['Position']),
str(Prediction['W']),
str(Prediction['M']),
str(Prediction['D']),
str(Prediction['O'])
]) + "\n"
); idx += 1; continue
except Exception, e:
Arg.fo.write(
"\t".join([ str(idx),
"",
"",
"",
"",
"",
"",
"An Error Occured While Processing The Record: " + record,
"",
"",
"",
"",
"",
""
]) + "\n"
); idx += 1
|
| frewsxcv/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/serve/serve.py | 9 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import json
import os
import signal
import socket
import sys
import threading
import time
import traceback
import urllib2
import uuid
from collections import defaultdict, OrderedDict
from multiprocessing import Process, Event
from ..localpaths import repo_root
import sslutils
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve.logger import set_logger
from mod_pywebsocket import standalone as pywebsocket
def replace_end(s, old, new):
"""
Given a string `s` that ends with `old`, replace that occurrence of `old`
with `new`.
"""
assert s.endswith(old)
return s[:-len(old)] + new
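# e.g. replace_end("foo.any.html", ".any.html", ".any.js") -> "foo.any.js";
# the assertion guards against callers passing a path without the suffix.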
class WorkersHandler(object):
def __init__(self):
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
return self.handler(request, response)
def handle_request(self, request, response):
worker_path = replace_end(request.url_parts.path, ".worker", ".worker.js")
return """<!doctype html>
<meta charset=utf-8>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%s"));
</script>
""" % (worker_path,)
class AnyHtmlHandler(object):
def __init__(self):
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
return self.handler(request, response)
def handle_request(self, request, response):
test_path = replace_end(request.url_parts.path, ".any.html", ".any.js")
return """\
<!doctype html>
<meta charset=utf-8>
<script>
self.GLOBAL = {
isWindow: function() { return true; },
isWorker: function() { return false; },
};
</script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script src="%s"></script>
""" % (test_path,)
class AnyWorkerHandler(object):
def __init__(self):
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
return self.handler(request, response)
def handle_request(self, request, response):
test_path = replace_end(request.url_parts.path, ".any.worker.js", ".any.js")
return """\
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
};
importScripts("/resources/testharness.js");
importScripts("%s");
done();
""" % (test_path,)
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
subdomains = [u"www",
u"www1",
u"www2",
u"天気の良い日",
u"élève"]
class RoutesBuilder(object):
def __init__(self):
self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
("POST", "/tools/runner/update_manifest.py",
handlers.python_script_handler)]
self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
("*", "/tools/*", handlers.ErrorHandler(404)),
("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
("*", "/serve.py", handlers.ErrorHandler(404))]
self.static = [
("GET", "*.worker", WorkersHandler()),
("GET", "*.any.html", AnyHtmlHandler()),
("GET", "*.any.worker.js", AnyWorkerHandler()),
]
self.mountpoint_routes = OrderedDict()
self.add_mount_point("/", None)
def get_routes(self):
routes = self.forbidden_override + self.forbidden + self.static
# Using reversed here means that mount points that are added later
# get higher priority. This makes sense since / is typically added
# first.
for item in reversed(self.mountpoint_routes.values()):
routes.extend(item)
return routes
def add_static(self, path, format_args, content_type, route):
handler = handlers.StaticHandler(path, format_args, content_type)
self.static.append((b"GET", str(route), handler))
def add_mount_point(self, url_base, path):
url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"
self.mountpoint_routes[url_base] = []
routes = [("GET", "*.asis", handlers.AsIsHandler),
("*", "*.py", handlers.PythonScriptHandler),
("GET", "*", handlers.FileHandler)]
for (method, suffix, handler_cls) in routes:
self.mountpoint_routes[url_base].append(
(method,
b"%s%s" % (str(url_base) if url_base != "/" else "", str(suffix)),
handler_cls(base_path=path, url_base=url_base)))
def default_routes():
return RoutesBuilder().get_routes()
def setup_logger(level):
import logging
global logger
logger = logging.getLogger("web-platform-tests")
logging.basicConfig(level=getattr(logging, level.upper()))
set_logger(logger)
def open_socket(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if port != 0:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', port))
sock.listen(5)
return sock
def get_port():
free_socket = open_socket(0)
port = free_socket.getsockname()[1]
logger.debug("Going to use port %s" % port)
free_socket.close()
return port
class ServerProc(object):
def __init__(self):
self.proc = None
self.daemon = None
self.stop = Event()
def start(self, init_func, host, port, paths, routes, bind_hostname, external_config,
ssl_config, **kwargs):
self.proc = Process(target=self.create_daemon,
args=(init_func, host, port, paths, routes, bind_hostname,
external_config, ssl_config))
self.proc.daemon = True
self.proc.start()
def create_daemon(self, init_func, host, port, paths, routes, bind_hostname,
external_config, ssl_config, **kwargs):
try:
self.daemon = init_func(host, port, paths, routes, bind_hostname, external_config,
ssl_config, **kwargs)
except socket.error:
print("Socket error on port %s" % port, file=sys.stderr)
raise
except:
print(traceback.format_exc(), file=sys.stderr)
raise
if self.daemon:
try:
self.daemon.start(block=False)
try:
self.stop.wait()
except KeyboardInterrupt:
pass
except:
print(traceback.format_exc(), file=sys.stderr)
raise
def wait(self):
self.stop.set()
self.proc.join()
def kill(self):
self.stop.set()
self.proc.terminate()
self.proc.join()
def is_alive(self):
return self.proc.is_alive()
def check_subdomains(host, paths, bind_hostname, ssl_config):
port = get_port()
subdomains = get_subdomains(host)
wrapper = ServerProc()
wrapper.start(start_http_server, host, port, paths, default_routes(), bind_hostname,
None, ssl_config)
connected = False
for i in range(10):
try:
urllib2.urlopen("http://%s:%d/" % (host, port))
connected = True
break
except urllib2.URLError:
time.sleep(1)
if not connected:
logger.critical("Failed to connect to test server on http://%s:%s You may need to edit /etc/hosts or similar" % (host, port))
sys.exit(1)
for subdomain, (punycode, host) in subdomains.iteritems():
domain = "%s.%s" % (punycode, host)
try:
urllib2.urlopen("http://%s:%d/" % (domain, port))
except Exception as e:
logger.critical("Failed probing domain %s. You may need to edit /etc/hosts or similar." % domain)
sys.exit(1)
wrapper.wait()
def get_subdomains(host):
    # This assumes that the TLD is ASCII-only or already in punycode
return {subdomain: (subdomain.encode("idna"), host)
for subdomain in subdomains}
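# For example, u"élève".encode("idna") should give "xn--lve-6lad", so with a
# hypothetical host "web-platform.test" the subdomain probe URL becomes
# http://xn--lve-6lad.web-platform.test:<port>/.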
def start_servers(host, ports, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
servers = defaultdict(list)
for scheme, ports in ports.iteritems():
assert len(ports) == {"http":2}.get(scheme, 1)
for port in ports:
if port is None:
continue
init_func = {"http":start_http_server,
"https":start_https_server,
"ws":start_ws_server,
"wss":start_wss_server}[scheme]
server_proc = ServerProc()
server_proc.start(init_func, host, port, paths, routes, bind_hostname,
external_config, ssl_config, **kwargs)
servers[scheme].append((port, server_proc))
return servers
def start_http_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_hostname=bind_hostname,
config=external_config,
use_ssl=False,
key_file=None,
certificate=None,
latency=kwargs.get("latency"))
def start_https_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_hostname=bind_hostname,
config=external_config,
use_ssl=True,
key_file=ssl_config["key_path"],
certificate=ssl_config["cert_path"],
encrypt_after_connect=ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"))
class WebSocketDaemon(object):
def __init__(self, host, port, doc_root, handlers_root, log_level, bind_hostname,
ssl_config):
self.host = host
cmd_args = ["-p", port,
"-d", doc_root,
"-w", handlers_root,
"--log-level", log_level]
if ssl_config is not None:
            # This is usually done through pywebsocket.main, however we're
            # working around that to get the server instance and manually
            # set up the wss server.
if pywebsocket._import_ssl():
tls_module = pywebsocket._TLS_BY_STANDARD_MODULE
elif pywebsocket._import_pyopenssl():
tls_module = pywebsocket._TLS_BY_PYOPENSSL
else:
print("No SSL module available")
sys.exit(1)
cmd_args += ["--tls",
"--private-key", ssl_config["key_path"],
"--certificate", ssl_config["cert_path"],
"--tls-module", tls_module]
if (bind_hostname):
cmd_args = ["-H", host] + cmd_args
opts, args = pywebsocket._parse_args_and_config(cmd_args)
opts.cgi_directories = []
opts.is_executable_method = None
self.server = pywebsocket.WebSocketServer(opts)
ports = [item[0].getsockname()[1] for item in self.server._sockets]
assert all(item == ports[0] for item in ports)
self.port = ports[0]
self.started = False
self.server_thread = None
def start(self, block=False):
self.started = True
if block:
self.server.serve_forever()
else:
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.setDaemon(True) # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
self.server_thread = None
except AttributeError:
pass
self.started = False
self.server = None
def start_ws_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return WebSocketDaemon(host,
str(port),
repo_root,
paths["ws_doc_root"],
"debug",
bind_hostname,
ssl_config = None)
def start_wss_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return WebSocketDaemon(host,
str(port),
repo_root,
paths["ws_doc_root"],
"debug",
bind_hostname,
ssl_config)
def get_ports(config, ssl_environment):
rv = defaultdict(list)
for scheme, ports in config["ports"].iteritems():
for i, port in enumerate(ports):
if scheme in ["wss", "https"] and not ssl_environment.ssl_enabled:
port = None
if port == "auto":
port = get_port()
else:
port = port
rv[scheme].append(port)
return rv
def normalise_config(config, ports):
host = config["external_host"] if config["external_host"] else config["host"]
domains = get_subdomains(host)
    ports_ = {}
    for scheme, ports_used in ports.iteritems():
        ports_[scheme] = ports_used
    for key, value in domains.iteritems():
        domains[key] = ".".join(value)
    domains[""] = host
return {"host": host,
"domains": domains,
"ports": ports_}
def get_ssl_config(config, external_domains, ssl_environment):
key_path, cert_path = ssl_environment.host_cert_path(external_domains)
return {"key_path": key_path,
"cert_path": cert_path,
"encrypt_after_connect": config["ssl"]["encrypt_after_connect"]}
def start(config, ssl_environment, routes, **kwargs):
host = config["host"]
domains = get_subdomains(host)
ports = get_ports(config, ssl_environment)
bind_hostname = config["bind_hostname"]
paths = {"doc_root": config["doc_root"],
"ws_doc_root": config["ws_doc_root"]}
external_config = normalise_config(config, ports)
ssl_config = get_ssl_config(config, external_config["domains"].values(), ssl_environment)
if config["check_subdomains"]:
check_subdomains(host, paths, bind_hostname, ssl_config)
servers = start_servers(host, ports, paths, routes, bind_hostname, external_config,
ssl_config, **kwargs)
return external_config, servers
def iter_procs(servers):
    for server_list in servers.values():
        for port, server in server_list:
            yield server.proc
def value_set(config, key):
return key in config and config[key] is not None
def get_value_or_default(config, key, default=None):
return config[key] if value_set(config, key) else default
def set_computed_defaults(config):
if not value_set(config, "doc_root"):
config["doc_root"] = repo_root
if not value_set(config, "ws_doc_root"):
root = get_value_or_default(config, "doc_root", default=repo_root)
config["ws_doc_root"] = os.path.join(root, "websockets", "handlers")
def merge_json(base_obj, override_obj):
rv = {}
for key, value in base_obj.iteritems():
if key not in override_obj:
rv[key] = value
else:
if isinstance(value, dict):
rv[key] = merge_json(value, override_obj[key])
else:
rv[key] = override_obj[key]
return rv
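# A quick illustration (hypothetical configs): nested dicts are merged
# recursively, and scalar overrides win.
#
#   >>> merge_json({"a": 1, "b": {"c": 2}}, {"b": {"c": 3}})
#   {'a': 1, 'b': {'c': 3}}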
def get_ssl_environment(config):
implementation_type = config["ssl"]["type"]
cls = sslutils.environments[implementation_type]
try:
kwargs = config["ssl"][implementation_type].copy()
except KeyError:
raise ValueError("%s is not a vaid ssl type." % implementation_type)
return cls(logger, **kwargs)
def load_config(default_path, override_path=None, **kwargs):
if os.path.exists(default_path):
with open(default_path) as f:
base_obj = json.load(f)
else:
raise ValueError("Config path %s does not exist" % default_path)
if os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
else:
override_obj = {}
rv = merge_json(base_obj, override_obj)
if kwargs.get("config_path"):
other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
if os.path.exists(other_path):
base_obj = rv
with open(other_path) as f:
override_obj = json.load(f)
rv = merge_json(base_obj, override_obj)
else:
raise ValueError("Config path %s does not exist" % other_path)
overriding_path_args = [("doc_root", "Document root"),
("ws_doc_root", "WebSockets document root")]
for key, title in overriding_path_args:
value = kwargs.get(key)
if value is None:
continue
value = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(value):
raise ValueError("%s path %s does not exist" % (title, value))
rv[key] = value
set_computed_defaults(rv)
return rv
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--latency", type=int,
help="Artificial latency to add before sending http responses, in ms")
parser.add_argument("--config", action="store", dest="config_path",
help="Path to external config file")
parser.add_argument("--doc_root", action="store", dest="doc_root",
help="Path to document root. Overrides config.")
parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
help="Path to WebSockets document root. Overrides config.")
return parser
def main():
kwargs = vars(get_parser().parse_args())
config = load_config("config.default.json",
"config.json",
**kwargs)
setup_logger(config["log_level"])
with stash.StashServer((config["host"], get_port()), authkey=str(uuid.uuid4())):
with get_ssl_environment(config) as ssl_env:
config_, servers = start(config, ssl_env, default_routes(), **kwargs)
try:
while any(item.is_alive() for item in iter_procs(servers)):
for item in iter_procs(servers):
item.join(1)
except KeyboardInterrupt:
logger.info("Shutting down")
|
| cslzchen/osf.io | refs/heads/develop | scripts/register_oauth_scopes.py | 13 |
"""
Register the list of OAuth2 scopes that can be requested by third parties. This populates the Postgres table
referenced by CAS when responding to authorization grant requests. The database class is minimal; the exact
specification for what a scope contains lives in the python module from which this table is drawn.
"""
import sys
import logging
import django
from django.db import transaction
django.setup()
from scripts import utils as script_utils
from framework.auth import oauth_scopes
from osf.models import ApiOAuth2Scope
from website.app import init_app
logger = logging.getLogger(__name__)
def get_or_create(name, description, save=True):
"""
Populate or update the database entry, as needed
:param name: the name of the scope
:param description: the description of the scope
:return: the scope object
"""
if name != name.lower():
raise ValueError('Scope names are case-sensitive, and should always be lower-case.')
try:
scope_obj = ApiOAuth2Scope.objects.get(name=name)
setattr(scope_obj, 'description', description)
print('Updating existing database entry for: {}'.format(name))
except ApiOAuth2Scope.DoesNotExist:
scope_obj = ApiOAuth2Scope(name=name, description=description)
print('Created new database entry for: {}'.format(name))
if save:
scope_obj.save()
return scope_obj
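# A usage sketch (hypothetical scope name and description): with save=False
# the ApiOAuth2Scope instance is returned unsaved, e.g.
#
#   scope = get_or_create('osf.example_scope', 'Hypothetical scope', save=False)
#   scope.save()  # commit later, e.g. inside a transaction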
def do_populate(clear=False):
"""
Given a dictionary of scope definitions, {name: scope_namedtuple}, load the
    resulting data into a database table
"""
scope_dict = oauth_scopes.public_scopes
if clear:
ApiOAuth2Scope.objects.all().delete()
for name, scope in scope_dict.items():
# Update a scope if it exists, else populate
if scope.is_public is True:
get_or_create(name, scope.description, save=True)
else:
logger.info('{} is not a publicly advertised scope; did not load into database'.format(name))
def main(dry=True):
init_app(routes=False)
with transaction.atomic():
do_populate(clear=True)
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
| Edraak/edraak-platform | refs/heads/master | openedx/core/djangoapps/embargo/tests/test_models.py | 33 |
"""Test of models for embargo app"""
import json
from django.test import TestCase
from django.db.utils import IntegrityError
from opaque_keys.edx.locator import CourseLocator
from ..models import (
EmbargoedCourse, EmbargoedState, IPFilter, RestrictedCourse,
Country, CountryAccessRule, CourseAccessRuleHistory
)
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
class EmbargoModelsTest(CacheIsolationTestCase):
"""Test each of the 3 models in embargo.models"""
ENABLED_CACHES = ['default']
def test_course_embargo(self):
course_id = CourseLocator('abc', '123', 'doremi')
        # Test that the course is not embargoed by default
self.assertFalse(EmbargoedCourse.is_embargoed(course_id))
        # Embargo the course
cauth = EmbargoedCourse(course_id=course_id, embargoed=True)
cauth.save()
# Now, course should be embargoed
self.assertTrue(EmbargoedCourse.is_embargoed(course_id))
self.assertEquals(
unicode(cauth),
u"Course '{course_id}' is Embargoed".format(course_id=course_id)
)
        # Un-embargo by explicitly setting embargoed to False
        cauth.embargoed = False
        cauth.save()
        # Test that the course is no longer embargoed
self.assertFalse(EmbargoedCourse.is_embargoed(course_id))
self.assertEquals(
unicode(cauth),
u"Course '{course_id}' is Not Embargoed".format(course_id=course_id)
)
def test_state_embargo(self):
# Azerbaijan and France should not be blocked
good_states = ['AZ', 'FR']
        # Gah, block USA and Antarctica
blocked_states = ['US', 'AQ']
currently_blocked = EmbargoedState.current().embargoed_countries_list
for state in blocked_states + good_states:
self.assertNotIn(state, currently_blocked)
# Block
cauth = EmbargoedState(embargoed_countries='US, AQ')
cauth.save()
currently_blocked = EmbargoedState.current().embargoed_countries_list
for state in good_states:
self.assertNotIn(state, currently_blocked)
for state in blocked_states:
self.assertIn(state, currently_blocked)
# Change embargo - block Isle of Man too
blocked_states.append('IM')
cauth.embargoed_countries = 'US, AQ, IM'
cauth.save()
currently_blocked = EmbargoedState.current().embargoed_countries_list
for state in good_states:
self.assertNotIn(state, currently_blocked)
for state in blocked_states:
self.assertIn(state, currently_blocked)
def test_ip_blocking(self):
whitelist = '127.0.0.1'
blacklist = '18.244.51.3'
cwhitelist = IPFilter.current().whitelist_ips
self.assertNotIn(whitelist, cwhitelist)
cblacklist = IPFilter.current().blacklist_ips
self.assertNotIn(blacklist, cblacklist)
IPFilter(whitelist=whitelist, blacklist=blacklist).save()
cwhitelist = IPFilter.current().whitelist_ips
self.assertIn(whitelist, cwhitelist)
cblacklist = IPFilter.current().blacklist_ips
self.assertIn(blacklist, cblacklist)
def test_ip_network_blocking(self):
whitelist = '1.0.0.0/24'
blacklist = '1.1.0.0/16'
IPFilter(whitelist=whitelist, blacklist=blacklist).save()
cwhitelist = IPFilter.current().whitelist_ips
self.assertIn('1.0.0.100', cwhitelist)
self.assertIn('1.0.0.10', cwhitelist)
self.assertNotIn('1.0.1.0', cwhitelist)
cblacklist = IPFilter.current().blacklist_ips
self.assertIn('1.1.0.0', cblacklist)
self.assertIn('1.1.0.1', cblacklist)
self.assertIn('1.1.1.0', cblacklist)
self.assertNotIn('1.2.0.0', cblacklist)
class RestrictedCourseTest(CacheIsolationTestCase):
"""Test RestrictedCourse model. """
ENABLED_CACHES = ['default']
def test_unicode_values(self):
course_id = CourseLocator('abc', '123', 'doremi')
restricted_course = RestrictedCourse.objects.create(course_key=course_id)
self.assertEquals(
unicode(restricted_course),
unicode(course_id)
)
def test_restricted_course_cache_with_save_delete(self):
course_id = CourseLocator('abc', '123', 'doremi')
RestrictedCourse.objects.create(course_key=course_id)
# Warm the cache
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(course_id)
RestrictedCourse.is_disabled_access_check(course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(course_id)
RestrictedCourse.is_disabled_access_check(course_id)
self.assertFalse(RestrictedCourse.is_disabled_access_check(course_id))
        # Adding a new course invalidates the cache, so the next check hits the db again
new_course_id = CourseLocator('def', '123', 'doremi')
RestrictedCourse.objects.create(course_key=new_course_id, disable_access_check=True)
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(new_course_id)
RestrictedCourse.is_disabled_access_check(new_course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(new_course_id)
RestrictedCourse.is_disabled_access_check(new_course_id)
self.assertTrue(RestrictedCourse.is_disabled_access_check(new_course_id))
        # Deleting an object also invalidates the cache, so the next
        # is_restricted_course check hits the db again
abc = RestrictedCourse.objects.get(course_key=new_course_id)
abc.delete()
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(new_course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(new_course_id)
class CountryTest(TestCase):
"""Test Country model. """
def test_unicode_values(self):
country = Country.objects.create(country='NZ')
self.assertEquals(unicode(country), "New Zealand (NZ)")
class CountryAccessRuleTest(CacheIsolationTestCase):
"""Test CountryAccessRule model. """
ENABLED_CACHES = ['default']
def test_unicode_values(self):
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
access_rule = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
self.assertEquals(
unicode(access_rule),
u"Whitelist New Zealand (NZ) for {course_key}".format(course_key=course_id)
)
course_id = CourseLocator('def', '123', 'doremi')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
access_rule = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.BLACKLIST_RULE,
country=country
)
self.assertEquals(
unicode(access_rule),
u"Blacklist New Zealand (NZ) for {course_key}".format(course_key=course_id)
)
def test_unique_together_constraint(self):
"""
        A course/country pair can be added with either a whitelist or a
        blacklist rule; adding it with both rule types raises an IntegrityError
"""
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
with self.assertRaises(IntegrityError):
CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.BLACKLIST_RULE,
country=country
)
def test_country_access_list_cache_with_save_delete(self):
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
course = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
# Warm the cache
with self.assertNumQueries(1):
CountryAccessRule.check_country_access(course_id, 'NZ')
with self.assertNumQueries(0):
CountryAccessRule.check_country_access(course_id, 'NZ')
# Deleting an object will invalidate the cache
course.delete()
with self.assertNumQueries(1):
CountryAccessRule.check_country_access(course_id, 'NZ')
class CourseAccessRuleHistoryTest(TestCase):
"""Test course access rule history. """
def setUp(self):
super(CourseAccessRuleHistoryTest, self).setUp()
self.course_key = CourseLocator('edx', 'DemoX', 'Demo_Course')
self.restricted_course = RestrictedCourse.objects.create(course_key=self.course_key)
self.countries = {
'US': Country.objects.create(country='US'),
'AU': Country.objects.create(country='AU')
}
def test_course_access_history_no_rules(self):
self._assert_history([])
self.restricted_course.delete()
self._assert_history_deleted()
def test_course_access_history_with_rules(self):
# Add one rule
us_rule = CountryAccessRule.objects.create(
restricted_course=self.restricted_course,
country=self.countries['US'],
rule_type=CountryAccessRule.WHITELIST_RULE
)
self._assert_history([('US', 'whitelist')])
# Add another rule
au_rule = CountryAccessRule.objects.create(
restricted_course=self.restricted_course,
country=self.countries['AU'],
rule_type=CountryAccessRule.BLACKLIST_RULE
)
self._assert_history([
('US', 'whitelist'),
('AU', 'blacklist')
])
# Delete the first rule
us_rule.delete()
self._assert_history([('AU', 'blacklist')])
# Delete the second rule
au_rule.delete()
self._assert_history([])
def test_course_access_history_delete_all(self):
# Create a rule
CountryAccessRule.objects.create(
restricted_course=self.restricted_course,
country=self.countries['US'],
rule_type=CountryAccessRule.WHITELIST_RULE
)
# Delete the course (and, implicitly, all the rules)
self.restricted_course.delete()
self._assert_history_deleted()
def test_course_access_history_change_message(self):
# Change the message key
self.restricted_course.enroll_msg_key = 'embargo'
self.restricted_course.access_msg_key = 'embargo'
self.restricted_course.save()
# Expect a history entry with the changed keys
self._assert_history([], enroll_msg='embargo', access_msg='embargo')
def _assert_history(self, country_rules, enroll_msg='default', access_msg='default'):
"""Check the latest history entry.
Arguments:
country_rules (list): List of rules, each of which are tuples
of the form `(country_code, rule_type)`.
Keyword Arguments:
enroll_msg (str): The expected enrollment message key.
access_msg (str): The expected access message key.
Raises:
AssertionError
"""
record = CourseAccessRuleHistory.objects.latest()
# Check that the record is for the correct course
self.assertEqual(record.course_key, self.course_key)
# Load the history entry and verify the message keys
snapshot = json.loads(record.snapshot)
self.assertEqual(snapshot['enroll_msg'], enroll_msg)
self.assertEqual(snapshot['access_msg'], access_msg)
# For each rule, check that there is an entry
# in the history record.
for (country, rule_type) in country_rules:
self.assertIn(
{
'country': country,
'rule_type': rule_type
},
snapshot['country_rules']
)
# Check that there are no duplicate entries
self.assertEqual(len(snapshot['country_rules']), len(country_rules))
def _assert_history_deleted(self):
"""Check the latest history entry for a 'DELETED' placeholder.
Raises:
AssertionError
"""
record = CourseAccessRuleHistory.objects.latest()
self.assertEqual(record.course_key, self.course_key)
self.assertEqual(record.snapshot, "DELETED")
|
| sn1k/app_mundial | refs/heads/master | lib/python2.7/site-packages/django/templatetags/static.py | 227 |
from django import template
from django.template.base import Node
from django.utils.encoding import iri_to_uri
from django.utils.six.moves.urllib.parse import urljoin
register = template.Library()
class PrefixNode(template.Node):
def __repr__(self):
return "<PrefixNode for %r>" % self.name
def __init__(self, varname=None, name=None):
if name is None:
raise template.TemplateSyntaxError(
"Prefix nodes must be given a name to return.")
self.varname = varname
self.name = name
@classmethod
def handle_token(cls, parser, token, name):
"""
Class method to parse prefix node and return a Node.
"""
        # token.split_contents() isn't useful here because tags using this method don't accept variables as arguments
tokens = token.contents.split()
if len(tokens) > 1 and tokens[1] != 'as':
raise template.TemplateSyntaxError(
"First argument in '%s' must be 'as'" % tokens[0])
if len(tokens) > 1:
varname = tokens[2]
else:
varname = None
return cls(varname, name)
@classmethod
def handle_simple(cls, name):
try:
from django.conf import settings
except ImportError:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, name, ''))
return prefix
def render(self, context):
prefix = self.handle_simple(self.name)
if self.varname is None:
return prefix
context[self.varname] = prefix
return ''
@register.tag
def get_static_prefix(parser, token):
"""
Populates a template variable with the static prefix,
``settings.STATIC_URL``.
Usage::
{% get_static_prefix [as varname] %}
Examples::
{% get_static_prefix %}
{% get_static_prefix as static_prefix %}
"""
return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
"""
Populates a template variable with the media prefix,
``settings.MEDIA_URL``.
Usage::
{% get_media_prefix [as varname] %}
Examples::
{% get_media_prefix %}
{% get_media_prefix as media_prefix %}
"""
return PrefixNode.handle_token(parser, token, "MEDIA_URL")
class StaticNode(Node):
def __init__(self, varname=None, path=None):
if path is None:
raise template.TemplateSyntaxError(
"Static template nodes must be given a path to return.")
self.path = path
self.varname = varname
def url(self, context):
path = self.path.resolve(context)
return self.handle_simple(path)
def render(self, context):
url = self.url(context)
if self.varname is None:
return url
context[self.varname] = url
return ''
@classmethod
def handle_simple(cls, path):
return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)
@classmethod
def handle_token(cls, parser, token):
"""
Class method to parse prefix node and return a Node.
"""
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError(
"'%s' takes at least one argument (path to file)" % bits[0])
path = parser.compile_filter(bits[1])
if len(bits) >= 2 and bits[-2] == 'as':
varname = bits[3]
else:
varname = None
return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
"""
Joins the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
return StaticNode.handle_token(parser, token)
def static(path):
return StaticNode.handle_simple(path)
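# A minimal usage sketch (illustrative; assumes STATIC_URL = '/static/'):
#
#     >>> static('myapp/css/base.css')
#     '/static/myapp/css/base.css'
#
# or, from a template:
#
#     {% static "myapp/css/base.css" as base_css %}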
|
simpleenergy/bughouse-ranking
|
refs/heads/master
|
bughouse/ratings/utils.py
|
1
|
import decimal
def elo_chance_to_lose(player, opponent):
"""
    Probability(player loses) = 1 / (1 + (10 ^ ((Player Rating - Opponent Rating) / 400)))
"""
diff = player - opponent
return 1.0 / (1 + pow(10, (diff / 400.0)))
def elo_chance_to_win(player, opponent):
return 1 - elo_chance_to_lose(player, opponent)
def round_it(num):
"""
An integer rounding function that uses `ROUND_HALF_EVEN`.
"""
return int(
decimal.Decimal(num).quantize(
decimal.Decimal('1'),
rounding=decimal.ROUND_HALF_EVEN,
)
)
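# Worked example (illustrative, not part of the module): with ratings
# 1200 vs. 1000, diff = 200, so the stronger player's chance to lose is
# 1 / (1 + 10 ** 0.5) ~= 0.24 and the chance to win is ~0.76.
#
#     >>> round(elo_chance_to_win(1200, 1000), 2)
#     0.76
#     >>> round_it(2.5)   # ROUND_HALF_EVEN: ties go to the even neighbor
#     2
#     >>> round_it(3.5)
#     4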
|
Kri-7-q/glimpse_client-1
|
refs/heads/develop
|
3rdparty/breakpad/src/testing/scripts/generator/cpp/ast.py
|
209
|
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
# Support Python 2.3 and earlier.
def reversed(seq):
for i in range(len(seq)-1, -1, -1):
yield seq[i]
if not hasattr(builtins, 'next'):
# Support Python 2.5 and earlier.
def next(obj):
return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
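# These are bit flags, so a method's modifiers combine with |; for example,
# a const virtual method carries FUNCTION_CONST | FUNCTION_VIRTUAL == 0x03.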
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
    __contains__ = lambda self, key: False
    keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
"""Base AST node."""
def __init__(self, start, end):
self.start = start
self.end = end
def IsDeclaration(self):
"""Returns bool if this node is a declaration."""
return False
def IsDefinition(self):
"""Returns bool if this node is a definition."""
return False
def IsExportable(self):
"""Returns bool if this node exportable from a header file."""
return False
def Requires(self, node):
"""Does this AST node require the definition of the node passed in?"""
return False
def XXX__str__(self):
return self._StringHelper(self.__class__.__name__, '')
def _StringHelper(self, name, suffix):
if not utils.DEBUG:
return '%s(%s)' % (name, suffix)
return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
def __repr__(self):
return str(self)
class Define(Node):
def __init__(self, start, end, name, definition):
Node.__init__(self, start, end)
self.name = name
self.definition = definition
def __str__(self):
value = '%s %s' % (self.name, self.definition)
return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
def __init__(self, start, end, filename, system):
Node.__init__(self, start, end)
self.filename = filename
self.system = system
def __str__(self):
fmt = '"%s"'
if self.system:
fmt = '<%s>'
return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
def __init__(self, start, end, label):
Node.__init__(self, start, end)
self.label = label
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
def __init__(self, start, end, expr):
Node.__init__(self, start, end)
self.expr = expr
def Requires(self, node):
# TODO(nnorwitz): impl.
return False
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
pass
class Friend(Expr):
def __init__(self, start, end, expr, namespace):
Expr.__init__(self, start, end, expr)
self.namespace = namespace[:]
class Using(Node):
def __init__(self, start, end, names):
Node.__init__(self, start, end)
self.names = names
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
def __init__(self, start, end, name, parameter_type, default):
Node.__init__(self, start, end)
self.name = name
self.type = parameter_type
self.default = default
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def __str__(self):
name = str(self.type)
suffix = '%s %s' % (name, self.name)
if self.default:
suffix += ' = ' + ''.join([d.name for d in self.default])
return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
def __init__(self, start, end, name, namespace):
Node.__init__(self, start, end)
self.name = name
self.namespace = namespace[:]
def FullName(self):
prefix = ''
if self.namespace and self.namespace[-1]:
prefix = '::'.join(self.namespace) + '::'
return prefix + self.name
def _TypeStringHelper(self, suffix):
if self.namespace:
names = [n or '<anonymous>' for n in self.namespace]
suffix += ' in ' + '::'.join(names)
return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
def __init__(self, start, end, name, var_type, initial_value, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.type = var_type
self.initial_value = initial_value
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def ToString(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix
def __str__(self):
return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
def __init__(self, start, end, name, alias, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.alias = alias
def IsDefinition(self):
return True
def IsExportable(self):
return True
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
name = node.name
for token in self.alias:
if token is not None and name == token.name:
return True
return False
def __str__(self):
suffix = '%s, %s' % (self.name, self.alias)
return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
def __init__(self, start, end, name, fields, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.fields = fields
def IsDefinition(self):
return True
def IsExportable(self):
return True
def __str__(self):
suffix = '%s, {%s}' % (self.name, self.fields)
return self._TypeStringHelper(suffix)
class Union(_NestedType):
pass
class Enum(_NestedType):
pass
class Class(_GenericDeclaration):
def __init__(self, start, end, name, bases, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.bases = bases
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.bases is None and self.body is None
def IsDefinition(self):
return not self.IsDeclaration()
def IsExportable(self):
return not self.IsDeclaration()
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
if self.bases:
for token_list in self.bases:
                # TODO(nnorwitz): bases are tokens, do name comparison.
for token in token_list:
if token.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
name = self.name
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = '%s, %s, %s' % (name, self.bases, self.body)
return self._TypeStringHelper(suffix)
class Struct(Class):
pass
class Function(_GenericDeclaration):
def __init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
converter = TypeConverter(namespace)
self.return_type = converter.CreateReturnType(return_type)
self.parameters = converter.ToParameters(parameters)
self.modifiers = modifiers
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.body is None
def IsDefinition(self):
return self.body is not None
def IsExportable(self):
if self.return_type and 'static' in self.return_type.modifiers:
return False
return None not in self.namespace
def Requires(self, node):
if self.parameters:
            # TODO(nnorwitz): parameters are tokens, do name comparison.
for p in self.parameters:
if p.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
# TODO(nnorwitz): add templated_types.
suffix = ('%s %s(%s), 0x%02x, %s' %
(self.return_type, self.name, self.parameters,
self.modifiers, self.body))
return self._TypeStringHelper(suffix)
class Method(Function):
def __init__(self, start, end, name, in_class, return_type, parameters,
modifiers, templated_types, body, namespace):
Function.__init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace)
# TODO(nnorwitz): in_class could also be a namespace which can
# mess up finding functions properly.
self.in_class = in_class
class Type(_GenericDeclaration):
"""Type used for any variable (eg class, primitive, struct, etc)."""
def __init__(self, start, end, name, templated_types, modifiers,
reference, pointer, array):
"""
Args:
name: str name of main type
templated_types: [Class (Type?)] template type info between <>
modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
reference, pointer, array: bools
"""
_GenericDeclaration.__init__(self, start, end, name, [])
self.templated_types = templated_types
if not name and modifiers:
self.name = modifiers.pop()
self.modifiers = modifiers
self.reference = reference
self.pointer = pointer
self.array = array
def __str__(self):
prefix = ''
if self.modifiers:
prefix = ' '.join(self.modifiers) + ' '
name = str(self.name)
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = prefix + name
if self.reference:
suffix += '&'
if self.pointer:
suffix += '*'
if self.array:
suffix += '[]'
return self._TypeStringHelper(suffix)
# By definition, Is* are always False. A Type can only exist in
# some sort of variable declaration, parameter, or return value.
def IsDeclaration(self):
return False
def IsDefinition(self):
return False
def IsExportable(self):
return False
class TypeConverter(object):
def __init__(self, namespace_stack):
self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
def ToType(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
def AddType(templated_types):
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.IsKeyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == '<':
new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
AddType(self.ToType(new_tokens))
# If there is a comma after the template, we need to consume
                # that here, otherwise it becomes part of the name.
i = new_end
reference = pointer = array = False
elif token.name == ',':
AddType([])
reference = pointer = array = False
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
pointer = True
elif token.name == ']':
pass
else:
name_tokens.append(token)
i += 1
if name_tokens:
# No '<' in the tokens, just a simple name and no template.
AddType([])
return result
def DeclarationToParts(self, parts, needs_name_removed):
name = None
default = []
if needs_name_removed:
# Handle default (initial) values properly.
for i, t in enumerate(parts):
if t.name == '=':
default = parts[i+1:]
name = parts[i-1].name
if name == ']' and parts[i-2].name == '[':
name = parts[i-3].name
i -= 1
parts = parts[:i-1]
break
else:
if parts[-1].token_type == tokenize.NAME:
name = parts.pop().name
else:
# TODO(nnorwitz): this is a hack that happens for code like
# Register(Foo<T>); where it thinks this is a function call
# but it's actually a declaration.
name = '???'
modifiers = []
type_name = []
other_tokens = []
templated_types = []
i = 0
end = len(parts)
while i < end:
p = parts[i]
if keywords.IsKeyword(p.name):
modifiers.append(p.name)
elif p.name == '<':
templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
templated_types = self.ToType(templated_tokens)
i = new_end - 1
# Don't add a spurious :: to data members being initialized.
next_index = i + 1
if next_index < end and parts[next_index].name == '::':
i += 1
elif p.name in ('[', ']', '='):
# These are handled elsewhere.
other_tokens.append(p)
elif p.name not in ('*', '&', '>'):
# Ensure that names have a space between them.
if (type_name and type_name[-1].token_type == tokenize.NAME and
p.token_type == tokenize.NAME):
type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
type_name.append(p)
else:
other_tokens.append(p)
i += 1
type_name = ''.join([t.name for t in type_name])
return name, type_name, templated_types, modifiers, default, other_tokens
def ToParameters(self, tokens):
if not tokens:
return []
result = []
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
def AddParameter():
if default:
del default[0] # Remove flag.
end = type_modifiers[-1].end
parts = self.DeclarationToParts(type_modifiers, True)
(name, type_name, templated_types, modifiers,
unused_default, unused_other_tokens) = parts
parameter_type = Type(first_token.start, first_token.end,
type_name, templated_types, modifiers,
reference, pointer, array)
p = Parameter(first_token.start, end, name,
parameter_type, default)
result.append(p)
template_count = 0
for s in tokens:
if not first_token:
first_token = s
if s.name == '<':
template_count += 1
elif s.name == '>':
template_count -= 1
if template_count > 0:
type_modifiers.append(s)
continue
if s.name == ',':
AddParameter()
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
elif s.name == '*':
pointer = True
elif s.name == '&':
reference = True
elif s.name == '[':
array = True
elif s.name == ']':
pass # Just don't add to type_modifiers.
elif s.name == '=':
# Got a default value. Add any value (None) as a flag.
default.append(None)
elif default:
default.append(s)
else:
type_modifiers.append(s)
AddParameter()
return result
def CreateReturnType(self, return_type_seq):
if not return_type_seq:
return None
start = return_type_seq[0].start
end = return_type_seq[-1].end
_, name, templated_types, modifiers, default, other_tokens = \
self.DeclarationToParts(return_type_seq, False)
names = [n.name for n in other_tokens]
reference = '&' in names
pointer = '*' in names
array = '[' in names
return Type(start, end, name, templated_types, modifiers,
reference, pointer, array)
def GetTemplateIndices(self, names):
# names is a list of strings.
start = names.index('<')
end = len(names) - 1
while end > 0:
if names[end] == '>':
break
end -= 1
return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
if token.token_type == tokenize.NAME:
if (keywords.IsKeyword(token.name) and
not keywords.IsBuiltinType(token.name)):
method = getattr(self, 'handle_' + token.name)
return method()
elif token.name == self.in_class_name_only:
# The token name is the same as the class, must be a ctor if
# there is a paren. Otherwise, it's the return type.
# Peek ahead to get the next token to figure out which.
next = self._GetNextToken()
self._AddBackToken(next)
if next.token_type == tokenize.SYNTAX and next.name == '(':
return self._GetMethod([token], FUNCTION_CTOR, None, True)
# Fall through--handle like any other method.
# Handle data or function declaration/definition.
syntax = tokenize.SYNTAX
temp_tokens, last_token = \
self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
temp_tokens.insert(0, token)
if last_token.name == '(':
# If there is an assignment before the paren,
# this is an expression, not a method.
expr = bool([e for e in temp_tokens if e.name == '='])
if expr:
new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.append(last_token)
temp_tokens.extend(new_temp)
last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
if last_token.name == '[':
# Handle array, this isn't a method, unless it's an operator.
# TODO(nnorwitz): keep the size somewhere.
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
temp_tokens.append(last_token)
if temp_tokens[-2].name == 'operator':
temp_tokens.append(self._GetNextToken())
else:
temp_tokens2, last_token = \
self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.extend(temp_tokens2)
if last_token.name == ';':
# Handle data, this isn't a method.
parts = self.converter.DeclarationToParts(temp_tokens, True)
(name, type_name, templated_types, modifiers, default,
unused_other_tokens) = parts
t0 = temp_tokens[0]
names = [t.name for t in temp_tokens]
if templated_types:
start, end = self.converter.GetTemplateIndices(names)
names = names[:start] + names[end:]
default = ''.join([t.name for t in default])
return self._CreateVariable(t0, name, type_name, modifiers,
names, templated_types, default)
if last_token.name == '{':
self._AddBackTokens(temp_tokens[1:])
self._AddBackToken(last_token)
method_name = temp_tokens[0].name
method = getattr(self, 'handle_' + method_name, None)
if not method:
# Must be declaring a variable.
# TODO(nnorwitz): handle the declaration.
return None
return method()
return self._GetMethod(temp_tokens, 0, None, False)
elif token.token_type == tokenize.SYNTAX:
if token.name == '~' and self.in_class:
# Must be a dtor (probably not in method body).
token = self._GetNextToken()
# self.in_class can contain A::Name, but the dtor will only
# be Name. Make sure to compare against the right value.
if (token.token_type == tokenize.NAME and
token.name == self.in_class_name_only):
return self._GetMethod([token], FUNCTION_DTOR, None, True)
# TODO(nnorwitz): handle a lot more syntax.
elif token.token_type == tokenize.PREPROCESSOR:
# TODO(nnorwitz): handle more preprocessor directives.
# token starts with a #, so remove it and strip whitespace.
name = token.name[1:].lstrip()
if name.startswith('include'):
# Remove "include".
name = name[7:].strip()
assert name
# Handle #include \<newline> "header-on-second-line.h".
if name.startswith('\\'):
name = name[1:].strip()
assert name[0] in '<"', token
assert name[-1] in '>"', token
system = name[0] == '<'
filename = name[1:-1]
return Include(token.start, token.end, filename, system)
if name.startswith('define'):
# Remove "define".
name = name[6:].strip()
assert name
value = ''
for i, c in enumerate(name):
if c.isspace():
value = name[i:].lstrip()
name = name[:i]
break
return Define(token.start, token.end, name, value)
if name.startswith('if') and name[2:3].isspace():
condition = name[3:].strip()
if condition.startswith('0') or condition.startswith('(0)'):
self._SkipIf0Blocks()
return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
    # TODO(nnorwitz): remove _IgnoreUpTo(); it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
def _GetParameters(self):
return self._GetMatchingChar('(', ')')
def GetScope(self):
return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
"""Returns ([tokens], next_token_info)."""
GetNextToken = self._GetNextToken
if seq is not None:
it = iter(seq)
GetNextToken = lambda: next(it)
next_token = GetNextToken()
tokens = []
last_token_was_name = False
while (next_token.token_type == tokenize.NAME or
(next_token.token_type == tokenize.SYNTAX and
next_token.name in ('::', '<'))):
# Two NAMEs in a row means the identifier should terminate.
# It's probably some sort of variable declaration.
if last_token_was_name and next_token.token_type == tokenize.NAME:
break
last_token_was_name = next_token.token_type == tokenize.NAME
tokens.append(next_token)
# Handle templated names.
if next_token.name == '<':
tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
last_token_was_name = True
next_token = GetNextToken()
return tokens, next_token
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
get_paren):
template_portion = None
if get_paren:
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
if token.name == '<':
# Handle templatized dtors.
template_portion = [token]
template_portion.extend(self._GetMatchingChar('<', '>'))
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '(', token
name = return_type_and_name.pop()
# Handle templatized ctors.
if name.name == '>':
index = 1
while return_type_and_name[index].name != '<':
index += 1
template_portion = return_type_and_name[index:] + [name]
del return_type_and_name[index:]
name = return_type_and_name.pop()
elif name.name == ']':
rt = return_type_and_name
assert rt[-1].name == '[', return_type_and_name
assert rt[-2].name == 'operator', return_type_and_name
name_seq = return_type_and_name[-2:]
del return_type_and_name[-2:]
name = tokenize.Token(tokenize.NAME, 'operator[]',
name_seq[0].start, name.end)
# Get the open paren so _GetParameters() below works.
unused_open_paren = self._GetNextToken()
# TODO(nnorwitz): store template_portion.
return_type = return_type_and_name
indices = name
if return_type:
indices = return_type[0]
# Force ctor for templatized ctors.
if name.name == self.in_class and not modifiers:
modifiers |= FUNCTION_CTOR
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
# Handling operator() is especially weird.
if name.name == 'operator' and not parameters:
token = self._GetNextToken()
assert token.name == '(', token
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
token = self._GetNextToken()
while token.token_type == tokenize.NAME:
modifier_token = token
token = self._GetNextToken()
if modifier_token.name == 'const':
modifiers |= FUNCTION_CONST
elif modifier_token.name == '__attribute__':
# TODO(nnorwitz): handle more __attribute__ details.
modifiers |= FUNCTION_ATTRIBUTE
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'throw':
modifiers |= FUNCTION_THROW
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == modifier_token.name.upper():
# HACK(nnorwitz): assume that all upper-case names
# are some macro we aren't expanding.
modifiers |= FUNCTION_UNKNOWN_ANNOTATION
else:
self.HandleError('unexpected token', modifier_token)
assert token.token_type == tokenize.SYNTAX, token
# Handle ctor initializers.
if token.name == ':':
# TODO(nnorwitz): anything else to handle for initializer list?
while token.name != ';' and token.name != '{':
token = self._GetNextToken()
# Handle pointer to functions that are really data but look
# like method declarations.
if token.name == '(':
if parameters[0].name == '*':
# name contains the return type.
name = parameters.pop()
# parameters contains the name of the data.
modifiers = [p.name for p in parameters]
# Already at the ( to open the parameter list.
function_parameters = list(self._GetMatchingChar('(', ')'))
del function_parameters[-1] # Remove trailing ')'.
# TODO(nnorwitz): store the function_parameters.
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
return self._CreateVariable(indices, name.name, indices.name,
modifiers, '', None)
# At this point, we got something like:
# return_type (type::*name_)(params);
# This is a data member called name_ that is a function pointer.
# With this code: void (sq_type::*field_)(string&);
# We get: name=void return_type=[] parameters=sq_type ... field_
# TODO(nnorwitz): is return_type always empty?
# TODO(nnorwitz): this isn't even close to being correct.
# Just put in something so we don't crash and can move on.
real_name = parameters[-1]
modifiers = [p.name for p in self._GetParameters()]
del modifiers[-1] # Remove trailing ')'.
return self._CreateVariable(indices, real_name.name, indices.name,
modifiers, '', None)
if token.name == '{':
body = list(self.GetScope())
del body[-1] # Remove trailing '}'.
else:
body = None
if token.name == '=':
token = self._GetNextToken()
assert token.token_type == tokenize.CONSTANT, token
assert token.name == '0', token
modifiers |= FUNCTION_PURE_VIRTUAL
token = self._GetNextToken()
if token.name == '[':
# TODO(nnorwitz): store tokens and improve parsing.
# template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
tokens = list(self._GetMatchingChar('[', ']'))
token = self._GetNextToken()
assert token.name == ';', (token, return_type_and_name, parameters)
# Looks like we got a method, not a function.
if len(return_type) > 2 and return_type[-1].name == '::':
return_type, in_class = \
self._GetReturnTypeAndClassName(return_type)
return Method(indices.start, indices.end, name.name, in_class,
return_type, parameters, modifiers, templated_types,
body, self.namespace_stack)
return Function(indices.start, indices.end, name.name, return_type,
parameters, modifiers, templated_types, body,
self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
# Splitting the return type from the class name in a method
# can be tricky. For example, Return::Type::Is::Hard::To::Find().
# Where is the return type and where is the class name?
# The heuristic used is to pull the last name as the class name.
# This includes all the templated type info.
        # TODO(nnorwitz): if there is only one name, as in the
        # example above, punt and assume the last bit is the class name.
        # Ignore a :: prefix, if present, so we can find the first real name.
i = 0
if token_seq[0].name == '::':
i = 1
        # Ignore a :: suffix, if present.
end = len(token_seq) - 1
if token_seq[end-1].name == '::':
end -= 1
# Make a copy of the sequence so we can append a sentinel
        # value. This is required because GetName has to have some
# terminating condition beyond the last name.
seq_copy = token_seq[i:end]
seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
names = []
while i < end:
# Iterate through the sequence parsing out each name.
new_name, next = self.GetName(seq_copy[i:])
assert new_name, 'Got empty new_name, next=%s' % next
# We got a pointer or ref. Add it to the name.
if next and next.token_type == tokenize.SYNTAX:
new_name.append(next)
names.append(new_name)
i += len(new_name)
# Now that we have the names, it's time to undo what we did.
# Remove the sentinel value.
names[-1].pop()
# Flatten the token sequence for the return type.
return_type = [e for seq in names[:-1] for e in seq]
# The class name is the last name.
class_name = names[-1]
return return_type, class_name
def handle_bool(self):
pass
def handle_char(self):
pass
def handle_int(self):
pass
def handle_long(self):
pass
def handle_short(self):
pass
def handle_double(self):
pass
def handle_float(self):
pass
def handle_void(self):
pass
def handle_wchar_t(self):
pass
def handle_unsigned(self):
pass
def handle_signed(self):
pass
def _GetNestedType(self, ctor):
name = None
name_tokens, token = self.GetName()
if name_tokens:
name = ''.join([t.name for t in name_tokens])
# Handle forward declarations.
if token.token_type == tokenize.SYNTAX and token.name == ';':
return ctor(token.start, token.end, name, None,
self.namespace_stack)
if token.token_type == tokenize.NAME and self._handling_typedef:
self._AddBackToken(token)
return ctor(token.start, token.end, name, None,
self.namespace_stack)
# Must be the type declaration.
fields = list(self._GetMatchingChar('{', '}'))
del fields[-1] # Remove trailing '}'.
if token.token_type == tokenize.SYNTAX and token.name == '{':
next = self._GetNextToken()
new_type = ctor(token.start, token.end, name, fields,
self.namespace_stack)
# A name means this is an anonymous type and the name
# is the variable declaration.
if next.token_type != tokenize.NAME:
return new_type
name = new_type
token = next
# Must be variable declaration using the type prefixed with keyword.
assert token.token_type == tokenize.NAME, token
return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
        # Special-case the handling of typedef/aliasing of structs here.
# It would be a pain to handle in the class code.
name_tokens, var_token = self.GetName()
if name_tokens:
next_token = self._GetNextToken()
is_syntax = (var_token.token_type == tokenize.SYNTAX and
var_token.name[0] in '*&')
is_variable = (var_token.token_type == tokenize.NAME and
next_token.name == ';')
variable = var_token
if is_syntax and not is_variable:
variable = next_token
temp = self._GetNextToken()
if temp.token_type == tokenize.SYNTAX and temp.name == '(':
# Handle methods declared to return a struct.
t0 = name_tokens[0]
struct = tokenize.Token(tokenize.NAME, 'struct',
t0.start-7, t0.start-2)
type_and_name = [struct]
type_and_name.extend(name_tokens)
type_and_name.extend((var_token, next_token))
return self._GetMethod(type_and_name, 0, None, False)
assert temp.name == ';', (temp, name_tokens, var_token)
if is_syntax or (is_variable and not self._handling_typedef):
modifiers = ['struct']
type_name = ''.join([t.name for t in name_tokens])
position = name_tokens[0]
return self._CreateVariable(position, variable.name, type_name,
modifiers, var_token.name, None)
name_tokens.extend((var_token, next_token))
self._AddBackTokens(name_tokens)
else:
self._AddBackToken(var_token)
return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
return self._GetNestedType(Union)
def handle_enum(self):
return self._GetNestedType(Enum)
def handle_auto(self):
# TODO(nnorwitz): warn about using auto? Probably not since it
# will be reclaimed and useful for C++0x.
pass
def handle_register(self):
pass
def handle_const(self):
pass
def handle_inline(self):
pass
def handle_extern(self):
pass
def handle_static(self):
pass
def handle_virtual(self):
# What follows must be a method.
token = token2 = self._GetNextToken()
if token.name == 'inline':
# HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
token2 = self._GetNextToken()
if token2.token_type == tokenize.SYNTAX and token2.name == '~':
return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
assert token.token_type == tokenize.NAME or token.name == '::', token
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
return_type_and_name.insert(0, token)
if token2 is not token:
return_type_and_name.insert(1, token2)
return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
None, False)
def handle_volatile(self):
pass
def handle_mutable(self):
pass
def handle_public(self):
assert self.in_class
self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
assert self.in_class
self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
assert self.in_class
self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
def handle_static_cast(self):
pass
def handle_const_cast(self):
pass
def handle_dynamic_cast(self):
pass
def handle_reinterpret_cast(self):
pass
def handle_new(self):
pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
token = self._GetNextToken()
if (token.token_type == tokenize.NAME and
keywords.IsKeyword(token.name)):
# Token must be struct/enum/union/class.
method = getattr(self, 'handle_' + token.name)
self._handling_typedef = True
tokens = [method()]
self._handling_typedef = False
else:
tokens = [token]
# Get the remainder of the typedef up to the semi-colon.
tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
# TODO(nnorwitz): clean all this up.
assert tokens
name = tokens.pop()
indices = name
if tokens:
indices = tokens[0]
if not indices:
indices = token
if name.name == ')':
# HACK(nnorwitz): Handle pointers to functions "properly".
if (len(tokens) >= 4 and
tokens[1].name == '(' and tokens[2].name == '*'):
tokens.append(name)
name = tokens[3]
elif name.name == ']':
# HACK(nnorwitz): Handle arrays properly.
if len(tokens) >= 2:
tokens.append(name)
name = tokens[1]
new_type = tokens
if tokens and isinstance(tokens[0], tokenize.Token):
new_type = self.converter.ToType(tokens)[0]
return Typedef(indices.start, indices.end, name.name,
new_type, self.namespace_stack)
def handle_typeid(self):
pass # Not needed yet.
def handle_typename(self):
pass # Not needed yet.
def _GetTemplatedTypes(self):
result = {}
tokens = list(self._GetMatchingChar('<', '>'))
len_tokens = len(tokens) - 1 # Ignore trailing '>'.
i = 0
while i < len_tokens:
key = tokens[i].name
i += 1
if keywords.IsKeyword(key) or key == ',':
continue
type_name = default = None
if i < len_tokens:
i += 1
if tokens[i-1].name == '=':
assert i < len_tokens, '%s %s' % (i, tokens)
default, unused_next_token = self.GetName(tokens[i:])
i += len(default)
else:
if tokens[i-1].name != ',':
# We got something like: Type variable.
# Re-adjust the key (variable) and type_name (Type).
key = tokens[i-1].name
type_name = tokens[i-2]
result[key] = (type_name, default)
return result
def handle_template(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '<', token
templated_types = self._GetTemplatedTypes()
# TODO(nnorwitz): for now, just ignore the template params.
token = self._GetNextToken()
if token.token_type == tokenize.NAME:
if token.name == 'class':
return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
elif token.name == 'struct':
return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
elif token.name == 'friend':
return self.handle_friend()
self._AddBackToken(token)
tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
tokens.append(last)
self._AddBackTokens(tokens)
if last.name == '(':
return self.GetMethod(FUNCTION_NONE, templated_types)
# Must be a variable definition.
return None
def handle_true(self):
pass # Nothing to do.
def handle_false(self):
pass # Nothing to do.
def handle_asm(self):
pass # Not needed yet.
def handle_class(self):
return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
# Get base classes.
bases = []
while 1:
token = self._GetNextToken()
assert token.token_type == tokenize.NAME, token
# TODO(nnorwitz): store kind of inheritance...maybe.
if token.name not in ('public', 'protected', 'private'):
# If inheritance type is not specified, it is private.
# Just put the token back so we can form a name.
# TODO(nnorwitz): it would be good to warn about this.
self._AddBackToken(token)
else:
# Check for virtual inheritance.
token = self._GetNextToken()
if token.name != 'virtual':
self._AddBackToken(token)
else:
# TODO(nnorwitz): store that we got virtual for this base.
pass
base, next_token = self.GetName()
bases_ast = self.converter.ToType(base)
assert len(bases_ast) == 1, bases_ast
bases.append(bases_ast[0])
assert next_token.token_type == tokenize.SYNTAX, next_token
if next_token.name == '{':
token = next_token
break
# Support multiple inheritance.
assert next_token.name == ',', next_token
return bases, token
def _GetClass(self, class_type, visibility, templated_types):
class_name = None
class_token = self._GetNextToken()
if class_token.token_type != tokenize.NAME:
assert class_token.token_type == tokenize.SYNTAX, class_token
token = class_token
else:
# Skip any macro (e.g. storage class specifiers) after the
# 'class' keyword.
next_token = self._GetNextToken()
if next_token.token_type == tokenize.NAME:
self._AddBackToken(next_token)
else:
self._AddBackTokens([class_token, next_token])
name_tokens, token = self.GetName()
class_name = ''.join([t.name for t in name_tokens])
bases = None
if token.token_type == tokenize.SYNTAX:
if token.name == ';':
# Forward declaration.
return class_type(class_token.start, class_token.end,
class_name, None, templated_types, None,
self.namespace_stack)
if token.name in '*&':
# Inline forward declaration. Could be method or data.
name_token = self._GetNextToken()
next_token = self._GetNextToken()
if next_token.name == ';':
# Handle data
modifiers = ['class']
return self._CreateVariable(class_token, name_token.name,
class_name,
modifiers, token.name, None)
else:
# Assume this is a method.
tokens = (class_token, token, name_token, next_token)
self._AddBackTokens(tokens)
return self.GetMethod(FUNCTION_NONE, None)
if token.name == ':':
bases, token = self._GetBases()
body = None
if token.token_type == tokenize.SYNTAX and token.name == '{':
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '{', token
ast = AstBuilder(self.GetScope(), self.filename, class_name,
visibility, self.namespace_stack)
body = list(ast.Generate())
if not self._handling_typedef:
token = self._GetNextToken()
if token.token_type != tokenize.NAME:
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
else:
new_class = class_type(class_token.start, class_token.end,
class_name, bases, None,
body, self.namespace_stack)
modifiers = []
return self._CreateVariable(class_token,
token.name, new_class,
modifiers, token.name, None)
else:
if not self._handling_typedef:
self.HandleError('non-typedef token', token)
self._AddBackToken(token)
return class_type(class_token.start, class_token.end, class_name,
bases, None, body, self.namespace_stack)
def handle_namespace(self):
token = self._GetNextToken()
# Support anonymous namespaces.
name = None
if token.token_type == tokenize.NAME:
name = token.name
token = self._GetNextToken()
self.namespace_stack.append(name)
assert token.token_type == tokenize.SYNTAX, token
# Create an internal token that denotes when the namespace is complete.
internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
None, None)
internal_token.whence = token.whence
if token.name == '=':
# TODO(nnorwitz): handle aliasing namespaces.
name, next_token = self.GetName()
assert next_token.name == ';', next_token
self._AddBackToken(internal_token)
else:
assert token.name == '{', token
tokens = list(self.GetScope())
# Replace the trailing } with the internal namespace pop token.
tokens[-1] = internal_token
# Handle namespace with nothing in it.
self._AddBackTokens(tokens)
return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
assert self.in_class
# Nothing much to do.
# TODO(nnorwitz): maybe verify the method name == class name.
# This must be a ctor.
return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
pass # Nothing to do.
def handle_operator(self):
# Pull off the next token(s?) and make that part of the method name.
pass
def handle_sizeof(self):
pass
def handle_case(self):
pass
def handle_switch(self):
pass
def handle_default(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX
assert token.name == ':'
def handle_if(self):
pass
def handle_else(self):
pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
pass # Not needed yet.
def handle_catch(self):
pass # Not needed yet.
def handle_throw(self):
pass # Not needed yet.
def handle_while(self):
pass
def handle_do(self):
pass
def handle_for(self):
pass
def handle_break(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def handle_continue(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
"""Utility method that returns an AstBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
AstBuilder
"""
return AstBuilder(tokenize.GetTokens(source), filename)
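# A minimal usage sketch (illustrative only):
#
#     builder = BuilderFromSource('class Foo {};', '<string>')
#     for node in builder.Generate():
#         print(node.name)        # -> Foo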
def PrintIdentifiers(filename, should_print):
"""Prints all identifiers for a C++ source file.
Args:
filename: 'file1'
should_print: predicate with signature: bool Function(token)
"""
source = utils.ReadFile(filename, False)
if source is None:
sys.stderr.write('Unable to find: %s\n' % filename)
return
#print('Processing %s' % actual_filename)
builder = BuilderFromSource(source, filename)
try:
for node in builder.Generate():
if should_print(node):
print(node.name)
except KeyboardInterrupt:
return
except:
pass
def PrintAllIdentifiers(filenames, should_print):
"""Prints all identifiers for each C++ source file in filenames.
Args:
filenames: ['file1', 'file2', ...]
should_print: predicate with signature: bool Function(token)
"""
for path in filenames:
        PrintIdentifiers(path, should_print)
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print('Processing %s' % filename)
builder = BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# Already printed a warning, print the traceback and continue.
traceback.print_exc()
else:
if utils.DEBUG:
for ast in entire_ast:
print(ast)
if __name__ == '__main__':
main(sys.argv)
|
probcomp/bayeslite
|
refs/heads/master
|
external/plex/dist/Plex/Transitions.py
|
1
|
#
# Plex - Transition Maps
#
# This version represents state sets directly as dicts
# for speed.
#
from copy import copy
import string
from sys import maxint
from types import TupleType
class TransitionMap:
"""
A TransitionMap maps an input event to a set of states.
An input event is one of: a range of character codes,
the empty string (representing an epsilon move), or one
of the special symbols BOL, EOL, EOF.
For characters, this implementation compactly represents
the map by means of a list:
[code_0, states_0, code_1, states_1, code_2, states_2,
..., code_n-1, states_n-1, code_n]
where |code_i| is a character code, and |states_i| is a
set of states corresponding to characters with codes |c|
  in the range |code_i| <= |c| < |code_i+1|.
The following invariants hold:
n >= 1
code_0 == -maxint
code_n == maxint
code_i < code_i+1 for i in 0..n-1
states_0 == states_n-1
Mappings for the special events '', BOL, EOL, EOF are
kept separately in a dictionary.
"""
map = None # The list of codes and states
special = None # Mapping for special events
def __init__(self, map = None, special = None):
if not map:
map = [-maxint, {}, maxint]
if not special:
special = {}
self.map = map
self.special = special
#self.check() ###
def add(self, event, new_state,
TupleType = TupleType):
"""
Add transition to |new_state| on |event|.
"""
if type(event) == TupleType:
code0, code1 = event
i = self.split(code0)
j = self.split(code1)
map = self.map
while i < j:
map[i + 1][new_state] = 1
i = i + 2
else:
self.get_special(event)[new_state] = 1
def add_set(self, event, new_set,
TupleType = TupleType):
"""
Add transitions to the states in |new_set| on |event|.
"""
if type(event) == TupleType:
code0, code1 = event
i = self.split(code0)
j = self.split(code1)
map = self.map
while i < j:
map[i + 1].update(new_set)
i = i + 2
else:
self.get_special(event).update(new_set)
def get_epsilon(self):
"""
Return the mapping for epsilon, or None.
"""
return self.special.get('')
def items(self,
len = len):
"""
Return the mapping as a list of ((code1, code2), state_set) and
(special_event, state_set) pairs.
"""
result = []
map = self.map
else_set = map[1]
i = 0
n = len(map) - 1
code0 = map[0]
while i < n:
set = map[i + 1]
code1 = map[i + 2]
if set or else_set:
result.append(((code0, code1), set))
code0 = code1
i = i + 2
for event, set in self.special.items():
if set:
result.append((event, set))
return result
# ------------------- Private methods --------------------
def split(self, code,
len = len, maxint = maxint):
"""
Search the list for the position of the split point for |code|,
inserting a new split point if necessary. Returns index |i| such
that |code| == |map[i]|.
"""
# We use a funky variation on binary search.
map = self.map
hi = len(map) - 1
# Special case: code == map[-1]
if code == maxint:
return hi
# General case
lo = 0
# loop invariant: map[lo] <= code < map[hi] and hi - lo >= 2
while hi - lo >= 4:
# Find midpoint truncated to even index
mid = ((lo + hi) / 2) & ~1
if code < map[mid]:
hi = mid
else:
lo = mid
# map[lo] <= code < map[hi] and hi - lo == 2
if map[lo] == code:
return lo
else:
map[hi:hi] = [code, map[hi - 1].copy()]
#self.check() ###
return hi
def get_special(self, event):
"""
Get state set for special event, adding a new entry if necessary.
"""
special = self.special
set = special.get(event, None)
if not set:
set = {}
special[event] = set
return set
# --------------------- Conversion methods -----------------------
def __str__(self):
map_strs = []
map = self.map
n = len(map)
i = 0
while i < n:
code = map[i]
if code == -maxint:
code_str = "-inf"
elif code == maxint:
code_str = "inf"
else:
code_str = str(code)
map_strs.append(code_str)
i = i + 1
if i < n:
map_strs.append(state_set_str(map[i]))
i = i + 1
special_strs = {}
for event, set in self.special.items():
special_strs[event] = state_set_str(set)
return "[%s]+%s" % (
string.join(map_strs, ","),
special_strs
)
# --------------------- Debugging methods -----------------------
def check(self):
"""Check data structure integrity."""
if not self.map[-3] < self.map[-1]:
print self
assert 0
def dump(self, file):
map = self.map
i = 0
n = len(map) - 1
while i < n:
self.dump_range(map[i], map[i + 2], map[i + 1], file)
i = i + 2
for event, set in self.special.items():
if set:
if not event:
event = 'empty'
self.dump_trans(event, set, file)
def dump_range(self, code0, code1, set, file):
if set:
if code0 == -maxint:
if code1 == maxint:
k = "any"
else:
k = "< %s" % self.dump_char(code1)
elif code1 == maxint:
k = "> %s" % self.dump_char(code0 - 1)
elif code0 == code1 - 1:
k = self.dump_char(code0)
else:
k = "%s..%s" % (self.dump_char(code0),
self.dump_char(code1 - 1))
self.dump_trans(k, set, file)
def dump_char(self, code):
if 0 <= code <= 255:
return repr(chr(code))
else:
return "chr(%d)" % code
def dump_trans(self, key, set, file):
file.write(" %s --> %s\n" % (key, self.dump_set(set)))
def dump_set(self, set):
return state_set_str(set)
#
# State set manipulation functions
#
#def merge_state_sets(set1, set2):
# for state in set2.keys():
# set1[state] = 1
def state_set_str(set):
state_list = set.keys()
str_list = []
for state in state_list:
str_list.append("S%d" % state.number)
return "[%s]" % string.join(str_list, ",")
|
numenta/nupic.fluent.server
|
refs/heads/master
|
utils/__init__.py
|
12133432
| |
eltonsantos/django
|
refs/heads/master
|
django/contrib/staticfiles/templatetags/__init__.py
|
12133432
| |
OpenData-Canarias/ckanext-feedNTI
|
refs/heads/master
|
ckanext/feedNTI/tests/__init__.py
|
12133432
| |
jhawkesworth/ansible
|
refs/heads/devel
|
test/units/modules/network/opx/__init__.py
|
12133432
| |
mrquim/mrquimrepo
|
refs/heads/master
|
repo/script.module.schism.common/lib/requests/__init__.py
|
71
|
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2016 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.13.0'
__build__ = 0x021300
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
import warnings
# urllib3's DependencyWarnings should be silenced.
from .packages.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
|
samsu/neutron
|
refs/heads/master
|
db/migration/models/head.py
|
6
|
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The module provides all database models at the current HEAD.
Its purpose is to create metadata comparable with the current database schema.
Based on this comparison, the database can be healed with a healing migration.
"""
from neutron.db import agents_db # noqa
from neutron.db import agentschedulers_db # noqa
from neutron.db import allowedaddresspairs_db # noqa
from neutron.db import dvr_mac_db # noqa
from neutron.db import external_net_db # noqa
from neutron.db import extradhcpopt_db # noqa
from neutron.db import extraroute_db # noqa
from neutron.db.firewall import firewall_db # noqa
from neutron.db import l3_agentschedulers_db # noqa
from neutron.db import l3_attrs_db # noqa
from neutron.db import l3_db # noqa
from neutron.db import l3_dvrscheduler_db # noqa
from neutron.db import l3_gwmode_db # noqa
from neutron.db import l3_hamode_db # noqa
from neutron.db.loadbalancer import loadbalancer_db # noqa
from neutron.db.metering import metering_db # noqa
from neutron.db import model_base
from neutron.db import models_v2 # noqa
from neutron.db import portbindings_db # noqa
from neutron.db import portsecurity_db # noqa
from neutron.db import quota_db # noqa
from neutron.db import routedserviceinsertion_db # noqa
from neutron.db import routerservicetype_db # noqa
from neutron.db import securitygroups_db # noqa
from neutron.db import servicetype_db # noqa
from neutron.db.vpn import vpn_db # noqa
from neutron.plugins.bigswitch.db import consistency_db # noqa
from neutron.plugins.bigswitch import routerrule_db # noqa
from neutron.plugins.brocade.db import models as brocade_models # noqa
from neutron.plugins.cisco.db.l3 import l3_models # noqa
from neutron.plugins.cisco.db import n1kv_models_v2 # noqa
from neutron.plugins.cisco.db import network_models_v2 # noqa
from neutron.plugins.hyperv import model # noqa
from neutron.plugins.linuxbridge.db import l2network_models_v2 # noqa
from neutron.plugins.metaplugin import meta_models_v2 # noqa
from neutron.plugins.ml2.drivers.arista import db # noqa
from neutron.plugins.ml2.drivers.brocade.db import ( # noqa
models as ml2_brocade_models)
from neutron.plugins.ml2.drivers.cisco.apic import apic_model # noqa
from neutron.plugins.ml2.drivers.cisco.nexus import ( # noqa
nexus_models_v2 as ml2_nexus_models_v2)
from neutron.plugins.ml2.drivers import type_flat # noqa
from neutron.plugins.ml2.drivers import type_gre # noqa
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron.plugins.ml2.drivers import type_vxlan # noqa
from neutron.plugins.ml2 import models # noqa
from neutron.plugins.mlnx.db import mlnx_models_v2 # noqa
from neutron.plugins.nec.db import models as nec_models # noqa
from neutron.plugins.nec.db import packetfilter as nec_packetfilter # noqa
from neutron.plugins.nec.db import router # noqa
from neutron.plugins.nuage import nuage_models # noqa
from neutron.plugins.openvswitch import ovs_models_v2 # noqa
from neutron.plugins.ryu.db import models_v2 as ryu_models_v2 # noqa
from neutron.plugins.vmware.dbexts import lsn_db # noqa
from neutron.plugins.vmware.dbexts import maclearning # noqa
from neutron.plugins.vmware.dbexts import models as vmware_models # noqa
from neutron.plugins.vmware.dbexts import networkgw_db # noqa
from neutron.plugins.vmware.dbexts import qos_db # noqa
from neutron.plugins.vmware.dbexts import vcns_models # noqa
from neutron.services.loadbalancer import agent_scheduler # noqa
from neutron.services.loadbalancer.drivers.embrane import ( # noqa
models as embrane_models)
from neutron.services.vpn.service_drivers import cisco_csr_db # noqa
def get_metadata():
return model_base.BASEV2.metadata
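# Illustrative sketch (not part of this module): the metadata returned above
# can be diffed against a live schema with Alembic's autogenerate API, which
# is one way the "healing" comparison described in the docstring could work.
#
#   from alembic.autogenerate import compare_metadata
#   from alembic.migration import MigrationContext
#
#   def diff_against(engine):
#       with engine.connect() as conn:
#           ctx = MigrationContext.configure(conn)
#           return compare_metadata(ctx, get_metadata())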
|
nirmeshk/oh-mainline
|
refs/heads/master
|
vendor/packages/distribute/setuptools/__init__.py
|
132
|
"""Extensions to the 'distutils' for large or complex distributions"""
from setuptools.extension import Extension, Library
from setuptools.dist import Distribution, Feature, _get_unpatched
import distutils.core, setuptools.command
from setuptools.depends import Require
from distutils.core import Command as _Command
from distutils.util import convert_path
import os
import sys
__version__ = '0.6'
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
# This marker is used to simplify the process that checks whether the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The distribute_setup script for instance, will check if this
# attribute is present to decide whether to reinstall the package
# or not.
_distribute = True
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
def find_packages(where='.', exclude=()):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
will be converted to the appropriate local path syntax. 'exclude' is a
sequence of package names to exclude; '*' can be used as a wildcard in the
names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
'foo' itself).
"""
out = []
stack=[(convert_path(where), '')]
while stack:
where,prefix = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where,name)
if ('.' not in name and os.path.isdir(fn) and
os.path.isfile(os.path.join(fn,'__init__.py'))
):
out.append(prefix+name); stack.append((fn,prefix+name+'.'))
for pat in list(exclude)+['ez_setup', 'distribute_setup']:
from fnmatch import fnmatchcase
out = [item for item in out if not fnmatchcase(item,pat)]
return out
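# Typical setup.py usage of find_packages (illustrative; 'myproject' is a
# hypothetical distribution name):
#
#   from setuptools import setup, find_packages
#
#   setup(
#       name='myproject',
#       version='1.0',
#       packages=find_packages(exclude=['tests', 'tests.*']),
#   )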
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
# Add support for keyword arguments
_Command.__init__(self,dist)
for k,v in kw.items():
setattr(self,k,v)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
for k,v in kw.items():
setattr(cmd,k,v) # update command with keywords
return cmd
import distutils.core
distutils.core.Command = Command # we can't patch distutils.cmd, alas
def findall(dir = os.curdir):
"""Find all files under 'dir' and return the list of full filenames
(relative to 'dir').
"""
all_files = []
for base, dirs, files in os.walk(dir):
if base==os.curdir or base.startswith(os.curdir+os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files
import distutils.filelist
distutils.filelist.findall = findall # fix findall bug in distutils.
# sys.dont_write_bytecode was introduced in Python 2.6.
if ((hasattr(sys, "dont_write_bytecode") and sys.dont_write_bytecode) or
(not hasattr(sys, "dont_write_bytecode") and os.environ.get("PYTHONDONTWRITEBYTECODE"))):
_dont_write_bytecode = True
else:
_dont_write_bytecode = False
|
ogrisel/scipy
|
refs/heads/master
|
scipy/io/tests/test_mmio.py
|
40
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from tempfile import mkdtemp, mktemp
import os
import shutil
from numpy import array,transpose
from numpy.testing import (TestCase, run_module_suite,
                           assert_array_almost_equal, assert_equal, rand)
import scipy.sparse
from scipy.io.mmio import mminfo,mmread,mmwrite
class TestMMIOArray(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_simple(self):
a = [[1,2],[3,4]]
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(2,2,4,'array','integer','general'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_simple_rectangular(self):
a = [[1,2,3],[4,5,6]]
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(2,3,6,'array','integer','general'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_simple_rectangular_real(self):
a = [[1,2],[3.5,4],[5,6]]
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(3,2,6,'array','real','general'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_simple_real(self):
a = [[1,2],[3,4.0]]
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(2,2,4,'array','real','general'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_simple_complex(self):
a = [[1,2],[3,4j]]
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(2,2,4,'array','complex','general'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_simple_symmetric(self):
a = [[1,2],[2,4]]
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(2,2,4,'array','integer','symmetric'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_simple_skew_symmetric(self):
a = [[1,2],[-2,4]]
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(2,2,4,'array','integer','skew-symmetric'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_simple_skew_symmetric_float(self):
a = array([[1,2],[-2.0,4]],'f')
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(2,2,4,'array','real','skew-symmetric'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_simple_hermitian(self):
a = [[1,2+3j],[2-3j,4]]
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(2,2,4,'array','complex','hermitian'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_random_symmetric_real(self):
sz = (20,20)
a = rand(*sz)
a = a + transpose(a)
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(20,20,400,'array','real','symmetric'))
b = mmread(fn)
assert_array_almost_equal(a,b)
def test_random_rect_real(self):
sz = (20,15)
a = rand(*sz)
fn = self.fn
mmwrite(fn,a)
assert_equal(mminfo(fn),(20,15,300,'array','real','general'))
b = mmread(fn)
assert_array_almost_equal(a,b)
_general_example = '''\
%%MatrixMarket matrix coordinate real general
%=================================================================================
%
% This ASCII file represents a sparse MxN matrix with L
% nonzeros in the following Matrix Market format:
%
% +----------------------------------------------+
% |%%MatrixMarket matrix coordinate real general | <--- header line
% |% | <--+
% |% comments | |-- 0 or more comment lines
% |% | <--+
% | M N L | <--- rows, columns, entries
% | I1 J1 A(I1, J1) | <--+
% | I2 J2 A(I2, J2) | |
% | I3 J3 A(I3, J3) | |-- L lines
% | . . . | |
% | IL JL A(IL, JL) | <--+
% +----------------------------------------------+
%
% Indices are 1-based, i.e. A(1,1) is the first element.
%
%=================================================================================
5 5 8
1 1 1.000e+00
2 2 1.050e+01
3 3 1.500e-02
1 4 6.000e+00
4 2 2.505e+02
4 4 -2.800e+02
4 5 3.332e+01
5 5 1.200e+01
'''
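# Illustrative sketch (not one of the tests below): mmread also accepts an
# open file-like object, so the example above can be parsed in memory:
#
#   from StringIO import StringIO  # Python 2, matching this module's era
#   m = mmread(StringIO(_general_example))
#   assert m.shape == (5, 5)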
_hermitian_example = '''\
%%MatrixMarket matrix coordinate complex hermitian
5 5 7
1 1 1.0 0
2 2 10.5 0
4 2 250.5 22.22
3 3 1.5e-2 0
4 4 -2.8e2 0
5 5 12. 0
5 4 0 33.32
'''
_skew_example = '''\
%%MatrixMarket matrix coordinate real skew-symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 0
'''
_symmetric_example = '''\
%%MatrixMarket matrix coordinate real symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 8
'''
_symmetric_pattern_example = '''\
%%MatrixMarket matrix coordinate pattern symmetric
5 5 7
1 1
2 2
4 2
3 3
4 4
5 5
5 4
'''
class TestMMIOCoordinate(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_read_general(self):
fn = self.fn
f = open(fn,'w')
f.write(_general_example)
f.close()
assert_equal(mminfo(fn),(5,5,8,'coordinate','real','general'))
a = [[1, 0, 0, 6, 0],
[0, 10.5, 0, 0, 0],
[0, 0, .015, 0, 0],
[0, 250.5, 0, -280, 33.32],
[0, 0, 0, 0, 12]]
b = mmread(fn).todense()
assert_array_almost_equal(a,b)
def test_read_hermitian(self):
fn = self.fn
f = open(fn,'w')
f.write(_hermitian_example)
f.close()
assert_equal(mminfo(fn),(5,5,7,'coordinate','complex','hermitian'))
a = [[1, 0, 0, 0, 0],
[0, 10.5, 0, 250.5 - 22.22j, 0],
[0, 0, .015, 0, 0],
[0, 250.5 + 22.22j, 0, -280, -33.32j],
[0, 0, 0, 33.32j, 12]]
b = mmread(fn).todense()
assert_array_almost_equal(a,b)
def test_read_skew(self):
fn = self.fn
f = open(fn,'w')
f.write(_skew_example)
f.close()
assert_equal(mminfo(fn),(5,5,7,'coordinate','real','skew-symmetric'))
a = [[1, 0, 0, 0, 0],
[0, 10.5, 0, -250.5, 0],
[0, 0, .015, 0, 0],
[0, 250.5, 0, -280, 0],
[0, 0, 0, 0, 12]]
b = mmread(fn).todense()
assert_array_almost_equal(a,b)
def test_read_symmetric(self):
fn = self.fn
f = open(fn,'w')
f.write(_symmetric_example)
f.close()
assert_equal(mminfo(fn),(5,5,7,'coordinate','real','symmetric'))
a = [[1, 0, 0, 0, 0],
[0, 10.5, 0, 250.5, 0],
[0, 0, .015, 0, 0],
[0, 250.5, 0, -280, 8],
[0, 0, 0, 8, 12]]
b = mmread(fn).todense()
assert_array_almost_equal(a,b)
def test_read_symmetric_pattern(self):
fn = self.fn
f = open(fn,'w')
f.write(_symmetric_pattern_example)
f.close()
assert_equal(mminfo(fn),(5,5,7,'coordinate','pattern','symmetric'))
a = [[1, 0, 0, 0, 0],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 1, 1],
[0, 0, 0, 1, 1]]
b = mmread(fn).todense()
assert_array_almost_equal(a,b)
def test_empty_write_read(self):
        # http://projects.scipy.org/scipy/ticket/883
b = scipy.sparse.coo_matrix((10,10))
fn = self.fn
mmwrite(fn,b)
assert_equal(mminfo(fn),(10,10,0,'coordinate','real','general'))
a = b.todense()
b = mmread(fn).todense()
assert_array_almost_equal(a,b)
def test_bzip2_py3(self):
# test if fix for #2152 works
try:
# bz2 module isn't always built when building Python.
import bz2
        except ImportError:
return
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
b = scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5))
fn = self.fn
mmwrite(fn, b)
fn_bzip2 = "%s.bz2" % fn
with open(fn, 'rb') as f_in:
f_out = bz2.BZ2File(fn_bzip2, 'wb')
f_out.write(f_in.read())
f_out.close()
a = mmread(fn_bzip2).todense()
assert_array_almost_equal(a, b.todense())
def test_gzip_py3(self):
# test if fix for #2152 works
try:
# gzip module can be missing from Python installation
import gzip
        except ImportError:
return
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
b = scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5))
fn = self.fn
mmwrite(fn, b)
fn_gzip = "%s.gz" % fn
with open(fn, 'rb') as f_in:
f_out = gzip.open(fn_gzip, 'wb')
f_out.write(f_in.read())
f_out.close()
a = mmread(fn_gzip).todense()
assert_array_almost_equal(a, b.todense())
def test_real_write_read(self):
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
b = scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5))
fn = self.fn
mmwrite(fn,b)
assert_equal(mminfo(fn),(5,5,8,'coordinate','real','general'))
a = b.todense()
b = mmread(fn).todense()
assert_array_almost_equal(a,b)
def test_complex_write_read(self):
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
b = scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5))
fn = self.fn
mmwrite(fn,b)
assert_equal(mminfo(fn),(5,5,8,'coordinate','complex','general'))
a = b.todense()
b = mmread(fn).todense()
assert_array_almost_equal(a,b)
def test_sparse_formats(self):
mats = []
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
mats.append(scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)))
V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
mats.append(scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)))
for mat in mats:
expected = mat.todense()
for fmt in ['csr','csc','coo']:
fn = mktemp(dir=self.tmpdir) # safe, we own tmpdir
mmwrite(fn, mat.asformat(fmt))
result = mmread(fn).todense()
assert_array_almost_equal(result, expected)
if __name__ == "__main__":
run_module_suite()
|
matmutant/sl4a
|
refs/heads/master
|
python/src/Lib/json/tool.py
|
58
|
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -mjson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -mjson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit("{0} [infile [outfile]]".format(sys.argv[0]))
try:
obj = json.load(infile)
except ValueError, e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=4)
outfile.write('\n')
if __name__ == '__main__':
main()
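# Programmatic equivalent of the shell pipeline in the module docstring
# (illustrative sketch):
#
#   import json
#   print(json.dumps({"json": "obj"}, sort_keys=True, indent=4))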
|
alokotosh/mm-master
|
refs/heads/master
|
mm/runner.py
|
658
|
import sys
import os
def run():
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
## FIXME: this is kind of crude; if we could create a fake pip
## module, then exec into it and update pip.__path__ properly, we
## wouldn't have to update sys.path:
sys.path.insert(0, base)
import pip
return pip.main()
if __name__ == '__main__':
exit = run()
if exit:
sys.exit(exit)
|
wtgme/labeldoc2vec
|
refs/heads/master
|
gensim/models/lsi_dispatcher.py
|
17
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s SIZE_OF_JOBS_QUEUE
Dispatcher process which orchestrates distributed LSI computations. Run this \
script only once, on any node in your cluster.
Example: python -m gensim.models.lsi_dispatcher
"""
from __future__ import with_statement
import os, sys, logging, threading, time
from six import iteritems, itervalues
try:
from Queue import Queue
except ImportError:
from queue import Queue
import Pyro4
from gensim import utils
logger = logging.getLogger("gensim.models.lsi_dispatcher")
# How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
# A small number is usually enough, unless iteration over the corpus is very very
# slow (slower than the actual computation of LSI), in which case you can override
# this value from the command line, e.g. run "python ./lsi_dispatcher.py 100"
MAX_JOBS_QUEUE = 10
# timeout for the Queue object put/get blocking methods.
# it should really be infinity, but then keyboard interrupts don't work.
# so this is really just a hack, see http://bugs.python.org/issue1360
HUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year
class Dispatcher(object):
"""
Dispatcher object that communicates and coordinates individual workers.
There should never be more than one dispatcher running at any one time.
"""
def __init__(self, maxsize=0):
"""
Note that the constructor does not fully initialize the dispatcher;
use the `initialize()` function to populate it with workers etc.
"""
self.maxsize = maxsize
self.workers = {}
self.callback = None # a pyro proxy to this object (unknown at init time, but will be set later)
@Pyro4.expose
def initialize(self, **model_params):
"""
`model_params` are parameters used to initialize individual workers (gets
handed all the way down to worker.initialize()).
"""
self.jobs = Queue(maxsize=self.maxsize)
self.lock_update = threading.Lock()
self._jobsdone = 0
self._jobsreceived = 0
# locate all available workers and store their proxies, for subsequent RMI calls
self.workers = {}
with utils.getNS() as ns:
self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher') # = self
for name, uri in iteritems(ns.list(prefix='gensim.lsi_worker')):
try:
worker = Pyro4.Proxy(uri)
workerid = len(self.workers)
# make time consuming methods work asynchronously
logger.info("registering worker #%i from %s" % (workerid, uri))
worker.initialize(workerid, dispatcher=self.callback, **model_params)
self.workers[workerid] = worker
except Pyro4.errors.PyroError:
logger.exception("unresponsive worker at %s, deleting it from the name server" % uri)
ns.remove(name)
if not self.workers:
raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')
@Pyro4.expose
def getworkers(self):
"""
Return pyro URIs of all registered workers.
"""
return [worker._pyroUri for worker in itervalues(self.workers)]
@Pyro4.expose
def getjob(self, worker_id):
logger.info("worker #%i requesting a new job" % worker_id)
job = self.jobs.get(block=True, timeout=1)
logger.info("worker #%i got a new job (%i left)" % (worker_id, self.jobs.qsize()))
return job
@Pyro4.expose
def putjob(self, job):
self._jobsreceived += 1
self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
logger.info("added a new job (len(queue)=%i items)" % self.jobs.qsize())
@Pyro4.expose
def getstate(self):
"""
Merge projections from across all workers and return the final projection.
"""
logger.info("end of input, assigning all remaining jobs")
logger.debug("jobs done: %s, jobs received: %s" % (self._jobsdone, self._jobsreceived))
while self._jobsdone < self._jobsreceived:
time.sleep(0.5) # check every half a second
# TODO: merge in parallel, so that we're done in `log_2(workers)` merges,
# and not `workers - 1` merges!
# but merging only takes place once, after all input data has been processed,
# so the overall effect would be small... compared to the amount of coding :-)
logger.info("merging states from %i workers" % len(self.workers))
workers = list(self.workers.items())
result = workers[0][1].getstate()
for workerid, worker in workers[1:]:
logger.info("pulling state from worker %s" % workerid)
result.merge(worker.getstate())
logger.info("sending out merged projection")
return result
@Pyro4.expose
def reset(self):
"""
Initialize all workers for a new decomposition.
"""
for workerid, worker in iteritems(self.workers):
logger.info("resetting worker %s" % workerid)
worker.reset()
worker.requestjob()
self._jobsdone = 0
self._jobsreceived = 0
@Pyro4.expose
@Pyro4.oneway
@utils.synchronous('lock_update')
def jobdone(self, workerid):
"""
A worker has finished its job. Log this event and then asynchronously
transfer control back to the worker.
In this way, control flow basically oscillates between dispatcher.jobdone()
        and worker.requestjob().
"""
self._jobsdone += 1
logger.info("worker #%s finished job #%i" % (workerid, self._jobsdone))
worker = self.workers[workerid]
worker.requestjob() # tell the worker to ask for another job, asynchronously (one-way)
def jobsdone(self):
"""Wrap self._jobsdone, needed for remote access through proxies"""
return self._jobsdone
@Pyro4.oneway
def exit(self):
"""
Terminate all registered workers and then the dispatcher.
"""
for workerid, worker in iteritems(self.workers):
logger.info("terminating worker %s" % workerid)
worker.exit()
logger.info("terminating dispatcher")
os._exit(0) # exit the whole process (not just this thread ala sys.exit())
#endclass Dispatcher
def main():
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger.info("running %s" % " ".join(sys.argv))
program = os.path.basename(sys.argv[0])
# make sure we have enough cmd line parameters
if len(sys.argv) < 1:
print(globals()["__doc__"] % locals())
sys.exit(1)
if len(sys.argv) < 2:
maxsize = MAX_JOBS_QUEUE
else:
maxsize = int(sys.argv[1])
utils.pyro_daemon('gensim.lsi_dispatcher', Dispatcher(maxsize=maxsize))
logger.info("finished running %s" % program)
if __name__ == '__main__':
main()
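# Deployment sketch based on the module docstring (illustrative, not part of
# this module):
#
#   # on every node in the cluster, start one or more workers:
#   python -m gensim.models.lsi_worker &
#   # on any single node, start this dispatcher (queue size is optional):
#   python -m gensim.models.lsi_dispatcher 100
#   # then, in a driver script, request a distributed decomposition:
#   # lsi = gensim.models.LsiModel(corpus, id2word=id2word, distributed=True)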
|
nicholasserra/sentry
|
refs/heads/master
|
src/sentry/migrations/0043_auto__chg_field_option_value__chg_field_projectoption_value.py
|
36
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Option.value'
db.alter_column('sentry_option', 'value', self.gf('picklefield.fields.PickledObjectField')())
# Changing field 'ProjectOption.value'
db.alter_column('sentry_projectoptions', 'value', self.gf('picklefield.fields.PickledObjectField')())
def backwards(self, orm):
# Changing field 'Option.value'
db.alter_column('sentry_option', 'value', self.gf('django.db.models.fields.TextField')())
# Changing field 'ProjectOption.value'
db.alter_column('sentry_projectoptions', 'value', self.gf('django.db.models.fields.TextField')())
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_project_set'", 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
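# Applying this schema change with South would look like (illustrative):
#
#   python manage.py migrate sentry 0043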
|
cherrypy/cheroot
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
"""Cheroot package setuptools installer."""
import setuptools
if __name__ == '__main__':
setuptools.setup(use_scm_version=True)
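# Note: use_scm_version=True asks setuptools_scm to derive the version from
# the VCS tag, so building from a checkout needs that plugin (illustrative):
#
#   pip install setuptools_scm
#   python setup.py sdist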
|
chuan9/chromium-crosswalk
|
refs/heads/master
|
third_party/google_appengine_cloudstorage/cloudstorage/storage_api.py
|
102
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Python wrappers for the Google Storage RESTful API."""
__all__ = ['ReadBuffer',
'StreamingBuffer',
]
import collections
import logging
import os
import urlparse
from . import api_utils
from . import common
from . import errors
from . import rest_api
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
def _get_storage_api(retry_params, account_id=None):
"""Returns storage_api instance for API methods.
Args:
    retry_params: An instance of api_utils.RetryParams. If None,
      the thread's default will be used.
account_id: Internal-use only.
Returns:
A storage_api instance to handle urlfetch work to GCS.
On dev appserver, this instance by default will talk to a local stub
unless common.ACCESS_TOKEN is set. That token will be used to talk
to the real GCS.
"""
api = _StorageApi(_StorageApi.full_control_scope,
service_account_id=account_id,
retry_params=retry_params)
if common.local_run() and not common.get_access_token():
api.api_url = common.local_api_url()
if common.get_access_token():
api.token = common.get_access_token()
return api
class _StorageApi(rest_api._RestApi):
"""A simple wrapper for the Google Storage RESTful API.
WARNING: Do NOT directly use this api. It's an implementation detail
and is subject to change at any release.
All async methods have similar args and returns.
Args:
path: The path to the Google Storage object or bucket, e.g.
'/mybucket/myfile' or '/mybucket'.
**kwd: Options for urlfetch. e.g.
headers={'content-type': 'text/plain'}, payload='blah'.
Returns:
    An ndb Future. When fulfilled, future.get_result() should return
    a tuple of (status, headers, content) that represents an HTTP response
    from the Google Cloud Storage XML API.
"""
api_url = 'https://storage.googleapis.com'
read_only_scope = 'https://www.googleapis.com/auth/devstorage.read_only'
read_write_scope = 'https://www.googleapis.com/auth/devstorage.read_write'
full_control_scope = 'https://www.googleapis.com/auth/devstorage.full_control'
def __getstate__(self):
"""Store state as part of serialization/pickling.
Returns:
A tuple (of dictionaries) with the state of this object
"""
return (super(_StorageApi, self).__getstate__(), {'api_url': self.api_url})
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the tuple from a __getstate__ call
"""
superstate, localstate = state
super(_StorageApi, self).__setstate__(superstate)
self.api_url = localstate['api_url']
@api_utils._eager_tasklet
@ndb.tasklet
def do_request_async(self, url, method='GET', headers=None, payload=None,
deadline=None, callback=None):
"""Inherit docs.
This method translates urlfetch exceptions to more service specific ones.
"""
if headers is None:
headers = {}
if 'x-goog-api-version' not in headers:
headers['x-goog-api-version'] = '2'
headers['accept-encoding'] = 'gzip, *'
try:
resp_tuple = yield super(_StorageApi, self).do_request_async(
url, method=method, headers=headers, payload=payload,
deadline=deadline, callback=callback)
except urlfetch.DownloadError, e:
raise errors.TimeoutError(
'Request to Google Cloud Storage timed out.', e)
raise ndb.Return(resp_tuple)
def post_object_async(self, path, **kwds):
"""POST to an object."""
return self.do_request_async(self.api_url + path, 'POST', **kwds)
def put_object_async(self, path, **kwds):
"""PUT an object."""
return self.do_request_async(self.api_url + path, 'PUT', **kwds)
def get_object_async(self, path, **kwds):
"""GET an object.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def delete_object_async(self, path, **kwds):
"""DELETE an object.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'DELETE', **kwds)
def head_object_async(self, path, **kwds):
"""HEAD an object.
Depending on request headers, HEAD returns various object properties,
e.g. Content-Length, Last-Modified, and ETag.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'HEAD', **kwds)
def get_bucket_async(self, path, **kwds):
"""GET a bucket."""
return self.do_request_async(self.api_url + path, 'GET', **kwds)
_StorageApi = rest_api.add_sync_methods(_StorageApi)
class ReadBuffer(object):
"""A class for reading Google storage files."""
DEFAULT_BUFFER_SIZE = 1024 * 1024
MAX_REQUEST_SIZE = 30 * DEFAULT_BUFFER_SIZE
def __init__(self,
api,
path,
buffer_size=DEFAULT_BUFFER_SIZE,
max_request_size=MAX_REQUEST_SIZE):
"""Constructor.
Args:
api: A StorageApi instance.
path: Quoted/escaped path to the object, e.g. /mybucket/myfile
buffer_size: buffer size. The ReadBuffer keeps
one buffer. But there may be a pending future that contains
        a second buffer. This size must not exceed max_request_size.
max_request_size: Max bytes to request in one urlfetch.
"""
self._api = api
self._path = path
self.name = api_utils._unquote_filename(path)
self.closed = False
assert buffer_size <= max_request_size
self._buffer_size = buffer_size
self._max_request_size = max_request_size
self._offset = 0
self._buffer = _Buffer()
self._etag = None
self._request_next_buffer()
status, headers, _ = self._api.head_object(path)
errors.check_status(status, [200], path, resp_headers=headers)
self._file_size = long(headers['content-length'])
self._check_etag(headers.get('etag'))
if self._file_size == 0:
self._buffer_future = None
def __getstate__(self):
"""Store state as part of serialization/pickling.
The contents of the read buffer are not stored, only the current offset for
data read by the client. A new read buffer is established at unpickling.
The head information for the object (file size and etag) are stored to
reduce startup and ensure the file has not changed.
Returns:
A dictionary with the state of this object
"""
return {'api': self._api,
'path': self._path,
'buffer_size': self._buffer_size,
'request_size': self._max_request_size,
'etag': self._etag,
'size': self._file_size,
'offset': self._offset,
'closed': self.closed}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the dictionary from a __getstate__ call
Along with restoring the state, pre-fetch the next read buffer.
"""
self._api = state['api']
self._path = state['path']
self.name = api_utils._unquote_filename(self._path)
self._buffer_size = state['buffer_size']
self._max_request_size = state['request_size']
self._etag = state['etag']
self._file_size = state['size']
self._offset = state['offset']
self._buffer = _Buffer()
self.closed = state['closed']
self._buffer_future = None
if self._remaining() and not self.closed:
self._request_next_buffer()
def __iter__(self):
"""Iterator interface.
Note the ReadBuffer container itself is the iterator. It's
(quote PEP0234)
    'destructive: they consume all the values and a second iterator
cannot easily be created that iterates independently over the same values.
You could open the file for the second time, or seek() to the beginning.'
Returns:
Self.
"""
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration()
return line
def readline(self, size=-1):
"""Read one line delimited by '\n' from the file.
A trailing newline character is kept in the string. It may be absent when a
file ends with an incomplete line. If the size argument is non-negative,
it specifies the maximum string size (counting the newline) to return.
A negative size is the same as unspecified. Empty string is returned
only when EOF is encountered immediately.
Args:
size: Maximum number of bytes to read. If not specified, readline stops
only on '\n' or EOF.
Returns:
The data read as a string.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
if size == 0 or not self._remaining():
return ''
data_list = []
newline_offset = self._buffer.find_newline(size)
while newline_offset < 0:
data = self._buffer.read(size)
size -= len(data)
self._offset += len(data)
data_list.append(data)
if size == 0 or not self._remaining():
return ''.join(data_list)
self._buffer.reset(self._buffer_future.get_result())
self._request_next_buffer()
newline_offset = self._buffer.find_newline(size)
data = self._buffer.read_to_offset(newline_offset + 1)
self._offset += len(data)
data_list.append(data)
return ''.join(data_list)
def read(self, size=-1):
"""Read data from RAW file.
Args:
size: Number of bytes to read as integer. Actual number of bytes
read is always equal to size unless EOF is reached. If size is
negative or unspecified, read the entire file.
Returns:
data read as str.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
if not self._remaining():
return ''
data_list = []
while True:
remaining = self._buffer.remaining()
if size >= 0 and size < remaining:
data_list.append(self._buffer.read(size))
self._offset += size
break
else:
size -= remaining
self._offset += remaining
data_list.append(self._buffer.read())
if self._buffer_future is None:
if size < 0 or size >= self._remaining():
needs = self._remaining()
else:
needs = size
data_list.extend(self._get_segments(self._offset, needs))
self._offset += needs
break
if self._buffer_future:
self._buffer.reset(self._buffer_future.get_result())
self._buffer_future = None
if self._buffer_future is None:
self._request_next_buffer()
return ''.join(data_list)
def _remaining(self):
return self._file_size - self._offset
def _request_next_buffer(self):
"""Request next buffer.
Requires self._offset and self._buffer are in consistent state
"""
self._buffer_future = None
next_offset = self._offset + self._buffer.remaining()
if not hasattr(self, '_file_size') or next_offset != self._file_size:
self._buffer_future = self._get_segment(next_offset,
self._buffer_size)
def _get_segments(self, start, request_size):
"""Get segments of the file from Google Storage as a list.
A large request is broken into segments to avoid hitting urlfetch
response size limit. Each segment is returned from a separate urlfetch.
Args:
start: start offset to request. Inclusive. Have to be within the
range of the file.
request_size: number of bytes to request.
Returns:
A list of file segments in order
"""
if not request_size:
return []
end = start + request_size
futures = []
while request_size > self._max_request_size:
futures.append(self._get_segment(start, self._max_request_size))
request_size -= self._max_request_size
start += self._max_request_size
if start < end:
futures.append(self._get_segment(start, end-start))
return [fut.get_result() for fut in futures]
@ndb.tasklet
def _get_segment(self, start, request_size):
"""Get a segment of the file from Google Storage.
Args:
start: start offset of the segment. Inclusive. Have to be within the
range of the file.
request_size: number of bytes to request. Have to be small enough
for a single urlfetch request. May go over the logical range of the
file.
Yields:
a segment [start, start + request_size) of the file.
Raises:
ValueError: if the file has changed while reading.
"""
end = start + request_size - 1
content_range = '%d-%d' % (start, end)
headers = {'Range': 'bytes=' + content_range}
status, resp_headers, content = yield self._api.get_object_async(
self._path, headers=headers)
errors.check_status(status, [200, 206], self._path, headers, resp_headers)
self._check_etag(resp_headers.get('etag'))
raise ndb.Return(content)
def _check_etag(self, etag):
"""Check if etag is the same across requests to GCS.
If self._etag is None, set it. If etag is set, check that the new
etag equals the old one.
    In the __init__ method, we fire one HEAD and one GET request using
    ndb tasklets. Whichever returns first sets the initial value.
Args:
etag: etag from a GCS HTTP response. None if etag is not part of the
response header. It could be None for example in the case of GCS
composite file.
Raises:
ValueError: if two etags are not equal.
"""
if etag is None:
return
elif self._etag is None:
self._etag = etag
elif self._etag != etag:
raise ValueError('File on GCS has changed while reading.')
def close(self):
self.closed = True
self._buffer = None
self._buffer_future = None
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.close()
return False
def seek(self, offset, whence=os.SEEK_SET):
"""Set the file's current offset.
    Note if the new offset is out of bounds, it is adjusted to either 0 or EOF.
Args:
offset: seek offset as number.
whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
(seek relative to the end, offset should be negative).
Raises:
IOError: When this buffer is closed.
ValueError: When whence is invalid.
"""
self._check_open()
self._buffer.reset()
self._buffer_future = None
if whence == os.SEEK_SET:
self._offset = offset
elif whence == os.SEEK_CUR:
self._offset += offset
elif whence == os.SEEK_END:
self._offset = self._file_size + offset
else:
raise ValueError('Whence mode %s is invalid.' % str(whence))
self._offset = min(self._offset, self._file_size)
self._offset = max(self._offset, 0)
if self._remaining():
self._request_next_buffer()
def tell(self):
"""Tell the file's current offset.
Returns:
current offset in reading this file.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
return self._offset
def _check_open(self):
if self.closed:
raise IOError('Buffer is closed.')
def seekable(self):
return True
def readable(self):
return True
def writable(self):
return False
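# Illustrative read sketch (an assumption, mirroring the iterator and read
# APIs defined above; '/mybucket/myfile' is a hypothetical object path):
#
#   api = _get_storage_api(retry_params=None)
#   f = ReadBuffer(api, '/mybucket/myfile')
#   try:
#       for line in f:
#           pass  # each item is one '\n'-terminated line
#   finally:
#       f.close()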
class _Buffer(object):
"""In memory buffer."""
def __init__(self):
self.reset()
def reset(self, content='', offset=0):
self._buffer = content
self._offset = offset
def read(self, size=-1):
"""Returns bytes from self._buffer and update related offsets.
Args:
size: number of bytes to read starting from current offset.
Read the entire buffer if negative.
Returns:
Requested bytes from buffer.
"""
if size < 0:
offset = len(self._buffer)
else:
offset = self._offset + size
return self.read_to_offset(offset)
def read_to_offset(self, offset):
"""Returns bytes from self._buffer and update related offsets.
Args:
offset: read from current offset to this offset, exclusive.
Returns:
Requested bytes from buffer.
"""
assert offset >= self._offset
result = self._buffer[self._offset: offset]
self._offset += len(result)
return result
def remaining(self):
return len(self._buffer) - self._offset
def find_newline(self, size=-1):
"""Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
offset of newline char in buffer. -1 if doesn't exist.
"""
if size < 0:
return self._buffer.find('\n', self._offset)
return self._buffer.find('\n', self._offset, self._offset + size)
class StreamingBuffer(object):
"""A class for creating large objects using the 'resumable' API.
The API is a subset of the Python writable stream API sufficient to
support writing zip files using the zipfile module.
The exact sequence of calls and use of headers is documented at
https://developers.google.com/storage/docs/developer-guide#unknownresumables
"""
_blocksize = 256 * 1024
_maxrequestsize = 16 * _blocksize
def __init__(self,
api,
path,
content_type=None,
gcs_headers=None):
"""Constructor.
Args:
api: A StorageApi instance.
path: Quoted/escaped path to the object, e.g. /mybucket/myfile
      content_type: Optional content-type. If omitted, the default is
        delegated to Google Cloud Storage.
      gcs_headers: additional gs headers as a str->str dict, e.g.
        {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
"""
assert self._maxrequestsize > self._blocksize
assert self._maxrequestsize % self._blocksize == 0
self._api = api
self._path = path
self.name = api_utils._unquote_filename(path)
self.closed = False
self._buffer = collections.deque()
self._buffered = 0
self._written = 0
self._offset = 0
headers = {'x-goog-resumable': 'start'}
if content_type:
headers['content-type'] = content_type
if gcs_headers:
headers.update(gcs_headers)
status, resp_headers, _ = self._api.post_object(path, headers=headers)
errors.check_status(status, [201], path, headers, resp_headers)
loc = resp_headers.get('location')
if not loc:
raise IOError('No location header found in 201 response')
parsed = urlparse.urlparse(loc)
self._path_with_token = '%s?%s' % (self._path, parsed.query)
def __getstate__(self):
"""Store state as part of serialization/pickling.
The contents of the write buffer are stored. Writes to the underlying
storage are required to be on block boundaries (_blocksize) except for the
last write. In the worst case the pickled version of this object may be
slightly larger than the blocksize.
Returns:
A dictionary with the state of this object
"""
return {'api': self._api,
'path': self._path,
'path_token': self._path_with_token,
'buffer': self._buffer,
'buffered': self._buffered,
'written': self._written,
'offset': self._offset,
'closed': self.closed}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the dictionary from a __getstate__ call
"""
self._api = state['api']
self._path_with_token = state['path_token']
self._buffer = state['buffer']
self._buffered = state['buffered']
self._written = state['written']
self._offset = state['offset']
self.closed = state['closed']
self._path = state['path']
self.name = api_utils._unquote_filename(self._path)
def write(self, data):
"""Write some bytes.
Args:
data: data to write. str.
Raises:
TypeError: if data is not of type str.
"""
self._check_open()
if not isinstance(data, str):
raise TypeError('Expected str but got %s.' % type(data))
if not data:
return
self._buffer.append(data)
self._buffered += len(data)
self._offset += len(data)
if self._buffered >= self._blocksize:
self._flush()
def flush(self):
"""Dummy API.
This API is provided because the zipfile module uses it. It is a
no-op because Google Storage *requires* that all writes except for
    the final one are multiples of 256K bytes aligned on 256K-byte
boundaries.
"""
self._check_open()
def tell(self):
"""Return the total number of bytes passed to write() so far.
(There is no seek() method.)
"""
return self._offset
def close(self):
"""Flush the buffer and finalize the file.
When this returns the new file is available for reading.
"""
if not self.closed:
self.closed = True
self._flush(finish=True)
self._buffer = None
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.close()
return False
def _flush(self, finish=False):
"""Internal API to flush.
This is called only when the total amount of buffered data is at
least self._blocksize, or to flush the final (incomplete) block of
the file with finish=True.
"""
flush_len = 0 if finish else self._blocksize
while self._buffered >= flush_len:
buffer = []
buffered = 0
while self._buffer:
buf = self._buffer.popleft()
size = len(buf)
self._buffered -= size
buffer.append(buf)
buffered += size
if buffered >= self._maxrequestsize:
break
if buffered > self._maxrequestsize:
excess = buffered - self._maxrequestsize
elif finish:
excess = 0
else:
excess = buffered % self._blocksize
if excess:
over = buffer.pop()
size = len(over)
assert size >= excess
buffered -= size
head, tail = over[:-excess], over[-excess:]
self._buffer.appendleft(tail)
self._buffered += len(tail)
if head:
buffer.append(head)
buffered += len(head)
data = ''.join(buffer)
file_len = '*'
if finish and not self._buffered:
file_len = self._written + len(data)
self._send_data(data, self._written, file_len)
self._written += len(data)
if file_len != '*':
break
def _send_data(self, data, start_offset, file_len):
"""Send the block to the storage service.
This is a utility method that does not modify self.
Args:
data: data to send in str.
start_offset: start offset of the data in relation to the file.
file_len: an int if this is the last data to append to the file.
Otherwise '*'.
"""
headers = {}
end_offset = start_offset + len(data) - 1
if data:
headers['content-range'] = ('bytes %d-%d/%s' %
(start_offset, end_offset, file_len))
else:
headers['content-range'] = ('bytes */%s' % file_len)
status, response_headers, _ = self._api.put_object(
self._path_with_token, payload=data, headers=headers)
if file_len == '*':
expected = 308
else:
expected = 200
errors.check_status(status, [expected], self._path, headers,
response_headers,
{'upload_path': self._path_with_token})
def _get_offset_from_gcs(self):
"""Get the last offset that has been written to GCS.
This is a utility method that does not modify self.
Returns:
an int of the last offset written to GCS by this upload, inclusive.
-1 means nothing has been written.
"""
headers = {'content-range': 'bytes */*'}
status, response_headers, _ = self._api.put_object(
self._path_with_token, headers=headers)
errors.check_status(status, [308], self._path, headers,
response_headers,
{'upload_path': self._path_with_token})
val = response_headers.get('range')
if val is None:
return -1
_, offset = val.rsplit('-', 1)
return int(offset)
def _force_close(self, file_length=None):
"""Close this buffer on file_length.
Finalize this upload immediately on file_length.
Contents that are still in memory will not be uploaded.
This is a utility method that does not modify self.
Args:
file_length: file length. Must match what has been uploaded. If None,
it will be queried from GCS.
"""
if file_length is None:
file_length = self._get_offset_from_gcs() + 1
self._send_data('', 0, file_length)
def _check_open(self):
if self.closed:
raise IOError('Buffer is closed.')
def seekable(self):
return False
def readable(self):
return False
def writable(self):
return True
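# --- Hedged usage sketch (not part of the original module) ---
# The class docstring above says the write API is sufficient for the zipfile
# module. A minimal illustration, assuming `api` is an already initialized
# StorageApi instance (its construction lives elsewhere in this package):
#
#   import zipfile
#   buf = StreamingBuffer(api, '/mybucket/archive.zip',
#                         content_type='application/zip')
#   with zipfile.ZipFile(buf, 'w') as zf:
#     zf.writestr('hello.txt', 'hello world')
#   buf.close()  # flushes the final block and finalizes the resumable upload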
|
abomyi/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/lookuperror_a/migrations/0003_a3.py
|
282
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lookuperror_c', '0002_c2'),
('lookuperror_b', '0002_b2'),
('lookuperror_a', '0002_a2'),
]
operations = [
migrations.CreateModel(
name='A3',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('b2', models.ForeignKey('lookuperror_b.B2', models.CASCADE)),
('c2', models.ForeignKey('lookuperror_c.C2', models.CASCADE)),
],
),
]
|
mdmirabal/Parcial2-Prog3
|
refs/heads/master
|
zona.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
Zona = {
"Zona_1":["La Boca","Calzada de Amador"],
"Zona_2":["San Felipe","Chorrillo","Santa Ana","Ancón"],
"Zona_3":["Calidonia","San Miguel","Albrook","Altos de Diablo"],
"Zona_4":["Punta Paitilla","Bella Vista","Universidad"] ,
"Zona_5":["Punta Pacífica","El Dorado","Las Sabanas","Bethania"] ,
"Zona_6":["Panamá Viejo","Río Abajo","Villa de las Fuentes","Los Libertadores"] ,
"Zona_7":["Costa del Este","Chanis","Auto Motor"]
}
def zona(lugar):
    """Return ["zona", <zone name>] for each zone containing `lugar`,
    or an empty list if the place is not found."""
    datos = []
    for zon in Zona:
        if lugar in Zona[zon]:
            datos.append("zona")
            datos.append(zon)
    return datos
# lugar = "Auto Motor"
# zona(lugar)
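# A quick check of the behavior above (values taken from the Zona dict):
#   zona("Auto Motor")  -> ["zona", "Zona_7"]
#   zona("Albrook")     -> ["zona", "Zona_3"]
#   zona("Nowhere")     -> []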
|
ctn-archive/hunsberger-neco2014
|
refs/heads/master
|
scripts/__init__.py
|
12133432
| |
ZompaSenior/fask
|
refs/heads/master
|
fask/src/fask_dj/task/migrations/__init__.py
|
12133432
| |
xuxiao19910803/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_self_assessment.py
|
92
|
from datetime import datetime
import json
import unittest
from mock import Mock, MagicMock
from webob.multidict import MultiDict
from pytz import UTC
from xblock.fields import ScopeIds
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from opaque_keys.edx.locations import Location
from lxml import etree
from . import get_test_system
import test_util_open_ended
class SelfAssessmentTest(unittest.TestCase):
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
</category>
</rubric></rubric>'''
prompt = etree.XML("<prompt>This is sample prompt text.</prompt>")
definition = {
'rubric': rubric,
'prompt': prompt,
'submitmessage': 'Shall we submit now?',
'hintprompt': 'Consider this...',
}
location = Location("edX", "sa_test", "run", "selfassessment", "SampleQuestion", None)
descriptor = Mock()
def setUp(self):
super(SelfAssessmentTest, self).setUp()
self.static_data = {
'max_attempts': 10,
'rubric': etree.XML(self.rubric),
'prompt': self.prompt,
'max_score': 1,
'display_name': "Name",
'accept_file_upload': False,
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
system = get_test_system()
usage_key = system.course_id.make_usage_key('combinedopenended', 'test_loc')
scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
system.xmodule_instance = Mock(scope_ids=scope_ids)
self.module = SelfAssessmentModule(
system,
self.location,
self.definition,
self.descriptor,
self.static_data
)
def test_get_html(self):
html = self.module.get_html(self.module.system)
self.assertTrue("This is sample prompt text" in html)
def test_self_assessment_flow(self):
responses = {'assessment': '0', 'score_list[]': ['0', '0']}
def get_fake_item(name):
return responses[name]
def get_data_for_location(self, location, student):
return {
'count_graded': 0,
'count_required': 0,
'student_sub_count': 0,
}
mock_query_dict = MagicMock()
mock_query_dict.__getitem__.side_effect = get_fake_item
mock_query_dict.getall = get_fake_item
self.module.peer_gs.get_data_for_location = get_data_for_location
self.assertEqual(self.module.get_score()['score'], 0)
self.module.save_answer({'student_answer': "I am an answer"},
self.module.system)
self.assertEqual(self.module.child_state, self.module.ASSESSING)
self.module.save_assessment(mock_query_dict, self.module.system)
self.assertEqual(self.module.child_state, self.module.DONE)
d = self.module.reset({})
self.assertTrue(d['success'])
self.assertEqual(self.module.child_state, self.module.INITIAL)
# if we now assess as right, skip the REQUEST_HINT state
self.module.save_answer({'student_answer': 'answer 4'},
self.module.system)
responses['assessment'] = '1'
self.module.save_assessment(mock_query_dict, self.module.system)
self.assertEqual(self.module.child_state, self.module.DONE)
def test_self_assessment_display(self):
"""
Test storing an answer with the self assessment module.
"""
# Create a module with no state yet. Important that this start off as a blank slate.
test_module = SelfAssessmentModule(
get_test_system(),
self.location,
self.definition,
self.descriptor,
self.static_data
)
saved_response = "Saved response."
submitted_response = "Submitted response."
# Initially, there will be no stored answer.
self.assertEqual(test_module.stored_answer, None)
# And the initial answer to display will be an empty string.
self.assertEqual(test_module.get_display_answer(), "")
# Now, store an answer in the module.
test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
# The stored answer should now equal our response.
self.assertEqual(test_module.stored_answer, saved_response)
self.assertEqual(test_module.get_display_answer(), saved_response)
# Submit a student response to the question.
test_module.handle_ajax("save_answer", {"student_answer": submitted_response}, get_test_system())
# Submitting an answer should clear the stored answer.
self.assertEqual(test_module.stored_answer, None)
# Confirm that the answer is stored properly.
self.assertEqual(test_module.latest_answer(), submitted_response)
# Mock saving an assessment.
assessment_dict = MultiDict({'assessment': 0, 'score_list[]': 0})
data = test_module.handle_ajax("save_assessment", assessment_dict, get_test_system())
self.assertTrue(json.loads(data)['success'])
# Reset the module so the student can try again.
test_module.reset(get_test_system())
# Confirm that the right response is loaded.
self.assertEqual(test_module.get_display_answer(), submitted_response)
def test_save_assessment_after_closing(self):
"""
Test storing assessment when close date is passed.
"""
responses = {'assessment': '0', 'score_list[]': ['0', '0']}
self.module.save_answer({'student_answer': "I am an answer"}, self.module.system)
self.assertEqual(self.module.child_state, self.module.ASSESSING)
#Set close date to current datetime.
self.module.close_date = datetime.now(UTC)
#Save assessment when close date is passed.
self.module.save_assessment(responses, self.module.system)
self.assertNotEqual(self.module.child_state, self.module.DONE)
|
practo/federation
|
refs/heads/master
|
tests/federation_api/people/model/test_person.py
|
1
|
import pytest
from sqlalchemy.exc import InvalidRequestError
from config.db import db
from federation_api.people.model import Person
from tests.factories.person_factory import PersonFactory
class TestPerson():
def test_new(self):
person = Person.new(name='name', email='a@b.com')
assert person.name == 'name'
assert person.email == 'a@b.com'
def test_save(self):
person = PersonFactory.build()
person.save()
assert person.id is not None
def test_create(self):
old_count = Person.count()
person = Person.create(name='name')
new_count = Person.count()
assert not person.errors
assert person.id
assert new_count - old_count == 1
def test_list(self):
PersonFactory.create_batch(3)
db.session.commit()
people = Person.list().all()
assert len(people) == 3
def test_first(self):
PersonFactory.create_batch(3)
db.session.commit()
person = Person.first()
assert person.id == 1
def test_last(self):
PersonFactory.create_batch(3)
db.session.commit()
person = Person.last()
assert person.id == 3
def test_list_with_deleted(self):
PersonFactory.create_batch(3)
db.session.commit()
Person.last().delete()
people = Person.list().all()
all_people = Person.list_with_deleted().all()
assert len(people) == 2
assert len(all_people) == 3
def test_where(self):
person = PersonFactory.create(name='name')
db.session.commit()
people = Person.where(name='name')
assert people.all()[0] == person
assert people.count() == 1
def test_find(self):
person = PersonFactory.create()
db.session.commit()
name_person = Person.find(1)
assert name_person.id == person.id
def test_find_by(self):
person = PersonFactory.create(name='name')
db.session.commit()
name_person = Person.find_by(name='name')
assert name_person.id == person.id
def test_destroy(self):
person = PersonFactory.create()
db.session.commit()
person.destroy()
destroy_person = Person.find(person.id)
assert destroy_person is None
def test_delete(self):
person = PersonFactory.create()
db.session.commit()
person = person.delete()
assert person.deleted_at is not None
|
skg-net/ansible
|
refs/heads/devel
|
test/units/template/test_tests_as_filters_warning.py
|
63
|
from ansible.template import Templar, display
from units.mock.loader import DictDataLoader
from jinja2.filters import FILTERS
from os.path import isabs
def test_tests_as_filters_warning(mocker):
fake_loader = DictDataLoader({
"/path/to/my_file.txt": "foo\n",
})
templar = Templar(loader=fake_loader, variables={})
filters = templar._get_filters(templar.environment.filters)
mocker.patch.object(display, 'deprecated')
# Call successful test, ensure the message is correct
filters['successful']({})
display.deprecated.assert_called_once_with(
'Using tests as filters is deprecated. Instead of using `result|successful` use `result is successful`', version='2.9'
)
# Call success test, ensure the message is correct
display.deprecated.reset_mock()
filters['success']({})
display.deprecated.assert_called_once_with(
'Using tests as filters is deprecated. Instead of using `result|success` use `result is success`', version='2.9'
)
# Call bool filter, ensure no deprecation message was displayed
display.deprecated.reset_mock()
filters['bool'](True)
assert display.deprecated.call_count == 0
# Ensure custom test does not override builtin filter
assert filters.get('abs') != isabs
|
zenieldanaku/pygpj
|
refs/heads/master
|
func/gen/__init__.py
|
15
|
# Dummy file to make this directory a package.
|
ashemedai/ansible
|
refs/heads/devel
|
lib/ansible/plugins/callback/jabber.py
|
67
|
# Ansible CallBack module for Jabber (XMPP)
# Copyright (C) 2016 maxn nikolaev.makc@gmail.com
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
HAS_XMPP = True
try:
import xmpp
except ImportError:
HAS_XMPP = False
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'jabber'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
if not HAS_XMPP:
self._display.warning("The required python xmpp library (xmpppy) is not installed."
" pip install git+https://github.com/ArchipelProject/xmpppy")
self.disabled = True
self.serv = os.getenv('JABBER_SERV')
self.j_user = os.getenv('JABBER_USER')
self.j_pass = os.getenv('JABBER_PASS')
self.j_to = os.getenv('JABBER_TO')
        # All connection settings are required; disable the plugin otherwise.
        # (The previous check `(a or b or c) is None` only failed when every
        # value was missing, so a partially configured plugin slipped through.)
        if self.j_user is None or self.j_pass is None or self.serv is None:
            self.disabled = True
            self._display.warning('The jabber callback requires the JABBER_SERV, '
                                  'JABBER_USER and JABBER_PASS env variables')
def send_msg(self, msg):
"""Send message"""
jid = xmpp.JID(self.j_user)
client = xmpp.Client(self.serv,debug=[])
client.connect(server=(self.serv,5222))
client.auth(jid.getNode(), self.j_pass, resource=jid.getResource())
message = xmpp.Message(self.j_to, msg)
message.setAttr('type', 'chat')
client.send(message)
client.disconnect()
def v2_runner_on_ok(self, result):
self._clean_results(result._result, result._task.action)
self.debug = self._dump_results(result._result)
def v2_playbook_on_task_start(self, task, is_conditional):
self.task = task
def v2_playbook_on_play_start(self, play):
"""Display Playbook and play start messages"""
self.play = play
name = play.name
self.send_msg("Ansible starting play: %s" % (name))
def playbook_on_stats(self, stats):
name = self.play
hosts = sorted(stats.processed.keys())
failures = False
unreachable = False
for h in hosts:
s = stats.summarize(h)
if s['failures'] > 0:
failures = True
if s['unreachable'] > 0:
unreachable = True
if failures or unreachable:
out = self.debug
self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out))
else:
out = self.debug
self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name,s, out))
|
denis-pitul/django
|
refs/heads/master
|
django/contrib/messages/storage/session.py
|
478
|
import json
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import (
MessageDecoder, MessageEncoder,
)
from django.utils import six
class SessionStorage(BaseStorage):
"""
Stores messages in the session (that is, django.contrib.sessions).
"""
session_key = '_messages'
def __init__(self, request, *args, **kwargs):
assert hasattr(request, 'session'), "The session-based temporary "\
"message storage requires session middleware to be installed, "\
"and come before the message middleware in the "\
"MIDDLEWARE_CLASSES list."
super(SessionStorage, self).__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return self.deserialize_messages(self.request.session.get(self.session_key)), True
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return []
def serialize_messages(self, messages):
encoder = MessageEncoder(separators=(',', ':'))
return encoder.encode(messages)
def deserialize_messages(self, data):
if data and isinstance(data, six.string_types):
return json.loads(data, cls=MessageDecoder)
return data
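# --- Hedged usage sketch (not part of the original module) ---
# Round-tripping messages through the serializer used above; `request` must
# carry a session (enforced by the assert in __init__):
#
#   storage = SessionStorage(request)
#   data = storage.serialize_messages(messages)   # compact JSON string
#   same = storage.deserialize_messages(data)     # back to Message objects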
|
jangorecki/h2o-3
|
refs/heads/master
|
h2o-py/h2o/two_dim_table.py
|
1
|
# -*- encoding: utf-8 -*-
"""
A two dimensional table having row and column headers.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
from h2o.display import H2ODisplay
from h2o.exceptions import H2OValueError
from h2o.utils.compatibility import * # NOQA
from h2o.utils.shared_utils import _is_list_of_lists, can_use_pandas
from h2o.utils.typechecks import I, assert_is_type, is_type
class H2OTwoDimTable(object):
"""A class representing an 2D table (for pretty printing output)."""
def __init__(self, table_header=None, table_description=None, col_header=None,
cell_values=None, raw_cell_values=None, col_types=None, row_header=None, col_formats=None):
"""
Create new H2OTwoDimTable object.
:param table_header: Header for the entire table.
:param table_description: Longer description of the table.
        :param col_header: list of column names (used in conjunction with ``cell_values``).
        :param cell_values: table values, as an array of individual rows.
        :param raw_cell_values: unparsed row values as received from the backend;
            used (together with ``col_types``) to derive ``cell_values`` when the
            latter is not given.
        :param col_types: list of column types, used when parsing ``raw_cell_values``.
        :param row_header: ignored.
        :param col_formats: ignored.
"""
assert_is_type(table_header, None, str)
assert_is_type(table_description, None, str)
assert_is_type(col_header, None, [str])
assert_is_type(col_types, None, [str])
assert_is_type(cell_values, None, I([[object]], lambda m: all(len(row) == len(m[0]) for row in m)))
self._table_header = table_header
self._table_description = table_description
self._col_header = col_header
self._cell_values = cell_values or self._parse_values(raw_cell_values, col_types)
@staticmethod
def make(keyvals):
"""
        Create a new H2OTwoDimTable object from a list of (key, value) tuples that are a precursor to a JSON dict.
:param keyvals: list of (key, value) tuples
:return: new H2OTwoDimTable object
"""
kwargs = {}
for key, value in keyvals:
if key == "columns":
kwargs["col_formats"] = [c["format"] for c in value]
kwargs["col_types"] = [c["type"] for c in value]
kwargs["col_header"] = [c["name"] for c in value]
kwargs["row_header"] = len(value)
if key == "name": kwargs["table_header"] = value
if key == "description": kwargs["table_description"] = value
if key == "data": kwargs["raw_cell_values"] = value
return H2OTwoDimTable(**kwargs)
@property
def cell_values(self):
"""The contents of the table, as a list of rows."""
return self._cell_values
@property
def col_header(self):
"""Array of column names."""
return self._col_header
def as_data_frame(self):
"""Convert to a python 'data frame'."""
if can_use_pandas():
import pandas
pandas.options.display.max_colwidth = 70
return pandas.DataFrame(self._cell_values, columns=self._col_header)
return self
def show(self, header=True):
"""Print the contents of this table."""
# if h2o.can_use_pandas():
# import pandas
# pandas.options.display.max_rows = 20
# print pandas.DataFrame(self._cell_values,columns=self._col_header)
# return
if header and self._table_header:
print(self._table_header + ":", end=' ')
if self._table_description: print(self._table_description)
print()
table = copy.deepcopy(self._cell_values)
nr = 0
        if _is_list_of_lists(table):
            nr = len(table)  # only set if we truly have multiple rows... not just one long row :)
if nr > 20: # create a truncated view of the table, first/last 5 rows
trunc_table = []
trunc_table += [v for v in table[:5]]
trunc_table.append(["---"] * len(table[0]))
trunc_table += [v for v in table[(nr - 5):]]
table = trunc_table
H2ODisplay(table, self._col_header, numalign="left", stralign="left")
if nr > 20 and can_use_pandas(): print('\nSee the whole table with table.as_data_frame()')
def __repr__(self):
# FIXME: should return a string rather than printing it
self.show()
return ""
def _parse_values(self, values, types):
if self._col_header[0] is None:
self._col_header = self._col_header[1:]
types = types[1:]
values = values[1:]
for col_index, column in enumerate(values):
for row_index, row_value in enumerate(column):
if types[col_index] == 'integer':
values[col_index][row_index] = "" if row_value is None else int(float(row_value))
elif types[col_index] in ['double', 'float', 'long']:
values[col_index][row_index] = "" if row_value is None else float(row_value)
else: # string?
continue
return list(zip(*values)) # transpose the values! <3 splat ops
def __getitem__(self, item):
if is_type(item, int, str):
# single col selection returns list
if is_type(item, int):
index = item
if index < 0: index += len(self._col_header)
if index < 0 or index >= len(self._col_header):
raise H2OValueError("Index %d is out of range" % item)
else:
if item in self._col_header:
index = self._col_header.index(item)
else:
raise H2OValueError("Column `%s` does not exist in the table" % item)
return [row[index] for row in self._cell_values]
elif isinstance(item, slice):
# row selection if item is slice returns H2OTwoDimTable
# FIXME! slice behavior should be consistent with other selectors - return columns instead of rows...
self._cell_values = [self._cell_values[ii] for ii in range(*item.indices(len(self._cell_values)))]
return self
elif is_type(item, [int, str]):
# multiple col selection returns list of cols
return [self[i] for i in item]
else:
            raise TypeError('cannot support getting item for ' + str(item))
def __setitem__(self, key, value):
# This is not tested, and probably not used anywhere... That's why it's so horrible.
cols = list(zip(*self._cell_values))
if len(cols[0]) != len(value): raise ValueError('value must be same length as columns')
if key not in self._col_header:
self._col_header.append(key)
cols.append(tuple(value))
else:
cols[self._col_header.index(key)] = value
self._cell_values = [list(x) for x in zip(*cols)]
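# --- Hedged usage sketch (not part of the original module) ---
# Constructing a table directly; the values below are invented:
#
#   tbl = H2OTwoDimTable(table_header='Example',
#                        col_header=['name', 'value'],
#                        cell_values=[['a', 1], ['b', 2]])
#   tbl.show()               # pretty-printed via H2ODisplay
#   tbl['value']             # -> [1, 2]
#   tbl.as_data_frame()      # pandas.DataFrame when pandas is available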
|
0hoo/libearth
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# libearth documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 23 04:04:29 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import os.path
import sys
__dir__ = os.path.dirname(__file__)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(__dir__, '..')))
from libearth.version import VERSION
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
if os.environ.get('READTHEDOCS'):
# As of early July 2013 ReadTheDocs.org only supports Sphinx 1.1.3.
# We need Sphinx >= 1.2 because of the following bug fix:
# https://bitbucket.org/birkenfeld/sphinx/pull-request/107/
# To workaround this we do monkeypatch Sphinx runtime on ReadTheDocs.org.
# FIXME: Remove this hack when ReadTheDocs.org officially supports 1.2
import inspect
from sphinx.ext.autodoc import AttributeDocumenter, ModuleDocumenter
from sphinx.util.inspect import isdescriptor
from sphinx.util.pycompat import class_types
def can_document_member(cls, member, membername, isattr, parent):
isdatadesc = isdescriptor(member) and not \
isinstance(member, cls.method_types) and not \
type(member).__name__ in ("type", "method_descriptor")
return isdatadesc or (not isinstance(parent, ModuleDocumenter)
and not inspect.isroutine(member)
and not isinstance(member, class_types))
AttributeDocumenter.can_document_member = classmethod(can_document_member)
html_context = {}
class PathList(list):
"""Fake list to ignore ReadTheDocs.org's hack."""
def insert(self, index, value):
if index == 0:
index = 1
html_context['rtd_hack_templates_path'] = value
super(PathList, self).insert(index, value)
def __reduce__(self):
return list, (list(self),)
else:
needs_sphinx = '1.2'
PathList = list
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.extlinks']
# Add any paths that contain templates here, relative to this directory.
templates_path = PathList(['_templates'])
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libearth'
copyright = u'2013, Hong Minhee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'borland'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic'
html_style = 'libearth.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} {1}'.format(project, release)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = os.path.join('_static', 'libearth.svg')
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libearthdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libearth.tex', u'libearth Documentation',
u'Hong Minhee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libearth', u'libearth Documentation',
[u'Hong Minhee'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'libearth', u'libearth Documentation',
u'Hong Minhee', 'libearth', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/3/', None),
'setuptools': ('https://pythonhosted.org/setuptools/', None)
}
extlinks = {
'issue': ('https://github.com/earthreader/libearth/issues/%s', 'issue #'),
'commit': ('https://github.com/earthreader/libearth/commit/%s', ''),
'branch': ('https://github.com/earthreader/libearth/compare/master...%s',
'')
}
# IronPython runtime mock
try:
import clr
except ImportError:
module = type(sys)
clr = module('clr')
clr.AddReference = lambda a: None
sys.modules['clr'] = clr
System = module('System')
System.IO = module('System.IO')
System.IO.Stream = object
System.Text = module('System.Text')
System.Xml = module('System.Xml')
sys.modules['System'] = System
sys.modules['System.IO'] = System.IO
sys.modules['System.Text'] = System.Text
sys.modules['System.Xml'] = System.Xml
|
sawankh/PowerKnowledge
|
refs/heads/master
|
src/dataCleanser/dsl/grammar/behaviour/readLine.py
|
6
|
#!/usr/bin/python
# Title: readLine.py
# Description: Contains the grammar and methods to read a specific line of a file
# Author: Sawan J. Kapai Harpalani
# Date: 2016-06-29
# Version: 0.1
# Usage: python readLine.py
# Notes:
# python_version: 2.6.6
# License: Copyright 200X Sawan J. Kapai Harpalani
# This file is part of MMOTracker. MMOTracker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
# MMOTracker is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the GNU General Public License along with MMOTracker.
# If not, see http://www.gnu.org/licenses/.
#==============================================================================
from pyparsing import *
"""Rules"""
comma = Suppress(Literal(","))
readLineReservedWord = Suppress(Keyword("readLine"))
fileName = QuotedString('"', escChar = "\\").setResultsName("fileName")
lineNumber = QuotedString('"', escChar = "\\").setResultsName("lineNumber")
leftBracket = Suppress(Literal("("))
rightBracket = Suppress(Literal(")"))
varID = Word(alphas, alphanums + "_").setResultsName("varID", listAllMatches = True)
readLineExpr = readLineReservedWord + leftBracket + (fileName + comma + (lineNumber | varID)) + rightBracket
def readLine(sList, varStack):
"""Joins strings and returns quoted string"""
resultString = ''
lineNu = ''
fileN = ''
if len(sList.varID) > 0:
for var in sList.varID.asList():
for item in varStack[:]:
if var == item[0]:
if item[1].startswith('"') and item[1].endswith('"'):
item[1] = item[1][1:-1]
lineNu += item[1]
if len(sList.fileName) > 0:
fileN = sList.fileName
if len(sList.lineNumber) > 0:
lineNu = sList.lineNumber
with open(fileN) as fp:
for i, line in enumerate(fp):
if i == (int(lineNu) - 1):
resultString = line.replace("\n", "")
resultString = resultString.replace("\r", "")
return "\"" + resultString + "\""
|
philoL/ndnSIM2.3-sdc
|
refs/heads/master
|
.waf-tools/version.py
|
24
|
# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
from waflib.Configure import conf
from waflib import Utils, Logs
import os
def splitVersion(version):
base = version.split('-')[0]
split = [v for v in base.split('.')]
return base, version, split
@conf
def getVersion(conf, submodule, **kw):
tagPrefix = kw.get('tag', '%s-' % submodule)
baseVersion = kw.get('base_version', '0.0.0')
submodule = conf.path.find_node(submodule)
gitVersion = baseVersion
didGetVersion = False
try:
cmd = ['git', 'describe', '--always', '--match', '%s*' % tagPrefix]
p = Utils.subprocess.Popen(cmd, stdout=Utils.subprocess.PIPE,
cwd=submodule.abspath(),
stderr=None, stdin=None)
out = str(p.communicate()[0].strip())
didGetVersion = (p.returncode == 0 and out != "")
if didGetVersion:
if out.startswith(tagPrefix):
gitVersion = out[len(tagPrefix):]
else:
gitVersion = "%s-commit-%s" % (baseVersion, out)
except OSError:
pass
versionFile = submodule.find_node('VERSION')
if not didGetVersion and versionFile is not None:
try:
return splitVersion(versionFile.read())
except (OSError, IOError):
pass
# version was obtained from git, update VERSION file if necessary
if versionFile is not None:
try:
version = versionFile.read()
versionFile = None # no need to update
except (OSError, IOError):
Logs.warn("VERSION file exists, but not readable")
else:
versionFile = submodule.make_node('VERSION')
if versionFile:
try:
versionFile.write(gitVersion)
except (OSError, IOError):
Logs.warn("VERSION file is not writeable")
return splitVersion(gitVersion)
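# --- Hedged usage sketch (not part of the original tool) ---
# Inside a project's wscript configure step (names below are hypothetical):
#
#   def configure(conf):
#       base, full, split = conf.getVersion('ndn-cxx', tag='ndn-cxx-',
#                                           base_version='0.1.0')
#
# `base` is e.g. '0.1.0', `full` may carry a '-commit-...' suffix taken from
# `git describe`, and `split` is the dotted base version as a list of strings.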
|
ktonon/GameSoup
|
refs/heads/master
|
gamesoup/library/local_editing.py
|
1
|
'''
Local editing gives developers the ability to upload
and download Type.code as tar files. This module provides
functions for packing and unpacking these files to and
from the database.
'''
import os
import os.path
import re
import shutil
import subprocess
from django.conf import settings
from gamesoup.library.models import *
__all__ = [
'pack_types',
'unpack_types',
]
def pack_types(username, outfile):
user_dir, tar_file = _get_paths(username)
os.mkdir(user_dir)
# Write the files
def make_file(type, field, extension):
path = os.path.join(user_dir, '%s.%s' % (type.name, extension))
f = open(path, 'w')
f.write(getattr(type, field))
f.close()
for type in Type.objects.all():
make_file(type, 'code', 'js')
# Tar them and read the tarred file into the outfile
os.chdir(settings.LOCAL_EDITING_WORKSPACE)
subprocess.call(['tar', '-cf', tar_file, username])
f = open(tar_file)
outfile.write(f.read())
f.close()
# Cleanup the workspace
os.remove(tar_file)
shutil.rmtree(user_dir)
return
def unpack_types(username, tarball):
messages = []
user_dir, tar_file = _get_paths(username)
f = open(tar_file, 'w')
f.write(tarball.read())
f.close()
os.chdir(settings.LOCAL_EDITING_WORKSPACE)
return_code = subprocess.call(['tar', '-xf', tar_file])
if return_code != 0:
raise ValueError('Invalid file format. Attempt to untar uploaded file failed.')
pattern = re.compile(r'^(?P<type_name>[^.]+)\.(?P<ext>css|js|html)$')
fieldmap = {
'css': 'style',
'js': 'code',
'html': 'body',
}
for filepath in os.listdir(user_dir):
match = pattern.match(filepath)
if match:
d = match.groupdict()
type_name = d['type_name']
field = fieldmap[d['ext']]
f = open(os.path.join(username, filepath))
try:
type = Type.objects.get(name=type_name)
setattr(type, field, f.read())
type.save()
messages.append('<strong>%s</strong>.<em>%s</em> updated...' % (type_name, field))
except Type.DoesNotExist:
messages.append('Unknown type <strong>%s</strong> for file <em>%s</em>' % (type_name, filepath))
f.close()
else:
messages.append('Unrecognized file <em>%s</em>' % filepath)
# Cleanup the workspace
os.remove(tar_file)
shutil.rmtree(user_dir)
return messages
###############################################################################
# LOCAL HELPERS
def _get_paths(username):
user_dir = os.path.join(settings.LOCAL_EDITING_WORKSPACE, username)
tar_file = '%s.tar' % username
return user_dir, tar_file
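# --- Hedged usage sketch (not part of the original module) ---
# Typical use from a Django view; `request`, the response wiring, and the
# import of django.http.HttpResponse are hypothetical:
#
#   response = HttpResponse(content_type='application/x-tar')
#   pack_types(request.user.username, response)       # tar of all Type.code
#
#   messages = unpack_types(request.user.username, request.FILES['tarball'])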
|
googlefonts/oss-fuzz
|
refs/heads/main
|
infra/build/functions/build_project.py
|
3
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#!/usr/bin/python2
"""Starts project build on Google Cloud Builder.
Usage: build_project.py <project_dir>
"""
from __future__ import print_function
import datetime
import json
import logging
import os
import re
import sys
import six
import yaml
from oauth2client.client import GoogleCredentials
from googleapiclient.discovery import build
import build_lib
FUZZING_BUILD_TAG = 'fuzzing'
GCB_LOGS_BUCKET = 'oss-fuzz-gcb-logs'
CONFIGURATIONS = {
'sanitizer-address': ['SANITIZER=address'],
'sanitizer-dataflow': ['SANITIZER=dataflow'],
'sanitizer-memory': ['SANITIZER=memory'],
'sanitizer-undefined': ['SANITIZER=undefined'],
'engine-libfuzzer': ['FUZZING_ENGINE=libfuzzer'],
'engine-afl': ['FUZZING_ENGINE=afl'],
'engine-honggfuzz': ['FUZZING_ENGINE=honggfuzz'],
'engine-dataflow': ['FUZZING_ENGINE=dataflow'],
'engine-none': ['FUZZING_ENGINE=none'],
}
DEFAULT_ARCHITECTURES = ['x86_64']
DEFAULT_ENGINES = ['libfuzzer', 'afl', 'honggfuzz']
DEFAULT_SANITIZERS = ['address', 'undefined']
LATEST_VERSION_FILENAME = 'latest.version'
LATEST_VERSION_CONTENT_TYPE = 'text/plain'
QUEUE_TTL_SECONDS = 60 * 60 * 24 # 24 hours.
def usage():
"""Exit with code 1 and display syntax to use this file."""
sys.stderr.write('Usage: ' + sys.argv[0] + ' <project_dir>\n')
sys.exit(1)
def set_yaml_defaults(project_name, project_yaml, image_project):
"""Set project.yaml's default parameters."""
project_yaml.setdefault('disabled', False)
project_yaml.setdefault('name', project_name)
project_yaml.setdefault('image',
'gcr.io/{0}/{1}'.format(image_project, project_name))
project_yaml.setdefault('architectures', DEFAULT_ARCHITECTURES)
project_yaml.setdefault('sanitizers', DEFAULT_SANITIZERS)
project_yaml.setdefault('fuzzing_engines', DEFAULT_ENGINES)
project_yaml.setdefault('run_tests', True)
project_yaml.setdefault('coverage_extra_args', '')
project_yaml.setdefault('labels', {})
def is_supported_configuration(fuzzing_engine, sanitizer, architecture):
"""Check if the given configuration is supported."""
fuzzing_engine_info = build_lib.ENGINE_INFO[fuzzing_engine]
if architecture == 'i386' and sanitizer != 'address':
return False
return (sanitizer in fuzzing_engine_info.supported_sanitizers and
architecture in fuzzing_engine_info.supported_architectures)
def get_sanitizers(project_yaml):
"""Retrieve sanitizers from project.yaml."""
sanitizers = project_yaml['sanitizers']
assert isinstance(sanitizers, list)
processed_sanitizers = []
for sanitizer in sanitizers:
if isinstance(sanitizer, six.string_types):
processed_sanitizers.append(sanitizer)
elif isinstance(sanitizer, dict):
for key in sanitizer.keys():
processed_sanitizers.append(key)
return processed_sanitizers
def workdir_from_dockerfile(dockerfile_lines):
"""Parse WORKDIR from the Dockerfile."""
workdir_regex = re.compile(r'\s*WORKDIR\s*([^\s]+)')
for line in dockerfile_lines:
match = re.match(workdir_regex, line)
if match:
      # We need to escape '$' since they're used for substitutions in Container
      # Builder builds.
return match.group(1).replace('$', '$$')
return None
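# A quick illustration of the behavior above (Dockerfile lines are examples):
#   workdir_from_dockerfile(['FROM gcr.io/oss-fuzz-base/base-builder',
#                            'WORKDIR $SRC/project'])  # -> '$$SRC/project'
#   workdir_from_dockerfile(['FROM ubuntu:16.04'])      # -> None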
def load_project_yaml(project_name, project_yaml_file, image_project):
"""Loads project yaml and sets default values."""
project_yaml = yaml.safe_load(project_yaml_file)
set_yaml_defaults(project_name, project_yaml, image_project)
return project_yaml
# pylint: disable=too-many-locals, too-many-statements, too-many-branches
def get_build_steps(project_name, project_yaml_file, dockerfile_lines,
image_project, base_images_project):
"""Returns build steps for project."""
project_yaml = load_project_yaml(project_name, project_yaml_file,
image_project)
if project_yaml['disabled']:
logging.info('Project "%s" is disabled.', project_name)
return []
name = project_yaml['name']
image = project_yaml['image']
language = project_yaml['language']
run_tests = project_yaml['run_tests']
time_stamp = datetime.datetime.now().strftime('%Y%m%d%H%M')
build_steps = build_lib.project_image_steps(name, image, language)
# Copy over MSan instrumented libraries.
build_steps.append({
'name': 'gcr.io/{0}/msan-libs-builder'.format(base_images_project),
'args': [
'bash',
'-c',
'cp -r /msan /workspace',
],
})
for fuzzing_engine in project_yaml['fuzzing_engines']:
for sanitizer in get_sanitizers(project_yaml):
for architecture in project_yaml['architectures']:
if not is_supported_configuration(fuzzing_engine, sanitizer,
architecture):
continue
env = CONFIGURATIONS['engine-' + fuzzing_engine][:]
env.extend(CONFIGURATIONS['sanitizer-' + sanitizer])
out = '/workspace/out/' + sanitizer
stamped_name = '-'.join([name, sanitizer, time_stamp])
latest_version_file = '-'.join(
[name, sanitizer, LATEST_VERSION_FILENAME])
zip_file = stamped_name + '.zip'
stamped_srcmap_file = stamped_name + '.srcmap.json'
bucket = build_lib.ENGINE_INFO[fuzzing_engine].upload_bucket
if architecture != 'x86_64':
bucket += '-' + architecture
upload_url = build_lib.get_signed_url(
build_lib.GCS_UPLOAD_URL_FORMAT.format(bucket, name, zip_file))
srcmap_url = build_lib.get_signed_url(
build_lib.GCS_UPLOAD_URL_FORMAT.format(bucket, name,
stamped_srcmap_file))
latest_version_url = build_lib.GCS_UPLOAD_URL_FORMAT.format(
bucket, name, latest_version_file)
latest_version_url = build_lib.get_signed_url(
latest_version_url, content_type=LATEST_VERSION_CONTENT_TYPE)
targets_list_filename = build_lib.get_targets_list_filename(sanitizer)
targets_list_url = build_lib.get_signed_url(
build_lib.get_targets_list_url(bucket, name, sanitizer))
env.append('OUT=' + out)
env.append('MSAN_LIBS_PATH=/workspace/msan')
env.append('ARCHITECTURE=' + architecture)
env.append('FUZZING_LANGUAGE=' + language)
workdir = workdir_from_dockerfile(dockerfile_lines)
if not workdir:
workdir = '/src'
failure_msg = ('*' * 80 + '\nFailed to build.\nTo reproduce, run:\n'
'python infra/helper.py build_image {name}\n'
'python infra/helper.py build_fuzzers --sanitizer '
'{sanitizer} --engine {engine} --architecture '
'{architecture} {name}\n' + '*' * 80).format(
name=name,
sanitizer=sanitizer,
engine=fuzzing_engine,
architecture=architecture)
build_steps.append(
# compile
{
'name':
image,
'env':
env,
'args': [
'bash',
'-c',
# Remove /out to break loudly when a build script
# incorrectly uses /out instead of $OUT.
# `cd /src && cd {workdir}` (where {workdir} is parsed from
# the Dockerfile). Container Builder overrides our workdir
# so we need to add this step to set it back.
('rm -r /out && cd /src && cd {workdir} && mkdir -p {out} '
'&& compile || (echo "{failure_msg}" && false)'
).format(workdir=workdir, out=out, failure_msg=failure_msg),
],
})
if sanitizer == 'memory':
# Patch dynamic libraries to use instrumented ones.
build_steps.append({
'name':
'gcr.io/{0}/msan-libs-builder'.format(base_images_project),
'args': [
'bash',
'-c',
# TODO(ochang): Replace with just patch_build.py once
# permission in image is fixed.
'python /usr/local/bin/patch_build.py {0}'.format(out),
],
})
if run_tests:
failure_msg = ('*' * 80 + '\nBuild checks failed.\n'
'To reproduce, run:\n'
'python infra/helper.py build_image {name}\n'
'python infra/helper.py build_fuzzers --sanitizer '
'{sanitizer} --engine {engine} --architecture '
'{architecture} {name}\n'
'python infra/helper.py check_build --sanitizer '
'{sanitizer} --engine {engine} --architecture '
'{architecture} {name}\n' + '*' * 80).format(
name=name,
sanitizer=sanitizer,
engine=fuzzing_engine,
architecture=architecture)
build_steps.append(
# test binaries
{
'name':
'gcr.io/{0}/base-runner'.format(base_images_project),
'env':
env,
'args': [
'bash', '-c',
'test_all.py || (echo "{0}" && false)'.format(failure_msg)
],
})
if project_yaml['labels']:
# write target labels
build_steps.append({
'name':
image,
'env':
env,
'args': [
'/usr/local/bin/write_labels.py',
json.dumps(project_yaml['labels']),
out,
],
})
if sanitizer == 'dataflow' and fuzzing_engine == 'dataflow':
dataflow_steps = dataflow_post_build_steps(name, env,
base_images_project)
if dataflow_steps:
build_steps.extend(dataflow_steps)
else:
sys.stderr.write('Skipping dataflow post build steps.\n')
build_steps.extend([
# generate targets list
{
'name':
'gcr.io/{0}/base-runner'.format(base_images_project),
'env':
env,
'args': [
'bash',
'-c',
'targets_list > /workspace/{0}'.format(
targets_list_filename),
],
},
# zip binaries
{
'name':
image,
'args': [
'bash', '-c',
'cd {out} && zip -r {zip_file} *'.format(out=out,
zip_file=zip_file)
],
},
# upload srcmap
{
'name': 'gcr.io/{0}/uploader'.format(base_images_project),
'args': [
'/workspace/srcmap.json',
srcmap_url,
],
},
# upload binaries
{
'name': 'gcr.io/{0}/uploader'.format(base_images_project),
'args': [
os.path.join(out, zip_file),
upload_url,
],
},
# upload targets list
{
'name':
'gcr.io/{0}/uploader'.format(base_images_project),
'args': [
'/workspace/{0}'.format(targets_list_filename),
targets_list_url,
],
},
# upload the latest.version file
build_lib.http_upload_step(zip_file, latest_version_url,
LATEST_VERSION_CONTENT_TYPE),
# cleanup
{
'name': image,
'args': [
'bash',
'-c',
'rm -r ' + out,
],
},
])
return build_steps
def dataflow_post_build_steps(project_name, env, base_images_project):
"""Appends dataflow post build steps."""
steps = build_lib.download_corpora_steps(project_name)
if not steps:
return None
steps.append({
'name':
'gcr.io/{0}/base-runner'.format(base_images_project),
'env':
env + [
'COLLECT_DFT_TIMEOUT=2h',
'DFT_FILE_SIZE_LIMIT=65535',
'DFT_MIN_TIMEOUT=2.0',
'DFT_TIMEOUT_RANGE=6.0',
],
'args': [
'bash', '-c',
('for f in /corpus/*.zip; do unzip -q $f -d ${f%%.*}; done && '
'collect_dft || (echo "DFT collection failed." && false)')
],
'volumes': [{
'name': 'corpus',
'path': '/corpus'
}],
})
return steps
def get_logs_url(build_id, image_project='oss-fuzz'):
"""Returns url where logs are displayed for the build."""
url_format = ('https://console.developers.google.com/logs/viewer?'
'resource=build%2Fbuild_id%2F{0}&project={1}')
return url_format.format(build_id, image_project)
# pylint: disable=no-member
def run_build(build_steps, project_name, tag):
"""Run the build for given steps on cloud build."""
options = {}
if 'GCB_OPTIONS' in os.environ:
options = yaml.safe_load(os.environ['GCB_OPTIONS'])
build_body = {
'steps': build_steps,
'timeout': str(build_lib.BUILD_TIMEOUT) + 's',
'options': options,
'logsBucket': GCB_LOGS_BUCKET,
'tags': [project_name + '-' + tag,],
'queueTtl': str(QUEUE_TTL_SECONDS) + 's',
}
credentials = GoogleCredentials.get_application_default()
cloudbuild = build('cloudbuild',
'v1',
credentials=credentials,
cache_discovery=False)
build_info = cloudbuild.projects().builds().create(projectId='oss-fuzz',
body=build_body).execute()
build_id = build_info['metadata']['build']['id']
print('Logs:', get_logs_url(build_id), file=sys.stderr)
print(build_id)
def main():
"""Build and run projects."""
if len(sys.argv) != 2:
usage()
image_project = 'oss-fuzz'
base_images_project = 'oss-fuzz-base'
project_dir = sys.argv[1].rstrip(os.path.sep)
dockerfile_path = os.path.join(project_dir, 'Dockerfile')
project_yaml_path = os.path.join(project_dir, 'project.yaml')
project_name = os.path.basename(project_dir)
with open(dockerfile_path) as dockerfile:
dockerfile_lines = dockerfile.readlines()
with open(project_yaml_path) as project_yaml_file:
steps = get_build_steps(project_name, project_yaml_file, dockerfile_lines,
image_project, base_images_project)
run_build(steps, project_name, FUZZING_BUILD_TAG)
if __name__ == '__main__':
main()
|
andreparames/odoo
|
refs/heads/8.0
|
addons/sale/report/sale_report.py
|
37
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class sale_report(osv.osv):
_name = "sale.report"
_description = "Sales Orders Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.datetime('Date Order', readonly=True), # TDE FIXME master: rename into date_order
'date_confirm': fields.date('Date Confirm', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', readonly=True),
'product_uom_qty': fields.float('# of Qty', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Price', readonly=True),
'delay': fields.float('Commitment Delay', digits=(16,2), readonly=True),
'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'state': fields.selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('waiting_date', 'Waiting Schedule'),
('manual', 'Manual In Progress'),
('progress', 'In Progress'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
], 'Order Status', readonly=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_order = 'date desc'
def _select(self):
select_str = """
SELECT min(l.id) as id,
l.product_id as product_id,
t.uom_id as product_uom,
sum(l.product_uom_qty / u.factor * u2.factor) as product_uom_qty,
sum(l.product_uom_qty * l.price_unit * (100.0-l.discount) / 100.0) as price_total,
count(*) as nbr,
s.date_order as date,
s.date_confirm as date_confirm,
s.partner_id as partner_id,
s.user_id as user_id,
s.company_id as company_id,
extract(epoch from avg(date_trunc('day',s.date_confirm)-date_trunc('day',s.create_date)))/(24*60*60)::decimal(16,2) as delay,
l.state,
t.categ_id as categ_id,
s.pricelist_id as pricelist_id,
s.project_id as analytic_account_id,
s.section_id as section_id
"""
return select_str
def _from(self):
from_str = """
sale_order_line l
join sale_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY l.product_id,
l.order_id,
t.uom_id,
t.categ_id,
s.date_order,
s.date_confirm,
s.partner_id,
s.user_id,
s.company_id,
l.state,
s.pricelist_id,
s.project_id,
s.section_id
"""
return group_by_str
def init(self, cr):
# self._table = sale_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
FROM ( %s )
%s
)""" % (self._table, self._select(), self._from(), self._group_by()))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
AMOboxTV/AMOBox.LegoBuild
|
refs/heads/master
|
script.module.youtube.dl/lib/youtube_dl/extractor/ign.py
|
50
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class IGNIE(InfoExtractor):
"""
    Extractor for some of the IGN sites, like www.ign.com, es.ign.com and de.ign.com.
    Some videos of it.ign.com are also supported.
"""
_VALID_URL = r'https?://.+?\.ign\.com/(?:[^/]+/)?(?P<type>videos|show_videos|articles|feature|(?:[^/]+/\d+/video))(/.+)?/(?P<name_or_id>.+)'
IE_NAME = 'ign.com'
_API_URL_TEMPLATE = 'http://apis.ign.com/video/v3/videos/%s'
_EMBED_RE = r'<iframe[^>]+?["\']((?:https?:)?//.+?\.ign\.com.+?/embed.+?)["\']'
_TESTS = [
{
'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
'md5': 'febda82c4bafecd2d44b6e1a18a595f8',
'info_dict': {
'id': '8f862beef863986b2785559b9e1aa599',
'ext': 'mp4',
'title': 'The Last of Us Review',
'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c',
'timestamp': 1370440800,
'upload_date': '20130605',
'uploader_id': 'cberidon@ign.com',
}
},
{
'url': 'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind',
'info_dict': {
'id': '100-little-things-in-gta-5-that-will-blow-your-mind',
},
'playlist': [
{
'info_dict': {
'id': '5ebbd138523268b93c9141af17bec937',
'ext': 'mp4',
'title': 'GTA 5 Video Review',
'description': 'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.',
'timestamp': 1379339880,
'upload_date': '20130916',
'uploader_id': 'danieljkrupa@gmail.com',
},
},
{
'info_dict': {
'id': '638672ee848ae4ff108df2a296418ee2',
'ext': 'mp4',
'title': '26 Twisted Moments from GTA 5 in Slow Motion',
'description': 'The twisted beauty of GTA 5 in stunning slow motion.',
'timestamp': 1386878820,
'upload_date': '20131212',
'uploader_id': 'togilvie@ign.com',
},
},
],
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.ign.com/articles/2014/08/15/rewind-theater-wild-trailer-gamescom-2014?watch',
'md5': '618fedb9c901fd086f6f093564ef8558',
'info_dict': {
'id': '078fdd005f6d3c02f63d795faa1b984f',
'ext': 'mp4',
'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
'description': 'Brian and Jared explore Michel Ancel\'s captivating new preview.',
'timestamp': 1408047180,
'upload_date': '20140814',
'uploader_id': 'jamesduggan1990@gmail.com',
},
},
{
'url': 'http://me.ign.com/en/videos/112203/video/how-hitman-aims-to-be-different-than-every-other-s',
'only_matching': True,
},
{
'url': 'http://me.ign.com/ar/angry-birds-2/106533/video/lrd-ldyy-lwl-lfylm-angry-birds',
'only_matching': True,
},
]
def _find_video_id(self, webpage):
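        # _search_regex accepts a list of patterns and tries each one in
        # order, returning the first capturing group that matches;
        # default=None yields None instead of raising when nothing matches.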
res_id = [
r'"video_id"\s*:\s*"(.*?)"',
r'class="hero-poster[^"]*?"[^>]*id="(.+?)"',
r'data-video-id="(.+?)"',
r'<object id="vid_(.+?)"',
r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
]
return self._search_regex(res_id, webpage, 'video id', default=None)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name_or_id = mobj.group('name_or_id')
page_type = mobj.group('type')
webpage = self._download_webpage(url, name_or_id)
if page_type != 'video':
multiple_urls = re.findall(
r'<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
webpage)
if multiple_urls:
entries = [self.url_result(u, ie='IGN') for u in multiple_urls]
return {
'_type': 'playlist',
'id': name_or_id,
'entries': entries,
}
video_id = self._find_video_id(webpage)
if not video_id:
return self.url_result(self._search_regex(
self._EMBED_RE, webpage, 'embed url'))
return self._get_video_info(video_id)
def _get_video_info(self, video_id):
api_data = self._download_json(
self._API_URL_TEMPLATE % video_id, video_id)
formats = []
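        # The HLS/HDS manifests are optional; fatal=False below keeps
        # extraction going if a listed manifest fails to download.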
m3u8_url = api_data['refs'].get('m3uUrl')
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
f4m_url = api_data['refs'].get('f4mUrl')
if f4m_url:
formats.extend(self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False))
for asset in api_data['assets']:
formats.append({
'url': asset['url'],
'tbr': asset.get('actual_bitrate_kbps'),
'fps': asset.get('frame_rate'),
'height': int_or_none(asset.get('height')),
'width': int_or_none(asset.get('width')),
})
self._sort_formats(formats)
thumbnails = [{
'url': thumbnail['url']
} for thumbnail in api_data.get('thumbnails', [])]
metadata = api_data['metadata']
return {
'id': api_data.get('videoId') or video_id,
            'title': metadata.get('longTitle') or metadata.get('name') or metadata.get('title'),
'description': metadata.get('description'),
'timestamp': parse_iso8601(metadata.get('publishDate')),
'duration': int_or_none(metadata.get('duration')),
'display_id': metadata.get('slug') or video_id,
'uploader_id': metadata.get('creator'),
'thumbnails': thumbnails,
'formats': formats,
}
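# Usage sketch (illustration, not part of the extractor file): the extractor
# is normally driven through youtube-dl's public API, e.g.
#
#   import youtube_dl
#   with youtube_dl.YoutubeDL() as ydl:
#       info = ydl.extract_info(
#           'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
#           download=False)
#       print(info['id'], info['title'])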
class OneUPIE(IGNIE):
_VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)\.html'
IE_NAME = '1up.com'
_TESTS = [{
'url': 'http://gamevideos.1up.com/video/id/34976.html',
'md5': 'c9cc69e07acb675c31a16719f909e347',
'info_dict': {
'id': '34976',
'ext': 'mp4',
'title': 'Sniper Elite V2 - Trailer',
'description': 'md5:bf0516c5ee32a3217aa703e9b1bc7826',
'timestamp': 1313099220,
'upload_date': '20110811',
'uploader_id': 'IGN',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
result = super(OneUPIE, self)._real_extract(url)
result['id'] = mobj.group('name_or_id')
return result
class PCMagIE(IGNIE):
_VALID_URL = r'https?://(?:www\.)?pcmag\.com/(?P<type>videos|article2)(/.+)?/(?P<name_or_id>.+)'
IE_NAME = 'pcmag'
_EMBED_RE = r'iframe.setAttribute\("src",\s*__util.objToUrlString\("http://widgets\.ign\.com/video/embed/content.html?[^"]*url=([^"]+)["&]'
_TESTS = [{
'url': 'http://www.pcmag.com/videos/2015/01/06/010615-whats-new-now-is-gogo-snooping-on-your-data',
'md5': '212d6154fd0361a2781075f1febbe9ad',
'info_dict': {
'id': 'ee10d774b508c9b8ec07e763b9125b91',
'ext': 'mp4',
'title': '010615_What\'s New Now: Is GoGo Snooping on Your Data?',
'description': 'md5:a7071ae64d2f68cc821c729d4ded6bb3',
'timestamp': 1420571160,
'upload_date': '20150106',
'uploader_id': 'cozzipix@gmail.com',
}
}, {
'url': 'http://www.pcmag.com/article2/0,2817,2470156,00.asp',
'md5': '94130c1ca07ba0adb6088350681f16c1',
'info_dict': {
'id': '042e560ba94823d43afcb12ddf7142ca',
'ext': 'mp4',
'title': 'HTC\'s Weird New Re Camera - What\'s New Now',
'description': 'md5:53433c45df96d2ea5d0fda18be2ca908',
'timestamp': 1412953920,
'upload_date': '20141010',
'uploader_id': 'chris_snyder@pcmag.com',
}
}]
|
hltbra/story_parser
|
refs/heads/master
|
src/__init__.py
|
7
|
from parser import *
|
timm/timmnix
|
refs/heads/master
|
pypy3-v5.5.0-linux64/lib-python/3/msilib/schema.py
|
90
|
from . import Table
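# Each add_field(index, name, type) call below declares one column; the third
# argument appears to be the raw Windows Installer column-definition bitmask
# (encoding string vs. integer storage, width, nullability and key flags)
# rather than a symbolic type.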
_Validation = Table('_Validation')
_Validation.add_field(1,'Table',11552)
_Validation.add_field(2,'Column',11552)
_Validation.add_field(3,'Nullable',3332)
_Validation.add_field(4,'MinValue',4356)
_Validation.add_field(5,'MaxValue',4356)
_Validation.add_field(6,'KeyTable',7679)
_Validation.add_field(7,'KeyColumn',5378)
_Validation.add_field(8,'Category',7456)
_Validation.add_field(9,'Set',7679)
_Validation.add_field(10,'Description',7679)
ActionText = Table('ActionText')
ActionText.add_field(1,'Action',11592)
ActionText.add_field(2,'Description',7936)
ActionText.add_field(3,'Template',7936)
AdminExecuteSequence = Table('AdminExecuteSequence')
AdminExecuteSequence.add_field(1,'Action',11592)
AdminExecuteSequence.add_field(2,'Condition',7679)
AdminExecuteSequence.add_field(3,'Sequence',5378)
Condition = Table('Condition')
Condition.add_field(1,'Feature_',11558)
Condition.add_field(2,'Level',9474)
Condition.add_field(3,'Condition',7679)
AdminUISequence = Table('AdminUISequence')
AdminUISequence.add_field(1,'Action',11592)
AdminUISequence.add_field(2,'Condition',7679)
AdminUISequence.add_field(3,'Sequence',5378)
AdvtExecuteSequence = Table('AdvtExecuteSequence')
AdvtExecuteSequence.add_field(1,'Action',11592)
AdvtExecuteSequence.add_field(2,'Condition',7679)
AdvtExecuteSequence.add_field(3,'Sequence',5378)
AdvtUISequence = Table('AdvtUISequence')
AdvtUISequence.add_field(1,'Action',11592)
AdvtUISequence.add_field(2,'Condition',7679)
AdvtUISequence.add_field(3,'Sequence',5378)
AppId = Table('AppId')
AppId.add_field(1,'AppId',11558)
AppId.add_field(2,'RemoteServerName',7679)
AppId.add_field(3,'LocalService',7679)
AppId.add_field(4,'ServiceParameters',7679)
AppId.add_field(5,'DllSurrogate',7679)
AppId.add_field(6,'ActivateAtStorage',5378)
AppId.add_field(7,'RunAsInteractiveUser',5378)
AppSearch = Table('AppSearch')
AppSearch.add_field(1,'Property',11592)
AppSearch.add_field(2,'Signature_',11592)
Property = Table('Property')
Property.add_field(1,'Property',11592)
Property.add_field(2,'Value',3840)
BBControl = Table('BBControl')
BBControl.add_field(1,'Billboard_',11570)
BBControl.add_field(2,'BBControl',11570)
BBControl.add_field(3,'Type',3378)
BBControl.add_field(4,'X',1282)
BBControl.add_field(5,'Y',1282)
BBControl.add_field(6,'Width',1282)
BBControl.add_field(7,'Height',1282)
BBControl.add_field(8,'Attributes',4356)
BBControl.add_field(9,'Text',7986)
Billboard = Table('Billboard')
Billboard.add_field(1,'Billboard',11570)
Billboard.add_field(2,'Feature_',3366)
Billboard.add_field(3,'Action',7474)
Billboard.add_field(4,'Ordering',5378)
Feature = Table('Feature')
Feature.add_field(1,'Feature',11558)
Feature.add_field(2,'Feature_Parent',7462)
Feature.add_field(3,'Title',8000)
Feature.add_field(4,'Description',8191)
Feature.add_field(5,'Display',5378)
Feature.add_field(6,'Level',1282)
Feature.add_field(7,'Directory_',7496)
Feature.add_field(8,'Attributes',1282)
Binary = Table('Binary')
Binary.add_field(1,'Name',11592)
Binary.add_field(2,'Data',2304)
BindImage = Table('BindImage')
BindImage.add_field(1,'File_',11592)
BindImage.add_field(2,'Path',7679)
File = Table('File')
File.add_field(1,'File',11592)
File.add_field(2,'Component_',3400)
File.add_field(3,'FileName',4095)
File.add_field(4,'FileSize',260)
File.add_field(5,'Version',7496)
File.add_field(6,'Language',7444)
File.add_field(7,'Attributes',5378)
File.add_field(8,'Sequence',1282)
CCPSearch = Table('CCPSearch')
CCPSearch.add_field(1,'Signature_',11592)
CheckBox = Table('CheckBox')
CheckBox.add_field(1,'Property',11592)
CheckBox.add_field(2,'Value',7488)
Class = Table('Class')
Class.add_field(1,'CLSID',11558)
Class.add_field(2,'Context',11552)
Class.add_field(3,'Component_',11592)
Class.add_field(4,'ProgId_Default',7679)
Class.add_field(5,'Description',8191)
Class.add_field(6,'AppId_',7462)
Class.add_field(7,'FileTypeMask',7679)
Class.add_field(8,'Icon_',7496)
Class.add_field(9,'IconIndex',5378)
Class.add_field(10,'DefInprocHandler',7456)
Class.add_field(11,'Argument',7679)
Class.add_field(12,'Feature_',3366)
Class.add_field(13,'Attributes',5378)
Component = Table('Component')
Component.add_field(1,'Component',11592)
Component.add_field(2,'ComponentId',7462)
Component.add_field(3,'Directory_',3400)
Component.add_field(4,'Attributes',1282)
Component.add_field(5,'Condition',7679)
Component.add_field(6,'KeyPath',7496)
Icon = Table('Icon')
Icon.add_field(1,'Name',11592)
Icon.add_field(2,'Data',2304)
ProgId = Table('ProgId')
ProgId.add_field(1,'ProgId',11775)
ProgId.add_field(2,'ProgId_Parent',7679)
ProgId.add_field(3,'Class_',7462)
ProgId.add_field(4,'Description',8191)
ProgId.add_field(5,'Icon_',7496)
ProgId.add_field(6,'IconIndex',5378)
ComboBox = Table('ComboBox')
ComboBox.add_field(1,'Property',11592)
ComboBox.add_field(2,'Order',9474)
ComboBox.add_field(3,'Value',3392)
ComboBox.add_field(4,'Text',8000)
CompLocator = Table('CompLocator')
CompLocator.add_field(1,'Signature_',11592)
CompLocator.add_field(2,'ComponentId',3366)
CompLocator.add_field(3,'Type',5378)
Complus = Table('Complus')
Complus.add_field(1,'Component_',11592)
Complus.add_field(2,'ExpType',13570)
Directory = Table('Directory')
Directory.add_field(1,'Directory',11592)
Directory.add_field(2,'Directory_Parent',7496)
Directory.add_field(3,'DefaultDir',4095)
Control = Table('Control')
Control.add_field(1,'Dialog_',11592)
Control.add_field(2,'Control',11570)
Control.add_field(3,'Type',3348)
Control.add_field(4,'X',1282)
Control.add_field(5,'Y',1282)
Control.add_field(6,'Width',1282)
Control.add_field(7,'Height',1282)
Control.add_field(8,'Attributes',4356)
Control.add_field(9,'Property',7474)
Control.add_field(10,'Text',7936)
Control.add_field(11,'Control_Next',7474)
Control.add_field(12,'Help',7986)
Dialog = Table('Dialog')
Dialog.add_field(1,'Dialog',11592)
Dialog.add_field(2,'HCentering',1282)
Dialog.add_field(3,'VCentering',1282)
Dialog.add_field(4,'Width',1282)
Dialog.add_field(5,'Height',1282)
Dialog.add_field(6,'Attributes',4356)
Dialog.add_field(7,'Title',8064)
Dialog.add_field(8,'Control_First',3378)
Dialog.add_field(9,'Control_Default',7474)
Dialog.add_field(10,'Control_Cancel',7474)
ControlCondition = Table('ControlCondition')
ControlCondition.add_field(1,'Dialog_',11592)
ControlCondition.add_field(2,'Control_',11570)
ControlCondition.add_field(3,'Action',11570)
ControlCondition.add_field(4,'Condition',11775)
ControlEvent = Table('ControlEvent')
ControlEvent.add_field(1,'Dialog_',11592)
ControlEvent.add_field(2,'Control_',11570)
ControlEvent.add_field(3,'Event',11570)
ControlEvent.add_field(4,'Argument',11775)
ControlEvent.add_field(5,'Condition',15871)
ControlEvent.add_field(6,'Ordering',5378)
CreateFolder = Table('CreateFolder')
CreateFolder.add_field(1,'Directory_',11592)
CreateFolder.add_field(2,'Component_',11592)
CustomAction = Table('CustomAction')
CustomAction.add_field(1,'Action',11592)
CustomAction.add_field(2,'Type',1282)
CustomAction.add_field(3,'Source',7496)
CustomAction.add_field(4,'Target',7679)
DrLocator = Table('DrLocator')
DrLocator.add_field(1,'Signature_',11592)
DrLocator.add_field(2,'Parent',15688)
DrLocator.add_field(3,'Path',15871)
DrLocator.add_field(4,'Depth',5378)
DuplicateFile = Table('DuplicateFile')
DuplicateFile.add_field(1,'FileKey',11592)
DuplicateFile.add_field(2,'Component_',3400)
DuplicateFile.add_field(3,'File_',3400)
DuplicateFile.add_field(4,'DestName',8191)
DuplicateFile.add_field(5,'DestFolder',7496)
Environment = Table('Environment')
Environment.add_field(1,'Environment',11592)
Environment.add_field(2,'Name',4095)
Environment.add_field(3,'Value',8191)
Environment.add_field(4,'Component_',3400)
Error = Table('Error')
Error.add_field(1,'Error',9474)
Error.add_field(2,'Message',7936)
EventMapping = Table('EventMapping')
EventMapping.add_field(1,'Dialog_',11592)
EventMapping.add_field(2,'Control_',11570)
EventMapping.add_field(3,'Event',11570)
EventMapping.add_field(4,'Attribute',3378)
Extension = Table('Extension')
Extension.add_field(1,'Extension',11775)
Extension.add_field(2,'Component_',11592)
Extension.add_field(3,'ProgId_',7679)
Extension.add_field(4,'MIME_',7488)
Extension.add_field(5,'Feature_',3366)
MIME = Table('MIME')
MIME.add_field(1,'ContentType',11584)
MIME.add_field(2,'Extension_',3583)
MIME.add_field(3,'CLSID',7462)
FeatureComponents = Table('FeatureComponents')
FeatureComponents.add_field(1,'Feature_',11558)
FeatureComponents.add_field(2,'Component_',11592)
FileSFPCatalog = Table('FileSFPCatalog')
FileSFPCatalog.add_field(1,'File_',11592)
FileSFPCatalog.add_field(2,'SFPCatalog_',11775)
SFPCatalog = Table('SFPCatalog')
SFPCatalog.add_field(1,'SFPCatalog',11775)
SFPCatalog.add_field(2,'Catalog',2304)
SFPCatalog.add_field(3,'Dependency',7424)
Font = Table('Font')
Font.add_field(1,'File_',11592)
Font.add_field(2,'FontTitle',7552)
IniFile = Table('IniFile')
IniFile.add_field(1,'IniFile',11592)
IniFile.add_field(2,'FileName',4095)
IniFile.add_field(3,'DirProperty',7496)
IniFile.add_field(4,'Section',3936)
IniFile.add_field(5,'Key',3968)
IniFile.add_field(6,'Value',4095)
IniFile.add_field(7,'Action',1282)
IniFile.add_field(8,'Component_',3400)
IniLocator = Table('IniLocator')
IniLocator.add_field(1,'Signature_',11592)
IniLocator.add_field(2,'FileName',3583)
IniLocator.add_field(3,'Section',3424)
IniLocator.add_field(4,'Key',3456)
IniLocator.add_field(5,'Field',5378)
IniLocator.add_field(6,'Type',5378)
InstallExecuteSequence = Table('InstallExecuteSequence')
InstallExecuteSequence.add_field(1,'Action',11592)
InstallExecuteSequence.add_field(2,'Condition',7679)
InstallExecuteSequence.add_field(3,'Sequence',5378)
InstallUISequence = Table('InstallUISequence')
InstallUISequence.add_field(1,'Action',11592)
InstallUISequence.add_field(2,'Condition',7679)
InstallUISequence.add_field(3,'Sequence',5378)
IsolatedComponent = Table('IsolatedComponent')
IsolatedComponent.add_field(1,'Component_Shared',11592)
IsolatedComponent.add_field(2,'Component_Application',11592)
LaunchCondition = Table('LaunchCondition')
LaunchCondition.add_field(1,'Condition',11775)
LaunchCondition.add_field(2,'Description',4095)
ListBox = Table('ListBox')
ListBox.add_field(1,'Property',11592)
ListBox.add_field(2,'Order',9474)
ListBox.add_field(3,'Value',3392)
ListBox.add_field(4,'Text',8000)
ListView = Table('ListView')
ListView.add_field(1,'Property',11592)
ListView.add_field(2,'Order',9474)
ListView.add_field(3,'Value',3392)
ListView.add_field(4,'Text',8000)
ListView.add_field(5,'Binary_',7496)
LockPermissions = Table('LockPermissions')
LockPermissions.add_field(1,'LockObject',11592)
LockPermissions.add_field(2,'Table',11552)
LockPermissions.add_field(3,'Domain',15871)
LockPermissions.add_field(4,'User',11775)
LockPermissions.add_field(5,'Permission',4356)
Media = Table('Media')
Media.add_field(1,'DiskId',9474)
Media.add_field(2,'LastSequence',1282)
Media.add_field(3,'DiskPrompt',8000)
Media.add_field(4,'Cabinet',7679)
Media.add_field(5,'VolumeLabel',7456)
Media.add_field(6,'Source',7496)
MoveFile = Table('MoveFile')
MoveFile.add_field(1,'FileKey',11592)
MoveFile.add_field(2,'Component_',3400)
MoveFile.add_field(3,'SourceName',8191)
MoveFile.add_field(4,'DestName',8191)
MoveFile.add_field(5,'SourceFolder',7496)
MoveFile.add_field(6,'DestFolder',3400)
MoveFile.add_field(7,'Options',1282)
MsiAssembly = Table('MsiAssembly')
MsiAssembly.add_field(1,'Component_',11592)
MsiAssembly.add_field(2,'Feature_',3366)
MsiAssembly.add_field(3,'File_Manifest',7496)
MsiAssembly.add_field(4,'File_Application',7496)
MsiAssembly.add_field(5,'Attributes',5378)
MsiAssemblyName = Table('MsiAssemblyName')
MsiAssemblyName.add_field(1,'Component_',11592)
MsiAssemblyName.add_field(2,'Name',11775)
MsiAssemblyName.add_field(3,'Value',3583)
MsiDigitalCertificate = Table('MsiDigitalCertificate')
MsiDigitalCertificate.add_field(1,'DigitalCertificate',11592)
MsiDigitalCertificate.add_field(2,'CertData',2304)
MsiDigitalSignature = Table('MsiDigitalSignature')
MsiDigitalSignature.add_field(1,'Table',11552)
MsiDigitalSignature.add_field(2,'SignObject',11592)
MsiDigitalSignature.add_field(3,'DigitalCertificate_',3400)
MsiDigitalSignature.add_field(4,'Hash',6400)
MsiFileHash = Table('MsiFileHash')
MsiFileHash.add_field(1,'File_',11592)
MsiFileHash.add_field(2,'Options',1282)
MsiFileHash.add_field(3,'HashPart1',260)
MsiFileHash.add_field(4,'HashPart2',260)
MsiFileHash.add_field(5,'HashPart3',260)
MsiFileHash.add_field(6,'HashPart4',260)
MsiPatchHeaders = Table('MsiPatchHeaders')
MsiPatchHeaders.add_field(1,'StreamRef',11558)
MsiPatchHeaders.add_field(2,'Header',2304)
ODBCAttribute = Table('ODBCAttribute')
ODBCAttribute.add_field(1,'Driver_',11592)
ODBCAttribute.add_field(2,'Attribute',11560)
ODBCAttribute.add_field(3,'Value',8191)
ODBCDriver = Table('ODBCDriver')
ODBCDriver.add_field(1,'Driver',11592)
ODBCDriver.add_field(2,'Component_',3400)
ODBCDriver.add_field(3,'Description',3583)
ODBCDriver.add_field(4,'File_',3400)
ODBCDriver.add_field(5,'File_Setup',7496)
ODBCDataSource = Table('ODBCDataSource')
ODBCDataSource.add_field(1,'DataSource',11592)
ODBCDataSource.add_field(2,'Component_',3400)
ODBCDataSource.add_field(3,'Description',3583)
ODBCDataSource.add_field(4,'DriverDescription',3583)
ODBCDataSource.add_field(5,'Registration',1282)
ODBCSourceAttribute = Table('ODBCSourceAttribute')
ODBCSourceAttribute.add_field(1,'DataSource_',11592)
ODBCSourceAttribute.add_field(2,'Attribute',11552)
ODBCSourceAttribute.add_field(3,'Value',8191)
ODBCTranslator = Table('ODBCTranslator')
ODBCTranslator.add_field(1,'Translator',11592)
ODBCTranslator.add_field(2,'Component_',3400)
ODBCTranslator.add_field(3,'Description',3583)
ODBCTranslator.add_field(4,'File_',3400)
ODBCTranslator.add_field(5,'File_Setup',7496)
Patch = Table('Patch')
Patch.add_field(1,'File_',11592)
Patch.add_field(2,'Sequence',9474)
Patch.add_field(3,'PatchSize',260)
Patch.add_field(4,'Attributes',1282)
Patch.add_field(5,'Header',6400)
Patch.add_field(6,'StreamRef_',7462)
PatchPackage = Table('PatchPackage')
PatchPackage.add_field(1,'PatchId',11558)
PatchPackage.add_field(2,'Media_',1282)
PublishComponent = Table('PublishComponent')
PublishComponent.add_field(1,'ComponentId',11558)
PublishComponent.add_field(2,'Qualifier',11775)
PublishComponent.add_field(3,'Component_',11592)
PublishComponent.add_field(4,'AppData',8191)
PublishComponent.add_field(5,'Feature_',3366)
RadioButton = Table('RadioButton')
RadioButton.add_field(1,'Property',11592)
RadioButton.add_field(2,'Order',9474)
RadioButton.add_field(3,'Value',3392)
RadioButton.add_field(4,'X',1282)
RadioButton.add_field(5,'Y',1282)
RadioButton.add_field(6,'Width',1282)
RadioButton.add_field(7,'Height',1282)
RadioButton.add_field(8,'Text',8000)
RadioButton.add_field(9,'Help',7986)
Registry = Table('Registry')
Registry.add_field(1,'Registry',11592)
Registry.add_field(2,'Root',1282)
Registry.add_field(3,'Key',4095)
Registry.add_field(4,'Name',8191)
Registry.add_field(5,'Value',7936)
Registry.add_field(6,'Component_',3400)
RegLocator = Table('RegLocator')
RegLocator.add_field(1,'Signature_',11592)
RegLocator.add_field(2,'Root',1282)
RegLocator.add_field(3,'Key',3583)
RegLocator.add_field(4,'Name',7679)
RegLocator.add_field(5,'Type',5378)
RemoveFile = Table('RemoveFile')
RemoveFile.add_field(1,'FileKey',11592)
RemoveFile.add_field(2,'Component_',3400)
RemoveFile.add_field(3,'FileName',8191)
RemoveFile.add_field(4,'DirProperty',3400)
RemoveFile.add_field(5,'InstallMode',1282)
RemoveIniFile = Table('RemoveIniFile')
RemoveIniFile.add_field(1,'RemoveIniFile',11592)
RemoveIniFile.add_field(2,'FileName',4095)
RemoveIniFile.add_field(3,'DirProperty',7496)
RemoveIniFile.add_field(4,'Section',3936)
RemoveIniFile.add_field(5,'Key',3968)
RemoveIniFile.add_field(6,'Value',8191)
RemoveIniFile.add_field(7,'Action',1282)
RemoveIniFile.add_field(8,'Component_',3400)
RemoveRegistry = Table('RemoveRegistry')
RemoveRegistry.add_field(1,'RemoveRegistry',11592)
RemoveRegistry.add_field(2,'Root',1282)
RemoveRegistry.add_field(3,'Key',4095)
RemoveRegistry.add_field(4,'Name',8191)
RemoveRegistry.add_field(5,'Component_',3400)
ReserveCost = Table('ReserveCost')
ReserveCost.add_field(1,'ReserveKey',11592)
ReserveCost.add_field(2,'Component_',3400)
ReserveCost.add_field(3,'ReserveFolder',7496)
ReserveCost.add_field(4,'ReserveLocal',260)
ReserveCost.add_field(5,'ReserveSource',260)
SelfReg = Table('SelfReg')
SelfReg.add_field(1,'File_',11592)
SelfReg.add_field(2,'Cost',5378)
ServiceControl = Table('ServiceControl')
ServiceControl.add_field(1,'ServiceControl',11592)
ServiceControl.add_field(2,'Name',4095)
ServiceControl.add_field(3,'Event',1282)
ServiceControl.add_field(4,'Arguments',8191)
ServiceControl.add_field(5,'Wait',5378)
ServiceControl.add_field(6,'Component_',3400)
ServiceInstall = Table('ServiceInstall')
ServiceInstall.add_field(1,'ServiceInstall',11592)
ServiceInstall.add_field(2,'Name',3583)
ServiceInstall.add_field(3,'DisplayName',8191)
ServiceInstall.add_field(4,'ServiceType',260)
ServiceInstall.add_field(5,'StartType',260)
ServiceInstall.add_field(6,'ErrorControl',260)
ServiceInstall.add_field(7,'LoadOrderGroup',7679)
ServiceInstall.add_field(8,'Dependencies',7679)
ServiceInstall.add_field(9,'StartName',7679)
ServiceInstall.add_field(10,'Password',7679)
ServiceInstall.add_field(11,'Arguments',7679)
ServiceInstall.add_field(12,'Component_',3400)
ServiceInstall.add_field(13,'Description',8191)
Shortcut = Table('Shortcut')
Shortcut.add_field(1,'Shortcut',11592)
Shortcut.add_field(2,'Directory_',3400)
Shortcut.add_field(3,'Name',3968)
Shortcut.add_field(4,'Component_',3400)
Shortcut.add_field(5,'Target',3400)
Shortcut.add_field(6,'Arguments',7679)
Shortcut.add_field(7,'Description',8191)
Shortcut.add_field(8,'Hotkey',5378)
Shortcut.add_field(9,'Icon_',7496)
Shortcut.add_field(10,'IconIndex',5378)
Shortcut.add_field(11,'ShowCmd',5378)
Shortcut.add_field(12,'WkDir',7496)
Signature = Table('Signature')
Signature.add_field(1,'Signature',11592)
Signature.add_field(2,'FileName',3583)
Signature.add_field(3,'MinVersion',7444)
Signature.add_field(4,'MaxVersion',7444)
Signature.add_field(5,'MinSize',4356)
Signature.add_field(6,'MaxSize',4356)
Signature.add_field(7,'MinDate',4356)
Signature.add_field(8,'MaxDate',4356)
Signature.add_field(9,'Languages',7679)
TextStyle = Table('TextStyle')
TextStyle.add_field(1,'TextStyle',11592)
TextStyle.add_field(2,'FaceName',3360)
TextStyle.add_field(3,'Size',1282)
TextStyle.add_field(4,'Color',4356)
TextStyle.add_field(5,'StyleBits',5378)
TypeLib = Table('TypeLib')
TypeLib.add_field(1,'LibID',11558)
TypeLib.add_field(2,'Language',9474)
TypeLib.add_field(3,'Component_',11592)
TypeLib.add_field(4,'Version',4356)
TypeLib.add_field(5,'Description',8064)
TypeLib.add_field(6,'Directory_',7496)
TypeLib.add_field(7,'Feature_',3366)
TypeLib.add_field(8,'Cost',4356)
UIText = Table('UIText')
UIText.add_field(1,'Key',11592)
UIText.add_field(2,'Text',8191)
Upgrade = Table('Upgrade')
Upgrade.add_field(1,'UpgradeCode',11558)
Upgrade.add_field(2,'VersionMin',15636)
Upgrade.add_field(3,'VersionMax',15636)
Upgrade.add_field(4,'Language',15871)
Upgrade.add_field(5,'Attributes',8452)
Upgrade.add_field(6,'Remove',7679)
Upgrade.add_field(7,'ActionProperty',3400)
Verb = Table('Verb')
Verb.add_field(1,'Extension_',11775)
Verb.add_field(2,'Verb',11552)
Verb.add_field(3,'Sequence',5378)
Verb.add_field(4,'Command',8191)
Verb.add_field(5,'Argument',8191)
tables=[_Validation, ActionText, AdminExecuteSequence, Condition, AdminUISequence, AdvtExecuteSequence, AdvtUISequence, AppId, AppSearch, Property, BBControl, Billboard, Feature, Binary, BindImage, File, CCPSearch, CheckBox, Class, Component, Icon, ProgId, ComboBox, CompLocator, Complus, Directory, Control, Dialog, ControlCondition, ControlEvent, CreateFolder, CustomAction, DrLocator, DuplicateFile, Environment, Error, EventMapping, Extension, MIME, FeatureComponents, FileSFPCatalog, SFPCatalog, Font, IniFile, IniLocator, InstallExecuteSequence, InstallUISequence, IsolatedComponent, LaunchCondition, ListBox, ListView, LockPermissions, Media, MoveFile, MsiAssembly, MsiAssemblyName, MsiDigitalCertificate, MsiDigitalSignature, MsiFileHash, MsiPatchHeaders, ODBCAttribute, ODBCDriver, ODBCDataSource, ODBCSourceAttribute, ODBCTranslator, Patch, PatchPackage, PublishComponent, RadioButton, Registry, RegLocator, RemoveFile, RemoveIniFile, RemoveRegistry, ReserveCost, SelfReg, ServiceControl, ServiceInstall, Shortcut, Signature, TextStyle, TypeLib, UIText, Upgrade, Verb]
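# Usage sketch (assumption for illustration): msilib consumes this module's
# `tables` list and `_Validation_records` when creating a fresh database, e.g.
#
#   import msilib
#   from msilib import schema, sequence
#   db = msilib.init_database('example.msi', schema, 'ExampleProduct',
#                             msilib.gen_uuid(), '1.0.0', 'Example Corp')
#   msilib.add_tables(db, sequence)
#   db.Commit()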
_Validation_records = [
('_Validation','Table','N',None, None, None, None, 'Identifier',None, 'Name of table',),
('_Validation','Column','N',None, None, None, None, 'Identifier',None, 'Name of column',),
('_Validation','Description','Y',None, None, None, None, 'Text',None, 'Description of column',),
('_Validation','Set','Y',None, None, None, None, 'Text',None, 'Set of values that are permitted',),
('_Validation','Category','Y',None, None, None, None, None, 'Text;Formatted;Template;Condition;Guid;Path;Version;Language;Identifier;Binary;UpperCase;LowerCase;Filename;Paths;AnyPath;WildCardFilename;RegPath;KeyFormatted;CustomSource;Property;Cabinet;Shortcut;URL','String category',),
('_Validation','KeyColumn','Y',1,32,None, None, None, None, 'Column to which foreign key connects',),
('_Validation','KeyTable','Y',None, None, None, None, 'Identifier',None, 'For foreign key, Name of table to which data must link',),
('_Validation','MaxValue','Y',-2147483647,2147483647,None, None, None, None, 'Maximum value allowed',),
('_Validation','MinValue','Y',-2147483647,2147483647,None, None, None, None, 'Minimum value allowed',),
('_Validation','Nullable','N',None, None, None, None, None, 'Y;N;@','Whether the column is nullable',),
('ActionText','Description','Y',None, None, None, None, 'Text',None, 'Localized description displayed in progress dialog and log when action is executing.',),
('ActionText','Action','N',None, None, None, None, 'Identifier',None, 'Name of action to be described.',),
('ActionText','Template','Y',None, None, None, None, 'Template',None, 'Optional localized format template used to format action data records for display during action execution.',),
('AdminExecuteSequence','Action','N',None, None, None, None, 'Identifier',None, 'Name of action to invoke, either in the engine or the handler DLL.',),
('AdminExecuteSequence','Condition','Y',None, None, None, None, 'Condition',None, 'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
('AdminExecuteSequence','Sequence','Y',-4,32767,None, None, None, None, 'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
('Condition','Condition','Y',None, None, None, None, 'Condition',None, 'Expression evaluated to determine if Level in the Feature table is to change.',),
('Condition','Feature_','N',None, None, 'Feature',1,'Identifier',None, 'Reference to a Feature entry in Feature table.',),
('Condition','Level','N',0,32767,None, None, None, None, 'New selection Level to set in Feature table if Condition evaluates to TRUE.',),
('AdminUISequence','Action','N',None, None, None, None, 'Identifier',None, 'Name of action to invoke, either in the engine or the handler DLL.',),
('AdminUISequence','Condition','Y',None, None, None, None, 'Condition',None, 'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
('AdminUISequence','Sequence','Y',-4,32767,None, None, None, None, 'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
('AdvtExecuteSequence','Action','N',None, None, None, None, 'Identifier',None, 'Name of action to invoke, either in the engine or the handler DLL.',),
('AdvtExecuteSequence','Condition','Y',None, None, None, None, 'Condition',None, 'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
('AdvtExecuteSequence','Sequence','Y',-4,32767,None, None, None, None, 'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
('AdvtUISequence','Action','N',None, None, None, None, 'Identifier',None, 'Name of action to invoke, either in the engine or the handler DLL.',),
('AdvtUISequence','Condition','Y',None, None, None, None, 'Condition',None, 'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
('AdvtUISequence','Sequence','Y',-4,32767,None, None, None, None, 'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
('AppId','AppId','N',None, None, None, None, 'Guid',None, None, ),
('AppId','ActivateAtStorage','Y',0,1,None, None, None, None, None, ),
('AppId','DllSurrogate','Y',None, None, None, None, 'Text',None, None, ),
('AppId','LocalService','Y',None, None, None, None, 'Text',None, None, ),
('AppId','RemoteServerName','Y',None, None, None, None, 'Formatted',None, None, ),
('AppId','RunAsInteractiveUser','Y',0,1,None, None, None, None, None, ),
('AppId','ServiceParameters','Y',None, None, None, None, 'Text',None, None, ),
('AppSearch','Property','N',None, None, None, None, 'Identifier',None, 'The property associated with a Signature',),
('AppSearch','Signature_','N',None, None, 'Signature;RegLocator;IniLocator;DrLocator;CompLocator',1,'Identifier',None, 'The Signature_ represents a unique file signature and is also the foreign key in the Signature, RegLocator, IniLocator, CompLocator and the DrLocator tables.',),
('Property','Property','N',None, None, None, None, 'Identifier',None, 'Name of property, uppercase if settable by launcher or loader.',),
('Property','Value','N',None, None, None, None, 'Text',None, 'String value for property. Never null or empty.',),
('BBControl','Type','N',None, None, None, None, 'Identifier',None, 'The type of the control.',),
('BBControl','Y','N',0,32767,None, None, None, None, 'Vertical coordinate of the upper left corner of the bounding rectangle of the control.',),
('BBControl','Text','Y',None, None, None, None, 'Text',None, 'A string used to set the initial text contained within a control (if appropriate).',),
('BBControl','BBControl','N',None, None, None, None, 'Identifier',None, 'Name of the control. This name must be unique within a billboard, but can repeat on different billboard.',),
('BBControl','Attributes','Y',0,2147483647,None, None, None, None, 'A 32-bit word that specifies the attribute flags to be applied to this control.',),
('BBControl','Billboard_','N',None, None, 'Billboard',1,'Identifier',None, 'External key to the Billboard table, name of the billboard.',),
('BBControl','Height','N',0,32767,None, None, None, None, 'Height of the bounding rectangle of the control.',),
('BBControl','Width','N',0,32767,None, None, None, None, 'Width of the bounding rectangle of the control.',),
('BBControl','X','N',0,32767,None, None, None, None, 'Horizontal coordinate of the upper left corner of the bounding rectangle of the control.',),
('Billboard','Action','Y',None, None, None, None, 'Identifier',None, 'The name of an action. The billboard is displayed during the progress messages received from this action.',),
('Billboard','Billboard','N',None, None, None, None, 'Identifier',None, 'Name of the billboard.',),
('Billboard','Feature_','N',None, None, 'Feature',1,'Identifier',None, 'An external key to the Feature Table. The billboard is shown only if this feature is being installed.',),
('Billboard','Ordering','Y',0,32767,None, None, None, None, 'A positive integer. If there is more than one billboard corresponding to an action they will be shown in the order defined by this column.',),
('Feature','Description','Y',None, None, None, None, 'Text',None, 'Longer descriptive text describing a visible feature item.',),
('Feature','Attributes','N',None, None, None, None, None, '0;1;2;4;5;6;8;9;10;16;17;18;20;21;22;24;25;26;32;33;34;36;37;38;48;49;50;52;53;54','Feature attributes',),
('Feature','Feature','N',None, None, None, None, 'Identifier',None, 'Primary key used to identify a particular feature record.',),
('Feature','Directory_','Y',None, None, 'Directory',1,'UpperCase',None, 'The name of the Directory that can be configured by the UI. A non-null value will enable the browse button.',),
('Feature','Level','N',0,32767,None, None, None, None, 'The install level at which record will be initially selected. An install level of 0 will disable an item and prevent its display.',),
('Feature','Title','Y',None, None, None, None, 'Text',None, 'Short text identifying a visible feature item.',),
('Feature','Display','Y',0,32767,None, None, None, None, 'Numeric sort order, used to force a specific display ordering.',),
('Feature','Feature_Parent','Y',None, None, 'Feature',1,'Identifier',None, 'Optional key of a parent record in the same table. If the parent is not selected, then the record will not be installed. Null indicates a root item.',),
('Binary','Name','N',None, None, None, None, 'Identifier',None, 'Unique key identifying the binary data.',),
('Binary','Data','N',None, None, None, None, 'Binary',None, 'The unformatted binary data.',),
('BindImage','File_','N',None, None, 'File',1,'Identifier',None, 'The index into the File table. This must be an executable file.',),
('BindImage','Path','Y',None, None, None, None, 'Paths',None, 'A list of ; delimited paths that represent the paths to be searched for the import DLLS. The list is usually a list of properties each enclosed within square brackets [] .',),
('File','Sequence','N',1,32767,None, None, None, None, 'Sequence with respect to the media images; order must track cabinet order.',),
('File','Attributes','Y',0,32767,None, None, None, None, 'Integer containing bit flags representing file attributes (with the decimal value of each bit position in parentheses)',),
('File','File','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized token, must match identifier in cabinet. For uncompressed files, this field is ignored.',),
('File','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key referencing Component that controls the file.',),
('File','FileName','N',None, None, None, None, 'Filename',None, 'File name used for installation, may be localized. This may contain a "short name|long name" pair.',),
('File','FileSize','N',0,2147483647,None, None, None, None, 'Size of file in bytes (integer).',),
('File','Language','Y',None, None, None, None, 'Language',None, 'List of decimal language Ids, comma-separated if more than one.',),
('File','Version','Y',None, None, 'File',1,'Version',None, 'Version string for versioned files; Blank for unversioned files.',),
('CCPSearch','Signature_','N',None, None, 'Signature;RegLocator;IniLocator;DrLocator;CompLocator',1,'Identifier',None, 'The Signature_ represents a unique file signature and is also the foreign key in the Signature, RegLocator, IniLocator, CompLocator and the DrLocator tables.',),
('CheckBox','Property','N',None, None, None, None, 'Identifier',None, 'A named property to be tied to the item.',),
('CheckBox','Value','Y',None, None, None, None, 'Formatted',None, 'The value string associated with the item.',),
('Class','Description','Y',None, None, None, None, 'Text',None, 'Localized description for the Class.',),
('Class','Attributes','Y',None, 32767,None, None, None, None, 'Class registration attributes.',),
('Class','Feature_','N',None, None, 'Feature',1,'Identifier',None, 'Required foreign key into the Feature Table, specifying the feature to validate or install in order for the CLSID factory to be operational.',),
('Class','AppId_','Y',None, None, 'AppId',1,'Guid',None, 'Optional AppID containing DCOM information for associated application (string GUID).',),
('Class','Argument','Y',None, None, None, None, 'Formatted',None, 'optional argument for LocalServers.',),
('Class','CLSID','N',None, None, None, None, 'Guid',None, 'The CLSID of an OLE factory.',),
('Class','Component_','N',None, None, 'Component',1,'Identifier',None, 'Required foreign key into the Component Table, specifying the component for which to return a path when called through LocateComponent.',),
('Class','Context','N',None, None, None, None, 'Identifier',None, 'The numeric server context for this server. CLSCTX_xxxx',),
('Class','DefInprocHandler','Y',None, None, None, None, 'Filename','1;2;3','Optional default inproc handler. Only optionally provided if Context=CLSCTX_LOCAL_SERVER. Typically "ole32.dll" or "mapi32.dll"',),
('Class','FileTypeMask','Y',None, None, None, None, 'Text',None, 'Optional string containing information for the HKCRthis CLSID) key. If multiple patterns exist, they must be delimited by a semicolon, and numeric subkeys will be generated: 0,1,2...',),
('Class','Icon_','Y',None, None, 'Icon',1,'Identifier',None, 'Optional foreign key into the Icon Table, specifying the icon file associated with this CLSID. Will be written under the DefaultIcon key.',),
('Class','IconIndex','Y',-32767,32767,None, None, None, None, 'Optional icon index.',),
('Class','ProgId_Default','Y',None, None, 'ProgId',1,'Text',None, 'Optional ProgId associated with this CLSID.',),
('Component','Condition','Y',None, None, None, None, 'Condition',None, "A conditional statement that will disable this component if the specified condition evaluates to the 'True' state. If a component is disabled, it will not be installed, regardless of the 'Action' state associated with the component.",),
('Component','Attributes','N',None, None, None, None, None, None, 'Remote execution option, one of irsEnum',),
('Component','Component','N',None, None, None, None, 'Identifier',None, 'Primary key used to identify a particular component record.',),
('Component','ComponentId','Y',None, None, None, None, 'Guid',None, 'A string GUID unique to this component, version, and language.',),
('Component','Directory_','N',None, None, 'Directory',1,'Identifier',None, 'Required key of a Directory table record. This is actually a property name whose value contains the actual path, set either by the AppSearch action or with the default setting obtained from the Directory table.',),
('Component','KeyPath','Y',None, None, 'File;Registry;ODBCDataSource',1,'Identifier',None, 'Either the primary key into the File table, Registry table, or ODBCDataSource table. This extract path is stored when the component is installed, and is used to detect the presence of the component and to return the path to it.',),
('Icon','Name','N',None, None, None, None, 'Identifier',None, 'Primary key. Name of the icon file.',),
('Icon','Data','N',None, None, None, None, 'Binary',None, 'Binary stream. The binary icon data in PE (.DLL or .EXE) or icon (.ICO) format.',),
('ProgId','Description','Y',None, None, None, None, 'Text',None, 'Localized description for the Program identifier.',),
('ProgId','Icon_','Y',None, None, 'Icon',1,'Identifier',None, 'Optional foreign key into the Icon Table, specifying the icon file associated with this ProgId. Will be written under the DefaultIcon key.',),
('ProgId','IconIndex','Y',-32767,32767,None, None, None, None, 'Optional icon index.',),
('ProgId','ProgId','N',None, None, None, None, 'Text',None, 'The Program Identifier. Primary key.',),
('ProgId','Class_','Y',None, None, 'Class',1,'Guid',None, 'The CLSID of an OLE factory corresponding to the ProgId.',),
('ProgId','ProgId_Parent','Y',None, None, 'ProgId',1,'Text',None, 'The Parent Program Identifier. If specified, the ProgId column becomes a version independent prog id.',),
('ComboBox','Text','Y',None, None, None, None, 'Formatted',None, 'The visible text to be assigned to the item. Optional. If this entry or the entire column is missing, the text is the same as the value.',),
('ComboBox','Property','N',None, None, None, None, 'Identifier',None, 'A named property to be tied to this item. All the items tied to the same property become part of the same combobox.',),
('ComboBox','Value','N',None, None, None, None, 'Formatted',None, 'The value string associated with this item. Selecting the line will set the associated property to this value.',),
('ComboBox','Order','N',1,32767,None, None, None, None, 'A positive integer used to determine the ordering of the items within one list.\tThe integers do not have to be consecutive.',),
('CompLocator','Type','Y',0,1,None, None, None, None, 'A boolean value that determines if the registry value is a filename or a directory location.',),
('CompLocator','Signature_','N',None, None, None, None, 'Identifier',None, 'The table key. The Signature_ represents a unique file signature and is also the foreign key in the Signature table.',),
('CompLocator','ComponentId','N',None, None, None, None, 'Guid',None, 'A string GUID unique to this component, version, and language.',),
('Complus','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key referencing Component that controls the ComPlus component.',),
('Complus','ExpType','Y',0,32767,None, None, None, None, 'ComPlus component attributes.',),
('Directory','Directory','N',None, None, None, None, 'Identifier',None, 'Unique identifier for directory entry, primary key. If a property by this name is defined, it contains the full path to the directory.',),
('Directory','DefaultDir','N',None, None, None, None, 'DefaultDir',None, "The default sub-path under parent's path.",),
('Directory','Directory_Parent','Y',None, None, 'Directory',1,'Identifier',None, 'Reference to the entry in this table specifying the default parent directory. A record parented to itself or with a Null parent represents a root of the install tree.',),
('Control','Type','N',None, None, None, None, 'Identifier',None, 'The type of the control.',),
('Control','Y','N',0,32767,None, None, None, None, 'Vertical coordinate of the upper left corner of the bounding rectangle of the control.',),
('Control','Text','Y',None, None, None, None, 'Formatted',None, 'A string used to set the initial text contained within a control (if appropriate).',),
('Control','Property','Y',None, None, None, None, 'Identifier',None, 'The name of a defined property to be linked to this control. ',),
('Control','Attributes','Y',0,2147483647,None, None, None, None, 'A 32-bit word that specifies the attribute flags to be applied to this control.',),
('Control','Height','N',0,32767,None, None, None, None, 'Height of the bounding rectangle of the control.',),
('Control','Width','N',0,32767,None, None, None, None, 'Width of the bounding rectangle of the control.',),
('Control','X','N',0,32767,None, None, None, None, 'Horizontal coordinate of the upper left corner of the bounding rectangle of the control.',),
('Control','Control','N',None, None, None, None, 'Identifier',None, 'Name of the control. This name must be unique within a dialog, but can repeat on different dialogs. ',),
('Control','Control_Next','Y',None, None, 'Control',2,'Identifier',None, 'The name of an other control on the same dialog. This link defines the tab order of the controls. The links have to form one or more cycles!',),
('Control','Dialog_','N',None, None, 'Dialog',1,'Identifier',None, 'External key to the Dialog table, name of the dialog.',),
('Control','Help','Y',None, None, None, None, 'Text',None, 'The help strings used with the button. The text is optional. ',),
('Dialog','Attributes','Y',0,2147483647,None, None, None, None, 'A 32-bit word that specifies the attribute flags to be applied to this dialog.',),
('Dialog','Height','N',0,32767,None, None, None, None, 'Height of the bounding rectangle of the dialog.',),
('Dialog','Width','N',0,32767,None, None, None, None, 'Width of the bounding rectangle of the dialog.',),
('Dialog','Dialog','N',None, None, None, None, 'Identifier',None, 'Name of the dialog.',),
('Dialog','Control_Cancel','Y',None, None, 'Control',2,'Identifier',None, 'Defines the cancel control. Hitting escape or clicking on the close icon on the dialog is equivalent to pushing this button.',),
('Dialog','Control_Default','Y',None, None, 'Control',2,'Identifier',None, 'Defines the default control. Hitting return is equivalent to pushing this button.',),
('Dialog','Control_First','N',None, None, 'Control',2,'Identifier',None, 'Defines the control that has the focus when the dialog is created.',),
('Dialog','HCentering','N',0,100,None, None, None, None, 'Horizontal position of the dialog on a 0-100 scale. 0 means left end, 100 means right end of the screen, 50 center.',),
('Dialog','Title','Y',None, None, None, None, 'Formatted',None, "A text string specifying the title to be displayed in the title bar of the dialog's window.",),
('Dialog','VCentering','N',0,100,None, None, None, None, 'Vertical position of the dialog on a 0-100 scale. 0 means top end, 100 means bottom end of the screen, 50 center.',),
('ControlCondition','Action','N',None, None, None, None, None, 'Default;Disable;Enable;Hide;Show','The desired action to be taken on the specified control.',),
('ControlCondition','Condition','N',None, None, None, None, 'Condition',None, 'A standard conditional statement that specifies under which conditions the action should be triggered.',),
('ControlCondition','Dialog_','N',None, None, 'Dialog',1,'Identifier',None, 'A foreign key to the Dialog table, name of the dialog.',),
('ControlCondition','Control_','N',None, None, 'Control',2,'Identifier',None, 'A foreign key to the Control table, name of the control.',),
('ControlEvent','Condition','Y',None, None, None, None, 'Condition',None, 'A standard conditional statement that specifies under which conditions an event should be triggered.',),
('ControlEvent','Ordering','Y',0,2147483647,None, None, None, None, 'An integer used to order several events tied to the same control. Can be left blank.',),
('ControlEvent','Argument','N',None, None, None, None, 'Formatted',None, 'A value to be used as a modifier when triggering a particular event.',),
('ControlEvent','Dialog_','N',None, None, 'Dialog',1,'Identifier',None, 'A foreign key to the Dialog table, name of the dialog.',),
('ControlEvent','Control_','N',None, None, 'Control',2,'Identifier',None, 'A foreign key to the Control table, name of the control',),
('ControlEvent','Event','N',None, None, None, None, 'Formatted',None, 'An identifier that specifies the type of the event that should take place when the user interacts with control specified by the first two entries.',),
('CreateFolder','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into the Component table.',),
('CreateFolder','Directory_','N',None, None, 'Directory',1,'Identifier',None, 'Primary key, could be foreign key into the Directory table.',),
('CustomAction','Type','N',1,16383,None, None, None, None, 'The numeric custom action type, consisting of source location, code type, entry, option flags.',),
('CustomAction','Action','N',None, None, None, None, 'Identifier',None, 'Primary key, name of action, normally appears in sequence table unless private use.',),
('CustomAction','Source','Y',None, None, None, None, 'CustomSource',None, 'The table reference of the source of the code.',),
('CustomAction','Target','Y',None, None, None, None, 'Formatted',None, 'Excecution parameter, depends on the type of custom action',),
('DrLocator','Signature_','N',None, None, None, None, 'Identifier',None, 'The Signature_ represents a unique file signature and is also the foreign key in the Signature table.',),
('DrLocator','Path','Y',None, None, None, None, 'AnyPath',None, 'The path on the user system. This is a either a subpath below the value of the Parent or a full path. The path may contain properties enclosed within [ ] that will be expanded.',),
('DrLocator','Depth','Y',0,32767,None, None, None, None, 'The depth below the path to which the Signature_ is recursively searched. If absent, the depth is assumed to be 0.',),
('DrLocator','Parent','Y',None, None, None, None, 'Identifier',None, 'The parent file signature. It is also a foreign key in the Signature table. If null and the Path column does not expand to a full path, then all the fixed drives of the user system are searched using the Path.',),
('DuplicateFile','File_','N',None, None, 'File',1,'Identifier',None, 'Foreign key referencing the source file to be duplicated.',),
('DuplicateFile','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key referencing Component that controls the duplicate file.',),
('DuplicateFile','DestFolder','Y',None, None, None, None, 'Identifier',None, 'Name of a property whose value is assumed to resolve to the full pathname to a destination folder.',),
('DuplicateFile','DestName','Y',None, None, None, None, 'Filename',None, 'Filename to be given to the duplicate file.',),
('DuplicateFile','FileKey','N',None, None, None, None, 'Identifier',None, 'Primary key used to identify a particular file entry',),
('Environment','Name','N',None, None, None, None, 'Text',None, 'The name of the environmental value.',),
('Environment','Value','Y',None, None, None, None, 'Formatted',None, 'The value to set in the environmental settings.',),
('Environment','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into the Component table referencing component that controls the installing of the environmental value.',),
('Environment','Environment','N',None, None, None, None, 'Identifier',None, 'Unique identifier for the environmental variable setting',),
('Error','Error','N',0,32767,None, None, None, None, 'Integer error number, obtained from header file IError(...) macros.',),
('Error','Message','Y',None, None, None, None, 'Template',None, 'Error formatting template, obtained from user ed. or localizers.',),
('EventMapping','Dialog_','N',None, None, 'Dialog',1,'Identifier',None, 'A foreign key to the Dialog table, name of the Dialog.',),
('EventMapping','Control_','N',None, None, 'Control',2,'Identifier',None, 'A foreign key to the Control table, name of the control.',),
('EventMapping','Event','N',None, None, None, None, 'Identifier',None, 'An identifier that specifies the type of the event that the control subscribes to.',),
('EventMapping','Attribute','N',None, None, None, None, 'Identifier',None, 'The name of the control attribute, that is set when this event is received.',),
('Extension','Feature_','N',None, None, 'Feature',1,'Identifier',None, 'Required foreign key into the Feature Table, specifying the feature to validate or install in order for the CLSID factory to be operational.',),
('Extension','Component_','N',None, None, 'Component',1,'Identifier',None, 'Required foreign key into the Component Table, specifying the component for which to return a path when called through LocateComponent.',),
('Extension','Extension','N',None, None, None, None, 'Text',None, 'The extension associated with the table row.',),
('Extension','MIME_','Y',None, None, 'MIME',1,'Text',None, 'Optional Context identifier, typically "type/format" associated with the extension',),
('Extension','ProgId_','Y',None, None, 'ProgId',1,'Text',None, 'Optional ProgId associated with this extension.',),
('MIME','CLSID','Y',None, None, None, None, 'Guid',None, 'Optional associated CLSID.',),
('MIME','ContentType','N',None, None, None, None, 'Text',None, 'Primary key. Context identifier, typically "type/format".',),
('MIME','Extension_','N',None, None, 'Extension',1,'Text',None, 'Optional associated extension (without dot)',),
('FeatureComponents','Feature_','N',None, None, 'Feature',1,'Identifier',None, 'Foreign key into Feature table.',),
('FeatureComponents','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into Component table.',),
('FileSFPCatalog','File_','N',None, None, 'File',1,'Identifier',None, 'File associated with the catalog',),
('FileSFPCatalog','SFPCatalog_','N',None, None, 'SFPCatalog',1,'Filename',None, 'Catalog associated with the file',),
('SFPCatalog','SFPCatalog','N',None, None, None, None, 'Filename',None, 'File name for the catalog.',),
('SFPCatalog','Catalog','N',None, None, None, None, 'Binary',None, 'SFP Catalog',),
('SFPCatalog','Dependency','Y',None, None, None, None, 'Formatted',None, 'Parent catalog - only used by SFP',),
('Font','File_','N',None, None, 'File',1,'Identifier',None, 'Primary key, foreign key into File table referencing font file.',),
('Font','FontTitle','Y',None, None, None, None, 'Text',None, 'Font name.',),
('IniFile','Action','N',None, None, None, None, None, '0;1;3','The type of modification to be made, one of iifEnum',),
('IniFile','Value','N',None, None, None, None, 'Formatted',None, 'The value to be written.',),
('IniFile','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into the Component table referencing component that controls the installing of the .INI value.',),
('IniFile','FileName','N',None, None, None, None, 'Filename',None, 'The .INI file name in which to write the information',),
('IniFile','IniFile','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized token.',),
('IniFile','DirProperty','Y',None, None, None, None, 'Identifier',None, 'Foreign key into the Directory table denoting the directory where the .INI file is.',),
('IniFile','Key','N',None, None, None, None, 'Formatted',None, 'The .INI file key below Section.',),
('IniFile','Section','N',None, None, None, None, 'Formatted',None, 'The .INI file Section.',),
('IniLocator','Type','Y',0,2,None, None, None, None, 'An integer value that determines if the .INI value read is a filename or a directory location or to be used as is w/o interpretation.',),
('IniLocator','Signature_','N',None, None, None, None, 'Identifier',None, 'The table key. The Signature_ represents a unique file signature and is also the foreign key in the Signature table.',),
('IniLocator','FileName','N',None, None, None, None, 'Filename',None, 'The .INI file name.',),
('IniLocator','Key','N',None, None, None, None, 'Text',None, 'Key value (followed by an equals sign in INI file).',),
('IniLocator','Section','N',None, None, None, None, 'Text',None, 'Section name within the file (within square brackets in INI file).',),
('IniLocator','Field','Y',0,32767,None, None, None, None, 'The field in the .INI line. If Field is null or 0 the entire line is read.',),
('InstallExecuteSequence','Action','N',None, None, None, None, 'Identifier',None, 'Name of action to invoke, either in the engine or the handler DLL.',),
('InstallExecuteSequence','Condition','Y',None, None, None, None, 'Condition',None, 'Optional expression which skips the action if it evaluates to expFalse. If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
('InstallExecuteSequence','Sequence','Y',-4,32767,None, None, None, None, 'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
('InstallUISequence','Action','N',None, None, None, None, 'Identifier',None, 'Name of action to invoke, either in the engine or the handler DLL.',),
('InstallUISequence','Condition','Y',None, None, None, None, 'Condition',None, 'Optional expression which skips the action if it evaluates to expFalse. If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
('InstallUISequence','Sequence','Y',-4,32767,None, None, None, None, 'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
('IsolatedComponent','Component_Application','N',None, None, 'Component',1,'Identifier',None, 'Key to Component table item for application',),
('IsolatedComponent','Component_Shared','N',None, None, 'Component',1,'Identifier',None, 'Key to Component table item to be isolated',),
('LaunchCondition','Description','N',None, None, None, None, 'Formatted',None, 'Localizable text to display when condition fails and install must abort.',),
('LaunchCondition','Condition','N',None, None, None, None, 'Condition',None, 'Expression which must evaluate to TRUE in order for install to commence.',),
('ListBox','Text','Y',None, None, None, None, 'Text',None, 'The visible text to be assigned to the item. Optional. If this entry or the entire column is missing, the text is the same as the value.',),
('ListBox','Property','N',None, None, None, None, 'Identifier',None, 'A named property to be tied to this item. All the items tied to the same property become part of the same listbox.',),
('ListBox','Value','N',None, None, None, None, 'Formatted',None, 'The value string associated with this item. Selecting the line will set the associated property to this value.',),
('ListBox','Order','N',1,32767,None, None, None, None, 'A positive integer used to determine the ordering of the items within one list. The integers do not have to be consecutive.',),
('ListView','Text','Y',None, None, None, None, 'Text',None, 'The visible text to be assigned to the item. Optional. If this entry or the entire column is missing, the text is the same as the value.',),
('ListView','Property','N',None, None, None, None, 'Identifier',None, 'A named property to be tied to this item. All the items tied to the same property become part of the same listview.',),
('ListView','Value','N',None, None, None, None, 'Identifier',None, 'The value string associated with this item. Selecting the line will set the associated property to this value.',),
('ListView','Order','N',1,32767,None, None, None, None, 'A positive integer used to determine the ordering of the items within one list. The integers do not have to be consecutive.',),
('ListView','Binary_','Y',None, None, 'Binary',1,'Identifier',None, 'The name of the icon to be displayed with the item. The binary information is looked up from the Binary Table.',),
('LockPermissions','Table','N',None, None, None, None, 'Identifier','Directory;File;Registry','Reference to another table name',),
('LockPermissions','Domain','Y',None, None, None, None, 'Formatted',None, 'Domain name for user whose permissions are being set. (usually a property)',),
('LockPermissions','LockObject','N',None, None, None, None, 'Identifier',None, 'Foreign key into Registry or File table',),
('LockPermissions','Permission','Y',-2147483647,2147483647,None, None, None, None, 'Permission Access mask. Full Control = 268435456 (GENERIC_ALL = 0x10000000)',),
('LockPermissions','User','N',None, None, None, None, 'Formatted',None, 'User for permissions to be set. (usually a property)',),
('Media','Source','Y',None, None, None, None, 'Property',None, 'The property defining the location of the cabinet file.',),
('Media','Cabinet','Y',None, None, None, None, 'Cabinet',None, 'If some or all of the files stored on the media are compressed in a cabinet, the name of that cabinet.',),
('Media','DiskId','N',1,32767,None, None, None, None, 'Primary key, integer to determine sort order for table.',),
('Media','DiskPrompt','Y',None, None, None, None, 'Text',None, 'Disk name: the visible text actually printed on the disk. This will be used to prompt the user when this disk needs to be inserted.',),
('Media','LastSequence','N',0,32767,None, None, None, None, 'File sequence number for the last file for this media.',),
('Media','VolumeLabel','Y',None, None, None, None, 'Text',None, 'The label attributed to the volume.',),
('ModuleComponents','Component','N',None, None, 'Component',1,'Identifier',None, 'Component contained in the module.',),
('ModuleComponents','Language','N',None, None, 'ModuleSignature',2,None, None, 'Default language ID for module (may be changed by transform).',),
('ModuleComponents','ModuleID','N',None, None, 'ModuleSignature',1,'Identifier',None, 'Module containing the component.',),
('ModuleSignature','Language','N',None, None, None, None, None, None, 'Default decimal language of module.',),
('ModuleSignature','Version','N',None, None, None, None, 'Version',None, 'Version of the module.',),
('ModuleSignature','ModuleID','N',None, None, None, None, 'Identifier',None, 'Module identifier (String.GUID).',),
('ModuleDependency','ModuleID','N',None, None, 'ModuleSignature',1,'Identifier',None, 'Module requiring the dependency.',),
('ModuleDependency','ModuleLanguage','N',None, None, 'ModuleSignature',2,None, None, 'Language of module requiring the dependency.',),
('ModuleDependency','RequiredID','N',None, None, None, None, None, None, 'String.GUID of required module.',),
('ModuleDependency','RequiredLanguage','N',None, None, None, None, None, None, 'LanguageID of the required module.',),
('ModuleDependency','RequiredVersion','Y',None, None, None, None, 'Version',None, 'Version of the required module.',),
('ModuleExclusion','ModuleID','N',None, None, 'ModuleSignature',1,'Identifier',None, 'String.GUID of module with exclusion requirement.',),
('ModuleExclusion','ModuleLanguage','N',None, None, 'ModuleSignature',2,None, None, 'LanguageID of module with exclusion requirement.',),
('ModuleExclusion','ExcludedID','N',None, None, None, None, None, None, 'String.GUID of excluded module.',),
('ModuleExclusion','ExcludedLanguage','N',None, None, None, None, None, None, 'Language of excluded module.',),
('ModuleExclusion','ExcludedMaxVersion','Y',None, None, None, None, 'Version',None, 'Maximum version of excluded module.',),
('ModuleExclusion','ExcludedMinVersion','Y',None, None, None, None, 'Version',None, 'Minimum version of excluded module.',),
('MoveFile','Component_','N',None, None, 'Component',1,'Identifier',None, 'If this component is not "selected" for installation or removal, no action will be taken on the associated MoveFile entry',),
('MoveFile','DestFolder','N',None, None, None, None, 'Identifier',None, 'Name of a property whose value is assumed to resolve to the full path to the destination directory',),
('MoveFile','DestName','Y',None, None, None, None, 'Filename',None, 'Name to be given to the original file after it is moved or copied. If blank, the destination file will be given the same name as the source file',),
('MoveFile','FileKey','N',None, None, None, None, 'Identifier',None, 'Primary key that uniquely identifies a particular MoveFile record',),
('MoveFile','Options','N',0,1,None, None, None, None, 'Integer value specifying the MoveFile operating mode, one of imfoEnum',),
('MoveFile','SourceFolder','Y',None, None, None, None, 'Identifier',None, 'Name of a property whose value is assumed to resolve to the full path to the source directory',),
('MoveFile','SourceName','Y',None, None, None, None, 'Text',None, "Name of the source file(s) to be moved or copied. Can contain the '*' or '?' wildcards.",),
('MsiAssembly','Attributes','Y',None, None, None, None, None, None, 'Assembly attributes',),
('MsiAssembly','Feature_','N',None, None, 'Feature',1,'Identifier',None, 'Foreign key into Feature table.',),
('MsiAssembly','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into Component table.',),
('MsiAssembly','File_Application','Y',None, None, 'File',1,'Identifier',None, 'Foreign key into File table, denoting the application context for private assemblies. Null for global assemblies.',),
('MsiAssembly','File_Manifest','Y',None, None, 'File',1,'Identifier',None, 'Foreign key into the File table denoting the manifest file for the assembly.',),
('MsiAssemblyName','Name','N',None, None, None, None, 'Text',None, 'The name part of the name-value pairs for the assembly name.',),
('MsiAssemblyName','Value','N',None, None, None, None, 'Text',None, 'The value part of the name-value pairs for the assembly name.',),
('MsiAssemblyName','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into Component table.',),
('MsiDigitalCertificate','CertData','N',None, None, None, None, 'Binary',None, 'A certificate context blob for a signer certificate',),
('MsiDigitalCertificate','DigitalCertificate','N',None, None, None, None, 'Identifier',None, 'A unique identifier for the row',),
('MsiDigitalSignature','Table','N',None, None, None, None, None, 'Media','Reference to another table name (only Media table is supported)',),
('MsiDigitalSignature','DigitalCertificate_','N',None, None, 'MsiDigitalCertificate',1,'Identifier',None, 'Foreign key to MsiDigitalCertificate table identifying the signer certificate',),
('MsiDigitalSignature','Hash','Y',None, None, None, None, 'Binary',None, 'The encoded hash blob from the digital signature',),
('MsiDigitalSignature','SignObject','N',None, None, None, None, 'Text',None, 'Foreign key to Media table',),
('MsiFileHash','File_','N',None, None, 'File',1,'Identifier',None, 'Primary key, foreign key into File table referencing file with this hash',),
('MsiFileHash','Options','N',0,32767,None, None, None, None, 'Various options and attributes for this hash.',),
('MsiFileHash','HashPart1','N',None, None, None, None, None, None, 'Part 1 of the 128-bit file hash (integer).',),
('MsiFileHash','HashPart2','N',None, None, None, None, None, None, 'Part 2 of the 128-bit file hash (integer).',),
('MsiFileHash','HashPart3','N',None, None, None, None, None, None, 'Part 3 of the 128-bit file hash (integer).',),
('MsiFileHash','HashPart4','N',None, None, None, None, None, None, 'Part 4 of the 128-bit file hash (integer).',),
('MsiPatchHeaders','StreamRef','N',None, None, None, None, 'Identifier',None, 'Primary key. A unique identifier for the row.',),
('MsiPatchHeaders','Header','N',None, None, None, None, 'Binary',None, 'Binary stream. The patch header, used for patch validation.',),
('ODBCAttribute','Value','Y',None, None, None, None, 'Text',None, 'Value for ODBC driver attribute',),
('ODBCAttribute','Attribute','N',None, None, None, None, 'Text',None, 'Name of ODBC driver attribute',),
('ODBCAttribute','Driver_','N',None, None, 'ODBCDriver',1,'Identifier',None, 'Reference to ODBC driver in ODBCDriver table',),
('ODBCDriver','Description','N',None, None, None, None, 'Text',None, 'Text used as registered name for driver, non-localized',),
('ODBCDriver','File_','N',None, None, 'File',1,'Identifier',None, 'Reference to key driver file',),
('ODBCDriver','Component_','N',None, None, 'Component',1,'Identifier',None, 'Reference to associated component',),
('ODBCDriver','Driver','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized internal token for driver.',),
('ODBCDriver','File_Setup','Y',None, None, 'File',1,'Identifier',None, 'Optional reference to key driver setup DLL',),
('ODBCDataSource','Description','N',None, None, None, None, 'Text',None, 'Text used as registered name for data source',),
('ODBCDataSource','Component_','N',None, None, 'Component',1,'Identifier',None, 'Reference to associated component',),
('ODBCDataSource','DataSource','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized internal token for data source.',),
('ODBCDataSource','DriverDescription','N',None, None, None, None, 'Text',None, 'Reference to driver description, may be existing driver',),
('ODBCDataSource','Registration','N',0,1,None, None, None, None, 'Registration option: 0=machine, 1=user, others t.b.d.',),
('ODBCSourceAttribute','Value','Y',None, None, None, None, 'Text',None, 'Value for ODBC data source attribute',),
('ODBCSourceAttribute','Attribute','N',None, None, None, None, 'Text',None, 'Name of ODBC data source attribute',),
('ODBCSourceAttribute','DataSource_','N',None, None, 'ODBCDataSource',1,'Identifier',None, 'Reference to ODBC data source in ODBCDataSource table',),
('ODBCTranslator','Description','N',None, None, None, None, 'Text',None, 'Text used as registered name for translator',),
('ODBCTranslator','File_','N',None, None, 'File',1,'Identifier',None, 'Reference to key translator file',),
('ODBCTranslator','Component_','N',None, None, 'Component',1,'Identifier',None, 'Reference to associated component',),
('ODBCTranslator','File_Setup','Y',None, None, 'File',1,'Identifier',None, 'Optional reference to key translator setup DLL',),
('ODBCTranslator','Translator','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized internal token for translator.',),
('Patch','Sequence','N',0,32767,None, None, None, None, 'Primary key, sequence with respect to the media images; order must track cabinet order.',),
('Patch','Attributes','N',0,32767,None, None, None, None, 'Integer containing bit flags representing patch attributes',),
('Patch','File_','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized token, foreign key to File table, must match identifier in cabinet.',),
('Patch','Header','Y',None, None, None, None, 'Binary',None, 'Binary stream. The patch header, used for patch validation.',),
('Patch','PatchSize','N',0,2147483647,None, None, None, None, 'Size of patch in bytes (integer).',),
('Patch','StreamRef_','Y',None, None, None, None, 'Identifier',None, 'Identifier. Foreign key to the StreamRef column of the MsiPatchHeaders table.',),
('PatchPackage','Media_','N',0,32767,None, None, None, None, 'Foreign key to DiskId column of Media table. Indicates the disk containing the patch package.',),
('PatchPackage','PatchId','N',None, None, None, None, 'Guid',None, 'A unique string GUID representing this patch.',),
('PublishComponent','Feature_','N',None, None, 'Feature',1,'Identifier',None, 'Foreign key into the Feature table.',),
('PublishComponent','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into the Component table.',),
('PublishComponent','ComponentId','N',None, None, None, None, 'Guid',None, 'A string GUID that represents the component id that will be requested by the alien product.',),
('PublishComponent','AppData','Y',None, None, None, None, 'Text',None, 'This is localisable Application specific data that can be associated with a Qualified Component.',),
('PublishComponent','Qualifier','N',None, None, None, None, 'Text',None, 'This is defined only when the ComponentId column is a Qualified Component Id. This is the Qualifier for ProvideComponentIndirect.',),
('RadioButton','Y','N',0,32767,None, None, None, None, 'The vertical coordinate of the upper left corner of the bounding rectangle of the radio button.',),
('RadioButton','Text','Y',None, None, None, None, 'Text',None, 'The visible title to be assigned to the radio button.',),
('RadioButton','Property','N',None, None, None, None, 'Identifier',None, 'A named property to be tied to this radio button. All the buttons tied to the same property become part of the same group.',),
('RadioButton','Height','N',0,32767,None, None, None, None, 'The height of the button.',),
('RadioButton','Width','N',0,32767,None, None, None, None, 'The width of the button.',),
('RadioButton','X','N',0,32767,None, None, None, None, 'The horizontal coordinate of the upper left corner of the bounding rectangle of the radio button.',),
('RadioButton','Value','N',None, None, None, None, 'Formatted',None, 'The value string associated with this button. Selecting the button will set the associated property to this value.',),
('RadioButton','Order','N',1,32767,None, None, None, None, 'A positive integer used to determine the ordering of the items within one list..The integers do not have to be consecutive.',),
('RadioButton','Help','Y',None, None, None, None, 'Text',None, 'The help strings used with the button. The text is optional.',),
('Registry','Name','Y',None, None, None, None, 'Formatted',None, 'The registry value name.',),
('Registry','Value','Y',None, None, None, None, 'Formatted',None, 'The registry value.',),
('Registry','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into the Component table referencing component that controls the installing of the registry value.',),
('Registry','Key','N',None, None, None, None, 'RegPath',None, 'The key for the registry value.',),
('Registry','Registry','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized token.',),
('Registry','Root','N',-1,3,None, None, None, None, 'The predefined root key for the registry value, one of rrkEnum.',),
('RegLocator','Name','Y',None, None, None, None, 'Formatted',None, 'The registry value name.',),
('RegLocator','Type','Y',0,18,None, None, None, None, 'An integer value that determines if the registry value is a filename or a directory location or to be used as is w/o interpretation.',),
('RegLocator','Signature_','N',None, None, None, None, 'Identifier',None, 'The table key. The Signature_ represents a unique file signature and is also the foreign key in the Signature table. If the type is 0, the registry value refers to a directory, and Signature_ is not a foreign key.',),
('RegLocator','Key','N',None, None, None, None, 'RegPath',None, 'The key for the registry value.',),
('RegLocator','Root','N',0,3,None, None, None, None, 'The predefined root key for the registry value, one of rrkEnum.',),
('RemoveFile','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key referencing Component that controls the file to be removed.',),
('RemoveFile','FileKey','N',None, None, None, None, 'Identifier',None, 'Primary key used to identify a particular file entry',),
('RemoveFile','FileName','Y',None, None, None, None, 'WildCardFilename',None, 'Name of the file to be removed.',),
('RemoveFile','DirProperty','N',None, None, None, None, 'Identifier',None, 'Name of a property whose value is assumed to resolve to the full pathname to the folder of the file to be removed.',),
('RemoveFile','InstallMode','N',None, None, None, None, None, '1;2;3','Installation option, one of iimEnum.',),
('RemoveIniFile','Action','N',None, None, None, None, None, '2;4','The type of modification to be made, one of iifEnum.',),
('RemoveIniFile','Value','Y',None, None, None, None, 'Formatted',None, 'The value to be deleted. The value is required when Action is iifIniRemoveTag',),
('RemoveIniFile','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into the Component table referencing component that controls the deletion of the .INI value.',),
('RemoveIniFile','FileName','N',None, None, None, None, 'Filename',None, 'The .INI file name in which to delete the information',),
('RemoveIniFile','DirProperty','Y',None, None, None, None, 'Identifier',None, 'Foreign key into the Directory table denoting the directory where the .INI file is.',),
('RemoveIniFile','Key','N',None, None, None, None, 'Formatted',None, 'The .INI file key below Section.',),
('RemoveIniFile','Section','N',None, None, None, None, 'Formatted',None, 'The .INI file Section.',),
('RemoveIniFile','RemoveIniFile','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized token.',),
('RemoveRegistry','Name','Y',None, None, None, None, 'Formatted',None, 'The registry value name.',),
('RemoveRegistry','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into the Component table referencing component that controls the deletion of the registry value.',),
('RemoveRegistry','Key','N',None, None, None, None, 'RegPath',None, 'The key for the registry value.',),
('RemoveRegistry','Root','N',-1,3,None, None, None, None, 'The predefined root key for the registry value, one of rrkEnum',),
('RemoveRegistry','RemoveRegistry','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized token.',),
('ReserveCost','Component_','N',None, None, 'Component',1,'Identifier',None, 'Reserve a specified amount of space if this component is to be installed.',),
('ReserveCost','ReserveFolder','Y',None, None, None, None, 'Identifier',None, 'Name of a property whose value is assumed to resolve to the full path to the destination directory',),
('ReserveCost','ReserveKey','N',None, None, None, None, 'Identifier',None, 'Primary key that uniquely identifies a particular ReserveCost record',),
('ReserveCost','ReserveLocal','N',0,2147483647,None, None, None, None, 'Disk space to reserve if linked component is installed locally.',),
('ReserveCost','ReserveSource','N',0,2147483647,None, None, None, None, 'Disk space to reserve if linked component is installed to run from the source location.',),
('SelfReg','File_','N',None, None, 'File',1,'Identifier',None, 'Foreign key into the File table denoting the module that needs to be registered.',),
('SelfReg','Cost','Y',0,32767,None, None, None, None, 'The cost of registering the module.',),
('ServiceControl','Name','N',None, None, None, None, 'Formatted',None, 'Name of a service. /, \\, comma and space are invalid',),
('ServiceControl','Component_','N',None, None, 'Component',1,'Identifier',None, 'Required foreign key into the Component Table that controls the startup of the service',),
('ServiceControl','Event','N',0,187,None, None, None, None, 'Bit field: Install: 0x1 = Start, 0x2 = Stop, 0x8 = Delete, Uninstall: 0x10 = Start, 0x20 = Stop, 0x80 = Delete',),
('ServiceControl','ServiceControl','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized token.',),
('ServiceControl','Arguments','Y',None, None, None, None, 'Formatted',None, 'Arguments for the service. Separate by [~].',),
('ServiceControl','Wait','Y',0,1,None, None, None, None, 'Boolean for whether to wait for the service to fully start',),
('ServiceInstall','Name','N',None, None, None, None, 'Formatted',None, 'Internal Name of the Service',),
('ServiceInstall','Description','Y',None, None, None, None, 'Text',None, 'Description of service.',),
('ServiceInstall','Component_','N',None, None, 'Component',1,'Identifier',None, 'Required foreign key into the Component Table that controls the startup of the service',),
('ServiceInstall','Arguments','Y',None, None, None, None, 'Formatted',None, 'Arguments to include in every start of the service, passed to WinMain',),
('ServiceInstall','ServiceInstall','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized token.',),
('ServiceInstall','Dependencies','Y',None, None, None, None, 'Formatted',None, 'Other services this depends on to start. Separate by [~], and end with [~][~]',),
('ServiceInstall','DisplayName','Y',None, None, None, None, 'Formatted',None, 'External Name of the Service',),
('ServiceInstall','ErrorControl','N',-2147483647,2147483647,None, None, None, None, 'Severity of error if service fails to start',),
('ServiceInstall','LoadOrderGroup','Y',None, None, None, None, 'Formatted',None, 'LoadOrderGroup',),
('ServiceInstall','Password','Y',None, None, None, None, 'Formatted',None, 'password to run service with. (with StartName)',),
('ServiceInstall','ServiceType','N',-2147483647,2147483647,None, None, None, None, 'Type of the service',),
('ServiceInstall','StartName','Y',None, None, None, None, 'Formatted',None, 'User or object name to run service as',),
('ServiceInstall','StartType','N',0,4,None, None, None, None, 'Type of the service',),
('Shortcut','Name','N',None, None, None, None, 'Filename',None, 'The name of the shortcut to be created.',),
('Shortcut','Description','Y',None, None, None, None, 'Text',None, 'The description for the shortcut.',),
('Shortcut','Component_','N',None, None, 'Component',1,'Identifier',None, 'Foreign key into the Component table denoting the component whose selection gates the shortcut creation/deletion.',),
('Shortcut','Icon_','Y',None, None, 'Icon',1,'Identifier',None, 'Foreign key into the File table denoting the external icon file for the shortcut.',),
('Shortcut','IconIndex','Y',-32767,32767,None, None, None, None, 'The icon index for the shortcut.',),
('Shortcut','Directory_','N',None, None, 'Directory',1,'Identifier',None, 'Foreign key into the Directory table denoting the directory where the shortcut file is created.',),
('Shortcut','Target','N',None, None, None, None, 'Shortcut',None, 'The shortcut target. This is usually a property that is expanded to a file or a folder that the shortcut points to.',),
('Shortcut','Arguments','Y',None, None, None, None, 'Formatted',None, 'The command-line arguments for the shortcut.',),
('Shortcut','Shortcut','N',None, None, None, None, 'Identifier',None, 'Primary key, non-localized token.',),
('Shortcut','Hotkey','Y',0,32767,None, None, None, None, 'The hotkey for the shortcut. It has the virtual-key code for the key in the low-order byte, and the modifier flags in the high-order byte. ',),
('Shortcut','ShowCmd','Y',None, None, None, None, None, '1;3;7','The show command for the application window. The following values may be used.',),
('Shortcut','WkDir','Y',None, None, None, None, 'Identifier',None, 'Name of property defining location of working directory.',),
('Signature','FileName','N',None, None, None, None, 'Filename',None, 'The name of the file. This may contain a "short name|long name" pair.',),
('Signature','Signature','N',None, None, None, None, 'Identifier',None, 'The table key. The Signature represents a unique file signature.',),
('Signature','Languages','Y',None, None, None, None, 'Language',None, 'The languages supported by the file.',),
('Signature','MaxDate','Y',0,2147483647,None, None, None, None, 'The maximum creation date of the file.',),
('Signature','MaxSize','Y',0,2147483647,None, None, None, None, 'The maximum size of the file. ',),
('Signature','MaxVersion','Y',None, None, None, None, 'Text',None, 'The maximum version of the file.',),
('Signature','MinDate','Y',0,2147483647,None, None, None, None, 'The minimum creation date of the file.',),
('Signature','MinSize','Y',0,2147483647,None, None, None, None, 'The minimum size of the file.',),
('Signature','MinVersion','Y',None, None, None, None, 'Text',None, 'The minimum version of the file.',),
('TextStyle','TextStyle','N',None, None, None, None, 'Identifier',None, 'Name of the style. The primary key of this table. This name is embedded in the texts to indicate a style change.',),
('TextStyle','Color','Y',0,16777215,None, None, None, None, 'An integer indicating the color of the string in the RGB format (Red, Green, Blue each 0-255, RGB = R + 256*G + 256^2*B).',),
('TextStyle','FaceName','N',None, None, None, None, 'Text',None, 'A string indicating the name of the font used. Required. The string must be at most 31 characters long.',),
('TextStyle','Size','N',0,32767,None, None, None, None, 'The size of the font used. This size is given in our units (1/12 of the system font height). Assuming that the system font is set to 12 point size, this is equivalent to the point size.',),
('TextStyle','StyleBits','Y',0,15,None, None, None, None, 'A combination of style bits.',),
('TypeLib','Description','Y',None, None, None, None, 'Text',None, None, ),
('TypeLib','Feature_','N',None, None, 'Feature',1,'Identifier',None, 'Required foreign key into the Feature Table, specifying the feature to validate or install in order for the type library to be operational.',),
('TypeLib','Component_','N',None, None, 'Component',1,'Identifier',None, 'Required foreign key into the Component Table, specifying the component for which to return a path when called through LocateComponent.',),
('TypeLib','Directory_','Y',None, None, 'Directory',1,'Identifier',None, 'Optional. The foreign key into the Directory table denoting the path to the help file for the type library.',),
('TypeLib','Language','N',0,32767,None, None, None, None, 'The language of the library.',),
('TypeLib','Version','Y',0,16777215,None, None, None, None, 'The version of the library. The minor version is in the lower 8 bits of the integer. The major version is in the next 16 bits. ',),
('TypeLib','Cost','Y',0,2147483647,None, None, None, None, 'The cost associated with the registration of the typelib. This column is currently optional.',),
('TypeLib','LibID','N',None, None, None, None, 'Guid',None, 'The GUID that represents the library.',),
('UIText','Text','Y',None, None, None, None, 'Text',None, 'The localized version of the string.',),
('UIText','Key','N',None, None, None, None, 'Identifier',None, 'A unique key that identifies the particular string.',),
('Upgrade','Attributes','N',0,2147483647,None, None, None, None, 'The attributes of this product set.',),
('Upgrade','Language','Y',None, None, None, None, 'Language',None, 'A comma-separated list of languages for either products in this set or products not in this set.',),
('Upgrade','ActionProperty','N',None, None, None, None, 'UpperCase',None, 'The property to set when a product in this set is found.',),
('Upgrade','Remove','Y',None, None, None, None, 'Formatted',None, 'The list of features to remove when uninstalling a product from this set. The default is "ALL".',),
('Upgrade','UpgradeCode','N',None, None, None, None, 'Guid',None, 'The UpgradeCode GUID belonging to the products in this set.',),
('Upgrade','VersionMax','Y',None, None, None, None, 'Text',None, 'The maximum ProductVersion of the products in this set. The set may or may not include products with this particular version.',),
('Upgrade','VersionMin','Y',None, None, None, None, 'Text',None, 'The minimum ProductVersion of the products in this set. The set may or may not include products with this particular version.',),
('Verb','Sequence','Y',0,32767,None, None, None, None, 'Order within the verbs for a particular extension. Also used simply to specify the default verb.',),
('Verb','Argument','Y',None, None, None, None, 'Formatted',None, 'Optional value for the command arguments.',),
('Verb','Extension_','N',None, None, 'Extension',1,'Text',None, 'The extension associated with the table row.',),
('Verb','Verb','N',None, None, None, None, 'Text',None, 'The verb for the command.',),
('Verb','Command','Y',None, None, None, None, 'Formatted',None, 'The command text.',),
]
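# The records above appear to mirror rows of the MSI _Validation table; the
# tuple layout (an assumption inferred from the field order, not stated in
# this excerpt) would be:
#   (Table, Column, Nullable, MinValue, MaxValue, KeyTable, KeyColumn,
#    Category, Set, Description)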
|
GeekTrainer/Flask
|
refs/heads/master
|
Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/commands/search.py
|
344
|
import sys
import textwrap
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor import pkg_resources
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
"""Search for PyPI packages whose name or summary contains <query>."""
name = 'search'
usage = """
%prog [options] <query>"""
summary = 'Search PyPI for packages.'
def __init__(self, *args, **kw):
super(SearchCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'--index',
dest='index',
metavar='URL',
default='https://pypi.python.org/pypi',
help='Base URL of Python Package Index (default %default)')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
raise CommandError('Missing required argument (search query).')
query = args
index_url = options.index
pypi_hits = self.search(query, index_url)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
if pypi_hits:
return SUCCESS
return NO_MATCHES_FOUND
def search(self, query, index_url):
pypi = xmlrpclib.ServerProxy(index_url)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if score is None:
score = 0
if name not in packages.keys():
packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a list sorted by score
package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True)
return package_list
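# Illustrative sketch of the transformation above, using hypothetical hit
# data (not real PyPI output): two version records for one package collapse
# into a single entry that keeps the summary and score of the highest version.
#   hits = [{'name': 'foo', 'summary': 'old', 'version': '1.0', '_pypi_ordering': 1},
#           {'name': 'foo', 'summary': 'new', 'version': '2.0', '_pypi_ordering': 2}]
#   transform_hits(hits)
#   # -> [{'name': 'foo', 'summary': 'new', 'versions': ['1.0', '2.0'], 'score': 2}]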
def print_results(hits, name_column_width=25, terminal_width=None):
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
if terminal_width is not None:
# wrap and indent summary to fit terminal
summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%s - %s' % (name.ljust(name_column_width), summary)
try:
logger.notify(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
logger.indent += 2
try:
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.notify('INSTALLED: %s (latest)' % dist.version)
else:
logger.notify('INSTALLED: %s' % dist.version)
logger.notify('LATEST: %s' % latest)
finally:
logger.indent -= 2
except UnicodeEncodeError:
pass
def compare_versions(version1, version2):
try:
return cmp(StrictVersion(version1), StrictVersion(version2))
# in case of abnormal version number, fall back to LooseVersion
except ValueError:
pass
try:
return cmp(LooseVersion(version1), LooseVersion(version2))
except TypeError:
        # certain LooseVersion comparisons raise due to unorderable types,
        # fall back to string comparison
return cmp([str(v) for v in LooseVersion(version1).version],
[str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
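if __name__ == '__main__':
    # A minimal sketch exercising the version helpers above; this guard is
    # added here for illustration only and is not part of pip's command flow.
    assert compare_versions('1.0', '2.0') == -1
    assert compare_versions('2.0', '1.0') == 1
    assert highest_version(['1.0', '1.5.1', '1.5a1']) == '1.5.1'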
|
bung87/django-js-reverse
|
refs/heads/development
|
django_js_reverse/views.py
|
1
|
# -*- coding: utf-8 -*-
import re, sys
from django.conf import settings
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.template import loader
from slimit import minify
from .js_reverse_settings import (JS_EXCLUDE_NAMESPACES, JS_GLOBAL_OBJECT_NAME,
                                  JS_MINIFY, JS_VAR_NAME, JS_REVERSE_USE_BACKBONE)
if sys.version < '3':
text_type = unicode # NOQA
else:
text_type = str
JS_IDENTIFIER_RE = re.compile(r'^[$A-Z_][\dA-Z_$]*$')
JS_REVERSE_USE_BACKBONE = getattr(settings, 'JS_REVERSE_USE_BACKBONE', False)
def urls_js(request=None):
js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)
if not JS_IDENTIFIER_RE.match(js_var_name.upper()):
raise ImproperlyConfigured(
'JS_REVERSE_JS_VAR_NAME setting "%s" is not a valid javascript identifier.' % (js_var_name))
js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)
if not JS_IDENTIFIER_RE.match(js_global_object_name.upper()):
raise ImproperlyConfigured(
'JS_REVERSE_JS_GLOBAL_OBJECT_NAME setting "%s" is not a valid javascript identifier.' % (
js_global_object_name))
    minify_enabled = getattr(settings, 'JS_REVERSE_JS_MINIFY', JS_MINIFY)
    if not isinstance(minify_enabled, bool):
        raise ImproperlyConfigured(
            'JS_REVERSE_JS_MINIFY setting "%s" is not valid. Needs to be set to True or False.' % (minify_enabled))
script_prefix_via_config = getattr(settings, 'JS_REVERSE_SCRIPT_PREFIX', None)
if script_prefix_via_config:
script_prefix = script_prefix_via_config
if not script_prefix.endswith('/'):
script_prefix = '{0}/'.format(script_prefix)
else:
script_prefix = urlresolvers.get_script_prefix()
default_urlresolver = urlresolvers.get_resolver(getattr(request, 'urlconf', None))
url_list = sorted(list(prepare_url_list(default_urlresolver)))
    url_routes = {}
    if JS_REVERSE_USE_BACKBONE:
        for url_name, pattern in url_list:
            pattern = pattern[0][0].replace('%(', ':').replace(')s', '')
            url_routes[pattern] = url_name
            # url_list.append((url_name, pattern))
response_body = loader.render_to_string('django_js_reverse/urls_js.tpl', {
'urls': url_list,
        'url_routes': url_routes,
'url_prefix': script_prefix,
'js_var_name': js_var_name,
'js_global_object_name': js_global_object_name,
})
    if minify_enabled:
response_body = minify(response_body, mangle=True, mangle_toplevel=False)
if not request:
return response_body
else:
return HttpResponse(response_body, **{'content_type': 'application/javascript'})
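# A minimal wiring sketch (an assumption based on django-js-reverse's typical
# usage; the URL name and path below are hypothetical): expose the view in
# urls.py and include the generated script, which defines window.Urls (or the
# configured global object name).
#   from django_js_reverse.views import urls_js
#   urlpatterns = [url(r'^jsreverse/$', urls_js, name='js_reverse')]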
def prepare_url_list(urlresolver, namespace_path='', namespace=''):
"""
    returns list of tuples [(<url_name>, <url_pattern_tuple>), ...]
"""
exclude_ns = getattr(settings, 'JS_REVERSE_EXCLUDE_NAMESPACES', JS_EXCLUDE_NAMESPACES)
if namespace[:-1] in exclude_ns:
return
for url_name in urlresolver.reverse_dict.keys():
if isinstance(url_name, (text_type, str)):
url_patterns = []
for url_pattern in urlresolver.reverse_dict.getlist(url_name):
url_patterns += [
[namespace_path + pat[0], pat[1]] for pat in url_pattern[0]
]
yield [namespace + url_name, url_patterns]
for inner_ns, (inner_ns_path, inner_urlresolver) in \
urlresolver.namespace_dict.items():
inner_ns_path = namespace_path + inner_ns_path
inner_ns = namespace + inner_ns + ':'
# if we have inner_ns_path, reconstruct a new resolver so that we can
# handle regex substitutions within the regex of a namespace.
if inner_ns_path:
inner_urlresolver = urlresolvers.get_ns_resolver(inner_ns_path,
inner_urlresolver)
inner_ns_path = ''
for x in prepare_url_list(inner_urlresolver, inner_ns_path, inner_ns):
yield x
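# Illustrative shape of the generator's output for a hypothetical URLconf
# containing a namespaced pattern 'blog:detail' with a 'slug' argument:
#   list(prepare_url_list(resolver))
#   # -> [['blog:detail', [['blog/%(slug)s/', ['slug']]]], ...]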
|
rs2/pandas
|
refs/heads/master
|
pandas/tests/indexes/test_numeric.py
|
1
|
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import Float64Index, Index, Int64Index, Series, UInt64Index
import pandas._testing as tm
from pandas.tests.indexes.common import Base
class Numeric(Base):
def test_where(self):
# Tested in numeric.test_indexing
pass
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_format(self):
# GH35439
idx = self.create_index()
max_width = max(len(str(x)) for x in idx)
expected = [str(x).ljust(max_width) for x in idx]
assert idx.format() == expected
def test_numeric_compat(self):
pass # override Base method
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5, dtype="int64"))
# float conversions
arr = np.arange(5, dtype="int64") * 3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx, expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx, expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5, dtype="float64")
result = fidx - a
tm.assert_index_equal(result, expected)
expected = Float64Index(-arr)
a = np.zeros(5, dtype="float64")
result = a - fidx
tm.assert_index_equal(result, expected)
def test_index_groupby(self):
int_idx = Index(range(6))
float_idx = Index(np.arange(0, 0.6, 0.1))
obj_idx = Index("A B C D E F".split())
dt_idx = pd.date_range("2013-01-01", freq="M", periods=6)
for idx in [int_idx, float_idx, obj_idx, dt_idx]:
to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1])
tm.assert_dict_equal(
idx.groupby(to_groupby), {1.0: idx[[0, 5]], 2.0: idx[[1, 4]]}
)
to_groupby = Index(
[
datetime(2011, 11, 1),
datetime(2011, 12, 1),
pd.NaT,
pd.NaT,
datetime(2011, 12, 1),
datetime(2011, 11, 1),
],
tz="UTC",
).values
ex_keys = [Timestamp("2011-11-01"), Timestamp("2011-12-01")]
expected = {ex_keys[0]: idx[[0, 5]], ex_keys[1]: idx[[1, 4]]}
tm.assert_dict_equal(idx.groupby(to_groupby), expected)
def test_insert_na(self, nulls_fixture):
# GH 18295 (test missing)
index = self.create_index()
if nulls_fixture is pd.NaT:
expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object)
else:
expected = Float64Index([index[0], np.nan] + list(index[1:]))
result = index.insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric):
_holder = Float64Index
@pytest.fixture(
params=[
[1.5, 2, 3, 4, 5],
[0.0, 2.5, 5.0, 7.5, 10.0],
[5, 4, 3, 2, 1.5],
[10.0, 7.5, 5.0, 2.5, 0.0],
],
ids=["mixed", "float", "mixed_dec", "float_dec"],
)
def index(self, request):
return Float64Index(request.param)
@pytest.fixture
def mixed_index(self):
return Float64Index([1.5, 2, 3, 4, 5])
@pytest.fixture
def float_index(self):
return Float64Index([0.0, 2.5, 5.0, 7.5, 10.0])
def create_index(self) -> Float64Index:
return Float64Index(np.arange(5, dtype="float64"))
def test_repr_roundtrip(self, index):
tm.assert_index_equal(eval(repr(index)), index)
def check_is_index(self, i):
assert isinstance(i, Index)
assert not isinstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
assert a.equals(b)
tm.assert_index_equal(a, b, exact=False)
if is_float_index:
assert isinstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1, 2, 3, 4, 5])
assert isinstance(index, Float64Index)
expected = np.array([1, 2, 3, 4, 5], dtype="float64")
tm.assert_numpy_array_equal(index.values, expected)
index = Float64Index(np.array([1, 2, 3, 4, 5]))
assert isinstance(index, Float64Index)
index = Float64Index([1.0, 2, 3, 4, 5])
assert isinstance(index, Float64Index)
index = Float64Index(np.array([1.0, 2, 3, 4, 5]))
assert isinstance(index, Float64Index)
assert index.dtype == float
index = Float64Index(np.array([1.0, 2, 3, 4, 5]), dtype=np.float32)
assert isinstance(index, Float64Index)
assert index.dtype == np.float64
index = Float64Index(np.array([1, 2, 3, 4, 5]), dtype=np.float32)
assert isinstance(index, Float64Index)
assert index.dtype == np.float64
# nan handling
result = Float64Index([np.nan, np.nan])
assert pd.isna(result.values).all()
result = Float64Index(np.array([np.nan]))
assert pd.isna(result.values).all()
result = Index(np.array([np.nan]))
assert pd.isna(result.values).all()
@pytest.mark.parametrize(
"index, dtype",
[
(pd.Int64Index, "float64"),
(pd.UInt64Index, "categorical"),
(pd.Float64Index, "datetime64"),
(pd.RangeIndex, "float64"),
],
)
def test_invalid_dtype(self, index, dtype):
# GH 29539
with pytest.raises(
ValueError,
match=rf"Incorrect `dtype` passed: expected \w+(?: \w+)?, received {dtype}",
):
index([1, 2, 3], dtype=dtype)
def test_constructor_invalid(self):
# invalid
msg = (
r"Float64Index\(\.\.\.\) must be called with a collection of "
r"some kind, 0\.0 was passed"
)
with pytest.raises(TypeError, match=msg):
Float64Index(0.0)
msg = (
"String dtype not supported, "
"you may need to explicitly cast to a numeric type"
)
with pytest.raises(TypeError, match=msg):
Float64Index(["a", "b", 0.0])
msg = r"float\(\) argument must be a string or a number, not 'Timestamp'"
with pytest.raises(TypeError, match=msg):
Float64Index([Timestamp("20130101")])
def test_constructor_coerce(self, mixed_index, float_index):
self.check_coerce(mixed_index, Index([1.5, 2, 3, 4, 5]))
self.check_coerce(float_index, Index(np.arange(5) * 2.5))
self.check_coerce(
float_index, Index(np.array(np.arange(5) * 2.5, dtype=object))
)
def test_constructor_explicit(self, mixed_index, float_index):
# these don't auto convert
self.check_coerce(
float_index, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False
)
self.check_coerce(
mixed_index, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False
)
def test_type_coercion_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Index([1, 2, 3.5], dtype=any_int_dtype)
def test_type_coercion_valid(self, float_dtype):
# There is no Float32Index, so we always
# generate Float64Index.
i = Index([1, 2, 3.5], dtype=float_dtype)
tm.assert_index_equal(i, Index([1, 2, 3.5]))
def test_equals_numeric(self):
i = Float64Index([1.0, 2.0])
assert i.equals(i)
assert i.identical(i)
i2 = Float64Index([1.0, 2.0])
assert i.equals(i2)
i = Float64Index([1.0, np.nan])
assert i.equals(i)
assert i.identical(i)
i2 = Float64Index([1.0, np.nan])
assert i.equals(i2)
@pytest.mark.parametrize(
"other",
(
Int64Index([1, 2]),
Index([1.0, 2.0], dtype=object),
Index([1, 2], dtype=object),
),
)
def test_equals_numeric_other_index_type(self, other):
i = Float64Index([1.0, 2.0])
assert i.equals(other)
assert other.equals(i)
@pytest.mark.parametrize(
"vals",
[
pd.date_range("2016-01-01", periods=3),
pd.timedelta_range("1 Day", periods=3),
],
)
def test_lookups_datetimelike_values(self, vals):
# If we have datetime64 or timedelta64 values, make sure they are
        # wrapped correctly GH#31163
ser = pd.Series(vals, index=range(3, 6))
ser.index = ser.index.astype("float64")
expected = vals[1]
with tm.assert_produces_warning(FutureWarning):
result = ser.index.get_value(ser, 4.0)
assert isinstance(result, type(expected)) and result == expected
with tm.assert_produces_warning(FutureWarning):
result = ser.index.get_value(ser, 4)
assert isinstance(result, type(expected)) and result == expected
result = ser[4.0]
assert isinstance(result, type(expected)) and result == expected
result = ser[4]
assert isinstance(result, type(expected)) and result == expected
result = ser.loc[4.0]
assert isinstance(result, type(expected)) and result == expected
result = ser.loc[4]
assert isinstance(result, type(expected)) and result == expected
result = ser.at[4.0]
assert isinstance(result, type(expected)) and result == expected
# GH#31329 .at[4] should cast to 4.0, matching .loc behavior
result = ser.at[4]
assert isinstance(result, type(expected)) and result == expected
result = ser.iloc[1]
assert isinstance(result, type(expected)) and result == expected
result = ser.iat[1]
assert isinstance(result, type(expected)) and result == expected
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
assert not i.isin([0]).item()
assert not i.isin([1]).item()
assert i.isin([np.nan]).item()
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
tm.assert_numpy_array_equal(i.isin([1.0]), np.array([True, False]))
tm.assert_numpy_array_equal(i.isin([2.0, np.pi]), np.array([False, False]))
tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, True]))
tm.assert_numpy_array_equal(i.isin([1.0, np.nan]), np.array([True, True]))
i = Float64Index([1.0, 2.0])
tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, False]))
def test_fillna_float64(self):
# GH 11343
idx = Index([1.0, np.nan, 3.0], dtype=float, name="x")
# can't downcast
exp = Index([1.0, 0.1, 3.0], name="x")
tm.assert_index_equal(idx.fillna(0.1), exp)
# downcast
exp = Float64Index([1.0, 2.0, 3.0], name="x")
tm.assert_index_equal(idx.fillna(2), exp)
# object
exp = Index([1.0, "obj", 3.0], name="x")
tm.assert_index_equal(idx.fillna("obj"), exp)
class NumericInt(Numeric):
def test_view(self):
i = self._holder([], name="Foo")
i_view = i.view()
assert i_view.name == "Foo"
i_view = i.view(self._dtype)
tm.assert_index_equal(i, self._holder(i_view, name="Foo"))
i_view = i.view(self._holder)
tm.assert_index_equal(i, self._holder(i_view, name="Foo"))
def test_is_monotonic(self):
index = self._holder([1, 2, 3, 4])
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index._is_strictly_monotonic_increasing is True
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_decreasing is False
index = self._holder([4, 3, 2, 1])
assert index.is_monotonic is False
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is True
index = self._holder([1])
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
def test_is_strictly_monotonic(self):
index = self._holder([1, 1, 2, 3])
assert index.is_monotonic_increasing is True
assert index._is_strictly_monotonic_increasing is False
index = self._holder([3, 2, 1, 1])
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_decreasing is False
index = self._holder([1, 1])
assert index.is_monotonic_increasing
assert index.is_monotonic_decreasing
assert not index._is_strictly_monotonic_increasing
assert not index._is_strictly_monotonic_decreasing
def test_logical_compat(self):
idx = self.create_index()
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_identical(self):
index = self.create_index()
i = Index(index.copy())
assert i.identical(index)
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
i = index.astype(dtype=object)
i = i.rename("foo")
same_values = Index(i, dtype=object)
assert same_values.identical(i)
assert not i.identical(index)
assert Index(same_values, name="foo", dtype=object).identical(i)
assert not index.astype(dtype=object).identical(index.astype(dtype=self._dtype))
def test_union_noncomparable(self):
# corner case, non-Int64Index
index = self.create_index()
other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object)
result = index.union(other)
expected = Index(np.concatenate((index, other)))
tm.assert_index_equal(result, expected)
result = other.union(index)
expected = Index(np.concatenate((other, index)))
tm.assert_index_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
msg = (
"String dtype not supported, "
"you may need to explicitly cast to a numeric type"
)
# can't
data = ["foo", "bar", "baz"]
with pytest.raises(TypeError, match=msg):
self._holder(data)
# shouldn't
data = ["0", "1", "2"]
with pytest.raises(TypeError, match=msg):
self._holder(data)
def test_view_index(self):
index = self.create_index()
index.view(Index)
def test_prevent_casting(self):
index = self.create_index()
result = index.astype("O")
assert result.dtype == np.object_
class TestInt64Index(NumericInt):
_dtype = "int64"
_holder = Int64Index
@pytest.fixture(
params=[range(0, 20, 2), range(19, -1, -1)], ids=["index_inc", "index_dec"]
)
def index(self, request):
return Int64Index(request.param)
def create_index(self) -> Int64Index:
# return Int64Index(np.arange(5, dtype="int64"))
return Int64Index(range(0, 20, 2))
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = Index([-5, 0, 1, 2], dtype=np.int64)
tm.assert_index_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
tm.assert_index_equal(index, expected)
# scalar raise Exception
msg = (
r"Int64Index\(\.\.\.\) must be called with a collection of some "
"kind, 5 was passed"
)
with pytest.raises(TypeError, match=msg):
Int64Index(5)
# copy
arr = index.values
new_index = Int64Index(arr, copy=True)
tm.assert_index_equal(new_index, index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
assert new_index[0] != val
# interpret list-like
expected = Int64Index([5, 0])
for cls in [Index, Int64Index]:
for idx in [
cls([5, 0], dtype="int64"),
cls(np.array([5, 0]), dtype="int64"),
cls(Series([5, 0]), dtype="int64"),
]:
tm.assert_index_equal(idx, expected)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
assert index.values.dtype == np.int64
tm.assert_index_equal(index, Index(arr))
# preventing casting
arr = np.array([1, "2", 3, "4"], dtype=object)
with pytest.raises(TypeError, match="casting"):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with pytest.raises(TypeError, match="casting"):
Int64Index(arr_with_floats)
def test_constructor_coercion_signed_to_unsigned(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Index([-1], dtype=uint_dtype)
def test_constructor_unwraps_index(self):
idx = pd.Index([1, 2])
result = pd.Int64Index(idx)
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result._data, expected)
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
assert isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
assert isinstance(arr, Index)
def test_intersection(self):
index = self.create_index()
other = Index([1, 2, 3, 4, 5])
result = index.intersection(other)
expected = Index(np.sort(np.intersect1d(index.values, other.values)))
tm.assert_index_equal(result, expected)
result = other.intersection(index)
expected = Index(
np.sort(np.asarray(np.intersect1d(index.values, other.values)))
)
tm.assert_index_equal(result, expected)
class TestUInt64Index(NumericInt):
_dtype = "uint64"
_holder = UInt64Index
@pytest.fixture(
params=[
[2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25],
[2 ** 63 + 25, 2 ** 63 + 20, 2 ** 63 + 15, 2 ** 63 + 10, 2 ** 63],
],
ids=["index_inc", "index_dec"],
)
def index(self, request):
return UInt64Index(request.param)
@pytest.fixture
def index_large(self):
# large values used in TestUInt64Index where no compat needed with Int64/Float64
large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25]
return UInt64Index(large)
def create_index(self) -> UInt64Index:
# compat with shared Int64/Float64 tests; use index_large for UInt64 only tests
return UInt64Index(np.arange(5, dtype="uint64"))
def test_constructor(self):
idx = UInt64Index([1, 2, 3])
res = Index([1, 2, 3], dtype=np.uint64)
tm.assert_index_equal(res, idx)
idx = UInt64Index([1, 2 ** 63])
res = Index([1, 2 ** 63], dtype=np.uint64)
tm.assert_index_equal(res, idx)
idx = UInt64Index([1, 2 ** 63])
res = Index([1, 2 ** 63])
tm.assert_index_equal(res, idx)
idx = Index([-1, 2 ** 63], dtype=object)
res = Index(np.array([-1, 2 ** 63], dtype=object))
tm.assert_index_equal(res, idx)
# https://github.com/pandas-dev/pandas/issues/29526
idx = UInt64Index([1, 2 ** 63 + 1], dtype=np.uint64)
res = Index([1, 2 ** 63 + 1], dtype=np.uint64)
tm.assert_index_equal(res, idx)
def test_intersection(self, index_large):
other = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20])
result = index_large.intersection(other)
expected = Index(np.sort(np.intersect1d(index_large.values, other.values)))
tm.assert_index_equal(result, expected)
result = other.intersection(index_large)
expected = Index(
np.sort(np.asarray(np.intersect1d(index_large.values, other.values)))
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ["int64", "uint64"])
def test_int_float_union_dtype(dtype):
# https://github.com/pandas-dev/pandas/issues/26778
# [u]int | float -> float
index = pd.Index([0, 2, 3], dtype=dtype)
other = pd.Float64Index([0.5, 1.5])
expected = pd.Float64Index([0.0, 0.5, 1.5, 2.0, 3.0])
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
def test_range_float_union_dtype():
# https://github.com/pandas-dev/pandas/issues/26778
index = pd.RangeIndex(start=0, stop=3)
other = pd.Float64Index([0.5, 1.5])
result = index.union(other)
expected = pd.Float64Index([0.0, 0.5, 1, 1.5, 2.0])
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"box",
[list, lambda x: np.array(x, dtype=object), lambda x: pd.Index(x, dtype=object)],
)
def test_uint_index_does_not_convert_to_float64(box):
# https://github.com/pandas-dev/pandas/issues/28279
# https://github.com/pandas-dev/pandas/issues/28023
series = pd.Series(
[0, 1, 2, 3, 4, 5],
index=[
7606741985629028552,
17876870360202815256,
17876870360202815256,
13106359306506049338,
8991270399732411471,
8991270399732411472,
],
)
result = series.loc[box([7606741985629028552, 17876870360202815256])]
expected = UInt64Index(
[7606741985629028552, 17876870360202815256, 17876870360202815256],
dtype="uint64",
)
tm.assert_index_equal(result.index, expected)
tm.assert_equal(result, series[:3])
def test_float64_index_equals():
# https://github.com/pandas-dev/pandas/issues/35217
float_index = pd.Index([1.0, 2, 3])
string_index = pd.Index(["1", "2", "3"])
result = float_index.equals(string_index)
assert result is False
result = string_index.equals(float_index)
assert result is False
def test_float64_index_difference():
# https://github.com/pandas-dev/pandas/issues/35217
float_index = pd.Index([1.0, 2, 3])
string_index = pd.Index(["1", "2", "3"])
result = float_index.difference(string_index)
tm.assert_index_equal(result, float_index)
result = string_index.difference(float_index)
tm.assert_index_equal(result, string_index)
class TestGetSliceBounds:
@pytest.mark.parametrize("kind", ["getitem", "loc", None])
@pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)])
def test_get_slice_bounds_within(self, kind, side, expected):
index = Index(range(6))
result = index.get_slice_bound(4, kind=kind, side=side)
assert result == expected
@pytest.mark.parametrize("kind", ["getitem", "loc", None])
@pytest.mark.parametrize("side", ["left", "right"])
@pytest.mark.parametrize("bound, expected", [(-1, 0), (10, 6)])
def test_get_slice_bounds_outside(self, kind, side, expected, bound):
index = Index(range(6))
result = index.get_slice_bound(bound, kind=kind, side=side)
assert result == expected
|
Morbotic/pronto-distro
|
refs/heads/master
|
externals/libbot-drc/bot2-procman/python/src/bot_procman/info2_t.py
|
19
|
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
import cStringIO as StringIO
import struct
import deputy_cmd2_t
class info2_t(object):
__slots__ = ["utime", "host", "cpu_load", "phys_mem_total_bytes", "phys_mem_free_bytes", "swap_total_bytes", "swap_free_bytes", "ncmds", "cmds", "num_options", "option_names", "option_values"]
def __init__(self):
self.utime = 0
self.host = ""
self.cpu_load = 0.0
self.phys_mem_total_bytes = 0
self.phys_mem_free_bytes = 0
self.swap_total_bytes = 0
self.swap_free_bytes = 0
self.ncmds = 0
self.cmds = []
self.num_options = 0
self.option_names = []
self.option_values = []
def encode(self):
buf = StringIO.StringIO()
buf.write(info2_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack(">q", self.utime))
__host_encoded = self.host.encode('utf-8')
buf.write(struct.pack('>I', len(__host_encoded)+1))
buf.write(__host_encoded)
buf.write("\0")
buf.write(struct.pack(">fqqqqi", self.cpu_load, self.phys_mem_total_bytes, self.phys_mem_free_bytes, self.swap_total_bytes, self.swap_free_bytes, self.ncmds))
for i0 in range(self.ncmds):
assert self.cmds[i0]._get_packed_fingerprint() == deputy_cmd2_t.deputy_cmd2_t._get_packed_fingerprint()
self.cmds[i0]._encode_one(buf)
buf.write(struct.pack(">i", self.num_options))
for i0 in range(self.num_options):
__option_names_encoded = self.option_names[i0].encode('utf-8')
buf.write(struct.pack('>I', len(__option_names_encoded)+1))
buf.write(__option_names_encoded)
buf.write("\0")
for i0 in range(self.num_options):
__option_values_encoded = self.option_values[i0].encode('utf-8')
buf.write(struct.pack('>I', len(__option_values_encoded)+1))
buf.write(__option_values_encoded)
buf.write("\0")
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = StringIO.StringIO(data)
if buf.read(8) != info2_t._get_packed_fingerprint():
raise ValueError("Decode error")
return info2_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = info2_t()
self.utime = struct.unpack(">q", buf.read(8))[0]
__host_len = struct.unpack('>I', buf.read(4))[0]
self.host = buf.read(__host_len)[:-1].decode('utf-8', 'replace')
self.cpu_load, self.phys_mem_total_bytes, self.phys_mem_free_bytes, self.swap_total_bytes, self.swap_free_bytes, self.ncmds = struct.unpack(">fqqqqi", buf.read(40))
self.cmds = []
for i0 in range(self.ncmds):
self.cmds.append(deputy_cmd2_t.deputy_cmd2_t._decode_one(buf))
self.num_options = struct.unpack(">i", buf.read(4))[0]
self.option_names = []
for i0 in range(self.num_options):
__option_names_len = struct.unpack('>I', buf.read(4))[0]
self.option_names.append(buf.read(__option_names_len)[:-1].decode('utf-8', 'replace'))
self.option_values = []
for i0 in range(self.num_options):
__option_values_len = struct.unpack('>I', buf.read(4))[0]
self.option_values.append(buf.read(__option_values_len)[:-1].decode('utf-8', 'replace'))
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if info2_t in parents: return 0
newparents = parents + [info2_t]
tmphash = (0x594fec8cdb8bc81c+ deputy_cmd2_t.deputy_cmd2_t._get_hash_recursive(newparents)) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if info2_t._packed_fingerprint is None:
info2_t._packed_fingerprint = struct.pack(">Q", info2_t._get_hash_recursive([]))
return info2_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
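# Illustrative round trip (not part of the generated code; assumes a message
# whose ncmds/num_options counts match its lists):
#   msg = info2_t(); msg.host = "deputy1"
#   data = msg.encode()
#   decoded = info2_t.decode(data)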
|
astrodsg/django-stock-up
|
refs/heads/master
|
stock_manager/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
nrejack/redi
|
refs/heads/master
|
test/TestUpdateTimestamp.py
|
3
|
#!/usr/bin/env python
# Contributors:
# Christopher P. Barnes <senrabc@gmail.com>
# Andrei Sura: github.com/indera
# Mohan Das Katragadda <mohan.das142@gmail.com>
# Philip Chase <philipbchase@gmail.com>
# Ruchi Vivek Desai <ruchivdesai@gmail.com>
# Taeber Rapczak <taeber@ufl.edu>
# Nicholas Rejack <nrejack@ufl.edu>
# Josh Hanna <josh@hanna.io>
# Copyright (c) 2014-2015, University of Florida
# All rights reserved.
#
# Distributed under the BSD 3-Clause License
# For full text of the BSD 3-Clause License see http://opensource.org/licenses/BSD-3-Clause
'''
@author : Radha
email : rkandula@ufl.edu
This file tests the function update_time_stamp.
We call the function and check whether it updates the timestamps of all the
subjects. If every subject's timestamp is updated, the status value stays 1;
otherwise the status is flipped to 0, the loop exits, and the test fails on
the assertion.
'''
import unittest
import os
from lxml import etree
from redi import redi
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../")
proj_root = os.path.abspath(goal_dir)+'/'
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestUpdateTimestamp(unittest.TestCase):
def setUp(self):
# initialize the data with element tree
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
self.test_xml = """<?xml version="1.0" encoding="utf8"?>
<study>
<subject>
<NAME>TSH REFLEX</NAME>
<loinc_code>1552152</loinc_code>
<RESULT>0.74</RESULT>
<REFERENCE_LOW>0.27</REFERENCE_LOW>
<REFERENCE_HIGH>4.20</REFERENCE_HIGH>
<REFERENCE_UNIT>mIU/L</REFERENCE_UNIT>
<DATE_TIME_STAMP>1903-11-27 15:13:00</DATE_TIME_STAMP>
<STUDY_ID>59</STUDY_ID>
</subject>
<subject>
<NAME>HEP C RNA, QUANT REAL-TIME</NAME>
<loinc_code>740</loinc_code>
<RESULT>5.8</RESULT>
<REFERENCE_LOW></REFERENCE_LOW>
<REFERENCE_HIGH></REFERENCE_HIGH>
<REFERENCE_UNIT>log IU</REFERENCE_UNIT>
<DATE_TIME_STAMP>1903-11-27 15:13:00</DATE_TIME_STAMP>
<STUDY_ID>59</STUDY_ID>
</subject>
<subject>
<NAME>HCV QUANTITATIVE INTERPRETATION</NAME>
<loinc_code>1534483</loinc_code>
<RESULT>Detected</RESULT>
<REFERENCE_LOW></REFERENCE_LOW>
<REFERENCE_HIGH></REFERENCE_HIGH>
<REFERENCE_UNIT></REFERENCE_UNIT>
<DATE_TIME_STAMP>1903-11-27 15:13:00</DATE_TIME_STAMP>
<STUDY_ID>59</STUDY_ID>
</subject>
</study>"""
self.data = etree.ElementTree(etree.fromstring(self.test_xml))
self.input_date_format = "%Y-%m-%d %H:%M:%S"
self.output_date_format = "%Y-%m-%d"
def test_updateTimeStamp(self):
# add blank elements to each subject in data tree
redi.add_elements_to_tree(self.data)
# function to be tested
redi.update_time_stamp(self.data, self.input_date_format, self.output_date_format)
# output raw file to check it
#redi.write_element_tree_to_file(self.data, 'rawData.xml')
        #initialize a dictionary for the timestamps
        # key, value = timestamp, filled or not? (0/1)
        isStampFilled = {}
        for subject in self.data.iter('subject'):
            ts = subject.find('timestamp').text
            # mark each timestamp as filled (1) or missing (0)
            isStampFilled[ts] = 1 if ts else 0
        status = 1
        for key, value in isStampFilled.items():
            if value != status:
                status = 0
                break
        self.assertEqual(status, 1)
    def tearDown(self):
        pass
if __name__ == '__main__':
unittest.main()
|
nazo/ansible
|
refs/heads/devel
|
test/units/modules/network/eos/test_eos_command.py
|
41
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_command
from .eos_module import TestEosModule, load_fixture, set_module_args
class TestEosCommandModule(TestEosModule):
module = eos_command
def setUp(self):
self.mock_run_commands = patch('ansible.modules.network.eos.eos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = str(command).replace(' ', '_')
filename = 'eos_command_%s.txt' % filename
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_eos_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Arista'))
def test_eos_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Arista'))
def test_eos_command_wait_for(self):
wait_for = 'result[0] contains "Arista vEOS"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_eos_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_eos_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_eos_command_match_any(self):
wait_for = ['result[0] contains "Arista"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_eos_command_match_all(self):
wait_for = ['result[0] contains "Arista"',
'result[0] contains "Software image"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_eos_command_match_all_failure(self):
wait_for = ['result[0] contains "Arista"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
|
AOKP/external_chromium_org
|
refs/heads/kitkat
|
native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/__init__.py
|
155
|
#!/usr/bin/env python
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software
# Foundation; All Rights Reserved
"""A HTTPSConnection/Handler with additional proxy and cert validation features.
In particular, monkey patches in Python r74203 to provide support for CONNECT
proxies and adds SSL cert validation if the ssl module is present.
"""
__author__ = "{frew,nick.johnson}@google.com (Fred Wulff and Nick Johnson)"
import base64
import httplib
import logging
import re
import socket
import urllib2
from urllib import splittype
from urllib import splituser
from urllib import splitpasswd
class InvalidCertificateException(httplib.HTTPException):
"""Raised when a certificate is provided with an invalid hostname."""
def __init__(self, host, cert, reason):
"""Constructor.
Args:
host: The hostname the connection was made to.
      cert: The SSL certificate (as a dictionary) the host returned.
      reason: Human-readable explanation of why the certificate is invalid.
    """
httplib.HTTPException.__init__(self)
self.host = host
self.cert = cert
self.reason = reason
def __str__(self):
return ('Host %s returned an invalid certificate (%s): %s\n'
'To learn more, see '
'http://code.google.com/appengine/kb/general.html#rpcssl' %
(self.host, self.reason, self.cert))
def can_validate_certs():
"""Return True if we have the SSL package and can validate certificates."""
try:
import ssl
return True
except ImportError:
return False
def _create_fancy_connection(tunnel_host=None, key_file=None,
cert_file=None, ca_certs=None):
# This abomination brought to you by the fact that
# the HTTPHandler creates the connection instance in the middle
# of do_open so we need to add the tunnel host to the class.
class PresetProxyHTTPSConnection(httplib.HTTPSConnection):
"""An HTTPS connection that uses a proxy defined by the enclosing scope."""
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self._tunnel_host = tunnel_host
if tunnel_host:
logging.debug("Creating preset proxy https conn: %s", tunnel_host)
self.key_file = key_file
self.cert_file = cert_file
self.ca_certs = ca_certs
try:
import ssl
if self.ca_certs:
self.cert_reqs = ssl.CERT_REQUIRED
else:
self.cert_reqs = ssl.CERT_NONE
except ImportError:
pass
def _tunnel(self):
self._set_hostport(self._tunnel_host, None)
logging.info("Connecting through tunnel to: %s:%d",
self.host, self.port)
self.send("CONNECT %s:%d HTTP/1.0\r\n\r\n" % (self.host, self.port))
response = self.response_class(self.sock, strict=self.strict,
method=self._method)
(_, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error, "Tunnel connection failed: %d %s" % (
code, message.strip())
while True:
line = response.fp.readline()
if line == "\r\n":
break
def _get_valid_hosts_for_cert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns']
else:
# Return a list of commonName fields
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _validate_certificate_hostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._get_valid_hosts_for_cert(cert)
for host in hosts:
# Convert the glob-style hostname expression (eg, '*.google.com') into a
# valid regular expression.
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
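    # Illustrative example (not in the original source): a certificate host
    # glob '*.google.com' becomes the pattern '^[^.]*\.google\.com$', so
    # 'mail.google.com' matches but 'a.b.google.com' does not.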
def connect(self):
# TODO(frew): When we drop support for <2.6 (in the far distant future),
# change this to socket.create_connection.
self.sock = _create_connection((self.host, self.port))
if self._tunnel_host:
self._tunnel()
# ssl and FakeSocket got deprecated. Try for the new hotness of wrap_ssl,
# with fallback.
try:
import ssl
self.sock = ssl.wrap_socket(self.sock,
keyfile=self.key_file,
certfile=self.cert_file,
ca_certs=self.ca_certs,
cert_reqs=self.cert_reqs)
if self.cert_reqs & ssl.CERT_REQUIRED:
cert = self.sock.getpeercert()
          # split(':', 1) drops an optional ':port' suffix from the host
          hostname = self.host.split(':', 1)[0]
if not self._validate_certificate_hostname(cert, hostname):
raise InvalidCertificateException(hostname, cert,
'hostname mismatch')
except ImportError:
ssl = socket.ssl(self.sock,
keyfile=self.key_file,
certfile=self.cert_file)
self.sock = httplib.FakeSocket(self.sock, ssl)
return PresetProxyHTTPSConnection
# Here to end of _create_connection copied wholesale from Python 2.6's socket.py
_GLOBAL_DEFAULT_TIMEOUT = object()
def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used.
"""
msg = "getaddrinfo returns an empty list"
host, port = address
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.connect(sa)
return sock
except socket.error, msg:
if sock is not None:
sock.close()
raise socket.error, msg
class FancyRequest(urllib2.Request):
"""A request that allows the use of a CONNECT proxy."""
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self._tunnel_host = None
self._key_file = None
self._cert_file = None
self._ca_certs = None
def set_proxy(self, host, type):
saved_type = None
if self.get_type() == "https" and not self._tunnel_host:
self._tunnel_host = self.get_host()
saved_type = self.get_type()
urllib2.Request.set_proxy(self, host, type)
if saved_type:
# Don't set self.type, we want to preserve the
# type for tunneling.
self.type = saved_type
def set_ssl_info(self, key_file=None, cert_file=None, ca_certs=None):
self._key_file = key_file
self._cert_file = cert_file
self._ca_certs = ca_certs
class FancyProxyHandler(urllib2.ProxyHandler):
"""A ProxyHandler that works with CONNECT-enabled proxies."""
# Taken verbatim from /usr/lib/python2.5/urllib2.py
def _parse_proxy(self, proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
# We have an authority, so for RFC 3986-compliant URLs (by ss 3.
# and 3.3.), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
def proxy_open(self, req, proxy, type):
# This block is copied wholesale from Python2.6 urllib2.
# It is idempotent, so the superclass method call executes as normal
# if invoked.
orig_type = req.get_type()
proxy_type, user, password, hostport = self._parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if user and password:
user_pass = "%s:%s" % (urllib2.unquote(user), urllib2.unquote(password))
creds = base64.b64encode(user_pass).strip()
# Later calls overwrite earlier calls for the same header
req.add_header("Proxy-authorization", "Basic " + creds)
hostport = urllib2.unquote(hostport)
req.set_proxy(hostport, proxy_type)
# This condition is the change
if orig_type == "https":
return None
return urllib2.ProxyHandler.proxy_open(self, req, proxy, type)
class FancyHTTPSHandler(urllib2.HTTPSHandler):
"""An HTTPSHandler that works with CONNECT-enabled proxies."""
def do_open(self, http_class, req):
# Intentionally very specific so as to opt for false negatives
# rather than false positives.
try:
return urllib2.HTTPSHandler.do_open(
self,
_create_fancy_connection(req._tunnel_host,
req._key_file,
req._cert_file,
req._ca_certs),
req)
except urllib2.URLError, url_error:
try:
import ssl
if (type(url_error.reason) == ssl.SSLError and
url_error.reason.args[0] == 1):
# Display the reason to the user. Need to use args for python2.5
# compat.
raise InvalidCertificateException(req.host, '',
url_error.reason.args[1])
except ImportError:
pass
raise url_error
# We have to implement this so that we persist the tunneling behavior
# through redirects.
class FancyRedirectHandler(urllib2.HTTPRedirectHandler):
"""A redirect handler that persists CONNECT-enabled proxy information."""
def redirect_request(self, req, *args, **kwargs):
new_req = urllib2.HTTPRedirectHandler.redirect_request(
self, req, *args, **kwargs)
# Same thing as in our set_proxy implementation, but in this case
# we"ve only got a Request to work with, so it was this or copy
# everything over piecemeal.
#
# Note that we do not persist tunneling behavior from an http request
# to an https request, because an http request does not set _tunnel_host.
#
# Also note that in Python < 2.6, you will get an error in
# FancyHTTPSHandler.do_open() on an https urllib2.Request that uses an http
# proxy, since the proxy type will be set to http instead of https.
# (FancyRequest, and urllib2.Request in Python >= 2.6 set the proxy type to
# https.) Such an urllib2.Request could result from this redirect
# if you are redirecting from an http request (since an an http request
# does not have _tunnel_host set, and thus you will not set the proxy
# in the code below), and if you have defined a proxy for https in, say,
# FancyProxyHandler, and that proxy has type http.
if hasattr(req, "_tunnel_host") and isinstance(new_req, urllib2.Request):
if new_req.get_type() == "https":
if req._tunnel_host:
# req is proxied, so copy the proxy info.
new_req._tunnel_host = new_req.get_host()
new_req.set_proxy(req.host, "https")
else:
# req is not proxied, so just make sure _tunnel_host is defined.
new_req._tunnel_host = None
new_req.type = "https"
if hasattr(req, "_key_file") and isinstance(new_req, urllib2.Request):
# Copy the auxiliary data in case this or any further redirect is https
new_req._key_file = req._key_file
new_req._cert_file = req._cert_file
new_req._ca_certs = req._ca_certs
return new_req
|
iut-ibk/DynaMind-UrbanSim
|
refs/heads/master
|
3rdparty/opus/src/randstad/gridcell/percent_development_type_SSS_within_walking_distance.py
|
2
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
class percent_development_type_SSS_within_walking_distance(Variable):
"""There is exactly one variable corresponding to each defined development type dynamic_land_use_variables,
where "?" is the development type group's NAME (e.g. residential, commercial).
100 * [sum over c in cell.walking_radius of (if c.development_type.dynamic_land_use_variables == N then 1 else 0)] /
(number of cells within walking distance)"""
_return_type="float32"
def __init__(self, type_name):
self.type_name = type_name
self.number_of_development_type_wwd = \
"number_of_development_type_"+self.type_name+"_within_walking_distance"
Variable.__init__(self)
def dependencies(self):
return [my_attribute_label(self.number_of_development_type_wwd)]
def compute(self, dataset_pool):
urbansim_constant = dataset_pool.get_dataset('urbansim_constant')
return 100.0*self.get_dataset().get_attribute(self.number_of_development_type_wwd)/ \
float(urbansim_constant["walking_distance_footprint"].sum())
from numpy import array, ma
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
class Tests(opus_unittest.OpusTestCase):
variable_name = "randstad.gridcell.percent_development_type_residential_within_walking_distance"
#"1" in this case is the group number. was originally DDD, but have to specify it since this is a test, and the
#"parent code" isn't actually invoked
def test_my_inputs( self ):
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(
table_name='gridcells',
table_data={
'grid_id': array([1,2,3,4]),
'relative_x': array([1,2,1,2]),
'relative_y': array([1,1,2,2]),
'number_of_development_type_residential_within_walking_distance': array([3, 5, 1, 0])
}
)
storage.write_table(
table_name='urbansim_constants',
table_data={
"walking_distance_circle_radius": array([150]),
'cell_size': array([150]),
"acres": array([105.0]),
}
)
dataset_pool = DatasetPool(package_order=['urbansim'],
storage=storage)
gridcell = dataset_pool.get_dataset('gridcell')
gridcell.compute_variables(self.variable_name,
dataset_pool=dataset_pool)
values = gridcell.get_attribute(self.variable_name)
should_be = array( [3/5.0*100.0,
5/5.0*100.0,
1/5.0*100.0,
0/5.0*100.0] )
        self.assertTrue(ma.allclose(values, should_be, rtol=1e-7),
                        'Error in ' + self.variable_name)
if __name__=='__main__':
opus_unittest.main()
|
mrosenstihl/PulsePrograms
|
refs/heads/master
|
FID/fid_res.py
|
1
|
import math
import scipy.signal
import numpy
from scipy.optimize import leastsq
import numpy as N
def lorenz(x,gamma,offs):
return gamma/(numpy.pi*((x-offs)**2+gamma**2))
def gauss(x,sigma,offs):
return 1/(sigma*numpy.sqrt(2*numpy.pi))*numpy.exp(-0.5*((x-offs)/sigma)**2)
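# Voigt profile approximated numerically: the convolution of a Gaussian and a
# Lorentzian evaluated on the same x grid ('same' keeps the output length).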
def voigt(p,x):
return numpy.convolve(gauss(x,p[0],p[2]),lorenz(x,p[1],p[2]),'same')
def result():
accu = Accumulation()
mag = MeasurementResult("Amplitude")
for timesignal in results:
if not isinstance(timesignal, ADC_Result):
print timesignal
continue
start = 25e-6 # interval used for FFT
end = 1 #
run = int(timesignal.get_description("run"))
sens = timesignal.get_description("sens")
data["Timesignal %s"%sens]=timesignal+0
if (((run)%4)==0):
accu += timesignal
data["Adjustsignal"]=timesignal
#data["Adjustspectrum"]=FFT(timesignal).base_corr(0.1).fft(zoom=(0,100e3))
#data["Adjustspectrumwide"]=FFT(timesignal).base_corr(0.1).fft()
elif (((run)%4)==1):
timesignal.y[1]*=-1
timesignal.y=[timesignal.y[1],timesignal.y[0]]
accu += timesignal
data["Adjustsignal"]=timesignal
#data["Adjustspectrum"]=FFT(timesignal).base_corr(0.1).fft(zoom=(0,100e3))
#data["Adjustspectrumwide"]=FFT(timesignal).base_corr(0.1).fft()
elif (((run)%4)==2):
accu -= timesignal
data["Adjustsignal"]=-timesignal
#data["Adjustspectrum"]=FFT(-timesignal).base_corr(0.1).fft(zoom=(0,100e3))
#data["Adjustspectrumwide"]=FFT(-timesignal).base_corr(0.1).fft()
elif (((run)%4)==3):
timesignal.y[0]*=-1
timesignal.y=[timesignal.y[1],timesignal.y[0]]
accu += timesignal
data["Adjustsignal"]=timesignal
#data["Adjustspectrum"]=FFT(timesignal).base_corr(0.1).fft(zoom=(0,100e3))
#data["Adjustspectrumwide"]=FFT(timesignal).base_corr(0.1).fft()
#r_start=max(0,int((start-timesignal.x[0])*timesignal.get_sampling_rate()))
#r_end=min(int((end-timesignal.x[0])*timesignal.get_sampling_rate()), len(timesignal))
data["Accumulation %s"%sens]=accu
echo = accu + 0
#echo.y[0] = accu.y[0][r_start:r_end]
#echo.y[1] = accu.y[1][r_start:r_end]
if (run+1) % 4 == 0:
pass
specmag = (accu+0).fft().magnitude()
#success=0
#p0 = [100.,100.,specmag.x[specmag.y[0].argmax()]]
#p1, success = leastsq(lambda p,x: specmag.y[0]/specmag.y[0].max() - voigt(p,x)/voigt(p,x).max(), p0, specmag.x, maxfev=1200, warning=True)
#numpy.savetxt( 'testy', voigt(p0,specmag.x)/voigt(p0,specmag.x).max() )
#numpy.savetxt( 'testx', specmag.x )
#pylab.show()
#if success >= 0:
# p0 = p1
# err = lambda p,x: specmag.y[0]/specmag.y[0].max() - voigt(p,x)/voigt(p,x).max()
# specmag.y[1][:] = voigt(p1,specmag.x)/voigt(p1,specmag.x).max()*specmag.y[0].max()
# print p1,success, sum((err(p1, specmag.x))**2)
data["AbsSpectrum %s"%sens]=specmag+0
data["Spectrum"]=(accu+0).fft().clip(-2e3,2e3)+0
noise = N.sqrt(accu.y[0][-256*1024:]**2 + accu.y[1][-256*1024:]**2).std()
signal = N.sqrt(accu.y[0]**2 + accu.y[1]**2).max()
print "Rauschen", noise
print "Signal", signal, signal/noise
print
accu = Accumulation()
pass
|
ludmilamarian/invenio
|
refs/heads/master
|
invenio/testsuite/test_manage.py
|
2
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the inveniomanage script."""
from __future__ import print_function
import sys
from invenio.base.utils import run_py_func
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
class Catcher(object):
"""Helper decorator to test raw_input."""
## see: http://stackoverflow.com/questions/13480632/python-stringio-selectively-place-data-into-stdin
def __init__(self, handler):
self.handler = handler
self.inputs = []
def __enter__(self):
self.__stdin = sys.stdin
self.__stdout = sys.stdout
sys.stdin = self
sys.stdout = self
def __exit__(self, type, value, traceback):
sys.stdin = self.__stdin
sys.stdout = self.__stdout
def write(self, value):
self.__stdout.write(value)
result = self.handler(value)
if result is not None:
self.inputs.append(result)
def readline(self):
return self.inputs.pop()
def getvalue(self):
return self.__stdout.getvalue()
def truncate(self, pos):
return self.__stdout.truncate(pos)
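# Illustrative usage sketch (hypothetical handler, not part of these tests):
# answer 'y' whenever a prompt containing 'continue?' is printed.
#
#   def answer(line):
#       return 'y' if 'continue?' in line else None
#
#   with Catcher(answer):
#       main('some interactive command')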
class InveniomanageTest(InvenioTestCase):
def test_upgrade_show_applied_cmd(self):
""" Test `upgrade show applied` command. """
from invenio.modules.upgrader.manage import main
out = run_py_func(main, 'upgrader show applied').out
expected = '>>> Following upgrade(s) have been applied:'
self.assertTrue(expected in out.split('\n'),
"%s was not found in output %s" % (expected, out))
def test_upgrade_show_pending_cmd(self):
""" Test `upgrade show pending` command. """
from invenio.modules.upgrader.manage import main
out = run_py_func(main, 'upgrader show pending').out
lines = out.split('\n')
expected = ('>>> Following upgrade(s) are ready to be applied:',
'>>> All upgrades have been applied.')
self.assertTrue(expected[0] in lines or expected[1] in lines,
"%s was not found in output %s" % (expected, lines))
def test_signals_usage(self):
""" Test signal handling. """
from invenio.base.scripts.database import main as db_main
from invenio.base.signals import pre_command, post_command
from invenio.base.manage import main, version as im_version
def pre_handler_version(sender, *args, **kwargs):
print('>>> pre_handler_version')
def post_handler_version(sender, *args, **kwargs):
print('>>> post_handler_version')
# Bind only `inveniomanage version` command to pre/post handler.
pre_command.connect(pre_handler_version, sender=im_version)
post_command.connect(post_handler_version, sender=im_version)
def pre_handler_general_test(sender, *args, **kwargs):
print('>>> pre_handler_general')
def post_handler_general_test(sender, *args, **kwargs):
print('>>> post_handler_general')
# Bind all commands to pre/post general handler.
pre_command.connect(pre_handler_general_test)
        post_command.connect(post_handler_general_test)
# Expect both version and general handlers.
out = run_py_func(main, 'inveniomanage version').out
lines = out.split('\n')
expected = ('>>> pre_handler_version',
'>>> post_handler_version',
'>>> pre_handler_general',
'>>> post_handler_general')
for line in expected:
self.assertTrue(line in lines,
"%s was not found in output %s" % (line, lines))
# Expect only general handlers.
out = run_py_func(db_main, 'database uri').out
lines = out.split('\n')
expected = ('>>> pre_handler_general',
'>>> post_handler_general')
for line in expected:
self.assertTrue(line in lines,
"%s was not found in output %s" % (line, lines))
notexpected = ('>>> pre_handler_version',
'>>> post_handler_version')
for line in notexpected:
self.assertFalse(line in lines,
"%s was not expected in output %s" % (line, lines))
TEST_SUITE = make_test_suite(InveniomanageTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
totalgood/twip
|
refs/heads/master
|
tweetget/settings.py
|
1
|
import os
# to get your own keys:
# 1. go to https://apps.twitter.com/
# 2. fill out form and hit create
# 3. go to app and click "manage keys and access tokens"
# 4. copy into your bashrc
TWITTER_API_KEY = os.environ['TWITTER_API_KEY']
TWITTER_API_SECRET = os.environ['TWITTER_API_SECRET']
# Twitter's search API allows 180 user-auth queries per 15 minute window (450 with app auth)
# RATE_LIMIT = 5 # requests every 15 minutes, max is 450 for app twitter api
RATE_LIMIT = 1 # requests every 15 minutes, max is 450 for app twitter api
RATE_LIMIT_WINDOW = 900 # 15 minutes * 60
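# Illustrative: a client honoring these settings would sleep
# RATE_LIMIT_WINDOW / RATE_LIMIT seconds between requests
# (900 / 1 = 900 s, i.e. one request per window at the current setting).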
DATA_PATH = './data'
QUERIES = ['python -monty', '#sarcasm', '#happy', '#sad']
|
Arthurvdmerwe/AS2805_Python_Implementation
|
refs/heads/master
|
Shared/ASResponseCodes.py
|
3
|
__author__ = 'arthurvandermerwe'
_AS2805ResponseText = {
"00": "Approved or completed successfully",
"01": "Refer to card issuer",
"02": "Refer to card issuer, special condition",
"03": "Invalid merchant",
"04": "Pick-up card",
"05": "Do not honor",
"06": "Error",
"07": "Pick-up card, special condition",
"08": "Honor with identification",
"09": "Request in progress",
"10": "Approved, partial",
"11": "Approved, VIP",
"12": "Invalid transaction",
"13": "Invalid amount",
"14": "Invalid card number",
"15": "No such issuer",
"16": "Approved, update track 3",
"17": "Customer cancellation",
"18": "Customer dispute",
"19": "Re-enter transaction",
"20": "Invalid response",
"21": "No action taken",
"22": "Suspected malfunction",
"23": "Unacceptable transaction fee",
"24": "File update not supported",
"25": "Unable to locate record",
"26": "Duplicate record",
"27": "File update field edit error",
"28": "File update file locked",
"29": "File update failed",
"30": "Format error",
"31": "Bank not supported",
"32": "Completed partially",
"33": "Expired card, pick-up",
"34": "Suspected fraud, pick-up",
"35": "Contact acquirer, pick-up",
"36": "Restricted card, pick-up",
"37": "Call acquirer security, pick-up",
"38": "PIN tries exceeded, pick-up",
"39": "No credit account",
"40": "Function not supported",
"41": "Lost card, pick-up",
"42": "No universal account",
"43": "Stolen card, pick-up",
"44": "No investment account",
"45": "Account closed",
"46": "Identification required",
"47": "Identification cross-check required",
"48": "Reserved for future use",
"49": "Reserved for future use",
"50": "Reserved for future use",
"51": "Not sufficient funds",
"52": "No check account",
"53": "No savings account",
"54": "Expired card",
"55": "Incorrect PIN",
"56": "No card record",
"57": "Transaction not permitted to cardholder",
"58": "Transaction not permitted on terminal",
"59": "Suspected fraud",
"60": "Contact acquirer",
"61": "Exceeds withdrawal limit",
"62": "Restricted card",
"63": "Security violation",
"64": "Original amount incorrect",
"65": "Exceeds withdrawal frequency",
"66": "Call acquirer security",
"67": "Hard capture",
"68": "Response received too late",
"69": "Advice received too late",
"70": "Reserved for future use",
"71": "Reserved for future use",
"72": "Reserved for future use",
"73": "Reserved for future use",
"74": "Reserved for future use",
"75": "PIN tries exceeded",
"76": "Reserved for future use",
"77": "Intervene, bank approval required",
"78": "Intervene, bank approval required for partial amount",
"79": "Reserved for client-specific use (declined)",
"80": "Reserved for client-specific use (declined)",
"81": "Reserved for client-specific use (declined)",
"82": "Reserved for client-specific use (declined)",
"83": "Reserved for client-specific use (declined)",
"84": "Reserved for client-specific use (declined)",
"85": "Reserved for client-specific use (declined)",
"86": "Reserved for client-specific use (declined)",
"87": "Reserved for client-specific use (declined)",
"88": "Reserved for client-specific use (declined)",
"89": "Reserved for client-specific use (declined)",
"90": "Cut-off in progress",
"91": "Issuer or switch inoperative",
"92": "Routing error",
"93": "Violation of law",
"94": "Duplicate transaction",
"95": "Reconcile error",
"96": "System malfunction",
"97": "Reserved for future use",
"98": "Exceeds cash limit",
"99": "Reserved for future use"}
def GetISOResponseText(code):
txt = "Unmapped Response"
try:
txt = _AS2805ResponseText[code]
    except KeyError:
pass
return txt
if __name__ == '__main__':
print GetISOResponseText("00")
print GetISOResponseText("PP")
|
hoosteeno/fjord
|
refs/heads/master
|
vendor/packages/requests-2.7.0/requests/packages/urllib3/contrib/pyopenssl.py
|
488
|
'''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
try:
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
'''ASN.1 implementation for subjectAltNames support'''
# There is no limit to how many SAN certificates a certificate may have,
# however this needs to have some limit so we'll set an arbitrarily high
# limit.
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
'''
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
else:
raise
except OpenSSL.SSL.ZeroReturnError as e:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b''
else:
raise
except OpenSSL.SSL.WantReadError:
rd, wd, ed = select.select(
[self.socket], [], [], self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv(*args, **kwargs)
else:
return data
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
_, wlist, _ = select.select([], [self.socket], [],
self.socket.gettimeout())
if not wlist:
raise timeout()
continue
def sendall(self, data):
while len(data):
sent = self._send_until_done(data)
data = data[sent:]
def close(self):
if self._makefile_refs < 1:
return self.connection.shutdown()
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
keyfile = keyfile or certfile # Match behaviour of the normal python ssl library
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
ctx.set_default_verify_paths()
# Disable TLS compression to migitate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
ctx.set_options(OP_NO_COMPRESSION)
# Set list of supported ciphersuites.
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
rd, _, _ = select.select([sock], [], [], sock.gettimeout())
if not rd:
raise timeout('select timed out')
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
|
flgiordano/netcash
|
refs/heads/master
|
+/google-cloud-sdk/lib/third_party/rsa/asn1.py
|
89
|
'''ASN.1 definitions.
Not all ASN.1-handling code use these definitions, but when it does, they should be here.
'''
from pyasn1.type import univ, namedtype, tag
class PubKeyHeader(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('oid', univ.ObjectIdentifier()),
namedtype.NamedType('parameters', univ.Null()),
)
class OpenSSLPubKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('header', PubKeyHeader()),
# This little hack (the implicit tag) allows us to get a Bit String as Octet String
namedtype.NamedType('key', univ.OctetString().subtype(
implicitTag=tag.Tag(tagClass=0, tagFormat=0, tagId=3))),
)
class AsnPubKey(univ.Sequence):
'''ASN.1 contents of DER encoded public key:
RSAPublicKey ::= SEQUENCE {
modulus INTEGER, -- n
        publicExponent  INTEGER   -- e
    }
    '''
componentType = namedtype.NamedTypes(
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
)
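# Illustrative sketch (not part of the original module): decoding a
# DER-encoded RSA public key against this spec would look roughly like:
#
#   from pyasn1.codec.der import decoder
#   (pubkey, _) = decoder.decode(der_bytes, asn1Spec=AsnPubKey())
#   n = int(pubkey.getComponentByName('modulus'))
#   e = int(pubkey.getComponentByName('publicExponent'))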
|
bepitulaz/huntingdimana
|
refs/heads/master
|
env/Lib/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py
|
2800
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should
        # not be different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
|
was4444/chromium.src
|
refs/heads/nw15
|
chrome/browser/web_dev_style/closure_lint_test.py
|
19
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import js_checker
from os import path as os_path
import re
from sys import path as sys_path
import unittest
_HERE = os_path.dirname(os_path.abspath(__file__))
sys_path.append(os_path.join(_HERE, '..', '..', '..', 'build'))
import find_depot_tools # pylint: disable=W0611
from testing_support.super_mox import SuperMoxTestBase
class ClosureLintTest(SuperMoxTestBase):
def setUp(self):
SuperMoxTestBase.setUp(self)
input_api = self.mox.CreateMockAnything()
input_api.os_path = os_path
input_api.re = re
input_api.change = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(input_api.change, 'RepositoryRoot')
src_root = os_path.join(os_path.dirname(__file__), '..', '..', '..')
input_api.change.RepositoryRoot().MultipleTimes().AndReturn(src_root)
output_api = self.mox.CreateMockAnything()
self.mox.ReplayAll()
self.checker = js_checker.JSChecker(input_api, output_api)
def ShouldPassClosureLint(self, source):
errors = self.checker.ClosureLint('', source=source)
for error in errors:
print 'Error: ' + error.message
self.assertListEqual([], errors)
def testBindFalsePositives(self):
sources = [
[
'var addOne = function(prop) {\n',
' this[prop] += 1;\n',
'}.bind(counter, timer);\n',
'\n',
'setInterval(addOne, 1000);\n',
'\n',
],
[
'/** Da clickz. */\n',
'button.onclick = function() { this.add_(this.total_); }.bind(this);\n',
],
]
for source in sources:
self.ShouldPassClosureLint(source)
def testPromiseFalsePositives(self):
sources = [
[
'Promise.reject(1).catch(function(error) {\n',
' alert(error);\n',
'});\n',
],
[
'var loaded = new Promise();\n',
'loaded.then(runAwesomeApp);\n',
'loaded.catch(showSadFace);\n',
'\n',
'/** Da loadz. */\n',
'document.onload = function() { loaded.resolve(); };\n',
'\n',
'/** Da errorz. */\n',
'document.onerror = function() { loaded.reject(); };\n',
'\n',
"if (document.readystate == 'complete') loaded.resolve();\n",
],
]
for source in sources:
self.ShouldPassClosureLint(source)
if __name__ == '__main__':
unittest.main()
|
rzr/PyBitmessage
|
refs/heads/master
|
src/debug.py
|
11
|
# -*- coding: utf-8 -*-
'''
Logging and debugging facility
==============================
Levels:
DEBUG Detailed information, typically of interest only when diagnosing problems.
INFO Confirmation that things are working as expected.
WARNING An indication that something unexpected happened, or indicative of some problem in the
near future (e.g. ‘disk space low’). The software is still working as expected.
ERROR Due to a more serious problem, the software has not been able to perform some function.
CRITICAL A serious error, indicating that the program itself may be unable to continue running.
There are three loggers: `console_only`, `file_only` and `both`.
Use: `from debug import logger` to import this facility into whatever module you wish to log messages from.
Logging is thread-safe so you don't have to worry about locks, just import and log.
'''
import logging
import logging.config
import shared
import sys
import helper_startup
helper_startup.loadConfig()
# TODO(xj9): Get from a config file.
log_level = 'DEBUG'
def configureLogging():
logging.config.dictConfig({
'version': 1,
'formatters': {
'default': {
'format': '%(asctime)s - %(levelname)s - %(message)s',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': log_level,
'stream': 'ext://sys.stdout'
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'default',
'level': log_level,
'filename': shared.appdata + 'debug.log',
'maxBytes': 2097152, # 2 MiB
'backupCount': 1,
}
},
'loggers': {
'console_only': {
'handlers': ['console'],
'propagate' : 0
},
'file_only': {
'handlers': ['file'],
'propagate' : 0
},
'both': {
'handlers': ['console', 'file'],
'propagate' : 0
},
},
'root': {
'level': log_level,
'handlers': ['console'],
},
})
# TODO (xj9): Get from a config file.
#logger = logging.getLogger('console_only')
configureLogging()
if '-c' in sys.argv:
logger = logging.getLogger('file_only')
else:
logger = logging.getLogger('both')
def restartLoggingInUpdatedAppdataLocation():
global logger
for i in list(logger.handlers):
logger.removeHandler(i)
i.flush()
i.close()
configureLogging()
if '-c' in sys.argv:
logger = logging.getLogger('file_only')
else:
logger = logging.getLogger('both')
|
Johanndutoit/PyCon2013AppEngineTutorial
|
refs/heads/master
|
lib/python2.7/site-packages/pip/runner.py
|
658
|
import sys
import os
def run():
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
## FIXME: this is kind of crude; if we could create a fake pip
## module, then exec into it and update pip.__path__ properly, we
## wouldn't have to update sys.path:
sys.path.insert(0, base)
import pip
return pip.main()
if __name__ == '__main__':
exit = run()
if exit:
sys.exit(exit)
|
yiwen-luo/LeetCode
|
refs/heads/master
|
Python/sudoku-solver.py
|
3
|
# Time:  O((9!)^9)
# Space: O(1)
#
# Write a program to solve a Sudoku puzzle by filling the empty cells.
#
# Empty cells are indicated by the character '.'.
#
# You may assume that there will be only one unique solution.
#
class Solution:
# @param board, a 9x9 2D array
# Solve the Sudoku by modifying the input board in-place.
# Do not return any value.
def solveSudoku(self, board):
def isValid(board, x, y):
for i in xrange(9):
if i != x and board[i][y] == board[x][y]:
return False
for j in xrange(9):
if j != y and board[x][j] == board[x][y]:
return False
i = 3 * (x / 3)
while i < 3 * (x / 3 + 1):
j = 3 * (y / 3)
while j < 3 * (y / 3 + 1):
if (i != x or j != y) and board[i][j] == board[x][y]:
return False
j += 1
i += 1
return True
def solver(board):
for i in xrange(len(board)):
for j in xrange(len(board[0])):
if(board[i][j] == '.'):
for k in xrange(9):
board[i][j] = chr(ord('1') + k)
if isValid(board, i, j) and solver(board):
return True
board[i][j] = '.'
return False
return True
solver(board)
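# Illustrative usage (hypothetical board): the solver mutates the input
# in place, so no return value is needed.
#
#   board = [list(row) for row in ["53..7....", "6..195...", ".98....6.",
#                                  "8...6...3", "4..8.3..1", "7...2...6",
#                                  ".6....28.", "...419..5", "....8..79"]]
#   Solution().solveSudoku(board)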
|
wiltonlazary/arangodb
|
refs/heads/devel
|
3rdParty/V8/V8-5.0.71.39/tools/swarming_client/third_party/pyasn1/pyasn1/type/tag.py
|
200
|
# ASN.1 types tags
from operator import getitem
from pyasn1 import error
tagClassUniversal = 0x00
tagClassApplication = 0x40
tagClassContext = 0x80
tagClassPrivate = 0xC0
tagFormatSimple = 0x00
tagFormatConstructed = 0x20
tagCategoryImplicit = 0x01
tagCategoryExplicit = 0x02
tagCategoryUntagged = 0x04
class Tag:
def __init__(self, tagClass, tagFormat, tagId):
if tagId < 0:
raise error.PyAsn1Error(
'Negative tag ID (%s) not allowed' % (tagId,)
)
self.__tag = (tagClass, tagFormat, tagId)
self.uniq = (tagClass, tagId)
self.__hashedUniqTag = hash(self.uniq)
def __repr__(self):
return '%s(tagClass=%s, tagFormat=%s, tagId=%s)' % (
(self.__class__.__name__,) + self.__tag
)
    # This is really a hotspot -- expose public "uniq" attribute to save on
    # function calls
def __eq__(self, other): return self.uniq == other.uniq
def __ne__(self, other): return self.uniq != other.uniq
def __lt__(self, other): return self.uniq < other.uniq
def __le__(self, other): return self.uniq <= other.uniq
def __gt__(self, other): return self.uniq > other.uniq
def __ge__(self, other): return self.uniq >= other.uniq
def __hash__(self): return self.__hashedUniqTag
def __getitem__(self, idx): return self.__tag[idx]
    def __and__(self, otherTag):
        (tagClass, tagFormat, tagId) = otherTag
        return self.__class__(
            self.__tag[0]&tagClass, self.__tag[1]&tagFormat, self.__tag[2]&tagId
            )
def __or__(self, otherTag):
(tagClass, tagFormat, tagId) = otherTag
return self.__class__(
self.__tag[0]|tagClass,
self.__tag[1]|tagFormat,
self.__tag[2]|tagId
)
def asTuple(self): return self.__tag # __getitem__() is slow
class TagSet:
def __init__(self, baseTag=(), *superTags):
self.__baseTag = baseTag
self.__superTags = superTags
self.__hashedSuperTags = hash(superTags)
_uniq = ()
for t in superTags:
_uniq = _uniq + t.uniq
self.uniq = _uniq
self.__lenOfSuperTags = len(superTags)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self.__superTags])
)
def __add__(self, superTag):
return self.__class__(
self.__baseTag, *self.__superTags + (superTag,)
)
def __radd__(self, superTag):
return self.__class__(
self.__baseTag, *(superTag,) + self.__superTags
)
def tagExplicitly(self, superTag):
tagClass, tagFormat, tagId = superTag
if tagClass == tagClassUniversal:
raise error.PyAsn1Error(
'Can\'t tag with UNIVERSAL-class tag'
)
if tagFormat != tagFormatConstructed:
superTag = Tag(tagClass, tagFormatConstructed, tagId)
return self + superTag
def tagImplicitly(self, superTag):
tagClass, tagFormat, tagId = superTag
if self.__superTags:
superTag = Tag(tagClass, self.__superTags[-1][1], tagId)
return self[:-1] + superTag
def getBaseTag(self): return self.__baseTag
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.__class__(
self.__baseTag, *getitem(self.__superTags, idx)
)
return self.__superTags[idx]
def __eq__(self, other): return self.uniq == other.uniq
def __ne__(self, other): return self.uniq != other.uniq
def __lt__(self, other): return self.uniq < other.uniq
def __le__(self, other): return self.uniq <= other.uniq
def __gt__(self, other): return self.uniq > other.uniq
def __ge__(self, other): return self.uniq >= other.uniq
def __hash__(self): return self.__hashedSuperTags
def __len__(self): return self.__lenOfSuperTags
def isSuperTagSetOf(self, tagSet):
if len(tagSet) < self.__lenOfSuperTags:
return
idx = self.__lenOfSuperTags - 1
while idx >= 0:
if self.__superTags[idx] != tagSet[idx]:
return
idx = idx - 1
return 1
def initTagSet(tag): return TagSet(tag, tag)
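
# A hedged usage sketch (assumption, not part of the original module): build
# the tag set of a universal INTEGER (tag id 0x02) and wrap it in an explicit
# application-class tag. tagExplicitly() appends a constructed outer tag,
# while tagImplicitly() would replace the innermost tag instead.
if __name__ == '__main__':
    integerTagSet = initTagSet(Tag(tagClassUniversal, tagFormatSimple, 0x02))
    explicitTagSet = integerTagSet.tagExplicitly(
        Tag(tagClassApplication, tagFormatSimple, 5)
    )
    assert len(explicitTagSet) == 2
    assert integerTagSet.isSuperTagSetOf(explicitTagSet)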
|
apixandru/intellij-community
|
refs/heads/master
|
python/testData/resolve/FStringComprehensionTarget.py
|
31
|
xs = [f'{foo}' + foo for foo in range(10)]
<ref>
|
CroceRossaItaliana/jorvik
|
refs/heads/master
|
base/migrations/0012_allegato_scadenza_popola.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.db.models import Q
def forwards_func(apps, schema_editor):
# We get the model from the versioned app registry;
# if we directly import it, it'll be the wrong version
Allegato = apps.get_model("base", "Allegato")
db_alias = schema_editor.connection.alias
giorno_migrazione = datetime.datetime(2016, 2, 25)
scadenza = datetime.datetime(2016, 2, 24)
allegati = Allegato.objects.filter(
creazione__lt=giorno_migrazione
)
numero = allegati.count()
    print(" => base-0012: populating the expiry date for %d attachments that lack one" % (numero,))
allegati.update(scadenza=scadenza)
class Migration(migrations.Migration):
dependencies = [
('base', '0011_allegato_scadenza'),
]
operations = [
migrations.RunPython(forwards_func),
]
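    # A hedged note (assumption, not part of the original migration): RunPython
    # is given no reverse function here, so this migration cannot be unapplied;
    # a common pattern for data migrations that need no reversal logic is:
    #
    #     migrations.RunPython(forwards_func, migrations.RunPython.noop)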
|
cnsoft/kbengine-cocos2dx
|
refs/heads/cocos2dx-cnsoft
|
kbe/src/lib/python/Lib/test/cjkencodings_test.py
|
16
|
teststring = {
'big5': (
b"\xa6\x70\xa6\xf3\xa6\x62\x20\x50\x79\x74\x68\x6f\x6e\x20\xa4\xa4"
b"\xa8\xcf\xa5\xce\xac\x4a\xa6\xb3\xaa\xba\x20\x43\x20\x6c\x69\x62"
b"\x72\x61\x72\x79\x3f\x0a\xa1\x40\xa6\x62\xb8\xea\xb0\x54\xac\xec"
b"\xa7\xde\xa7\xd6\xb3\x74\xb5\x6f\xae\x69\xaa\xba\xa4\xb5\xa4\xd1"
b"\x2c\x20\xb6\x7d\xb5\x6f\xa4\xce\xb4\xfa\xb8\xd5\xb3\x6e\xc5\xe9"
b"\xaa\xba\xb3\x74\xab\xd7\xac\x4f\xa4\xa3\xae\x65\xa9\xbf\xb5\xf8"
b"\xaa\xba\x0a\xbd\xd2\xc3\x44\x2e\x20\xac\xb0\xa5\x5b\xa7\xd6\xb6"
b"\x7d\xb5\x6f\xa4\xce\xb4\xfa\xb8\xd5\xaa\xba\xb3\x74\xab\xd7\x2c"
b"\x20\xa7\xda\xad\xcc\xab\x4b\xb1\x60\xa7\xc6\xb1\xe6\xaf\xe0\xa7"
b"\x51\xa5\xce\xa4\x40\xa8\xc7\xa4\x77\xb6\x7d\xb5\x6f\xa6\x6e\xaa"
b"\xba\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xa8\xc3\xa6\xb3\xa4"
b"\x40\xad\xd3\x20\x66\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79"
b"\x70\x69\x6e\x67\x20\xaa\xba\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d"
b"\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x20\xa5\x69\x0a"
b"\xa8\xd1\xa8\xcf\xa5\xce\x2e\x20\xa5\xd8\xab\x65\xa6\xb3\xb3\x5c"
b"\xb3\x5c\xa6\x68\xa6\x68\xaa\xba\x20\x6c\x69\x62\x72\x61\x72\x79"
b"\x20\xac\x4f\xa5\x48\x20\x43\x20\xbc\x67\xa6\xa8\x2c\x20\xa6\xd3"
b"\x20\x50\x79\x74\x68\x6f\x6e\x20\xac\x4f\xa4\x40\xad\xd3\x0a\x66"
b"\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20"
b"\xaa\xba\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c"
b"\x61\x6e\x67\x75\x61\x67\x65\x2e\x20\xac\x47\xa7\xda\xad\xcc\xa7"
b"\xc6\xb1\xe6\xaf\xe0\xb1\x4e\xac\x4a\xa6\xb3\xaa\xba\x0a\x43\x20"
b"\x6c\x69\x62\x72\x61\x72\x79\x20\xae\xb3\xa8\xec\x20\x50\x79\x74"
b"\x68\x6f\x6e\x20\xaa\xba\xc0\xf4\xb9\xd2\xa4\xa4\xb4\xfa\xb8\xd5"
b"\xa4\xce\xbe\xe3\xa6\x58\x2e\x20\xa8\xe4\xa4\xa4\xb3\xcc\xa5\x44"
b"\xad\x6e\xa4\x5d\xac\x4f\xa7\xda\xad\xcc\xa9\xd2\x0a\xad\x6e\xb0"
b"\x51\xbd\xd7\xaa\xba\xb0\xdd\xc3\x44\xb4\x4e\xac\x4f\x3a\x0a\x0a",
b"\xe5\xa6\x82\xe4\xbd\x95\xe5\x9c\xa8\x20\x50\x79\x74\x68\x6f\x6e"
b"\x20\xe4\xb8\xad\xe4\xbd\xbf\xe7\x94\xa8\xe6\x97\xa2\xe6\x9c\x89"
b"\xe7\x9a\x84\x20\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xe3"
b"\x80\x80\xe5\x9c\xa8\xe8\xb3\x87\xe8\xa8\x8a\xe7\xa7\x91\xe6\x8a"
b"\x80\xe5\xbf\xab\xe9\x80\x9f\xe7\x99\xbc\xe5\xb1\x95\xe7\x9a\x84"
b"\xe4\xbb\x8a\xe5\xa4\xa9\x2c\x20\xe9\x96\x8b\xe7\x99\xbc\xe5\x8f"
b"\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe8\xbb\x9f\xe9\xab\x94\xe7\x9a\x84"
b"\xe9\x80\x9f\xe5\xba\xa6\xe6\x98\xaf\xe4\xb8\x8d\xe5\xae\xb9\xe5"
b"\xbf\xbd\xe8\xa6\x96\xe7\x9a\x84\x0a\xe8\xaa\xb2\xe9\xa1\x8c\x2e"
b"\x20\xe7\x82\xba\xe5\x8a\xa0\xe5\xbf\xab\xe9\x96\x8b\xe7\x99\xbc"
b"\xe5\x8f\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe7\x9a\x84\xe9\x80\x9f\xe5"
b"\xba\xa6\x2c\x20\xe6\x88\x91\xe5\x80\x91\xe4\xbe\xbf\xe5\xb8\xb8"
b"\xe5\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\x88\xa9\xe7\x94\xa8\xe4"
b"\xb8\x80\xe4\xba\x9b\xe5\xb7\xb2\xe9\x96\x8b\xe7\x99\xbc\xe5\xa5"
b"\xbd\xe7\x9a\x84\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xe4\xb8"
b"\xa6\xe6\x9c\x89\xe4\xb8\x80\xe5\x80\x8b\x20\x66\x61\x73\x74\x20"
b"\x70\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20"
b"\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67"
b"\x75\x61\x67\x65\x20\xe5\x8f\xaf\x0a\xe4\xbe\x9b\xe4\xbd\xbf\xe7"
b"\x94\xa8\x2e\x20\xe7\x9b\xae\xe5\x89\x8d\xe6\x9c\x89\xe8\xa8\xb1"
b"\xe8\xa8\xb1\xe5\xa4\x9a\xe5\xa4\x9a\xe7\x9a\x84\x20\x6c\x69\x62"
b"\x72\x61\x72\x79\x20\xe6\x98\xaf\xe4\xbb\xa5\x20\x43\x20\xe5\xaf"
b"\xab\xe6\x88\x90\x2c\x20\xe8\x80\x8c\x20\x50\x79\x74\x68\x6f\x6e"
b"\x20\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\x0a\x66\x61\x73\x74\x20"
b"\x70\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20"
b"\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67"
b"\x75\x61\x67\x65\x2e\x20\xe6\x95\x85\xe6\x88\x91\xe5\x80\x91\xe5"
b"\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\xb0\x87\xe6\x97\xa2\xe6\x9c"
b"\x89\xe7\x9a\x84\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xe6"
b"\x8b\xbf\xe5\x88\xb0\x20\x50\x79\x74\x68\x6f\x6e\x20\xe7\x9a\x84"
b"\xe7\x92\xb0\xe5\xa2\x83\xe4\xb8\xad\xe6\xb8\xac\xe8\xa9\xa6\xe5"
b"\x8f\x8a\xe6\x95\xb4\xe5\x90\x88\x2e\x20\xe5\x85\xb6\xe4\xb8\xad"
b"\xe6\x9c\x80\xe4\xb8\xbb\xe8\xa6\x81\xe4\xb9\x9f\xe6\x98\xaf\xe6"
b"\x88\x91\xe5\x80\x91\xe6\x89\x80\x0a\xe8\xa6\x81\xe8\xa8\x8e\xe8"
b"\xab\x96\xe7\x9a\x84\xe5\x95\x8f\xe9\xa1\x8c\xe5\xb0\xb1\xe6\x98"
b"\xaf\x3a\x0a\x0a"),
'big5hkscs': (
b"\x88\x45\x88\x5c\x8a\x73\x8b\xda\x8d\xd8\x0a\x88\x66\x88\x62\x88"
b"\xa7\x20\x88\xa7\x88\xa3\x0a",
b"\xf0\xa0\x84\x8c\xc4\x9a\xe9\xb5\xae\xe7\xbd\x93\xe6\xb4\x86\x0a"
b"\xc3\x8a\xc3\x8a\xcc\x84\xc3\xaa\x20\xc3\xaa\xc3\xaa\xcc\x84\x0a"),
'cp949': (
b"\x8c\x63\xb9\xe6\xb0\xa2\xc7\xcf\x20\xbc\x84\xbd\xc3\xc4\xdd\xb6"
b"\xf3\x0a\x0a\xa8\xc0\xa8\xc0\xb3\xb3\x21\x21\x20\xec\xd7\xce\xfa"
b"\xea\xc5\xc6\xd0\x92\xe6\x90\x70\xb1\xc5\x20\xa8\xde\xa8\xd3\xc4"
b"\x52\xa2\xaf\xa2\xaf\xa2\xaf\x20\xb1\xe0\x8a\x96\x20\xa8\xd1\xb5"
b"\xb3\x20\xa8\xc0\x2e\x20\x2e\x0a\xe4\xac\xbf\xb5\xa8\xd1\xb4\xc9"
b"\xc8\xc2\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xbc\xad\xbf\xef\xb7"
b"\xef\x20\xb5\xaf\xc7\xd0\xeb\xe0\x20\xca\xab\xc4\x52\x20\x21\x20"
b"\x21\x20\x21\xa4\xd0\x2e\xa4\xd0\x0a\xc8\xe5\xc8\xe5\xc8\xe5\x20"
b"\xa4\xa1\xa4\xa1\xa4\xa1\xa1\xd9\xa4\xd0\x5f\xa4\xd0\x20\xbe\xee"
b"\x90\x8a\x20\xc5\xcb\xc4\xe2\x83\x4f\x20\xb5\xae\xc0\xc0\x20\xaf"
b"\x68\xce\xfa\xb5\xe9\xeb\xe0\x20\xa8\xc0\xb5\xe5\x83\x4f\x0a\xbc"
b"\xb3\x90\x6a\x20\xca\xab\xc4\x52\x20\x2e\x20\x2e\x20\x2e\x20\x2e"
b"\x20\xb1\xbc\xbe\xd6\x9a\x66\x20\xa8\xd1\xb1\xc5\x20\xa8\xde\x90"
b"\x74\xa8\xc2\x83\x4f\x20\xec\xd7\xec\xd2\xf4\xb9\xe5\xfc\xf1\xe9"
b"\xb1\xee\xa3\x8e\x0a\xbf\xcd\xbe\xac\xc4\x52\x20\x21\x20\x21\x20"
b"\xe4\xac\xbf\xb5\xa8\xd1\x20\xca\xab\xb4\xc9\xb1\xc5\x20\xa1\xd9"
b"\xdf\xbe\xb0\xfc\x20\xbe\xf8\xb4\xc9\xb1\xc5\xb4\xc9\x20\xe4\xac"
b"\xb4\xc9\xb5\xd8\xc4\x52\x20\xb1\xdb\xbe\xd6\x8a\xdb\x0a\xa8\xde"
b"\xb7\xc1\xb5\xe0\xce\xfa\x20\x9a\xc3\xc7\xb4\xbd\xa4\xc4\x52\x20"
b"\xbe\xee\x90\x8a\x20\xec\xd7\xec\xd2\xf4\xb9\xe5\xfc\xf1\xe9\x9a"
b"\xc4\xa8\xef\xb5\xe9\x9d\xda\x21\x21\x20\xa8\xc0\xa8\xc0\xb3\xb3"
b"\xa2\xbd\x20\xa1\xd2\xa1\xd2\x2a\x0a\x0a",
b"\xeb\x98\xa0\xeb\xb0\xa9\xea\xb0\x81\xed\x95\x98\x20\xed\x8e\xb2"
b"\xec\x8b\x9c\xec\xbd\x9c\xeb\x9d\xbc\x0a\x0a\xe3\x89\xaf\xe3\x89"
b"\xaf\xeb\x82\xa9\x21\x21\x20\xe5\x9b\xa0\xe4\xb9\x9d\xe6\x9c\x88"
b"\xed\x8c\xa8\xeb\xaf\xa4\xeb\xa6\x94\xea\xb6\x88\x20\xe2\x93\xa1"
b"\xe2\x93\x96\xed\x9b\x80\xc2\xbf\xc2\xbf\xc2\xbf\x20\xea\xb8\x8d"
b"\xeb\x92\x99\x20\xe2\x93\x94\xeb\x8e\xa8\x20\xe3\x89\xaf\x2e\x20"
b"\x2e\x0a\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94\xeb\x8a\xa5\xed\x9a"
b"\xb9\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xec\x84\x9c\xec\x9a\xb8"
b"\xeb\xa4\x84\x20\xeb\x8e\x90\xed\x95\x99\xe4\xb9\x99\x20\xe5\xae"
b"\xb6\xed\x9b\x80\x20\x21\x20\x21\x20\x21\xe3\x85\xa0\x2e\xe3\x85"
b"\xa0\x0a\xed\x9d\x90\xed\x9d\x90\xed\x9d\x90\x20\xe3\x84\xb1\xe3"
b"\x84\xb1\xe3\x84\xb1\xe2\x98\x86\xe3\x85\xa0\x5f\xe3\x85\xa0\x20"
b"\xec\x96\xb4\xeb\xa6\xa8\x20\xed\x83\xb8\xec\xbd\xb0\xea\xb8\x90"
b"\x20\xeb\x8e\x8c\xec\x9d\x91\x20\xec\xb9\x91\xe4\xb9\x9d\xeb\x93"
b"\xa4\xe4\xb9\x99\x20\xe3\x89\xaf\xeb\x93\x9c\xea\xb8\x90\x0a\xec"
b"\x84\xa4\xeb\xa6\x8c\x20\xe5\xae\xb6\xed\x9b\x80\x20\x2e\x20\x2e"
b"\x20\x2e\x20\x2e\x20\xea\xb5\xb4\xec\x95\xa0\xec\x89\x8c\x20\xe2"
b"\x93\x94\xea\xb6\x88\x20\xe2\x93\xa1\xeb\xa6\x98\xe3\x89\xb1\xea"
b"\xb8\x90\x20\xe5\x9b\xa0\xe4\xbb\x81\xe5\xb7\x9d\xef\xa6\x81\xe4"
b"\xb8\xad\xea\xb9\x8c\xec\xa6\xbc\x0a\xec\x99\x80\xec\x92\x80\xed"
b"\x9b\x80\x20\x21\x20\x21\x20\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94"
b"\x20\xe5\xae\xb6\xeb\x8a\xa5\xea\xb6\x88\x20\xe2\x98\x86\xe4\xb8"
b"\x8a\xea\xb4\x80\x20\xec\x97\x86\xeb\x8a\xa5\xea\xb6\x88\xeb\x8a"
b"\xa5\x20\xe4\xba\x9e\xeb\x8a\xa5\xeb\x92\x88\xed\x9b\x80\x20\xea"
b"\xb8\x80\xec\x95\xa0\xeb\x93\xb4\x0a\xe2\x93\xa1\xeb\xa0\xa4\xeb"
b"\x93\x80\xe4\xb9\x9d\x20\xec\x8b\x80\xed\x92\x94\xec\x88\xb4\xed"
b"\x9b\x80\x20\xec\x96\xb4\xeb\xa6\xa8\x20\xe5\x9b\xa0\xe4\xbb\x81"
b"\xe5\xb7\x9d\xef\xa6\x81\xe4\xb8\xad\xec\x8b\x81\xe2\x91\xa8\xeb"
b"\x93\xa4\xec\x95\x9c\x21\x21\x20\xe3\x89\xaf\xe3\x89\xaf\xeb\x82"
b"\xa9\xe2\x99\xa1\x20\xe2\x8c\x92\xe2\x8c\x92\x2a\x0a\x0a"),
'euc_jisx0213': (
b"\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb3\xab\xc8\xaf\xa4\xcf\xa1"
b"\xa2\x31\x39\x39\x30\x20\xc7\xaf\xa4\xb4\xa4\xed\xa4\xab\xa4\xe9"
b"\xb3\xab\xbb\xcf\xa4\xb5\xa4\xec\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9"
b"\xa1\xa3\x0a\xb3\xab\xc8\xaf\xbc\xd4\xa4\xce\x20\x47\x75\x69\x64"
b"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\xa4\xcf\xb6"
b"\xb5\xb0\xe9\xcd\xd1\xa4\xce\xa5\xd7\xa5\xed\xa5\xb0\xa5\xe9\xa5"
b"\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa1\xd6\x41\x42\x43\xa1\xd7"
b"\xa4\xce\xb3\xab\xc8\xaf\xa4\xcb\xbb\xb2\xb2\xc3\xa4\xb7\xa4\xc6"
b"\xa4\xa4\xa4\xde\xa4\xb7\xa4\xbf\xa4\xac\xa1\xa2\x41\x42\x43\x20"
b"\xa4\xcf\xbc\xc2\xcd\xd1\xbe\xe5\xa4\xce\xcc\xdc\xc5\xaa\xa4\xcb"
b"\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xc5\xac\xa4\xb7\xa4\xc6\xa4\xa4"
b"\xa4\xde\xa4\xbb\xa4\xf3\xa4\xc7\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4"
b"\xb3\xa4\xce\xa4\xbf\xa4\xe1\xa1\xa2\x47\x75\x69\x64\x6f\x20\xa4"
b"\xcf\xa4\xe8\xa4\xea\xbc\xc2\xcd\xd1\xc5\xaa\xa4\xca\xa5\xd7\xa5"
b"\xed\xa5\xb0\xa5\xe9\xa5\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa4"
b"\xce\xb3\xab\xc8\xaf\xa4\xf2\xb3\xab\xbb\xcf\xa4\xb7\xa1\xa2\xb1"
b"\xd1\xb9\xf1\x20\x42\x42\x53\x20\xca\xfc\xc1\xf7\xa4\xce\xa5\xb3"
b"\xa5\xe1\xa5\xc7\xa5\xa3\xc8\xd6\xc1\xc8\xa1\xd6\xa5\xe2\xa5\xf3"
b"\xa5\xc6\xa5\xa3\x20\xa5\xd1\xa5\xa4\xa5\xbd\xa5\xf3\xa1\xd7\xa4"
b"\xce\xa5\xd5\xa5\xa1\xa5\xf3\xa4\xc7\xa4\xa2\xa4\xeb\x20\x47\x75"
b"\x69\x64\x6f\x20\xa4\xcf\xa4\xb3\xa4\xce\xb8\xc0\xb8\xec\xa4\xf2"
b"\xa1\xd6\x50\x79\x74\x68\x6f\x6e\xa1\xd7\xa4\xc8\xcc\xbe\xa4\xc5"
b"\xa4\xb1\xa4\xde\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4\xb3\xa4\xce\xa4"
b"\xe8\xa4\xa6\xa4\xca\xc7\xd8\xb7\xca\xa4\xab\xa4\xe9\xc0\xb8\xa4"
b"\xde\xa4\xec\xa4\xbf\x20\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb8"
b"\xc0\xb8\xec\xc0\xdf\xb7\xd7\xa4\xcf\xa1\xa2\xa1\xd6\xa5\xb7\xa5"
b"\xf3\xa5\xd7\xa5\xeb\xa1\xd7\xa4\xc7\xa1\xd6\xbd\xac\xc6\xc0\xa4"
b"\xac\xcd\xc6\xb0\xd7\xa1\xd7\xa4\xc8\xa4\xa4\xa4\xa6\xcc\xdc\xc9"
b"\xb8\xa4\xcb\xbd\xc5\xc5\xc0\xa4\xac\xc3\xd6\xa4\xab\xa4\xec\xa4"
b"\xc6\xa4\xa4\xa4\xde\xa4\xb9\xa1\xa3\x0a\xc2\xbf\xa4\xaf\xa4\xce"
b"\xa5\xb9\xa5\xaf\xa5\xea\xa5\xd7\xa5\xc8\xb7\xcf\xb8\xc0\xb8\xec"
b"\xa4\xc7\xa4\xcf\xa5\xe6\xa1\xbc\xa5\xb6\xa4\xce\xcc\xdc\xc0\xe8"
b"\xa4\xce\xcd\xf8\xca\xd8\xc0\xad\xa4\xf2\xcd\xa5\xc0\xe8\xa4\xb7"
b"\xa4\xc6\xbf\xa7\xa1\xb9\xa4\xca\xb5\xa1\xc7\xbd\xa4\xf2\xb8\xc0"
b"\xb8\xec\xcd\xd7\xc1\xc7\xa4\xc8\xa4\xb7\xa4\xc6\xbc\xe8\xa4\xea"
b"\xc6\xfe\xa4\xec\xa4\xeb\xbe\xec\xb9\xe7\xa4\xac\xc2\xbf\xa4\xa4"
b"\xa4\xce\xa4\xc7\xa4\xb9\xa4\xac\xa1\xa2\x50\x79\x74\x68\x6f\x6e"
b"\x20\xa4\xc7\xa4\xcf\xa4\xbd\xa4\xa6\xa4\xa4\xa4\xc3\xa4\xbf\xbe"
b"\xae\xba\xd9\xb9\xa9\xa4\xac\xc4\xc9\xb2\xc3\xa4\xb5\xa4\xec\xa4"
b"\xeb\xa4\xb3\xa4\xc8\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xa4\xa2\xa4"
b"\xea\xa4\xde\xa4\xbb\xa4\xf3\xa1\xa3\x0a\xb8\xc0\xb8\xec\xbc\xab"
b"\xc2\xce\xa4\xce\xb5\xa1\xc7\xbd\xa4\xcf\xba\xc7\xbe\xae\xb8\xc2"
b"\xa4\xcb\xb2\xa1\xa4\xb5\xa4\xa8\xa1\xa2\xc9\xac\xcd\xd7\xa4\xca"
b"\xb5\xa1\xc7\xbd\xa4\xcf\xb3\xc8\xc4\xa5\xa5\xe2\xa5\xb8\xa5\xe5"
b"\xa1\xbc\xa5\xeb\xa4\xc8\xa4\xb7\xa4\xc6\xc4\xc9\xb2\xc3\xa4\xb9"
b"\xa4\xeb\xa1\xa2\xa4\xc8\xa4\xa4\xa4\xa6\xa4\xce\xa4\xac\x20\x50"
b"\x79\x74\x68\x6f\x6e\x20\xa4\xce\xa5\xdd\xa5\xea\xa5\xb7\xa1\xbc"
b"\xa4\xc7\xa4\xb9\xa1\xa3\x0a\x0a\xa5\xce\xa4\xf7\x20\xa5\xfe\x20"
b"\xa5\xc8\xa5\xad\xaf\xac\xaf\xda\x20\xcf\xe3\x8f\xfe\xd8\x20\x8f"
b"\xfe\xd4\x8f\xfe\xe8\x8f\xfc\xd6\x0a",
b"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
b"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81"
b"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b"
b"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3"
b"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3"
b"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73"
b"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8"
b"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3"
b"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80"
b"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
b"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3"
b"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80"
b"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8"
b"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf"
b"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3"
b"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81"
b"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81"
b"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20"
b"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7"
b"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83"
b"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e"
b"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5"
b"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42"
b"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3"
b"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80"
b"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83"
b"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae"
b"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3"
b"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3"
b"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79"
b"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5"
b"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a"
b"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8"
b"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81"
b"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3"
b"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81"
b"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97"
b"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5"
b"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81"
b"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab"
b"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3"
b"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80"
b"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82"
b"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80"
b"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3"
b"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88"
b"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88"
b"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6"
b"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6"
b"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96"
b"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5"
b"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81"
b"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e"
b"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84"
b"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3"
b"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82"
b"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe"
b"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3"
b"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4"
b"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c"
b"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95"
b"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6"
b"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83"
b"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8"
b"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3"
b"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81"
b"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3"
b"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81"
b"\x99\xe3\x80\x82\x0a\x0a\xe3\x83\x8e\xe3\x81\x8b\xe3\x82\x9a\x20"
b"\xe3\x83\x88\xe3\x82\x9a\x20\xe3\x83\x88\xe3\x82\xad\xef\xa8\xb6"
b"\xef\xa8\xb9\x20\xf0\xa1\x9a\xb4\xf0\xaa\x8e\x8c\x20\xe9\xba\x80"
b"\xe9\xbd\x81\xf0\xa9\x9b\xb0\x0a"),
'euc_jp': (
b"\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb3\xab\xc8\xaf\xa4\xcf\xa1"
b"\xa2\x31\x39\x39\x30\x20\xc7\xaf\xa4\xb4\xa4\xed\xa4\xab\xa4\xe9"
b"\xb3\xab\xbb\xcf\xa4\xb5\xa4\xec\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9"
b"\xa1\xa3\x0a\xb3\xab\xc8\xaf\xbc\xd4\xa4\xce\x20\x47\x75\x69\x64"
b"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\xa4\xcf\xb6"
b"\xb5\xb0\xe9\xcd\xd1\xa4\xce\xa5\xd7\xa5\xed\xa5\xb0\xa5\xe9\xa5"
b"\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa1\xd6\x41\x42\x43\xa1\xd7"
b"\xa4\xce\xb3\xab\xc8\xaf\xa4\xcb\xbb\xb2\xb2\xc3\xa4\xb7\xa4\xc6"
b"\xa4\xa4\xa4\xde\xa4\xb7\xa4\xbf\xa4\xac\xa1\xa2\x41\x42\x43\x20"
b"\xa4\xcf\xbc\xc2\xcd\xd1\xbe\xe5\xa4\xce\xcc\xdc\xc5\xaa\xa4\xcb"
b"\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xc5\xac\xa4\xb7\xa4\xc6\xa4\xa4"
b"\xa4\xde\xa4\xbb\xa4\xf3\xa4\xc7\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4"
b"\xb3\xa4\xce\xa4\xbf\xa4\xe1\xa1\xa2\x47\x75\x69\x64\x6f\x20\xa4"
b"\xcf\xa4\xe8\xa4\xea\xbc\xc2\xcd\xd1\xc5\xaa\xa4\xca\xa5\xd7\xa5"
b"\xed\xa5\xb0\xa5\xe9\xa5\xdf\xa5\xf3\xa5\xb0\xb8\xc0\xb8\xec\xa4"
b"\xce\xb3\xab\xc8\xaf\xa4\xf2\xb3\xab\xbb\xcf\xa4\xb7\xa1\xa2\xb1"
b"\xd1\xb9\xf1\x20\x42\x42\x53\x20\xca\xfc\xc1\xf7\xa4\xce\xa5\xb3"
b"\xa5\xe1\xa5\xc7\xa5\xa3\xc8\xd6\xc1\xc8\xa1\xd6\xa5\xe2\xa5\xf3"
b"\xa5\xc6\xa5\xa3\x20\xa5\xd1\xa5\xa4\xa5\xbd\xa5\xf3\xa1\xd7\xa4"
b"\xce\xa5\xd5\xa5\xa1\xa5\xf3\xa4\xc7\xa4\xa2\xa4\xeb\x20\x47\x75"
b"\x69\x64\x6f\x20\xa4\xcf\xa4\xb3\xa4\xce\xb8\xc0\xb8\xec\xa4\xf2"
b"\xa1\xd6\x50\x79\x74\x68\x6f\x6e\xa1\xd7\xa4\xc8\xcc\xbe\xa4\xc5"
b"\xa4\xb1\xa4\xde\xa4\xb7\xa4\xbf\xa1\xa3\x0a\xa4\xb3\xa4\xce\xa4"
b"\xe8\xa4\xa6\xa4\xca\xc7\xd8\xb7\xca\xa4\xab\xa4\xe9\xc0\xb8\xa4"
b"\xde\xa4\xec\xa4\xbf\x20\x50\x79\x74\x68\x6f\x6e\x20\xa4\xce\xb8"
b"\xc0\xb8\xec\xc0\xdf\xb7\xd7\xa4\xcf\xa1\xa2\xa1\xd6\xa5\xb7\xa5"
b"\xf3\xa5\xd7\xa5\xeb\xa1\xd7\xa4\xc7\xa1\xd6\xbd\xac\xc6\xc0\xa4"
b"\xac\xcd\xc6\xb0\xd7\xa1\xd7\xa4\xc8\xa4\xa4\xa4\xa6\xcc\xdc\xc9"
b"\xb8\xa4\xcb\xbd\xc5\xc5\xc0\xa4\xac\xc3\xd6\xa4\xab\xa4\xec\xa4"
b"\xc6\xa4\xa4\xa4\xde\xa4\xb9\xa1\xa3\x0a\xc2\xbf\xa4\xaf\xa4\xce"
b"\xa5\xb9\xa5\xaf\xa5\xea\xa5\xd7\xa5\xc8\xb7\xcf\xb8\xc0\xb8\xec"
b"\xa4\xc7\xa4\xcf\xa5\xe6\xa1\xbc\xa5\xb6\xa4\xce\xcc\xdc\xc0\xe8"
b"\xa4\xce\xcd\xf8\xca\xd8\xc0\xad\xa4\xf2\xcd\xa5\xc0\xe8\xa4\xb7"
b"\xa4\xc6\xbf\xa7\xa1\xb9\xa4\xca\xb5\xa1\xc7\xbd\xa4\xf2\xb8\xc0"
b"\xb8\xec\xcd\xd7\xc1\xc7\xa4\xc8\xa4\xb7\xa4\xc6\xbc\xe8\xa4\xea"
b"\xc6\xfe\xa4\xec\xa4\xeb\xbe\xec\xb9\xe7\xa4\xac\xc2\xbf\xa4\xa4"
b"\xa4\xce\xa4\xc7\xa4\xb9\xa4\xac\xa1\xa2\x50\x79\x74\x68\x6f\x6e"
b"\x20\xa4\xc7\xa4\xcf\xa4\xbd\xa4\xa6\xa4\xa4\xa4\xc3\xa4\xbf\xbe"
b"\xae\xba\xd9\xb9\xa9\xa4\xac\xc4\xc9\xb2\xc3\xa4\xb5\xa4\xec\xa4"
b"\xeb\xa4\xb3\xa4\xc8\xa4\xcf\xa4\xa2\xa4\xde\xa4\xea\xa4\xa2\xa4"
b"\xea\xa4\xde\xa4\xbb\xa4\xf3\xa1\xa3\x0a\xb8\xc0\xb8\xec\xbc\xab"
b"\xc2\xce\xa4\xce\xb5\xa1\xc7\xbd\xa4\xcf\xba\xc7\xbe\xae\xb8\xc2"
b"\xa4\xcb\xb2\xa1\xa4\xb5\xa4\xa8\xa1\xa2\xc9\xac\xcd\xd7\xa4\xca"
b"\xb5\xa1\xc7\xbd\xa4\xcf\xb3\xc8\xc4\xa5\xa5\xe2\xa5\xb8\xa5\xe5"
b"\xa1\xbc\xa5\xeb\xa4\xc8\xa4\xb7\xa4\xc6\xc4\xc9\xb2\xc3\xa4\xb9"
b"\xa4\xeb\xa1\xa2\xa4\xc8\xa4\xa4\xa4\xa6\xa4\xce\xa4\xac\x20\x50"
b"\x79\x74\x68\x6f\x6e\x20\xa4\xce\xa5\xdd\xa5\xea\xa5\xb7\xa1\xbc"
b"\xa4\xc7\xa4\xb9\xa1\xa3\x0a\x0a",
b"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
b"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81"
b"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b"
b"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3"
b"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3"
b"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73"
b"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8"
b"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3"
b"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80"
b"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
b"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3"
b"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80"
b"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8"
b"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf"
b"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3"
b"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81"
b"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81"
b"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20"
b"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7"
b"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83"
b"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e"
b"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5"
b"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42"
b"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3"
b"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80"
b"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83"
b"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae"
b"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3"
b"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3"
b"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79"
b"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5"
b"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a"
b"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8"
b"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81"
b"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3"
b"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81"
b"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97"
b"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5"
b"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81"
b"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab"
b"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3"
b"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80"
b"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82"
b"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80"
b"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3"
b"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88"
b"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88"
b"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6"
b"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6"
b"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96"
b"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5"
b"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81"
b"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e"
b"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84"
b"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3"
b"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82"
b"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe"
b"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3"
b"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4"
b"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c"
b"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95"
b"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6"
b"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83"
b"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8"
b"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3"
b"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81"
b"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3"
b"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81"
b"\x99\xe3\x80\x82\x0a\x0a"),
'euc_kr': (
b"\xa1\xdd\x20\xc6\xc4\xc0\xcc\xbd\xe3\x28\x50\x79\x74\x68\x6f\x6e"
b"\x29\xc0\xba\x20\xb9\xe8\xbf\xec\xb1\xe2\x20\xbd\xb1\xb0\xed\x2c"
b"\x20\xb0\xad\xb7\xc2\xc7\xd1\x20\xc7\xc1\xb7\xce\xb1\xd7\xb7\xa1"
b"\xb9\xd6\x20\xbe\xf0\xbe\xee\xc0\xd4\xb4\xcf\xb4\xd9\x2e\x20\xc6"
b"\xc4\xc0\xcc\xbd\xe3\xc0\xba\x0a\xc8\xbf\xc0\xb2\xc0\xfb\xc0\xce"
b"\x20\xb0\xed\xbc\xf6\xc1\xd8\x20\xb5\xa5\xc0\xcc\xc5\xcd\x20\xb1"
b"\xb8\xc1\xb6\xbf\xcd\x20\xb0\xa3\xb4\xdc\xc7\xcf\xc1\xf6\xb8\xb8"
b"\x20\xc8\xbf\xc0\xb2\xc0\xfb\xc0\xce\x20\xb0\xb4\xc3\xbc\xc1\xf6"
b"\xc7\xe2\xc7\xc1\xb7\xce\xb1\xd7\xb7\xa1\xb9\xd6\xc0\xbb\x0a\xc1"
b"\xf6\xbf\xf8\xc7\xd5\xb4\xcf\xb4\xd9\x2e\x20\xc6\xc4\xc0\xcc\xbd"
b"\xe3\xc0\xc7\x20\xbf\xec\xbe\xc6\x28\xe9\xd0\xe4\xba\x29\xc7\xd1"
b"\x20\xb9\xae\xb9\xfd\xb0\xfa\x20\xb5\xbf\xc0\xfb\x20\xc5\xb8\xc0"
b"\xcc\xc7\xce\x2c\x20\xb1\xd7\xb8\xae\xb0\xed\x20\xc0\xce\xc5\xcd"
b"\xc7\xc1\xb8\xae\xc6\xc3\x0a\xc8\xaf\xb0\xe6\xc0\xba\x20\xc6\xc4"
b"\xc0\xcc\xbd\xe3\xc0\xbb\x20\xbd\xba\xc5\xa9\xb8\xb3\xc6\xc3\xb0"
b"\xfa\x20\xbf\xa9\xb7\xaf\x20\xba\xd0\xbe\xdf\xbf\xa1\xbc\xad\xbf"
b"\xcd\x20\xb4\xeb\xba\xce\xba\xd0\xc0\xc7\x20\xc7\xc3\xb7\xa7\xc6"
b"\xfb\xbf\xa1\xbc\xad\xc0\xc7\x20\xba\xfc\xb8\xa5\x0a\xbe\xd6\xc7"
b"\xc3\xb8\xae\xc4\xc9\xc0\xcc\xbc\xc7\x20\xb0\xb3\xb9\xdf\xc0\xbb"
b"\x20\xc7\xd2\x20\xbc\xf6\x20\xc0\xd6\xb4\xc2\x20\xc0\xcc\xbb\xf3"
b"\xc0\xfb\xc0\xce\x20\xbe\xf0\xbe\xee\xb7\xce\x20\xb8\xb8\xb5\xe9"
b"\xbe\xee\xc1\xdd\xb4\xcf\xb4\xd9\x2e\x0a\x0a\xa1\xd9\xc3\xb9\xb0"
b"\xa1\xb3\xa1\x3a\x20\xb3\xaf\xbe\xc6\xb6\xf3\x20\xa4\xd4\xa4\xb6"
b"\xa4\xd0\xa4\xd4\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xd4\xbe\xb1\x7e\x20"
b"\xa4\xd4\xa4\xa4\xa4\xd2\xa4\xb7\xc5\xad\x21\x20\xa4\xd4\xa4\xa8"
b"\xa4\xd1\xa4\xb7\xb1\xdd\xbe\xf8\xc0\xcc\x20\xc0\xfc\xa4\xd4\xa4"
b"\xbe\xa4\xc8\xa4\xb2\xb4\xcf\xb4\xd9\x2e\x20\xa4\xd4\xa4\xb2\xa4"
b"\xce\xa4\xaa\x2e\x20\xb1\xd7\xb7\xb1\xb0\xc5\x20\xa4\xd4\xa4\xb7"
b"\xa4\xd1\xa4\xb4\xb4\xd9\x2e\x0a",
b"\xe2\x97\x8e\x20\xed\x8c\x8c\xec\x9d\xb4\xec\x8d\xac\x28\x50\x79"
b"\x74\x68\x6f\x6e\x29\xec\x9d\x80\x20\xeb\xb0\xb0\xec\x9a\xb0\xea"
b"\xb8\xb0\x20\xec\x89\xbd\xea\xb3\xa0\x2c\x20\xea\xb0\x95\xeb\xa0"
b"\xa5\xed\x95\x9c\x20\xed\x94\x84\xeb\xa1\x9c\xea\xb7\xb8\xeb\x9e"
b"\x98\xeb\xb0\x8d\x20\xec\x96\xb8\xec\x96\xb4\xec\x9e\x85\xeb\x8b"
b"\x88\xeb\x8b\xa4\x2e\x20\xed\x8c\x8c\xec\x9d\xb4\xec\x8d\xac\xec"
b"\x9d\x80\x0a\xed\x9a\xa8\xec\x9c\xa8\xec\xa0\x81\xec\x9d\xb8\x20"
b"\xea\xb3\xa0\xec\x88\x98\xec\xa4\x80\x20\xeb\x8d\xb0\xec\x9d\xb4"
b"\xed\x84\xb0\x20\xea\xb5\xac\xec\xa1\xb0\xec\x99\x80\x20\xea\xb0"
b"\x84\xeb\x8b\xa8\xed\x95\x98\xec\xa7\x80\xeb\xa7\x8c\x20\xed\x9a"
b"\xa8\xec\x9c\xa8\xec\xa0\x81\xec\x9d\xb8\x20\xea\xb0\x9d\xec\xb2"
b"\xb4\xec\xa7\x80\xed\x96\xa5\xed\x94\x84\xeb\xa1\x9c\xea\xb7\xb8"
b"\xeb\x9e\x98\xeb\xb0\x8d\xec\x9d\x84\x0a\xec\xa7\x80\xec\x9b\x90"
b"\xed\x95\xa9\xeb\x8b\x88\xeb\x8b\xa4\x2e\x20\xed\x8c\x8c\xec\x9d"
b"\xb4\xec\x8d\xac\xec\x9d\x98\x20\xec\x9a\xb0\xec\x95\x84\x28\xe5"
b"\x84\xaa\xe9\x9b\x85\x29\xed\x95\x9c\x20\xeb\xac\xb8\xeb\xb2\x95"
b"\xea\xb3\xbc\x20\xeb\x8f\x99\xec\xa0\x81\x20\xed\x83\x80\xec\x9d"
b"\xb4\xed\x95\x91\x2c\x20\xea\xb7\xb8\xeb\xa6\xac\xea\xb3\xa0\x20"
b"\xec\x9d\xb8\xed\x84\xb0\xed\x94\x84\xeb\xa6\xac\xed\x8c\x85\x0a"
b"\xed\x99\x98\xea\xb2\xbd\xec\x9d\x80\x20\xed\x8c\x8c\xec\x9d\xb4"
b"\xec\x8d\xac\xec\x9d\x84\x20\xec\x8a\xa4\xed\x81\xac\xeb\xa6\xbd"
b"\xed\x8c\x85\xea\xb3\xbc\x20\xec\x97\xac\xeb\x9f\xac\x20\xeb\xb6"
b"\x84\xec\x95\xbc\xec\x97\x90\xec\x84\x9c\xec\x99\x80\x20\xeb\x8c"
b"\x80\xeb\xb6\x80\xeb\xb6\x84\xec\x9d\x98\x20\xed\x94\x8c\xeb\x9e"
b"\xab\xed\x8f\xbc\xec\x97\x90\xec\x84\x9c\xec\x9d\x98\x20\xeb\xb9"
b"\xa0\xeb\xa5\xb8\x0a\xec\x95\xa0\xed\x94\x8c\xeb\xa6\xac\xec\xbc"
b"\x80\xec\x9d\xb4\xec\x85\x98\x20\xea\xb0\x9c\xeb\xb0\x9c\xec\x9d"
b"\x84\x20\xed\x95\xa0\x20\xec\x88\x98\x20\xec\x9e\x88\xeb\x8a\x94"
b"\x20\xec\x9d\xb4\xec\x83\x81\xec\xa0\x81\xec\x9d\xb8\x20\xec\x96"
b"\xb8\xec\x96\xb4\xeb\xa1\x9c\x20\xeb\xa7\x8c\xeb\x93\xa4\xec\x96"
b"\xb4\xec\xa4\x8d\xeb\x8b\x88\xeb\x8b\xa4\x2e\x0a\x0a\xe2\x98\x86"
b"\xec\xb2\xab\xea\xb0\x80\xeb\x81\x9d\x3a\x20\xeb\x82\xa0\xec\x95"
b"\x84\xeb\x9d\xbc\x20\xec\x93\x94\xec\x93\x94\xec\x93\xa9\x7e\x20"
b"\xeb\x8b\x81\xed\x81\xbc\x21\x20\xeb\x9c\xbd\xea\xb8\x88\xec\x97"
b"\x86\xec\x9d\xb4\x20\xec\xa0\x84\xed\x99\xa5\xeb\x8b\x88\xeb\x8b"
b"\xa4\x2e\x20\xeb\xb7\x81\x2e\x20\xea\xb7\xb8\xeb\x9f\xb0\xea\xb1"
b"\xb0\x20\xec\x9d\x8e\xeb\x8b\xa4\x2e\x0a"),
'gb18030': (
b"\x50\x79\x74\x68\x6f\x6e\xa3\xa8\xc5\xc9\xc9\xad\xa3\xa9\xd3\xef"
b"\xd1\xd4\xca\xc7\xd2\xbb\xd6\xd6\xb9\xa6\xc4\xdc\xc7\xbf\xb4\xf3"
b"\xb6\xf8\xcd\xea\xc9\xc6\xb5\xc4\xcd\xa8\xd3\xc3\xd0\xcd\xbc\xc6"
b"\xcb\xe3\xbb\xfa\xb3\xcc\xd0\xf2\xc9\xe8\xbc\xc6\xd3\xef\xd1\xd4"
b"\xa3\xac\x0a\xd2\xd1\xbe\xad\xbe\xdf\xd3\xd0\xca\xae\xb6\xe0\xc4"
b"\xea\xb5\xc4\xb7\xa2\xd5\xb9\xc0\xfa\xca\xb7\xa3\xac\xb3\xc9\xca"
b"\xec\xc7\xd2\xce\xc8\xb6\xa8\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1"
b"\xd4\xbe\xdf\xd3\xd0\xb7\xc7\xb3\xa3\xbc\xf2\xbd\xdd\xb6\xf8\xc7"
b"\xe5\xce\xfa\x0a\xb5\xc4\xd3\xef\xb7\xa8\xcc\xd8\xb5\xe3\xa3\xac"
b"\xca\xca\xba\xcf\xcd\xea\xb3\xc9\xb8\xf7\xd6\xd6\xb8\xdf\xb2\xe3"
b"\xc8\xce\xce\xf1\xa3\xac\xbc\xb8\xba\xf5\xbf\xc9\xd2\xd4\xd4\xda"
b"\xcb\xf9\xd3\xd0\xb5\xc4\xb2\xd9\xd7\xf7\xcf\xb5\xcd\xb3\xd6\xd0"
b"\x0a\xd4\xcb\xd0\xd0\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1\xd4\xbc"
b"\xf2\xb5\xa5\xb6\xf8\xc7\xbf\xb4\xf3\xa3\xac\xca\xca\xba\xcf\xb8"
b"\xf7\xd6\xd6\xc8\xcb\xca\xbf\xd1\xa7\xcf\xb0\xca\xb9\xd3\xc3\xa1"
b"\xa3\xc4\xbf\xc7\xb0\xa3\xac\xbb\xf9\xd3\xda\xd5\xe2\x0a\xd6\xd6"
b"\xd3\xef\xd1\xd4\xb5\xc4\xcf\xe0\xb9\xd8\xbc\xbc\xca\xf5\xd5\xfd"
b"\xd4\xda\xb7\xc9\xcb\xd9\xb5\xc4\xb7\xa2\xd5\xb9\xa3\xac\xd3\xc3"
b"\xbb\xa7\xca\xfd\xc1\xbf\xbc\xb1\xbe\xe7\xc0\xa9\xb4\xf3\xa3\xac"
b"\xcf\xe0\xb9\xd8\xb5\xc4\xd7\xca\xd4\xb4\xb7\xc7\xb3\xa3\xb6\xe0"
b"\xa1\xa3\x0a\xc8\xe7\xba\xce\xd4\xda\x20\x50\x79\x74\x68\x6f\x6e"
b"\x20\xd6\xd0\xca\xb9\xd3\xc3\xbc\xc8\xd3\xd0\xb5\xc4\x20\x43\x20"
b"\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xa1\xa1\xd4\xda\xd9\x59\xd3"
b"\x8d\xbf\xc6\xbc\xbc\xbf\xec\xcb\xd9\xb0\x6c\xd5\xb9\xb5\xc4\xbd"
b"\xf1\xcc\xec\x2c\x20\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xdc"
b"\x9b\xf3\x77\xb5\xc4\xcb\xd9\xb6\xc8\xca\xc7\xb2\xbb\xc8\xdd\xba"
b"\xf6\xd2\x95\xb5\xc4\x0a\xd5\x6e\xee\x7d\x2e\x20\x9e\xe9\xbc\xd3"
b"\xbf\xec\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xb5\xc4\xcb\xd9"
b"\xb6\xc8\x2c\x20\xce\xd2\x82\x83\xb1\xe3\xb3\xa3\xcf\xa3\xcd\xfb"
b"\xc4\xdc\xc0\xfb\xd3\xc3\xd2\xbb\xd0\xa9\xd2\xd1\xe9\x5f\xb0\x6c"
b"\xba\xc3\xb5\xc4\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\x81\x4b"
b"\xd3\xd0\xd2\xbb\x82\x80\x20\x66\x61\x73\x74\x20\x70\x72\x6f\x74"
b"\x6f\x74\x79\x70\x69\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72"
b"\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x20"
b"\xbf\xc9\x0a\xb9\xa9\xca\xb9\xd3\xc3\x2e\x20\xc4\xbf\xc7\xb0\xd3"
b"\xd0\xd4\x53\xd4\x53\xb6\xe0\xb6\xe0\xb5\xc4\x20\x6c\x69\x62\x72"
b"\x61\x72\x79\x20\xca\xc7\xd2\xd4\x20\x43\x20\x8c\x91\xb3\xc9\x2c"
b"\x20\xb6\xf8\x20\x50\x79\x74\x68\x6f\x6e\x20\xca\xc7\xd2\xbb\x82"
b"\x80\x0a\x66\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79\x70\x69"
b"\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e"
b"\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x2e\x20\xb9\xca\xce\xd2"
b"\x82\x83\xcf\xa3\xcd\xfb\xc4\xdc\x8c\xa2\xbc\xc8\xd3\xd0\xb5\xc4"
b"\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xc4\xc3\xb5\xbd\x20"
b"\x50\x79\x74\x68\x6f\x6e\x20\xb5\xc4\xad\x68\xbe\xb3\xd6\xd0\x9c"
b"\x79\xd4\x87\xbc\xb0\xd5\xfb\xba\xcf\x2e\x20\xc6\xe4\xd6\xd0\xd7"
b"\xee\xd6\xf7\xd2\xaa\xd2\xb2\xca\xc7\xce\xd2\x82\x83\xcb\xf9\x0a"
b"\xd2\xaa\xd3\x91\xd5\x93\xb5\xc4\x86\x96\xee\x7d\xbe\xcd\xca\xc7"
b"\x3a\x0a\x83\x35\xc7\x31\x83\x33\x9a\x33\x83\x32\xb1\x31\x83\x33"
b"\x95\x31\x20\x82\x37\xd1\x36\x83\x30\x8c\x34\x83\x36\x84\x33\x20"
b"\x82\x38\x89\x35\x82\x38\xfb\x36\x83\x33\x95\x35\x20\x83\x33\xd5"
b"\x31\x82\x39\x81\x35\x20\x83\x30\xfd\x39\x83\x33\x86\x30\x20\x83"
b"\x34\xdc\x33\x83\x35\xf6\x37\x83\x35\x97\x35\x20\x83\x35\xf9\x35"
b"\x83\x30\x91\x39\x82\x38\x83\x39\x82\x39\xfc\x33\x83\x30\xf0\x34"
b"\x20\x83\x32\xeb\x39\x83\x32\xeb\x35\x82\x39\x83\x39\x2e\x0a\x0a",
b"\x50\x79\x74\x68\x6f\x6e\xef\xbc\x88\xe6\xb4\xbe\xe6\xa3\xae\xef"
b"\xbc\x89\xe8\xaf\xad\xe8\xa8\x80\xe6\x98\xaf\xe4\xb8\x80\xe7\xa7"
b"\x8d\xe5\x8a\x9f\xe8\x83\xbd\xe5\xbc\xba\xe5\xa4\xa7\xe8\x80\x8c"
b"\xe5\xae\x8c\xe5\x96\x84\xe7\x9a\x84\xe9\x80\x9a\xe7\x94\xa8\xe5"
b"\x9e\x8b\xe8\xae\xa1\xe7\xae\x97\xe6\x9c\xba\xe7\xa8\x8b\xe5\xba"
b"\x8f\xe8\xae\xbe\xe8\xae\xa1\xe8\xaf\xad\xe8\xa8\x80\xef\xbc\x8c"
b"\x0a\xe5\xb7\xb2\xe7\xbb\x8f\xe5\x85\xb7\xe6\x9c\x89\xe5\x8d\x81"
b"\xe5\xa4\x9a\xe5\xb9\xb4\xe7\x9a\x84\xe5\x8f\x91\xe5\xb1\x95\xe5"
b"\x8e\x86\xe5\x8f\xb2\xef\xbc\x8c\xe6\x88\x90\xe7\x86\x9f\xe4\xb8"
b"\x94\xe7\xa8\xb3\xe5\xae\x9a\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d"
b"\xe8\xaf\xad\xe8\xa8\x80\xe5\x85\xb7\xe6\x9c\x89\xe9\x9d\x9e\xe5"
b"\xb8\xb8\xe7\xae\x80\xe6\x8d\xb7\xe8\x80\x8c\xe6\xb8\x85\xe6\x99"
b"\xb0\x0a\xe7\x9a\x84\xe8\xaf\xad\xe6\xb3\x95\xe7\x89\xb9\xe7\x82"
b"\xb9\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\xae\x8c\xe6\x88\x90"
b"\xe5\x90\x84\xe7\xa7\x8d\xe9\xab\x98\xe5\xb1\x82\xe4\xbb\xbb\xe5"
b"\x8a\xa1\xef\xbc\x8c\xe5\x87\xa0\xe4\xb9\x8e\xe5\x8f\xaf\xe4\xbb"
b"\xa5\xe5\x9c\xa8\xe6\x89\x80\xe6\x9c\x89\xe7\x9a\x84\xe6\x93\x8d"
b"\xe4\xbd\x9c\xe7\xb3\xbb\xe7\xbb\x9f\xe4\xb8\xad\x0a\xe8\xbf\x90"
b"\xe8\xa1\x8c\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d\xe8\xaf\xad\xe8"
b"\xa8\x80\xe7\xae\x80\xe5\x8d\x95\xe8\x80\x8c\xe5\xbc\xba\xe5\xa4"
b"\xa7\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\x90\x84\xe7\xa7\x8d"
b"\xe4\xba\xba\xe5\xa3\xab\xe5\xad\xa6\xe4\xb9\xa0\xe4\xbd\xbf\xe7"
b"\x94\xa8\xe3\x80\x82\xe7\x9b\xae\xe5\x89\x8d\xef\xbc\x8c\xe5\x9f"
b"\xba\xe4\xba\x8e\xe8\xbf\x99\x0a\xe7\xa7\x8d\xe8\xaf\xad\xe8\xa8"
b"\x80\xe7\x9a\x84\xe7\x9b\xb8\xe5\x85\xb3\xe6\x8a\x80\xe6\x9c\xaf"
b"\xe6\xad\xa3\xe5\x9c\xa8\xe9\xa3\x9e\xe9\x80\x9f\xe7\x9a\x84\xe5"
b"\x8f\x91\xe5\xb1\x95\xef\xbc\x8c\xe7\x94\xa8\xe6\x88\xb7\xe6\x95"
b"\xb0\xe9\x87\x8f\xe6\x80\xa5\xe5\x89\xa7\xe6\x89\xa9\xe5\xa4\xa7"
b"\xef\xbc\x8c\xe7\x9b\xb8\xe5\x85\xb3\xe7\x9a\x84\xe8\xb5\x84\xe6"
b"\xba\x90\xe9\x9d\x9e\xe5\xb8\xb8\xe5\xa4\x9a\xe3\x80\x82\x0a\xe5"
b"\xa6\x82\xe4\xbd\x95\xe5\x9c\xa8\x20\x50\x79\x74\x68\x6f\x6e\x20"
b"\xe4\xb8\xad\xe4\xbd\xbf\xe7\x94\xa8\xe6\x97\xa2\xe6\x9c\x89\xe7"
b"\x9a\x84\x20\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xe3\x80"
b"\x80\xe5\x9c\xa8\xe8\xb3\x87\xe8\xa8\x8a\xe7\xa7\x91\xe6\x8a\x80"
b"\xe5\xbf\xab\xe9\x80\x9f\xe7\x99\xbc\xe5\xb1\x95\xe7\x9a\x84\xe4"
b"\xbb\x8a\xe5\xa4\xa9\x2c\x20\xe9\x96\x8b\xe7\x99\xbc\xe5\x8f\x8a"
b"\xe6\xb8\xac\xe8\xa9\xa6\xe8\xbb\x9f\xe9\xab\x94\xe7\x9a\x84\xe9"
b"\x80\x9f\xe5\xba\xa6\xe6\x98\xaf\xe4\xb8\x8d\xe5\xae\xb9\xe5\xbf"
b"\xbd\xe8\xa6\x96\xe7\x9a\x84\x0a\xe8\xaa\xb2\xe9\xa1\x8c\x2e\x20"
b"\xe7\x82\xba\xe5\x8a\xa0\xe5\xbf\xab\xe9\x96\x8b\xe7\x99\xbc\xe5"
b"\x8f\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe7\x9a\x84\xe9\x80\x9f\xe5\xba"
b"\xa6\x2c\x20\xe6\x88\x91\xe5\x80\x91\xe4\xbe\xbf\xe5\xb8\xb8\xe5"
b"\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\x88\xa9\xe7\x94\xa8\xe4\xb8"
b"\x80\xe4\xba\x9b\xe5\xb7\xb2\xe9\x96\x8b\xe7\x99\xbc\xe5\xa5\xbd"
b"\xe7\x9a\x84\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xe4\xb8\xa6"
b"\xe6\x9c\x89\xe4\xb8\x80\xe5\x80\x8b\x20\x66\x61\x73\x74\x20\x70"
b"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70"
b"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75"
b"\x61\x67\x65\x20\xe5\x8f\xaf\x0a\xe4\xbe\x9b\xe4\xbd\xbf\xe7\x94"
b"\xa8\x2e\x20\xe7\x9b\xae\xe5\x89\x8d\xe6\x9c\x89\xe8\xa8\xb1\xe8"
b"\xa8\xb1\xe5\xa4\x9a\xe5\xa4\x9a\xe7\x9a\x84\x20\x6c\x69\x62\x72"
b"\x61\x72\x79\x20\xe6\x98\xaf\xe4\xbb\xa5\x20\x43\x20\xe5\xaf\xab"
b"\xe6\x88\x90\x2c\x20\xe8\x80\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20"
b"\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\x0a\x66\x61\x73\x74\x20\x70"
b"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70"
b"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75"
b"\x61\x67\x65\x2e\x20\xe6\x95\x85\xe6\x88\x91\xe5\x80\x91\xe5\xb8"
b"\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\xb0\x87\xe6\x97\xa2\xe6\x9c\x89"
b"\xe7\x9a\x84\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xe6\x8b"
b"\xbf\xe5\x88\xb0\x20\x50\x79\x74\x68\x6f\x6e\x20\xe7\x9a\x84\xe7"
b"\x92\xb0\xe5\xa2\x83\xe4\xb8\xad\xe6\xb8\xac\xe8\xa9\xa6\xe5\x8f"
b"\x8a\xe6\x95\xb4\xe5\x90\x88\x2e\x20\xe5\x85\xb6\xe4\xb8\xad\xe6"
b"\x9c\x80\xe4\xb8\xbb\xe8\xa6\x81\xe4\xb9\x9f\xe6\x98\xaf\xe6\x88"
b"\x91\xe5\x80\x91\xe6\x89\x80\x0a\xe8\xa6\x81\xe8\xa8\x8e\xe8\xab"
b"\x96\xe7\x9a\x84\xe5\x95\x8f\xe9\xa1\x8c\xe5\xb0\xb1\xe6\x98\xaf"
b"\x3a\x0a\xed\x8c\x8c\xec\x9d\xb4\xec\x8d\xac\xec\x9d\x80\x20\xea"
b"\xb0\x95\xeb\xa0\xa5\xed\x95\x9c\x20\xea\xb8\xb0\xeb\x8a\xa5\xec"
b"\x9d\x84\x20\xec\xa7\x80\xeb\x8b\x8c\x20\xeb\xb2\x94\xec\x9a\xa9"
b"\x20\xec\xbb\xb4\xed\x93\xa8\xed\x84\xb0\x20\xed\x94\x84\xeb\xa1"
b"\x9c\xea\xb7\xb8\xeb\x9e\x98\xeb\xb0\x8d\x20\xec\x96\xb8\xec\x96"
b"\xb4\xeb\x8b\xa4\x2e\x0a\x0a"),
'gb2312': (
b"\x50\x79\x74\x68\x6f\x6e\xa3\xa8\xc5\xc9\xc9\xad\xa3\xa9\xd3\xef"
b"\xd1\xd4\xca\xc7\xd2\xbb\xd6\xd6\xb9\xa6\xc4\xdc\xc7\xbf\xb4\xf3"
b"\xb6\xf8\xcd\xea\xc9\xc6\xb5\xc4\xcd\xa8\xd3\xc3\xd0\xcd\xbc\xc6"
b"\xcb\xe3\xbb\xfa\xb3\xcc\xd0\xf2\xc9\xe8\xbc\xc6\xd3\xef\xd1\xd4"
b"\xa3\xac\x0a\xd2\xd1\xbe\xad\xbe\xdf\xd3\xd0\xca\xae\xb6\xe0\xc4"
b"\xea\xb5\xc4\xb7\xa2\xd5\xb9\xc0\xfa\xca\xb7\xa3\xac\xb3\xc9\xca"
b"\xec\xc7\xd2\xce\xc8\xb6\xa8\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1"
b"\xd4\xbe\xdf\xd3\xd0\xb7\xc7\xb3\xa3\xbc\xf2\xbd\xdd\xb6\xf8\xc7"
b"\xe5\xce\xfa\x0a\xb5\xc4\xd3\xef\xb7\xa8\xcc\xd8\xb5\xe3\xa3\xac"
b"\xca\xca\xba\xcf\xcd\xea\xb3\xc9\xb8\xf7\xd6\xd6\xb8\xdf\xb2\xe3"
b"\xc8\xce\xce\xf1\xa3\xac\xbc\xb8\xba\xf5\xbf\xc9\xd2\xd4\xd4\xda"
b"\xcb\xf9\xd3\xd0\xb5\xc4\xb2\xd9\xd7\xf7\xcf\xb5\xcd\xb3\xd6\xd0"
b"\x0a\xd4\xcb\xd0\xd0\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1\xd4\xbc"
b"\xf2\xb5\xa5\xb6\xf8\xc7\xbf\xb4\xf3\xa3\xac\xca\xca\xba\xcf\xb8"
b"\xf7\xd6\xd6\xc8\xcb\xca\xbf\xd1\xa7\xcf\xb0\xca\xb9\xd3\xc3\xa1"
b"\xa3\xc4\xbf\xc7\xb0\xa3\xac\xbb\xf9\xd3\xda\xd5\xe2\x0a\xd6\xd6"
b"\xd3\xef\xd1\xd4\xb5\xc4\xcf\xe0\xb9\xd8\xbc\xbc\xca\xf5\xd5\xfd"
b"\xd4\xda\xb7\xc9\xcb\xd9\xb5\xc4\xb7\xa2\xd5\xb9\xa3\xac\xd3\xc3"
b"\xbb\xa7\xca\xfd\xc1\xbf\xbc\xb1\xbe\xe7\xc0\xa9\xb4\xf3\xa3\xac"
b"\xcf\xe0\xb9\xd8\xb5\xc4\xd7\xca\xd4\xb4\xb7\xc7\xb3\xa3\xb6\xe0"
b"\xa1\xa3\x0a\x0a",
b"\x50\x79\x74\x68\x6f\x6e\xef\xbc\x88\xe6\xb4\xbe\xe6\xa3\xae\xef"
b"\xbc\x89\xe8\xaf\xad\xe8\xa8\x80\xe6\x98\xaf\xe4\xb8\x80\xe7\xa7"
b"\x8d\xe5\x8a\x9f\xe8\x83\xbd\xe5\xbc\xba\xe5\xa4\xa7\xe8\x80\x8c"
b"\xe5\xae\x8c\xe5\x96\x84\xe7\x9a\x84\xe9\x80\x9a\xe7\x94\xa8\xe5"
b"\x9e\x8b\xe8\xae\xa1\xe7\xae\x97\xe6\x9c\xba\xe7\xa8\x8b\xe5\xba"
b"\x8f\xe8\xae\xbe\xe8\xae\xa1\xe8\xaf\xad\xe8\xa8\x80\xef\xbc\x8c"
b"\x0a\xe5\xb7\xb2\xe7\xbb\x8f\xe5\x85\xb7\xe6\x9c\x89\xe5\x8d\x81"
b"\xe5\xa4\x9a\xe5\xb9\xb4\xe7\x9a\x84\xe5\x8f\x91\xe5\xb1\x95\xe5"
b"\x8e\x86\xe5\x8f\xb2\xef\xbc\x8c\xe6\x88\x90\xe7\x86\x9f\xe4\xb8"
b"\x94\xe7\xa8\xb3\xe5\xae\x9a\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d"
b"\xe8\xaf\xad\xe8\xa8\x80\xe5\x85\xb7\xe6\x9c\x89\xe9\x9d\x9e\xe5"
b"\xb8\xb8\xe7\xae\x80\xe6\x8d\xb7\xe8\x80\x8c\xe6\xb8\x85\xe6\x99"
b"\xb0\x0a\xe7\x9a\x84\xe8\xaf\xad\xe6\xb3\x95\xe7\x89\xb9\xe7\x82"
b"\xb9\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\xae\x8c\xe6\x88\x90"
b"\xe5\x90\x84\xe7\xa7\x8d\xe9\xab\x98\xe5\xb1\x82\xe4\xbb\xbb\xe5"
b"\x8a\xa1\xef\xbc\x8c\xe5\x87\xa0\xe4\xb9\x8e\xe5\x8f\xaf\xe4\xbb"
b"\xa5\xe5\x9c\xa8\xe6\x89\x80\xe6\x9c\x89\xe7\x9a\x84\xe6\x93\x8d"
b"\xe4\xbd\x9c\xe7\xb3\xbb\xe7\xbb\x9f\xe4\xb8\xad\x0a\xe8\xbf\x90"
b"\xe8\xa1\x8c\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d\xe8\xaf\xad\xe8"
b"\xa8\x80\xe7\xae\x80\xe5\x8d\x95\xe8\x80\x8c\xe5\xbc\xba\xe5\xa4"
b"\xa7\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\x90\x84\xe7\xa7\x8d"
b"\xe4\xba\xba\xe5\xa3\xab\xe5\xad\xa6\xe4\xb9\xa0\xe4\xbd\xbf\xe7"
b"\x94\xa8\xe3\x80\x82\xe7\x9b\xae\xe5\x89\x8d\xef\xbc\x8c\xe5\x9f"
b"\xba\xe4\xba\x8e\xe8\xbf\x99\x0a\xe7\xa7\x8d\xe8\xaf\xad\xe8\xa8"
b"\x80\xe7\x9a\x84\xe7\x9b\xb8\xe5\x85\xb3\xe6\x8a\x80\xe6\x9c\xaf"
b"\xe6\xad\xa3\xe5\x9c\xa8\xe9\xa3\x9e\xe9\x80\x9f\xe7\x9a\x84\xe5"
b"\x8f\x91\xe5\xb1\x95\xef\xbc\x8c\xe7\x94\xa8\xe6\x88\xb7\xe6\x95"
b"\xb0\xe9\x87\x8f\xe6\x80\xa5\xe5\x89\xa7\xe6\x89\xa9\xe5\xa4\xa7"
b"\xef\xbc\x8c\xe7\x9b\xb8\xe5\x85\xb3\xe7\x9a\x84\xe8\xb5\x84\xe6"
b"\xba\x90\xe9\x9d\x9e\xe5\xb8\xb8\xe5\xa4\x9a\xe3\x80\x82\x0a\x0a"),
'gbk': (
b"\x50\x79\x74\x68\x6f\x6e\xa3\xa8\xc5\xc9\xc9\xad\xa3\xa9\xd3\xef"
b"\xd1\xd4\xca\xc7\xd2\xbb\xd6\xd6\xb9\xa6\xc4\xdc\xc7\xbf\xb4\xf3"
b"\xb6\xf8\xcd\xea\xc9\xc6\xb5\xc4\xcd\xa8\xd3\xc3\xd0\xcd\xbc\xc6"
b"\xcb\xe3\xbb\xfa\xb3\xcc\xd0\xf2\xc9\xe8\xbc\xc6\xd3\xef\xd1\xd4"
b"\xa3\xac\x0a\xd2\xd1\xbe\xad\xbe\xdf\xd3\xd0\xca\xae\xb6\xe0\xc4"
b"\xea\xb5\xc4\xb7\xa2\xd5\xb9\xc0\xfa\xca\xb7\xa3\xac\xb3\xc9\xca"
b"\xec\xc7\xd2\xce\xc8\xb6\xa8\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1"
b"\xd4\xbe\xdf\xd3\xd0\xb7\xc7\xb3\xa3\xbc\xf2\xbd\xdd\xb6\xf8\xc7"
b"\xe5\xce\xfa\x0a\xb5\xc4\xd3\xef\xb7\xa8\xcc\xd8\xb5\xe3\xa3\xac"
b"\xca\xca\xba\xcf\xcd\xea\xb3\xc9\xb8\xf7\xd6\xd6\xb8\xdf\xb2\xe3"
b"\xc8\xce\xce\xf1\xa3\xac\xbc\xb8\xba\xf5\xbf\xc9\xd2\xd4\xd4\xda"
b"\xcb\xf9\xd3\xd0\xb5\xc4\xb2\xd9\xd7\xf7\xcf\xb5\xcd\xb3\xd6\xd0"
b"\x0a\xd4\xcb\xd0\xd0\xa1\xa3\xd5\xe2\xd6\xd6\xd3\xef\xd1\xd4\xbc"
b"\xf2\xb5\xa5\xb6\xf8\xc7\xbf\xb4\xf3\xa3\xac\xca\xca\xba\xcf\xb8"
b"\xf7\xd6\xd6\xc8\xcb\xca\xbf\xd1\xa7\xcf\xb0\xca\xb9\xd3\xc3\xa1"
b"\xa3\xc4\xbf\xc7\xb0\xa3\xac\xbb\xf9\xd3\xda\xd5\xe2\x0a\xd6\xd6"
b"\xd3\xef\xd1\xd4\xb5\xc4\xcf\xe0\xb9\xd8\xbc\xbc\xca\xf5\xd5\xfd"
b"\xd4\xda\xb7\xc9\xcb\xd9\xb5\xc4\xb7\xa2\xd5\xb9\xa3\xac\xd3\xc3"
b"\xbb\xa7\xca\xfd\xc1\xbf\xbc\xb1\xbe\xe7\xc0\xa9\xb4\xf3\xa3\xac"
b"\xcf\xe0\xb9\xd8\xb5\xc4\xd7\xca\xd4\xb4\xb7\xc7\xb3\xa3\xb6\xe0"
b"\xa1\xa3\x0a\xc8\xe7\xba\xce\xd4\xda\x20\x50\x79\x74\x68\x6f\x6e"
b"\x20\xd6\xd0\xca\xb9\xd3\xc3\xbc\xc8\xd3\xd0\xb5\xc4\x20\x43\x20"
b"\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xa1\xa1\xd4\xda\xd9\x59\xd3"
b"\x8d\xbf\xc6\xbc\xbc\xbf\xec\xcb\xd9\xb0\x6c\xd5\xb9\xb5\xc4\xbd"
b"\xf1\xcc\xec\x2c\x20\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xdc"
b"\x9b\xf3\x77\xb5\xc4\xcb\xd9\xb6\xc8\xca\xc7\xb2\xbb\xc8\xdd\xba"
b"\xf6\xd2\x95\xb5\xc4\x0a\xd5\x6e\xee\x7d\x2e\x20\x9e\xe9\xbc\xd3"
b"\xbf\xec\xe9\x5f\xb0\x6c\xbc\xb0\x9c\x79\xd4\x87\xb5\xc4\xcb\xd9"
b"\xb6\xc8\x2c\x20\xce\xd2\x82\x83\xb1\xe3\xb3\xa3\xcf\xa3\xcd\xfb"
b"\xc4\xdc\xc0\xfb\xd3\xc3\xd2\xbb\xd0\xa9\xd2\xd1\xe9\x5f\xb0\x6c"
b"\xba\xc3\xb5\xc4\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\x81\x4b"
b"\xd3\xd0\xd2\xbb\x82\x80\x20\x66\x61\x73\x74\x20\x70\x72\x6f\x74"
b"\x6f\x74\x79\x70\x69\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72"
b"\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x20"
b"\xbf\xc9\x0a\xb9\xa9\xca\xb9\xd3\xc3\x2e\x20\xc4\xbf\xc7\xb0\xd3"
b"\xd0\xd4\x53\xd4\x53\xb6\xe0\xb6\xe0\xb5\xc4\x20\x6c\x69\x62\x72"
b"\x61\x72\x79\x20\xca\xc7\xd2\xd4\x20\x43\x20\x8c\x91\xb3\xc9\x2c"
b"\x20\xb6\xf8\x20\x50\x79\x74\x68\x6f\x6e\x20\xca\xc7\xd2\xbb\x82"
b"\x80\x0a\x66\x61\x73\x74\x20\x70\x72\x6f\x74\x6f\x74\x79\x70\x69"
b"\x6e\x67\x20\xb5\xc4\x20\x70\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e"
b"\x67\x20\x6c\x61\x6e\x67\x75\x61\x67\x65\x2e\x20\xb9\xca\xce\xd2"
b"\x82\x83\xcf\xa3\xcd\xfb\xc4\xdc\x8c\xa2\xbc\xc8\xd3\xd0\xb5\xc4"
b"\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xc4\xc3\xb5\xbd\x20"
b"\x50\x79\x74\x68\x6f\x6e\x20\xb5\xc4\xad\x68\xbe\xb3\xd6\xd0\x9c"
b"\x79\xd4\x87\xbc\xb0\xd5\xfb\xba\xcf\x2e\x20\xc6\xe4\xd6\xd0\xd7"
b"\xee\xd6\xf7\xd2\xaa\xd2\xb2\xca\xc7\xce\xd2\x82\x83\xcb\xf9\x0a"
b"\xd2\xaa\xd3\x91\xd5\x93\xb5\xc4\x86\x96\xee\x7d\xbe\xcd\xca\xc7"
b"\x3a\x0a\x0a",
b"\x50\x79\x74\x68\x6f\x6e\xef\xbc\x88\xe6\xb4\xbe\xe6\xa3\xae\xef"
b"\xbc\x89\xe8\xaf\xad\xe8\xa8\x80\xe6\x98\xaf\xe4\xb8\x80\xe7\xa7"
b"\x8d\xe5\x8a\x9f\xe8\x83\xbd\xe5\xbc\xba\xe5\xa4\xa7\xe8\x80\x8c"
b"\xe5\xae\x8c\xe5\x96\x84\xe7\x9a\x84\xe9\x80\x9a\xe7\x94\xa8\xe5"
b"\x9e\x8b\xe8\xae\xa1\xe7\xae\x97\xe6\x9c\xba\xe7\xa8\x8b\xe5\xba"
b"\x8f\xe8\xae\xbe\xe8\xae\xa1\xe8\xaf\xad\xe8\xa8\x80\xef\xbc\x8c"
b"\x0a\xe5\xb7\xb2\xe7\xbb\x8f\xe5\x85\xb7\xe6\x9c\x89\xe5\x8d\x81"
b"\xe5\xa4\x9a\xe5\xb9\xb4\xe7\x9a\x84\xe5\x8f\x91\xe5\xb1\x95\xe5"
b"\x8e\x86\xe5\x8f\xb2\xef\xbc\x8c\xe6\x88\x90\xe7\x86\x9f\xe4\xb8"
b"\x94\xe7\xa8\xb3\xe5\xae\x9a\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d"
b"\xe8\xaf\xad\xe8\xa8\x80\xe5\x85\xb7\xe6\x9c\x89\xe9\x9d\x9e\xe5"
b"\xb8\xb8\xe7\xae\x80\xe6\x8d\xb7\xe8\x80\x8c\xe6\xb8\x85\xe6\x99"
b"\xb0\x0a\xe7\x9a\x84\xe8\xaf\xad\xe6\xb3\x95\xe7\x89\xb9\xe7\x82"
b"\xb9\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\xae\x8c\xe6\x88\x90"
b"\xe5\x90\x84\xe7\xa7\x8d\xe9\xab\x98\xe5\xb1\x82\xe4\xbb\xbb\xe5"
b"\x8a\xa1\xef\xbc\x8c\xe5\x87\xa0\xe4\xb9\x8e\xe5\x8f\xaf\xe4\xbb"
b"\xa5\xe5\x9c\xa8\xe6\x89\x80\xe6\x9c\x89\xe7\x9a\x84\xe6\x93\x8d"
b"\xe4\xbd\x9c\xe7\xb3\xbb\xe7\xbb\x9f\xe4\xb8\xad\x0a\xe8\xbf\x90"
b"\xe8\xa1\x8c\xe3\x80\x82\xe8\xbf\x99\xe7\xa7\x8d\xe8\xaf\xad\xe8"
b"\xa8\x80\xe7\xae\x80\xe5\x8d\x95\xe8\x80\x8c\xe5\xbc\xba\xe5\xa4"
b"\xa7\xef\xbc\x8c\xe9\x80\x82\xe5\x90\x88\xe5\x90\x84\xe7\xa7\x8d"
b"\xe4\xba\xba\xe5\xa3\xab\xe5\xad\xa6\xe4\xb9\xa0\xe4\xbd\xbf\xe7"
b"\x94\xa8\xe3\x80\x82\xe7\x9b\xae\xe5\x89\x8d\xef\xbc\x8c\xe5\x9f"
b"\xba\xe4\xba\x8e\xe8\xbf\x99\x0a\xe7\xa7\x8d\xe8\xaf\xad\xe8\xa8"
b"\x80\xe7\x9a\x84\xe7\x9b\xb8\xe5\x85\xb3\xe6\x8a\x80\xe6\x9c\xaf"
b"\xe6\xad\xa3\xe5\x9c\xa8\xe9\xa3\x9e\xe9\x80\x9f\xe7\x9a\x84\xe5"
b"\x8f\x91\xe5\xb1\x95\xef\xbc\x8c\xe7\x94\xa8\xe6\x88\xb7\xe6\x95"
b"\xb0\xe9\x87\x8f\xe6\x80\xa5\xe5\x89\xa7\xe6\x89\xa9\xe5\xa4\xa7"
b"\xef\xbc\x8c\xe7\x9b\xb8\xe5\x85\xb3\xe7\x9a\x84\xe8\xb5\x84\xe6"
b"\xba\x90\xe9\x9d\x9e\xe5\xb8\xb8\xe5\xa4\x9a\xe3\x80\x82\x0a\xe5"
b"\xa6\x82\xe4\xbd\x95\xe5\x9c\xa8\x20\x50\x79\x74\x68\x6f\x6e\x20"
b"\xe4\xb8\xad\xe4\xbd\xbf\xe7\x94\xa8\xe6\x97\xa2\xe6\x9c\x89\xe7"
b"\x9a\x84\x20\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x3f\x0a\xe3\x80"
b"\x80\xe5\x9c\xa8\xe8\xb3\x87\xe8\xa8\x8a\xe7\xa7\x91\xe6\x8a\x80"
b"\xe5\xbf\xab\xe9\x80\x9f\xe7\x99\xbc\xe5\xb1\x95\xe7\x9a\x84\xe4"
b"\xbb\x8a\xe5\xa4\xa9\x2c\x20\xe9\x96\x8b\xe7\x99\xbc\xe5\x8f\x8a"
b"\xe6\xb8\xac\xe8\xa9\xa6\xe8\xbb\x9f\xe9\xab\x94\xe7\x9a\x84\xe9"
b"\x80\x9f\xe5\xba\xa6\xe6\x98\xaf\xe4\xb8\x8d\xe5\xae\xb9\xe5\xbf"
b"\xbd\xe8\xa6\x96\xe7\x9a\x84\x0a\xe8\xaa\xb2\xe9\xa1\x8c\x2e\x20"
b"\xe7\x82\xba\xe5\x8a\xa0\xe5\xbf\xab\xe9\x96\x8b\xe7\x99\xbc\xe5"
b"\x8f\x8a\xe6\xb8\xac\xe8\xa9\xa6\xe7\x9a\x84\xe9\x80\x9f\xe5\xba"
b"\xa6\x2c\x20\xe6\x88\x91\xe5\x80\x91\xe4\xbe\xbf\xe5\xb8\xb8\xe5"
b"\xb8\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\x88\xa9\xe7\x94\xa8\xe4\xb8"
b"\x80\xe4\xba\x9b\xe5\xb7\xb2\xe9\x96\x8b\xe7\x99\xbc\xe5\xa5\xbd"
b"\xe7\x9a\x84\x0a\x6c\x69\x62\x72\x61\x72\x79\x2c\x20\xe4\xb8\xa6"
b"\xe6\x9c\x89\xe4\xb8\x80\xe5\x80\x8b\x20\x66\x61\x73\x74\x20\x70"
b"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70"
b"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75"
b"\x61\x67\x65\x20\xe5\x8f\xaf\x0a\xe4\xbe\x9b\xe4\xbd\xbf\xe7\x94"
b"\xa8\x2e\x20\xe7\x9b\xae\xe5\x89\x8d\xe6\x9c\x89\xe8\xa8\xb1\xe8"
b"\xa8\xb1\xe5\xa4\x9a\xe5\xa4\x9a\xe7\x9a\x84\x20\x6c\x69\x62\x72"
b"\x61\x72\x79\x20\xe6\x98\xaf\xe4\xbb\xa5\x20\x43\x20\xe5\xaf\xab"
b"\xe6\x88\x90\x2c\x20\xe8\x80\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20"
b"\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\x0a\x66\x61\x73\x74\x20\x70"
b"\x72\x6f\x74\x6f\x74\x79\x70\x69\x6e\x67\x20\xe7\x9a\x84\x20\x70"
b"\x72\x6f\x67\x72\x61\x6d\x6d\x69\x6e\x67\x20\x6c\x61\x6e\x67\x75"
b"\x61\x67\x65\x2e\x20\xe6\x95\x85\xe6\x88\x91\xe5\x80\x91\xe5\xb8"
b"\x8c\xe6\x9c\x9b\xe8\x83\xbd\xe5\xb0\x87\xe6\x97\xa2\xe6\x9c\x89"
b"\xe7\x9a\x84\x0a\x43\x20\x6c\x69\x62\x72\x61\x72\x79\x20\xe6\x8b"
b"\xbf\xe5\x88\xb0\x20\x50\x79\x74\x68\x6f\x6e\x20\xe7\x9a\x84\xe7"
b"\x92\xb0\xe5\xa2\x83\xe4\xb8\xad\xe6\xb8\xac\xe8\xa9\xa6\xe5\x8f"
b"\x8a\xe6\x95\xb4\xe5\x90\x88\x2e\x20\xe5\x85\xb6\xe4\xb8\xad\xe6"
b"\x9c\x80\xe4\xb8\xbb\xe8\xa6\x81\xe4\xb9\x9f\xe6\x98\xaf\xe6\x88"
b"\x91\xe5\x80\x91\xe6\x89\x80\x0a\xe8\xa6\x81\xe8\xa8\x8e\xe8\xab"
b"\x96\xe7\x9a\x84\xe5\x95\x8f\xe9\xa1\x8c\xe5\xb0\xb1\xe6\x98\xaf"
b"\x3a\x0a\x0a"),
'johab': (
b"\x99\xb1\xa4\x77\x88\x62\xd0\x61\x20\xcd\x5c\xaf\xa1\xc5\xa9\x9c"
b"\x61\x0a\x0a\xdc\xc0\xdc\xc0\x90\x73\x21\x21\x20\xf1\x67\xe2\x9c"
b"\xf0\x55\xcc\x81\xa3\x89\x9f\x85\x8a\xa1\x20\xdc\xde\xdc\xd3\xd2"
b"\x7a\xd9\xaf\xd9\xaf\xd9\xaf\x20\x8b\x77\x96\xd3\x20\xdc\xd1\x95"
b"\x81\x20\xdc\xc0\x2e\x20\x2e\x0a\xed\x3c\xb5\x77\xdc\xd1\x93\x77"
b"\xd2\x73\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xac\xe1\xb6\x89\x9e"
b"\xa1\x20\x95\x65\xd0\x62\xf0\xe0\x20\xe0\x3b\xd2\x7a\x20\x21\x20"
b"\x21\x20\x21\x87\x41\x2e\x87\x41\x0a\xd3\x61\xd3\x61\xd3\x61\x20"
b"\x88\x41\x88\x41\x88\x41\xd9\x69\x87\x41\x5f\x87\x41\x20\xb4\xe1"
b"\x9f\x9a\x20\xc8\xa1\xc5\xc1\x8b\x7a\x20\x95\x61\xb7\x77\x20\xc3"
b"\x97\xe2\x9c\x97\x69\xf0\xe0\x20\xdc\xc0\x97\x61\x8b\x7a\x0a\xac"
b"\xe9\x9f\x7a\x20\xe0\x3b\xd2\x7a\x20\x2e\x20\x2e\x20\x2e\x20\x2e"
b"\x20\x8a\x89\xb4\x81\xae\xba\x20\xdc\xd1\x8a\xa1\x20\xdc\xde\x9f"
b"\x89\xdc\xc2\x8b\x7a\x20\xf1\x67\xf1\x62\xf5\x49\xed\xfc\xf3\xe9"
b"\x8c\x61\xbb\x9a\x0a\xb5\xc1\xb2\xa1\xd2\x7a\x20\x21\x20\x21\x20"
b"\xed\x3c\xb5\x77\xdc\xd1\x20\xe0\x3b\x93\x77\x8a\xa1\x20\xd9\x69"
b"\xea\xbe\x89\xc5\x20\xb4\xf4\x93\x77\x8a\xa1\x93\x77\x20\xed\x3c"
b"\x93\x77\x96\xc1\xd2\x7a\x20\x8b\x69\xb4\x81\x97\x7a\x0a\xdc\xde"
b"\x9d\x61\x97\x41\xe2\x9c\x20\xaf\x81\xce\xa1\xae\xa1\xd2\x7a\x20"
b"\xb4\xe1\x9f\x9a\x20\xf1\x67\xf1\x62\xf5\x49\xed\xfc\xf3\xe9\xaf"
b"\x82\xdc\xef\x97\x69\xb4\x7a\x21\x21\x20\xdc\xc0\xdc\xc0\x90\x73"
b"\xd9\xbd\x20\xd9\x62\xd9\x62\x2a\x0a\x0a",
b"\xeb\x98\xa0\xeb\xb0\xa9\xea\xb0\x81\xed\x95\x98\x20\xed\x8e\xb2"
b"\xec\x8b\x9c\xec\xbd\x9c\xeb\x9d\xbc\x0a\x0a\xe3\x89\xaf\xe3\x89"
b"\xaf\xeb\x82\xa9\x21\x21\x20\xe5\x9b\xa0\xe4\xb9\x9d\xe6\x9c\x88"
b"\xed\x8c\xa8\xeb\xaf\xa4\xeb\xa6\x94\xea\xb6\x88\x20\xe2\x93\xa1"
b"\xe2\x93\x96\xed\x9b\x80\xc2\xbf\xc2\xbf\xc2\xbf\x20\xea\xb8\x8d"
b"\xeb\x92\x99\x20\xe2\x93\x94\xeb\x8e\xa8\x20\xe3\x89\xaf\x2e\x20"
b"\x2e\x0a\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94\xeb\x8a\xa5\xed\x9a"
b"\xb9\x20\x2e\x20\x2e\x20\x2e\x20\x2e\x20\xec\x84\x9c\xec\x9a\xb8"
b"\xeb\xa4\x84\x20\xeb\x8e\x90\xed\x95\x99\xe4\xb9\x99\x20\xe5\xae"
b"\xb6\xed\x9b\x80\x20\x21\x20\x21\x20\x21\xe3\x85\xa0\x2e\xe3\x85"
b"\xa0\x0a\xed\x9d\x90\xed\x9d\x90\xed\x9d\x90\x20\xe3\x84\xb1\xe3"
b"\x84\xb1\xe3\x84\xb1\xe2\x98\x86\xe3\x85\xa0\x5f\xe3\x85\xa0\x20"
b"\xec\x96\xb4\xeb\xa6\xa8\x20\xed\x83\xb8\xec\xbd\xb0\xea\xb8\x90"
b"\x20\xeb\x8e\x8c\xec\x9d\x91\x20\xec\xb9\x91\xe4\xb9\x9d\xeb\x93"
b"\xa4\xe4\xb9\x99\x20\xe3\x89\xaf\xeb\x93\x9c\xea\xb8\x90\x0a\xec"
b"\x84\xa4\xeb\xa6\x8c\x20\xe5\xae\xb6\xed\x9b\x80\x20\x2e\x20\x2e"
b"\x20\x2e\x20\x2e\x20\xea\xb5\xb4\xec\x95\xa0\xec\x89\x8c\x20\xe2"
b"\x93\x94\xea\xb6\x88\x20\xe2\x93\xa1\xeb\xa6\x98\xe3\x89\xb1\xea"
b"\xb8\x90\x20\xe5\x9b\xa0\xe4\xbb\x81\xe5\xb7\x9d\xef\xa6\x81\xe4"
b"\xb8\xad\xea\xb9\x8c\xec\xa6\xbc\x0a\xec\x99\x80\xec\x92\x80\xed"
b"\x9b\x80\x20\x21\x20\x21\x20\xe4\xba\x9e\xec\x98\x81\xe2\x93\x94"
b"\x20\xe5\xae\xb6\xeb\x8a\xa5\xea\xb6\x88\x20\xe2\x98\x86\xe4\xb8"
b"\x8a\xea\xb4\x80\x20\xec\x97\x86\xeb\x8a\xa5\xea\xb6\x88\xeb\x8a"
b"\xa5\x20\xe4\xba\x9e\xeb\x8a\xa5\xeb\x92\x88\xed\x9b\x80\x20\xea"
b"\xb8\x80\xec\x95\xa0\xeb\x93\xb4\x0a\xe2\x93\xa1\xeb\xa0\xa4\xeb"
b"\x93\x80\xe4\xb9\x9d\x20\xec\x8b\x80\xed\x92\x94\xec\x88\xb4\xed"
b"\x9b\x80\x20\xec\x96\xb4\xeb\xa6\xa8\x20\xe5\x9b\xa0\xe4\xbb\x81"
b"\xe5\xb7\x9d\xef\xa6\x81\xe4\xb8\xad\xec\x8b\x81\xe2\x91\xa8\xeb"
b"\x93\xa4\xec\x95\x9c\x21\x21\x20\xe3\x89\xaf\xe3\x89\xaf\xeb\x82"
b"\xa9\xe2\x99\xa1\x20\xe2\x8c\x92\xe2\x8c\x92\x2a\x0a\x0a"),
'shift_jis': (
b"\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8a\x4a\x94\xad\x82\xcd\x81"
b"\x41\x31\x39\x39\x30\x20\x94\x4e\x82\xb2\x82\xeb\x82\xa9\x82\xe7"
b"\x8a\x4a\x8e\x6e\x82\xb3\x82\xea\x82\xc4\x82\xa2\x82\xdc\x82\xb7"
b"\x81\x42\x0a\x8a\x4a\x94\xad\x8e\xd2\x82\xcc\x20\x47\x75\x69\x64"
b"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\x82\xcd\x8b"
b"\xb3\x88\xe7\x97\x70\x82\xcc\x83\x76\x83\x8d\x83\x4f\x83\x89\x83"
b"\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x81\x75\x41\x42\x43\x81\x76"
b"\x82\xcc\x8a\x4a\x94\xad\x82\xc9\x8e\x51\x89\xc1\x82\xb5\x82\xc4"
b"\x82\xa2\x82\xdc\x82\xb5\x82\xbd\x82\xaa\x81\x41\x41\x42\x43\x20"
b"\x82\xcd\x8e\xc0\x97\x70\x8f\xe3\x82\xcc\x96\xda\x93\x49\x82\xc9"
b"\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x93\x4b\x82\xb5\x82\xc4\x82\xa2"
b"\x82\xdc\x82\xb9\x82\xf1\x82\xc5\x82\xb5\x82\xbd\x81\x42\x0a\x82"
b"\xb1\x82\xcc\x82\xbd\x82\xdf\x81\x41\x47\x75\x69\x64\x6f\x20\x82"
b"\xcd\x82\xe6\x82\xe8\x8e\xc0\x97\x70\x93\x49\x82\xc8\x83\x76\x83"
b"\x8d\x83\x4f\x83\x89\x83\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x82"
b"\xcc\x8a\x4a\x94\xad\x82\xf0\x8a\x4a\x8e\x6e\x82\xb5\x81\x41\x89"
b"\x70\x8d\x91\x20\x42\x42\x53\x20\x95\xfa\x91\x97\x82\xcc\x83\x52"
b"\x83\x81\x83\x66\x83\x42\x94\xd4\x91\x67\x81\x75\x83\x82\x83\x93"
b"\x83\x65\x83\x42\x20\x83\x70\x83\x43\x83\x5c\x83\x93\x81\x76\x82"
b"\xcc\x83\x74\x83\x40\x83\x93\x82\xc5\x82\xa0\x82\xe9\x20\x47\x75"
b"\x69\x64\x6f\x20\x82\xcd\x82\xb1\x82\xcc\x8c\xbe\x8c\xea\x82\xf0"
b"\x81\x75\x50\x79\x74\x68\x6f\x6e\x81\x76\x82\xc6\x96\xbc\x82\xc3"
b"\x82\xaf\x82\xdc\x82\xb5\x82\xbd\x81\x42\x0a\x82\xb1\x82\xcc\x82"
b"\xe6\x82\xa4\x82\xc8\x94\x77\x8c\x69\x82\xa9\x82\xe7\x90\xb6\x82"
b"\xdc\x82\xea\x82\xbd\x20\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8c"
b"\xbe\x8c\xea\x90\xdd\x8c\x76\x82\xcd\x81\x41\x81\x75\x83\x56\x83"
b"\x93\x83\x76\x83\x8b\x81\x76\x82\xc5\x81\x75\x8f\x4b\x93\xbe\x82"
b"\xaa\x97\x65\x88\xd5\x81\x76\x82\xc6\x82\xa2\x82\xa4\x96\xda\x95"
b"\x57\x82\xc9\x8f\x64\x93\x5f\x82\xaa\x92\x75\x82\xa9\x82\xea\x82"
b"\xc4\x82\xa2\x82\xdc\x82\xb7\x81\x42\x0a\x91\xbd\x82\xad\x82\xcc"
b"\x83\x58\x83\x4e\x83\x8a\x83\x76\x83\x67\x8c\x6e\x8c\xbe\x8c\xea"
b"\x82\xc5\x82\xcd\x83\x86\x81\x5b\x83\x55\x82\xcc\x96\xda\x90\xe6"
b"\x82\xcc\x97\x98\x95\xd6\x90\xab\x82\xf0\x97\x44\x90\xe6\x82\xb5"
b"\x82\xc4\x90\x46\x81\x58\x82\xc8\x8b\x40\x94\x5c\x82\xf0\x8c\xbe"
b"\x8c\xea\x97\x76\x91\x66\x82\xc6\x82\xb5\x82\xc4\x8e\xe6\x82\xe8"
b"\x93\xfc\x82\xea\x82\xe9\x8f\xea\x8d\x87\x82\xaa\x91\xbd\x82\xa2"
b"\x82\xcc\x82\xc5\x82\xb7\x82\xaa\x81\x41\x50\x79\x74\x68\x6f\x6e"
b"\x20\x82\xc5\x82\xcd\x82\xbb\x82\xa4\x82\xa2\x82\xc1\x82\xbd\x8f"
b"\xac\x8d\xd7\x8d\x48\x82\xaa\x92\xc7\x89\xc1\x82\xb3\x82\xea\x82"
b"\xe9\x82\xb1\x82\xc6\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x82\xa0\x82"
b"\xe8\x82\xdc\x82\xb9\x82\xf1\x81\x42\x0a\x8c\xbe\x8c\xea\x8e\xa9"
b"\x91\xcc\x82\xcc\x8b\x40\x94\x5c\x82\xcd\x8d\xc5\x8f\xac\x8c\xc0"
b"\x82\xc9\x89\x9f\x82\xb3\x82\xa6\x81\x41\x95\x4b\x97\x76\x82\xc8"
b"\x8b\x40\x94\x5c\x82\xcd\x8a\x67\x92\xa3\x83\x82\x83\x57\x83\x85"
b"\x81\x5b\x83\x8b\x82\xc6\x82\xb5\x82\xc4\x92\xc7\x89\xc1\x82\xb7"
b"\x82\xe9\x81\x41\x82\xc6\x82\xa2\x82\xa4\x82\xcc\x82\xaa\x20\x50"
b"\x79\x74\x68\x6f\x6e\x20\x82\xcc\x83\x7c\x83\x8a\x83\x56\x81\x5b"
b"\x82\xc5\x82\xb7\x81\x42\x0a\x0a",
b"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
b"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81"
b"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b"
b"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3"
b"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3"
b"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73"
b"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8"
b"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3"
b"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80"
b"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
b"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3"
b"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80"
b"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8"
b"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf"
b"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3"
b"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81"
b"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81"
b"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20"
b"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7"
b"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83"
b"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e"
b"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5"
b"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42"
b"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3"
b"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80"
b"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83"
b"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae"
b"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3"
b"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3"
b"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79"
b"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5"
b"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a"
b"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8"
b"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81"
b"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3"
b"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81"
b"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97"
b"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5"
b"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81"
b"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab"
b"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3"
b"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80"
b"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82"
b"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80"
b"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3"
b"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88"
b"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88"
b"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6"
b"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6"
b"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96"
b"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5"
b"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81"
b"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e"
b"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84"
b"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3"
b"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82"
b"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe"
b"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3"
b"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4"
b"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c"
b"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95"
b"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6"
b"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83"
b"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8"
b"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3"
b"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81"
b"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3"
b"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81"
b"\x99\xe3\x80\x82\x0a\x0a"),
'shift_jisx0213': (
b"\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8a\x4a\x94\xad\x82\xcd\x81"
b"\x41\x31\x39\x39\x30\x20\x94\x4e\x82\xb2\x82\xeb\x82\xa9\x82\xe7"
b"\x8a\x4a\x8e\x6e\x82\xb3\x82\xea\x82\xc4\x82\xa2\x82\xdc\x82\xb7"
b"\x81\x42\x0a\x8a\x4a\x94\xad\x8e\xd2\x82\xcc\x20\x47\x75\x69\x64"
b"\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73\x73\x75\x6d\x20\x82\xcd\x8b"
b"\xb3\x88\xe7\x97\x70\x82\xcc\x83\x76\x83\x8d\x83\x4f\x83\x89\x83"
b"\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x81\x75\x41\x42\x43\x81\x76"
b"\x82\xcc\x8a\x4a\x94\xad\x82\xc9\x8e\x51\x89\xc1\x82\xb5\x82\xc4"
b"\x82\xa2\x82\xdc\x82\xb5\x82\xbd\x82\xaa\x81\x41\x41\x42\x43\x20"
b"\x82\xcd\x8e\xc0\x97\x70\x8f\xe3\x82\xcc\x96\xda\x93\x49\x82\xc9"
b"\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x93\x4b\x82\xb5\x82\xc4\x82\xa2"
b"\x82\xdc\x82\xb9\x82\xf1\x82\xc5\x82\xb5\x82\xbd\x81\x42\x0a\x82"
b"\xb1\x82\xcc\x82\xbd\x82\xdf\x81\x41\x47\x75\x69\x64\x6f\x20\x82"
b"\xcd\x82\xe6\x82\xe8\x8e\xc0\x97\x70\x93\x49\x82\xc8\x83\x76\x83"
b"\x8d\x83\x4f\x83\x89\x83\x7e\x83\x93\x83\x4f\x8c\xbe\x8c\xea\x82"
b"\xcc\x8a\x4a\x94\xad\x82\xf0\x8a\x4a\x8e\x6e\x82\xb5\x81\x41\x89"
b"\x70\x8d\x91\x20\x42\x42\x53\x20\x95\xfa\x91\x97\x82\xcc\x83\x52"
b"\x83\x81\x83\x66\x83\x42\x94\xd4\x91\x67\x81\x75\x83\x82\x83\x93"
b"\x83\x65\x83\x42\x20\x83\x70\x83\x43\x83\x5c\x83\x93\x81\x76\x82"
b"\xcc\x83\x74\x83\x40\x83\x93\x82\xc5\x82\xa0\x82\xe9\x20\x47\x75"
b"\x69\x64\x6f\x20\x82\xcd\x82\xb1\x82\xcc\x8c\xbe\x8c\xea\x82\xf0"
b"\x81\x75\x50\x79\x74\x68\x6f\x6e\x81\x76\x82\xc6\x96\xbc\x82\xc3"
b"\x82\xaf\x82\xdc\x82\xb5\x82\xbd\x81\x42\x0a\x82\xb1\x82\xcc\x82"
b"\xe6\x82\xa4\x82\xc8\x94\x77\x8c\x69\x82\xa9\x82\xe7\x90\xb6\x82"
b"\xdc\x82\xea\x82\xbd\x20\x50\x79\x74\x68\x6f\x6e\x20\x82\xcc\x8c"
b"\xbe\x8c\xea\x90\xdd\x8c\x76\x82\xcd\x81\x41\x81\x75\x83\x56\x83"
b"\x93\x83\x76\x83\x8b\x81\x76\x82\xc5\x81\x75\x8f\x4b\x93\xbe\x82"
b"\xaa\x97\x65\x88\xd5\x81\x76\x82\xc6\x82\xa2\x82\xa4\x96\xda\x95"
b"\x57\x82\xc9\x8f\x64\x93\x5f\x82\xaa\x92\x75\x82\xa9\x82\xea\x82"
b"\xc4\x82\xa2\x82\xdc\x82\xb7\x81\x42\x0a\x91\xbd\x82\xad\x82\xcc"
b"\x83\x58\x83\x4e\x83\x8a\x83\x76\x83\x67\x8c\x6e\x8c\xbe\x8c\xea"
b"\x82\xc5\x82\xcd\x83\x86\x81\x5b\x83\x55\x82\xcc\x96\xda\x90\xe6"
b"\x82\xcc\x97\x98\x95\xd6\x90\xab\x82\xf0\x97\x44\x90\xe6\x82\xb5"
b"\x82\xc4\x90\x46\x81\x58\x82\xc8\x8b\x40\x94\x5c\x82\xf0\x8c\xbe"
b"\x8c\xea\x97\x76\x91\x66\x82\xc6\x82\xb5\x82\xc4\x8e\xe6\x82\xe8"
b"\x93\xfc\x82\xea\x82\xe9\x8f\xea\x8d\x87\x82\xaa\x91\xbd\x82\xa2"
b"\x82\xcc\x82\xc5\x82\xb7\x82\xaa\x81\x41\x50\x79\x74\x68\x6f\x6e"
b"\x20\x82\xc5\x82\xcd\x82\xbb\x82\xa4\x82\xa2\x82\xc1\x82\xbd\x8f"
b"\xac\x8d\xd7\x8d\x48\x82\xaa\x92\xc7\x89\xc1\x82\xb3\x82\xea\x82"
b"\xe9\x82\xb1\x82\xc6\x82\xcd\x82\xa0\x82\xdc\x82\xe8\x82\xa0\x82"
b"\xe8\x82\xdc\x82\xb9\x82\xf1\x81\x42\x0a\x8c\xbe\x8c\xea\x8e\xa9"
b"\x91\xcc\x82\xcc\x8b\x40\x94\x5c\x82\xcd\x8d\xc5\x8f\xac\x8c\xc0"
b"\x82\xc9\x89\x9f\x82\xb3\x82\xa6\x81\x41\x95\x4b\x97\x76\x82\xc8"
b"\x8b\x40\x94\x5c\x82\xcd\x8a\x67\x92\xa3\x83\x82\x83\x57\x83\x85"
b"\x81\x5b\x83\x8b\x82\xc6\x82\xb5\x82\xc4\x92\xc7\x89\xc1\x82\xb7"
b"\x82\xe9\x81\x41\x82\xc6\x82\xa2\x82\xa4\x82\xcc\x82\xaa\x20\x50"
b"\x79\x74\x68\x6f\x6e\x20\x82\xcc\x83\x7c\x83\x8a\x83\x56\x81\x5b"
b"\x82\xc5\x82\xb7\x81\x42\x0a\x0a\x83\x6d\x82\xf5\x20\x83\x9e\x20"
b"\x83\x67\x83\x4c\x88\x4b\x88\x79\x20\x98\x83\xfc\xd6\x20\xfc\xd2"
b"\xfc\xe6\xfb\xd4\x0a",
b"\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
b"\xe3\x81\xaf\xe3\x80\x81\x31\x39\x39\x30\x20\xe5\xb9\xb4\xe3\x81"
b"\x94\xe3\x82\x8d\xe3\x81\x8b\xe3\x82\x89\xe9\x96\x8b\xe5\xa7\x8b"
b"\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3"
b"\x81\x99\xe3\x80\x82\x0a\xe9\x96\x8b\xe7\x99\xba\xe8\x80\x85\xe3"
b"\x81\xae\x20\x47\x75\x69\x64\x6f\x20\x76\x61\x6e\x20\x52\x6f\x73"
b"\x73\x75\x6d\x20\xe3\x81\xaf\xe6\x95\x99\xe8\x82\xb2\xe7\x94\xa8"
b"\xe3\x81\xae\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83\xa9\xe3"
b"\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e\xe3\x80"
b"\x8c\x41\x42\x43\xe3\x80\x8d\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba"
b"\xe3\x81\xab\xe5\x8f\x82\xe5\x8a\xa0\xe3\x81\x97\xe3\x81\xa6\xe3"
b"\x81\x84\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x81\x8c\xe3\x80"
b"\x81\x41\x42\x43\x20\xe3\x81\xaf\xe5\xae\x9f\xe7\x94\xa8\xe4\xb8"
b"\x8a\xe3\x81\xae\xe7\x9b\xae\xe7\x9a\x84\xe3\x81\xab\xe3\x81\xaf"
b"\xe3\x81\x82\xe3\x81\xbe\xe3\x82\x8a\xe9\x81\xa9\xe3\x81\x97\xe3"
b"\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x81"
b"\xa7\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a\xe3\x81\x93\xe3\x81"
b"\xae\xe3\x81\x9f\xe3\x82\x81\xe3\x80\x81\x47\x75\x69\x64\x6f\x20"
b"\xe3\x81\xaf\xe3\x82\x88\xe3\x82\x8a\xe5\xae\x9f\xe7\x94\xa8\xe7"
b"\x9a\x84\xe3\x81\xaa\xe3\x83\x97\xe3\x83\xad\xe3\x82\xb0\xe3\x83"
b"\xa9\xe3\x83\x9f\xe3\x83\xb3\xe3\x82\xb0\xe8\xa8\x80\xe8\xaa\x9e"
b"\xe3\x81\xae\xe9\x96\x8b\xe7\x99\xba\xe3\x82\x92\xe9\x96\x8b\xe5"
b"\xa7\x8b\xe3\x81\x97\xe3\x80\x81\xe8\x8b\xb1\xe5\x9b\xbd\x20\x42"
b"\x42\x53\x20\xe6\x94\xbe\xe9\x80\x81\xe3\x81\xae\xe3\x82\xb3\xe3"
b"\x83\xa1\xe3\x83\x87\xe3\x82\xa3\xe7\x95\xaa\xe7\xb5\x84\xe3\x80"
b"\x8c\xe3\x83\xa2\xe3\x83\xb3\xe3\x83\x86\xe3\x82\xa3\x20\xe3\x83"
b"\x91\xe3\x82\xa4\xe3\x82\xbd\xe3\x83\xb3\xe3\x80\x8d\xe3\x81\xae"
b"\xe3\x83\x95\xe3\x82\xa1\xe3\x83\xb3\xe3\x81\xa7\xe3\x81\x82\xe3"
b"\x82\x8b\x20\x47\x75\x69\x64\x6f\x20\xe3\x81\xaf\xe3\x81\x93\xe3"
b"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe3\x82\x92\xe3\x80\x8c\x50\x79"
b"\x74\x68\x6f\x6e\xe3\x80\x8d\xe3\x81\xa8\xe5\x90\x8d\xe3\x81\xa5"
b"\xe3\x81\x91\xe3\x81\xbe\xe3\x81\x97\xe3\x81\x9f\xe3\x80\x82\x0a"
b"\xe3\x81\x93\xe3\x81\xae\xe3\x82\x88\xe3\x81\x86\xe3\x81\xaa\xe8"
b"\x83\x8c\xe6\x99\xaf\xe3\x81\x8b\xe3\x82\x89\xe7\x94\x9f\xe3\x81"
b"\xbe\xe3\x82\x8c\xe3\x81\x9f\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3"
b"\x81\xae\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa8\xad\xe8\xa8\x88\xe3\x81"
b"\xaf\xe3\x80\x81\xe3\x80\x8c\xe3\x82\xb7\xe3\x83\xb3\xe3\x83\x97"
b"\xe3\x83\xab\xe3\x80\x8d\xe3\x81\xa7\xe3\x80\x8c\xe7\xbf\x92\xe5"
b"\xbe\x97\xe3\x81\x8c\xe5\xae\xb9\xe6\x98\x93\xe3\x80\x8d\xe3\x81"
b"\xa8\xe3\x81\x84\xe3\x81\x86\xe7\x9b\xae\xe6\xa8\x99\xe3\x81\xab"
b"\xe9\x87\x8d\xe7\x82\xb9\xe3\x81\x8c\xe7\xbd\xae\xe3\x81\x8b\xe3"
b"\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80"
b"\x82\x0a\xe5\xa4\x9a\xe3\x81\x8f\xe3\x81\xae\xe3\x82\xb9\xe3\x82"
b"\xaf\xe3\x83\xaa\xe3\x83\x97\xe3\x83\x88\xe7\xb3\xbb\xe8\xa8\x80"
b"\xe8\xaa\x9e\xe3\x81\xa7\xe3\x81\xaf\xe3\x83\xa6\xe3\x83\xbc\xe3"
b"\x82\xb6\xe3\x81\xae\xe7\x9b\xae\xe5\x85\x88\xe3\x81\xae\xe5\x88"
b"\xa9\xe4\xbe\xbf\xe6\x80\xa7\xe3\x82\x92\xe5\x84\xaa\xe5\x85\x88"
b"\xe3\x81\x97\xe3\x81\xa6\xe8\x89\xb2\xe3\x80\x85\xe3\x81\xaa\xe6"
b"\xa9\x9f\xe8\x83\xbd\xe3\x82\x92\xe8\xa8\x80\xe8\xaa\x9e\xe8\xa6"
b"\x81\xe7\xb4\xa0\xe3\x81\xa8\xe3\x81\x97\xe3\x81\xa6\xe5\x8f\x96"
b"\xe3\x82\x8a\xe5\x85\xa5\xe3\x82\x8c\xe3\x82\x8b\xe5\xa0\xb4\xe5"
b"\x90\x88\xe3\x81\x8c\xe5\xa4\x9a\xe3\x81\x84\xe3\x81\xae\xe3\x81"
b"\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\x50\x79\x74\x68\x6f\x6e"
b"\x20\xe3\x81\xa7\xe3\x81\xaf\xe3\x81\x9d\xe3\x81\x86\xe3\x81\x84"
b"\xe3\x81\xa3\xe3\x81\x9f\xe5\xb0\x8f\xe7\xb4\xb0\xe5\xb7\xa5\xe3"
b"\x81\x8c\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x95\xe3\x82\x8c\xe3\x82"
b"\x8b\xe3\x81\x93\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\x82\xe3\x81\xbe"
b"\xe3\x82\x8a\xe3\x81\x82\xe3\x82\x8a\xe3\x81\xbe\xe3\x81\x9b\xe3"
b"\x82\x93\xe3\x80\x82\x0a\xe8\xa8\x80\xe8\xaa\x9e\xe8\x87\xaa\xe4"
b"\xbd\x93\xe3\x81\xae\xe6\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x9c"
b"\x80\xe5\xb0\x8f\xe9\x99\x90\xe3\x81\xab\xe6\x8a\xbc\xe3\x81\x95"
b"\xe3\x81\x88\xe3\x80\x81\xe5\xbf\x85\xe8\xa6\x81\xe3\x81\xaa\xe6"
b"\xa9\x9f\xe8\x83\xbd\xe3\x81\xaf\xe6\x8b\xa1\xe5\xbc\xb5\xe3\x83"
b"\xa2\xe3\x82\xb8\xe3\x83\xa5\xe3\x83\xbc\xe3\x83\xab\xe3\x81\xa8"
b"\xe3\x81\x97\xe3\x81\xa6\xe8\xbf\xbd\xe5\x8a\xa0\xe3\x81\x99\xe3"
b"\x82\x8b\xe3\x80\x81\xe3\x81\xa8\xe3\x81\x84\xe3\x81\x86\xe3\x81"
b"\xae\xe3\x81\x8c\x20\x50\x79\x74\x68\x6f\x6e\x20\xe3\x81\xae\xe3"
b"\x83\x9d\xe3\x83\xaa\xe3\x82\xb7\xe3\x83\xbc\xe3\x81\xa7\xe3\x81"
b"\x99\xe3\x80\x82\x0a\x0a\xe3\x83\x8e\xe3\x81\x8b\xe3\x82\x9a\x20"
b"\xe3\x83\x88\xe3\x82\x9a\x20\xe3\x83\x88\xe3\x82\xad\xef\xa8\xb6"
b"\xef\xa8\xb9\x20\xf0\xa1\x9a\xb4\xf0\xaa\x8e\x8c\x20\xe9\xba\x80"
b"\xe9\xbd\x81\xf0\xa9\x9b\xb0\x0a"),
}
|
xiangel/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/invalid_models/tests.py
|
57
|
import copy
import sys
from django.core.management.validation import get_validation_errors
from django.db.models.loading import cache, load_app
from django.test.utils import override_settings
from django.utils import unittest
from django.utils.six import StringIO
class InvalidModelTestCase(unittest.TestCase):
"""Import an appliation with invalid models and test the exceptions."""
def setUp(self):
        # Make sure sys.stdout is not a tty so that we get errors without
        # coloring attached (makes matching the results easier). We restore
        # sys.stdout afterwards.
self.old_stdout = sys.stdout
self.stdout = StringIO()
sys.stdout = self.stdout
# This test adds dummy applications to the app cache. These
# need to be removed in order to prevent bad interactions
# with the flush operation in other tests.
self.old_app_models = copy.deepcopy(cache.app_models)
self.old_app_store = copy.deepcopy(cache.app_store)
def tearDown(self):
cache.app_models = self.old_app_models
cache.app_store = self.old_app_store
cache._get_models_cache = {}
sys.stdout = self.old_stdout
# Technically, this isn't an override -- TEST_SWAPPED_MODEL must be
# set to *something* in order for the test to work. However, it's
# easier to set this up as an override than to require every developer
# to specify a value in their test settings.
@override_settings(
TEST_SWAPPED_MODEL='invalid_models.ReplacementModel',
TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model',
TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target',
)
def test_invalid_models(self):
try:
module = load_app("invalid_models.invalid_models")
except Exception:
self.fail('Unable to load invalid model module')
get_validation_errors(self.stdout, module)
self.stdout.seek(0)
error_log = self.stdout.read()
actual = error_log.split('\n')
expected = module.model_errors.split('\n')
unexpected = [err for err in actual if err not in expected]
missing = [err for err in expected if err not in actual]
self.assertFalse(unexpected, "Unexpected Errors: " + '\n'.join(unexpected))
self.assertFalse(missing, "Missing Errors: " + '\n'.join(missing))
|
rmboggs/django
|
refs/heads/master
|
tests/i18n/other2/locale/__init__.py
|
12133432
| |
youngjeff/jobs
|
refs/heads/master
|
jobs/__init__.py
|
12133432
| |
Maikflow/django_test
|
refs/heads/master
|
lib/python2.7/site-packages/Django-1.7.1-py2.7.egg/django/contrib/gis/tests/distapp/__init__.py
|
12133432
| |
danrg/RGT-tool
|
refs/heads/master
|
src/RGT/XML/SVG/Filters/baseComponentTransferNode.py
|
1
|
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
from RGT.XML.SVG.Attribs.transferFunctionElementAttributes import TransferFunctionElementAttributes
class BaseComponentTransferNode(BasicSvgNode, TransferFunctionElementAttributes):
def __init__(self, ownerDoc, tagName):
BasicSvgNode.__init__(self, ownerDoc, tagName)
TransferFunctionElementAttributes.__init__(self)
self._allowedSvgChildNodes.update({self.SVG_ANIMATE_NODE, self.SVG_SET_NODE})
|
db4ple/mchf-github
|
refs/heads/active-devel
|
mchf-eclipse/support/ui/menu/ui_menu_structure_c2py.py
|
4
|
#!/usr/bin/python
"""
2016-12-30 HB9ocq - extract uhsdr LCD-menu definition data from C-source
and transform to Python, showing on stdout
WARNING: THIS RELIES HEAVILY UPON (UN-)DISCIPLINE IN C SYNTAX ! ! !
WARNING: ANY CHANGE OF DEFINITIONS AND DECLARATIONS IN $INPUT_C_SRC
POTENTIALLY BREAKS THIS TRANSFORMATION ! ! !
LAST WARNING: YOU HAVE BEEN WARNED ! ! !
2017-01-01 HB9ocq - replacement for prototype shell script (ui_menu_structure_c2py.sh)
for improved parsing accuracy (avoids issues with commas and
all uppercase text in UiMenuDesc(...))
2017-01-06 HB9ocq - following change in definition of MenuDescriptor
AFTERLAST WARNING: BINDS EVEN TIGHTER TO WRITING DISCIPLINE IN C SYNTAX ! ! !
"""
import subprocess
# this points from HERE to the 'mchf-eclipse' directory of our project
MCHF_BASEDIR = r"../../../"
MCHF_VERSIONFILE = MCHF_BASEDIR + r'src/uhsdr_version.h'
# the ONLY C-source we do read AND understand
INPUT_C_SRC = MCHF_BASEDIR + r"drivers/ui/menu/ui_menu_structure.c"
# reading version from uhsdr_version.h
MAJ = subprocess.check_output('cat ' + MCHF_VERSIONFILE + ' | cut -d " " -f 2 | grep "TRX4M" | egrep -e "^[^#]" | grep "MAJOR" | cut -d "\\"" -f 2 | tr -d $"\n"', shell = True)
MIN = subprocess.check_output('cat ' + MCHF_VERSIONFILE + ' | cut -d " " -f 2 | grep "TRX4M" | egrep -e "^[^#]" | grep "MINOR" | cut -d "\\"" -f 2 | tr -d $"\n"', shell = True)
REL = subprocess.check_output('cat ' + MCHF_VERSIONFILE + ' | cut -d " " -f 2 | grep "TRX4M" | egrep -e "^[^#]" | grep "RELEASE" | cut -d "\\"" -f 2 | tr -d $"\n"', shell = True)
BUILD_ID = MAJ.decode() + "." + MIN.decode() + "." + REL.decode()
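# Illustrative only (assumed extracted values): if the three pipelines above
# yield MAJ == b"2", MIN == b"6" and REL == b"88", then BUILD_ID == "2.6.88"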
# reading version from mchf.bin
#BUILD_ID = subprocess.check_output('grep -aPo "(?<=fwv-)[^fwt]+" ../../../mchf.bin | cut -d "" -f 1 | tr -d $"\n"', shell = True)
#print("##DBG## BUILD_ID = '{}'".format(BUILD_ID))
from datetime import datetime
TS_NOW = datetime.now().replace(microsecond=0).isoformat()
#-----------------------------------------------------
# read in the whole C source
menu_csrc = []
with open(INPUT_C_SRC, 'r') as f:
menu_csrc = f.readlines()
MENU_DESCRIPTOR = []
"""
MENU_DESCRIPTOR is the data structure we're going to build
MENU_DESCRIPTOR is a list of dicts with entries "MENU_ID" "ME_KIND" "NR" "LABEL" "DESC"
e.g. [ ... { 'MENU_ID': "TOP",
'ME_KIND': "GROUP",
'NR': "MENU_BASE",
'LABEL': "Standard Menu",
'DESC': ":soon:" },
...]
"""
import re
MENULN_REO = re.compile( # pattern for lines..
r'^\s+{\s*' # ..starting with whitespace + a left brace
+ r'(?P<MENU_ID>MENU_[\w]+)' # ..an identifier 'MENU_xxx', we call it MENU_ID
+ r'[,\s]+'
+ r'(?P<ME_KIND>MENU_[\w]+)' # ..a 2nd id 'MENU_xxx', we call it ME_KIND
+ r'[,\s]+'
+ r'(?P<NR>[\w]+)' # ..a 3rd rather long id 'yy...zz', we call it NR
+ r'[,\s]+'
+ r'.*[^,]' # ..show conditional entries, too
# + r'NULL' # .. this would show only non-conditional entries
+ r'[,\s]+'
+ r'"(?P<LABEL>[^"]*)"' # ..n chars in quotes, we call this LABEL
+ r'[,\s]+'
+ r'UiMenuDesc\("(?P<DESC>[^"]*)"\s*\)' # ..a UiMenuDesc in quotes, we call it DESC
    + r'.*}.*$' # ..uninteresting rest, incl. right brace
)
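# Illustrative only (the C line below is an assumed shape, not copied from
# ui_menu_structure.c): a definition such as
#   { MENU_TOP, MENU_GROUP, MENU_BASE, NULL, "Standard Menu", UiMenuDesc("...") },
# would be matched with groupdict() == {'MENU_ID': 'MENU_TOP',
# 'ME_KIND': 'MENU_GROUP', 'NR': 'MENU_BASE', 'LABEL': 'Standard Menu',
# 'DESC': '...'}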
# each line of the C source...
for ln in menu_csrc:
# ...gets RE-searched for the interesting data,
m = MENULN_REO.search(ln)
if m: # ...as things match: keep the ready-cooked dict
MENU_DESCRIPTOR.append(m.groupdict())
if [] == MENU_DESCRIPTOR:
    print('ERROR: MENU_DESCRIPTOR is empty! (no lines defining UiMenu entries found in "{}")'.format(INPUT_C_SRC))
exit(-1)
#-----------------------------------------------------
OUTPUT = """\
#!/usr/bin/env python
#
# WARNING: generated data! DO NOT EDIT MANUALLY ! ! !
#
# generated at {TS_NOW} by "{__file__}"
#
# out of {INPUT_C_SRC} ({BUILD_ID})
#
BUILD_ID = "{BUILD_ID}"
MENU_DESCRIPTOR = {}
#EOFILE
"""
from pprint import pformat
# are we running as a program?
if '__main__' == __name__:
print(OUTPUT.format(pformat(MENU_DESCRIPTOR), **globals()))
exit(0)
#EOFILE
|
jeffwidman/ansible-modules-core
|
refs/heads/devel
|
network/netvisor/pn_vlag.py
|
30
|
#!/usr/bin/python
""" PN CLI vlag-create/vlag-delete/vlag-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
DOCUMENTATION = """
---
module: pn_vlag
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: CLI command to create/delete/modify vlag.
description:
- Execute vlag-create/vlag-delete/vlag-modify command.
- A virtual link aggregation group (VLAG) allows links that are physically
connected to two different Pluribus Networks devices to appear as a single
trunk to a third device. The third device can be a switch, server, or any
Ethernet device. A VLAG can provide Layer 2 multipathing, which allows you
to create redundancy by increasing bandwidth, enabling multiple parallel
    paths between nodes and load balancing traffic where alternative paths exist.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run this command on.
state:
description:
- State the action to perform. Use 'present' to create vlag,
'absent' to delete vlag and 'update' to modify vlag.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- The C(pn_name) takes a valid name for vlag configuration.
required: true
pn_port:
description:
- Specify the local VLAG port.
- Required for vlag-create.
pn_peer_port:
description:
- Specify the peer VLAG port.
- Required for vlag-create.
pn_mode:
description:
- Specify the mode for the VLAG. Active-standby indicates one side is
active and the other side is in standby mode. Active-active indicates
that both sides of the vlag are up by default.
choices: ['active-active', 'active-standby']
pn_peer_switch:
description:
- Specify the fabric-name of the peer switch.
pn_failover_action:
description:
- Specify the failover action as move or ignore.
choices: ['move', 'ignore']
pn_lacp_mode:
description:
- Specify the LACP mode.
choices: ['off', 'passive', 'active']
pn_lacp_timeout:
description:
      - Specify the LACP timeout as slow (30 seconds) or fast (4 seconds).
choices: ['slow', 'fast']
pn_lacp_fallback:
description:
      - Specify the LACP fallback mode as bundle or individual.
choices: ['bundle', 'individual']
pn_lacp_fallback_timeout:
description:
- Specify the LACP fallback timeout in seconds. The range is between 30
and 60 seconds with a default value of 50 seconds.
"""
EXAMPLES = """
- name: create a VLAG
pn_vlag:
state: 'present'
pn_name: spine-to-leaf
pn_port: 'spine01-to-leaf'
pn_peer_port: 'spine02-to-leaf'
pn_peer_switch: spine02
pn_mode: 'active-active'
- name: delete VLAGs
pn_vlag:
state: 'absent'
pn_name: spine-to-leaf
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the vlag command.
returned: always
type: list
stderr:
description: The set of error responses from the vlag command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
VLAG_EXISTS = None
def pn_cli(module):
"""
    This method generates the CLI prefix used to launch the Netvisor CLI.
    It parses the username, password and switch parameters from the module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
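# Illustrative only (assumed credentials): with pn_cliusername='net',
# pn_clipassword='pw' and the default pn_cliswitch='local', pn_cli() returns
#   '/usr/bin/cli --quiet --user net:pw  switch-local '
# (note the doubled space where the two fragments are concatenated).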
def check_cli(module, cli):
"""
This method checks for idempotency using the vlag-show command.
    If a vlag with the given name exists, set the global VLAG_EXISTS to True, else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VLAG_EXISTS
"""
name = module.params['pn_name']
show = cli + ' vlag-show format name no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
# Global flags
global VLAG_EXISTS
if name in out:
VLAG_EXISTS = True
else:
VLAG_EXISTS = False
def run_cli(module, cli):
"""
    This method executes the cli command on the target node(s) and exits the
    module with a JSON response based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
    This method returns the appropriate command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vlag-create'
if state == 'absent':
command = 'vlag-delete'
if state == 'update':
command = 'vlag-modify'
return command
def main():
""" This section is for argument parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_port=dict(type='str'),
pn_peer_port=dict(type='str'),
pn_mode=dict(type='str', choices=[
'active-standby', 'active-active']),
pn_peer_switch=dict(type='str'),
pn_failover_action=dict(type='str', choices=['move', 'ignore']),
pn_lacp_mode=dict(type='str', choices=[
'off', 'passive', 'active']),
pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']),
pn_lacp_fallback=dict(type='str', choices=[
                'bundle', 'individual']),
pn_lacp_fallback_timeout=dict(type='str')
),
required_if=(
["state", "present", ["pn_name", "pn_port", "pn_peer_port",
"pn_peer_switch"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]]
)
)
# Argument accessing
state = module.params['state']
name = module.params['pn_name']
port = module.params['pn_port']
peer_port = module.params['pn_peer_port']
mode = module.params['pn_mode']
peer_switch = module.params['pn_peer_switch']
failover_action = module.params['pn_failover_action']
lacp_mode = module.params['pn_lacp_mode']
lacp_timeout = module.params['pn_lacp_timeout']
lacp_fallback = module.params['pn_lacp_fallback']
lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if command == 'vlag-delete':
check_cli(module, cli)
if VLAG_EXISTS is False:
module.exit_json(
skipped=True,
msg='VLAG with name %s does not exist' % name
)
cli += ' %s name %s ' % (command, name)
else:
if command == 'vlag-create':
check_cli(module, cli)
if VLAG_EXISTS is True:
module.exit_json(
skipped=True,
msg='VLAG with name %s already exists' % name
)
cli += ' %s name %s ' % (command, name)
if port:
cli += ' port %s peer-port %s ' % (port, peer_port)
if mode:
cli += ' mode ' + mode
if peer_switch:
cli += ' peer-switch ' + peer_switch
if failover_action:
cli += ' failover-' + failover_action + '-L2 '
if lacp_mode:
cli += ' lacp-mode ' + lacp_mode
if lacp_timeout:
cli += ' lacp-timeout ' + lacp_timeout
if lacp_fallback:
cli += ' lacp-fallback ' + lacp_fallback
if lacp_fallback_timeout:
cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
Motaku/ansible
|
refs/heads/devel
|
test/units/parsing/test_mod_args.py
|
109
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.errors import AnsibleParserError
from ansible.compat.tests import unittest
class TestModArgsDwim(unittest.TestCase):
# TODO: add tests that construct ModuleArgsParser with a task reference
# TODO: verify the AnsibleError raised on failure knows the task
# and the task knows the line numbers
def setUp(self):
pass
def _debug(self, mod, args, to):
print("RETURNED module = {0}".format(mod))
print(" args = {0}".format(args))
print(" to = {0}".format(to))
def tearDown(self):
pass
def test_basic_shell(self):
m = ModuleArgsParser(dict(shell='echo hi'))
mod, args, to = m.parse()
self._debug(mod, args, to)
self.assertEqual(mod, 'command')
self.assertEqual(args, dict(
_raw_params = 'echo hi',
_uses_shell = True,
))
self.assertIsNone(to)
def test_basic_command(self):
m = ModuleArgsParser(dict(command='echo hi'))
mod, args, to = m.parse()
self._debug(mod, args, to)
self.assertEqual(mod, 'command')
self.assertEqual(args, dict(
_raw_params = 'echo hi',
))
self.assertIsNone(to)
def test_shell_with_modifiers(self):
m = ModuleArgsParser(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep'))
mod, args, to = m.parse()
self._debug(mod, args, to)
self.assertEqual(mod, 'command')
self.assertEqual(args, dict(
creates = '/tmp/baz',
removes = '/tmp/bleep',
_raw_params = '/bin/foo',
_uses_shell = True,
))
self.assertIsNone(to)
def test_normal_usage(self):
m = ModuleArgsParser(dict(copy='src=a dest=b'))
mod, args, to = m.parse()
self._debug(mod, args, to)
self.assertEqual(mod, 'copy')
self.assertEqual(args, dict(src='a', dest='b'))
self.assertIsNone(to)
def test_complex_args(self):
m = ModuleArgsParser(dict(copy=dict(src='a', dest='b')))
mod, args, to = m.parse()
self._debug(mod, args, to)
self.assertEqual(mod, 'copy')
self.assertEqual(args, dict(src='a', dest='b'))
self.assertIsNone(to)
def test_action_with_complex(self):
m = ModuleArgsParser(dict(action=dict(module='copy', src='a', dest='b')))
mod, args, to = m.parse()
self._debug(mod, args, to)
self.assertEqual(mod, 'copy')
self.assertEqual(args, dict(src='a', dest='b'))
self.assertIsNone(to)
def test_action_with_complex_and_complex_args(self):
m = ModuleArgsParser(dict(action=dict(module='copy', args=dict(src='a', dest='b'))))
mod, args, to = m.parse()
self._debug(mod, args, to)
self.assertEqual(mod, 'copy')
self.assertEqual(args, dict(src='a', dest='b'))
self.assertIsNone(to)
def test_local_action_string(self):
m = ModuleArgsParser(dict(local_action='copy src=a dest=b'))
mod, args, to = m.parse()
self._debug(mod, args, to)
self.assertEqual(mod, 'copy')
self.assertEqual(args, dict(src='a', dest='b'))
self.assertIs(to, 'localhost')
def test_multiple_actions(self):
m = ModuleArgsParser(dict(action='shell echo hi', local_action='shell echo hi'))
self.assertRaises(AnsibleParserError, m.parse)
m = ModuleArgsParser(dict(action='shell echo hi', shell='echo hi'))
self.assertRaises(AnsibleParserError, m.parse)
m = ModuleArgsParser(dict(local_action='shell echo hi', shell='echo hi'))
self.assertRaises(AnsibleParserError, m.parse)
m = ModuleArgsParser(dict(ping='data=hi', shell='echo hi'))
self.assertRaises(AnsibleParserError, m.parse)
|
40423123/2017springcd_hw
|
refs/heads/gh-pages
|
plugin/liquid_tags/b64img.py
|
312
|
"""
Image Tag
---------
This implements a Liquid-style image tag for Pelican,
based on the liquid img tag which is based on the octopress image tag [1]_
Syntax
------
{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}
Examples
--------
{% b64img /images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png 150 150 "Ninja Attack!" "Ninja in attack posture" %}
Output
------
<img src="data:;base64,....">
<img class="left half" src="data:;base64,..." title="Ninja Attack!" alt="Ninja Attack!">
<img class="left half" src="data:;base64,..." width="150" height="150" title="Ninja Attack!" alt="Ninja in attack posture">
[1] https://github.com/imathis/octopress/blob/master/plugins/image_tag.rb
"""
import re
import base64
import urllib2
from .mdx_liquid_tags import LiquidTags
import six
SYNTAX = '{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}'
# Regular expression to match the entire syntax
ReImg = re.compile("""(?P<class>\S.*\s+)?(?P<src>(?:https?:\/\/|\/|\S+\/)\S+)(?:\s+(?P<width>\d+))?(?:\s+(?P<height>\d+))?(?P<title>\s+.+)?""")
# Regular expression to split the title and alt text
ReTitleAlt = re.compile("""(?:"|')(?P<title>[^"']+)?(?:"|')\s+(?:"|')(?P<alt>[^"']+)?(?:"|')""")
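# Illustrative only (hypothetical markup, worked through by hand): for
#   {% b64img /images/ninja.png Ninja Attack! %}
# ReImg yields src='/images/ninja.png' and title=' Ninja Attack!'; the title
# carries no quotes, so ReTitleAlt does not match and the alt text falls back
# to the title in b64img() below.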
def _get_file(src):
""" Return content from local or remote file. """
try:
if '://' in src or src[0:2] == '//': # Most likely this is remote file
response = urllib2.urlopen(src)
return response.read()
else:
with open(src, 'rb') as fh:
return fh.read()
except Exception as e:
raise RuntimeError('Error generating base64image: {}'.format(e))
def base64image(src):
""" Generate base64 encoded image from srouce file. """
return base64.b64encode(_get_file(src))
@LiquidTags.register('b64img')
def b64img(preprocessor, tag, markup):
attrs = None
# Parse the markup string
match = ReImg.search(markup)
if match:
attrs = dict([(key, val.strip())
for (key, val) in six.iteritems(match.groupdict()) if val])
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
# Check if alt text is present -- if so, split it from title
if 'title' in attrs:
match = ReTitleAlt.search(attrs['title'])
if match:
attrs.update(match.groupdict())
if not attrs.get('alt'):
attrs['alt'] = attrs['title']
attrs['src'] = 'data:;base64,{}'.format(base64image(attrs['src']))
# Return the formatted text
return "<img {0}>".format(' '.join('{0}="{1}"'.format(key, val)
for (key, val) in six.iteritems(attrs)))
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
|
fiuba08/robotframework
|
refs/heads/master
|
atest/testresources/testlibs/NamespaceUsingLibrary.py
|
38
|
from robot.libraries.BuiltIn import BuiltIn
class NamespaceUsingLibrary(object):
def __init__(self):
self._importing_suite = BuiltIn().get_variable_value('${SUITE NAME}')
self._easter = BuiltIn().get_library_instance('Easter')
def get_importing_suite(self):
return self._importing_suite
def get_other_library(self):
return self._easter
|
Centril/sublime-autohide-sidebar
|
refs/heads/master
|
main.py
|
1
|
"""
Autohide Sidebar
or: sublime-autohide-sidebar
A Sublime Text plugin that autohides the sidebar and shows
it when the mouse hovers the edge of the editor's window.
Copyright (C) 2015, Mazdak Farrokhzad
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Mazdak Farrokhzad"
__copyright__ = "Copyright 2015, Mazdak Farrokhzad"
__credits__ = []
__license__ = "GPL3+"
__version__ = "0.0.1"
__maintainer__ = "Mazdak Farrokhzad"
__email__ = "twingoow@gmail.com"
__status__ = "Development"
ID = 'sublime-autohide-sidebar'
#
# Import stuff:
#
from sublime import active_window, windows, load_settings, set_timeout_async
from sublime_plugin import EventListener
from .counter import Counter
#
# Cross platform mouse movement event handler
#
from .driver import Driver
#
# Constants:
#
HIDE_DEFAULT_X = 450
def hs_padding_x():
global settings
return settings.get( 'hide_show_padding_x' )
#
# Per window wrapper:
#
class Wrapper( object ):
def __init__( self, window ):
global D
self.id = window.id()
self.window = window
self.toggled = False
self.suspended = False
D.register_new_window( self.id )
if self.is_sidebar_open(): self._toggle()
# Thanks https://github.com/titoBouzout
# https://github.com/SublimeText/SideBarFolders/blob/fb4b2ba5b8fe5b14453eebe8db05a6c1b918e029/SideBarFolders.py#L59-L75
def is_sidebar_open( self ):
view = self.window.active_view()
if view:
sel1 = view.sel()[0]
self.window.run_command( 'focus_side_bar' )
self.window.run_command( 'move',
{"by": "characters", "forward": True} )
sel2 = view.sel()[0]
if sel1 != sel2:
self.window.run_command( 'move',
{"by": "characters", "forward": False} )
return False
else:
group, index = self.window.get_view_index( view )
self.window.focus_view( view )
self.window.focus_group( group )
return True
return True # by default assume it's open if no view is opened
# Toggles the sidebar:
def _toggle( self ): self.window.run_command( "toggle_side_bar", ID )
def toggle_suspended( self ):
self.suspended = not self.suspended
self.toggled = not self.toggled
# Given an x coordinate: whether or not sidebar should hide:
def should_hide( self, x ):
global D
w = D.window_width( self.id ) or HIDE_DEFAULT_X
w2 = self.window.active_view().viewport_extent()[0] or 0
return x >= (w - w2 - hs_padding_x() * 2)
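    # Illustrative only (assumed numbers): with a 1400 px window, a 1000 px
    # viewport and hide_show_padding_x == 50, should_hide(x) is True for
    # x >= 1400 - 1000 - 2*50 == 300 -- roughly, once the pointer has moved
    # past the sidebar area (window width minus viewport width approximates
    # the sidebar width).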
# Given an x coordinate: whether or not sidebar should show:
def should_show( self, x ): return x < hs_padding_x()
# Toggles side bar if pred is fulfilled and flips toggled state:
def win_if_toggle( self, pred ):
if self.suspended: return
if pred( self.toggled ):
self.toggled = not self.toggled
self._toggle()
# Hides sidebar if it should be hidden, or shows if it should:
def hide_or_show( self ):
global D
if self.suspended: return
r = D.window_coordinates( self.id )
self.toggled = (not self.should_hide( r[0] )
if self.is_sidebar_open()
else self.should_show( r[0] ) )\
if r else False
if (self.toggled if r else self.is_sidebar_open()): self._toggle()
# On move handler:
def move( self, x ):
self.win_if_toggle( lambda s:
(self.should_hide if s else self.should_show)( x ))
# On leave handler:
def leave( self ): self.win_if_toggle( lambda s: s )
#
# Making and getting wrappers:
#
def reset_wrappers():
global wrappers
wrappers = {}
# Returns a wrapper for _id:
def wrapper( _id ):
global wrappers
return wrappers[_id or active_window().id()]
# Registers a new window:
def register_new( window ):
global wrappers
w = Wrapper( window )
wrappers[w.id] = w
return w
# Returns the wrapper, registers it if not:
def wrapper_or_register( window ):
global wrappers
_id = window.id()
return wrapper( _id ) if _id in wrappers else register_new( window )
#
# Plugin listeners & loading:
#
# Get an on_load_counter, or set one if not before:
on_load_counters = {}
def on_load_counter( _id ):
global on_load_counters
if _id not in on_load_counters: on_load_counters[_id] = Counter()
return on_load_counters[_id]
# Hide sidebars in new windows:
class Listener( EventListener ):
# Non-fake toggle_side_bar: Suspend tracking for this window!
def on_window_command( self, window, name, args ):
if name == 'toggle_side_bar' and args != ID:
wrapper( window.id() ).toggle_suspended()
# Wait: last on_load in sequence => make or get wrapper and hide/show it.
def on_load( self, view ):
w = view.window()
c = on_load_counter( w.id() )
c.inc()
# Handle on_load, get the appropriate wrapper or make one:
set_timeout_async( lambda: not c.dec() and
wrapper_or_register( w ).hide_or_show(), 0 )
def plugin_loaded():
print( "pre-load-settings")
# Load settings:
global settings
settings = load_settings( 'sublime-autohide-sidebar.sublime-settings' )
print( "post-load-settings" )
# Hide ALL sidebars:
global D, T
D = Driver()
reset_wrappers()
for w in windows(): register_new( w )
# Start receiving events:
def move( _id, x, y ): wrapper( _id ).move( x )
def leave( _id ): wrapper( _id ).leave()
T = D.tracker( move, leave )
T.start()
print( "post-T.start()")
def plugin_unloaded():
print("stop#1")
# Stop receiving events:
global D, T
    T.stop()
|
jaywreddy/django
|
refs/heads/master
|
tests/view_tests/default_urls.py
|
405
|
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
# This is the same as in the default project template
url(r'^admin/', admin.site.urls),
]
|
doismellburning/edx-platform
|
refs/heads/master
|
lms/djangoapps/verify_student/migrations/0003_auto__add_field_softwaresecurephotoverification_display.py
|
114
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SoftwareSecurePhotoVerification.display'
db.add_column('verify_student_softwaresecurephotoverification', 'display',
self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SoftwareSecurePhotoVerification.display'
db.delete_column('verify_student_softwaresecurephotoverification', 'display')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reverification.midcoursereverificationwindow': {
'Meta': {'object_name': 'MidcourseReverificationWindow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'verify_student.softwaresecurephotoverification': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'<function uuid4 at 0x3176410>'", 'max_length': '255', 'db_index': 'True'}),
'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'window': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reverification.MidcourseReverificationWindow']", 'null': 'True'})
}
}
complete_apps = ['verify_student']
|
aequitas/csvkit
|
refs/heads/master
|
tests/test_utilities/test_sql2csv.py
|
21
|
#!/usr/bin/env python
import six
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
from csvkit.utilities.sql2csv import SQL2CSV
from csvkit.utilities.csvsql import CSVSQL
from tests.utils import stdin_as_string
class TestSQL2CSV(unittest.TestCase):
def setUp(self):
self.db_file = "foo.db"
def tearDown(self):
try:
os.remove(self.db_file)
except OSError:
pass
def csvsql(self, csv_file):
"""
Load test data into the DB and return it as a string for comparison.
"""
args = ['--db', "sqlite:///" + self.db_file, '--table', 'foo', '--insert', csv_file]
utility = CSVSQL(args)
utility.main()
return open(csv_file, 'r').read().strip()
def test_query(self):
args = ['--query', 'select 6*9 as question']
output_file = six.StringIO()
utility = SQL2CSV(args, output_file)
utility.main()
csv = output_file.getvalue()
self.assertTrue('question' in csv)
self.assertTrue('54' in csv)
def test_stdin(self):
output_file = six.StringIO()
input_file = six.StringIO("select cast(3.1415 * 13.37 as integer) as answer")
with stdin_as_string(input_file):
utility = SQL2CSV([], output_file)
utility.main()
csv = output_file.getvalue()
self.assertTrue('answer' in csv)
self.assertTrue('42' in csv)
def test_stdin_with_query(self):
args = ['--query', 'select 6*9 as question']
output_file = six.StringIO()
input_file = six.StringIO("select cast(3.1415 * 13.37 as integer) as answer")
with stdin_as_string(input_file):
utility = SQL2CSV(args, output_file)
utility.main()
csv = output_file.getvalue()
self.assertTrue('question' in csv)
self.assertTrue('42' not in csv)
def test_unicode(self):
target_output = self.csvsql('examples/test_utf8.csv')
args = ['--db', "sqlite:///" + self.db_file, '--query', 'select * from foo']
output_file = six.StringIO()
utility = SQL2CSV(args, output_file)
utility.main()
self.assertEqual(output_file.getvalue().strip(), target_output)
def test_no_header_row(self):
self.csvsql('examples/dummy.csv')
args = ['--db', "sqlite:///" + self.db_file, '--no-header-row', '--query', 'select * from foo']
output_file = six.StringIO()
utility = SQL2CSV(args, output_file)
utility.main()
csv = output_file.getvalue()
self.assertTrue('a,b,c' not in csv)
self.assertTrue('1,2,3' in csv)
def test_linenumbers(self):
self.csvsql('examples/dummy.csv')
args = ['--db', "sqlite:///" + self.db_file, '--linenumbers', '--query', 'select * from foo']
output_file = six.StringIO()
utility = SQL2CSV(args, output_file)
utility.main()
csv = output_file.getvalue()
self.assertTrue('line_number,a,b,c' in csv)
self.assertTrue('1,1,2,3' in csv)
|
dursk/django
|
refs/heads/master
|
django/db/migrations/autodetector.py
|
140
|
from __future__ import unicode_literals
import datetime
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils import six
from .topological_sort import stable_topological_sort
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of appliable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
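    # Illustrative usage (assumed call site, modelled on makemigrations):
    #   autodetector = MigrationAutodetector(
    #       loader.project_state(), ProjectState.from_apps(apps), questioner)
    #   changes = autodetector.changes(graph=loader.graph)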
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {
key: self.deep_deconstruct(value)
for key, value in obj.items()
}
elif isinstance(obj, COMPILED_REGEX_TYPE):
return RegexObject(obj)
elif isinstance(obj, type):
# If this is a type that implements 'deconstruct' as an instance method,
# avoid treating this as being deconstructible itself - see #22951
return obj
elif hasattr(obj, 'deconstruct'):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{
key: self.deep_deconstruct(value)
for key, value in kwargs.items()
},
)
else:
return obj
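    # Illustrative only (not in the original source): for
    # f = models.CharField(max_length=100), f.deconstruct() returns
    # (name, 'django.db.models.CharField', [], {'max_length': 100});
    # deep_deconstruct(f) drops the name and returns
    # ('django.db.models.CharField', [], {'max_length': 100}), a normalized
    # form that can be compared across project states.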
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
change during renames)
"""
fields_def = []
for name, field in sorted(fields):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
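    # Illustrative only: because the 'to' kwarg is deleted above, a
    # ForeignKey('app.Author') and a ForeignKey('app.Writer') produce
    # identical entries here, which is what lets rename detection match
    # fields across a renamed target model.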
def _detect_changes(self, convert_apps=None, graph=None):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# We'll then go through that list later and order it and split
# into migrations to resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_apps = self.from_state.concrete_apps
self.new_apps = self.to_state.apps
self.old_model_keys = []
self.old_proxy_keys = []
self.old_unmanaged_keys = []
self.new_model_keys = []
self.new_proxy_keys = []
self.new_unmanaged_keys = []
for al, mn in sorted(self.from_state.models.keys()):
model = self.old_apps.get_model(al, mn)
if not model._meta.managed:
self.old_unmanaged_keys.append((al, mn))
elif al not in self.from_state.real_apps:
if model._meta.proxy:
self.old_proxy_keys.append((al, mn))
else:
self.old_model_keys.append((al, mn))
for al, mn in sorted(self.to_state.models.keys()):
model = self.new_apps.get_model(al, mn)
if not model._meta.managed:
self.new_unmanaged_keys.append((al, mn))
elif (
al not in self.from_state.real_apps or
(convert_apps and al in convert_apps)
):
if model._meta.proxy:
self.new_proxy_keys.append((al, mn))
else:
self.new_model_keys.append((al, mn))
# Renames have to come first
self.generate_renamed_models()
# Prepare lists of fields and generate through model map
self._prepare_field_lists()
self._generate_through_model_map()
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
# Generate field operations
self.generate_renamed_fields()
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_altered_db_table()
self.generate_altered_order_with_respect_to()
self._sort_migrations()
self._build_migration_list(graph)
self._optimize_migrations()
return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists, and prepare a list of the fields that used
through models in the old state so we can make dependencies
from the through model deletion to the field that uses it.
"""
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
self.through_users = {}
self.old_field_keys = set()
self.new_field_keys = set()
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
def _generate_through_model_map(self):
"""
        Builds self.through_users, a map from each user-defined M2M through
        model to the (app_label, model_name, field_name) that uses it.
"""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None)
and not old_field.remote_field.through._meta.auto_created):
through_key = (
old_field.remote_field.through._meta.app_label,
old_field.remote_field.through._meta.model_name,
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
def _build_migration_list(self, graph=None):
"""
We need to chop the lists of operations up into migrations with
dependencies on each other. We do this by stepping up an app's list of
operations until we find one that has an outgoing dependency that isn't
in another app's migration yet (hasn't been chopped off its list). We
then chop off the operations before it into a migration and move onto
the next app. If we loop back around without doing anything, there's a
circular dependency (which _should_ be impossible as the operations are
all split at this point so they can't depend and be depended on).
"""
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations.keys()):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
is_swappable_dep = False
if dep[0] == "__setting__":
# We need to temporarily resolve the swappable dependency to prevent
# circular references. While keeping the dependency checks on the
# resolved model we still add the swappable dependencies.
# See #23322
resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
original_dep = dep
dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
is_swappable_dep = True
if dep[0] != app_label and dep[0] != "__setting__":
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(dep[0], []):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add((original_dep[0], original_dep[1]))
elif dep[0] in self.migrations:
operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
else:
# If we can't find the other app, we add a first/last dependency,
# but only if we've already been through once and checked everything
if chop_mode:
# If the app already exists, we add a dependency on the last migration,
# as we don't know which migration contains the target field.
# If it's not yet migrated or has no migrations, we use __first__
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
else:
operation_dependencies.add((dep[0], "__first__"))
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
self.generated_operations[app_label] = self.generated_operations[app_label][1:]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
instance.dependencies = list(dependencies)
instance.operations = chopped
instance.initial = app_label not in self.existing_apps
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. The order we have already isn't bad,
but we need to pull a few things around so FKs work nicely inside the
same app
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
def check_dependency(self, operation, dependency):
"""
Returns ``True`` if the given operation depends on the given dependency,
``False`` otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
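        # Examples of the tuple shapes check_dependency() understands:
        #   ('app', 'model', None, True)        -> model created
        #   ('app', 'model', 'field', True)     -> field created
        #   ('app', 'model', 'field', False)    -> field removed
        #   ('app', 'model', 'field', "alter")  -> field altered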
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
Finds any renamed models, and generates the operations for them,
and removes the old entry from the model lists.
Must be run before other model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = set(self.new_model_keys) - set(self.old_model_keys)
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = set(self.old_model_keys) - set(self.new_model_keys)
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(rem_model_state, model_state):
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
)
)
self.renamed_models[app_label, model_name] = rem_model_name
renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name)
self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % (
model_state.app_label,
model_state.name,
)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.append((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also defer any model options that refer to collections of fields
that might be deferred (e.g. unique_together, index_together).
"""
old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys)
added_models = set(self.new_model_keys) - old_keys
added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
model_opts = self.new_apps.get_model(app_label, model_name)._meta
# Gather related fields
related_fields = {}
primary_key_rel = None
for field in model_opts.local_fields:
if field.remote_field:
if field.remote_field.model:
if field.primary_key:
primary_key_rel = field.remote_field.model
elif not field.remote_field.parent_link:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if (getattr(field.remote_field, "through", None)
and not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model_opts.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Are there unique/index_together to defer?
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append((
primary_key_rel._meta.app_label,
primary_key_rel._meta.object_name,
None,
True
))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[d for d in model_state.fields if d[0] not in related_fields],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_opts.managed:
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True
))
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
# Generate other opns
related_dependencies = [
(app_label, model_name, name, True)
for name, field in sorted(related_fields.items())
]
related_dependencies.append((app_label, model_name, None, True))
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies
)
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
]
)
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
        We use the same statements as for concrete models, as that way there's
        less code duplication, but of course for proxy models we can skip all
        that pointless field stuff and just chuck out an operation.
"""
added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys)
deleted_models = set(self.old_model_keys) - new_keys
deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys
all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
model = self.old_apps.get_model(app_label, model_name)
if not model._meta.managed:
# Skip here, no need to handle fields for unmanaged models
continue
# Gather related fields
related_fields = {}
for field in model._meta.local_fields:
if field.remote_field:
if field.remote_field.model:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if (getattr(field.remote_field, "through", None)
and not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model._meta.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Generate option removal first
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
)
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
)
)
# Then remove each related field
for name, field in sorted(related_fields.items()):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
)
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
for related_object in model._meta.related_objects:
related_object_app_label = related_object.related_model._meta.app_label
object_name = related_object.related_model._meta.object_name
field_name = related_object.field.name
dependencies.append((related_object_app_label, object_name, field_name, False))
if not related_object.many_to_many:
dependencies.append((related_object_app_label, object_name, field_name, "alter"))
for name, field in sorted(related_fields.items()):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower))
if through_user:
dependencies.append((through_user[0], through_user[1], through_user[2], False))
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""
        Works out renamed fields and generates RenameField operations for them.
"""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
if rem_app_label == app_label and rem_model_name == model_name:
old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
old_rel_to = old_field_dec[2]['to']
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
if old_field_dec == field_dec:
if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
)
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[app_label, model_name, field_name] = rem_field_name
break
def generate_added_fields(self):
"""
Fields that have been added
"""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.remote_field and field.remote_field.model:
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True,
))
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
preserve_default = True
if (not field.null and not field.has_default() and
not isinstance(field, models.ManyToManyField) and
not (field.blank and field.empty_strings_allowed)):
field = field.clone()
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
preserve_default = False
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
"""
Fields that have been altered.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
# Did the field change?
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
rename_key = (
new_field.remote_field.model._meta.app_label,
new_field.remote_field.model._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.model = old_field.remote_field.model
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
if old_field_dec != new_field_dec:
both_m2m = (
isinstance(old_field, models.ManyToManyField) and
isinstance(new_field, models.ManyToManyField)
)
neither_m2m = (
not isinstance(old_field, models.ManyToManyField) and
not isinstance(new_field, models.ManyToManyField)
)
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (old_field.null and not new_field.null and not new_field.has_default() and
not isinstance(new_field, models.ManyToManyField)):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
)
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name) or set()
if old_value:
old_value = {
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
}
new_value = new_model_state.options.get(option_name) or set()
if new_value:
new_value = set(new_value)
if old_value != new_value:
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
)
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys
).union(
self.kept_unmanaged_keys
).union(
# unmanaged converted to managed
set(self.old_unmanaged_keys).intersection(self.new_model_keys)
).union(
# managed converted to unmanaged
set(self.old_model_keys).intersection(self.new_unmanaged_keys)
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to") !=
new_model_state.options.get("order_with_respect_to")):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
else:
new_name = "%04i_%s" % (
next_number,
migration_name or self.suggest_name(migration.operations)[:100],
)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
        returns a modified set of changes which trims out as many migrations
        as possible that are not in app_labels.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
        but we put some effort into the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
        beginning of it. If no number is found, returns None.
"""
match = re.match(r'^\d+', name)
if match:
return int(match.group())
return None
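    # Behavior sketch: parse_number('0002_remove_author_age') == 2, while
    # parse_number('initial') returns None.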
|
liorvh/golismero
|
refs/heads/master
|
thirdparty_libs/django/conf/locale/fa/formats.py
|
234
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j F Y، ساعت G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y/n/j'
SHORT_DATETIME_FORMAT = 'Y/n/j، G:i:s'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
poeschlr/kicad-3d-models-in-freecad
|
refs/heads/master
|
cadquery/FCAD_script_generator/molex/cq_models/conn_molex_kk_6410.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# CadQuery script returning Molex KK 6410 Connectors
## requirements
## freecad (v0.15 and v0.16 have been tested)
## cadquery FreeCAD plugin (v0.3.0 and v0.2.0 have been tested)
## https://github.com/jmwright/cadquery-freecad-module
## This script can be run from within the cadquery module of freecad.
## To generate VRML/STEP files, use the launch-cq-molex
## script of the parent directory.
#* This is a cadquery script for the generation of MCAD Models. *
#* *
#* Copyright (c) 2016 *
#* Rene Poeschl https://github.com/poeschlr *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU General Public License (GPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#* The models generated with this script add the following exception: *
#* As a special exception, if you create a design which uses this symbol, *
#* and embed this symbol or unaltered portions of this symbol into the *
#* design, this symbol does not by itself cause the resulting design to *
#* be covered by the GNU General Public License. This exception does not *
#* however invalidate any other reasons why the design itself might be *
#* covered by the GNU General Public License. If you modify this symbol, *
#* you may extend this exception to your version of the symbol, but you *
#* are not obligated to do so. If you do not wish to do so, delete this *
#* exception statement from your version. *
#****************************************************************************
__title__ = "model description for Molex KK 6410 series connectors"
__author__ = "hackscribble"
__Comment__ = 'model description for Molex KK 6410 series connectors using cadquery'
___ver___ = "0.3 04/12/2017"
class LICENCE_Info():
############################################################################
STR_licAuthor = "Ray Benitez"
STR_licEmail = "hackscribble@outlook.com"
STR_licOrgSys = ""
STR_licPreProc = ""
LIST_license = ["",]
############################################################################
import cadquery as cq
from Helpers import show
from collections import namedtuple
import FreeCAD
class series_params():
series = "KK-254"
manufacturer = 'Molex'
mpn_format_string = 'AE-6410-{pincount:02d}A'
orientation = 'V'
datasheet = 'http://www.molex.com/pdm_docs/sd/022272021_sd.pdf'
pinrange = range(2, 17)
mount_pin = ''
number_of_rows = 2
body_color_key = "white body"
pins_color_key = "metal grey pins"
color_keys = [
body_color_key,
pins_color_key
]
obj_suffixes = [
'__body',
'__pins'
]
pitch = 2.54
pin_width = 0.64
pin_chamfer_long = 0.25
pin_chamfer_short = 0.25
pin_height = 14.22 # DIMENSION C
pin_depth = 3.56 # DIMENSION F depth below bottom surface of base
pin_inside_distance = 1.27 # Distance between centre of end pin and end of body
body_width = 5.8
body_height = 3.17
body_channel_depth = 0.6
body_channel_width = 1.5
body_cutout_length = 1.2
body_cutout_width = 0.6
ramp_split_breakpoint = 6 # Above this number of pins, the tab is split into two parts
ramp_chamfer_x = 0.3
ramp_chamfer_y = 0.7
calcDim = namedtuple('calcDim', ['length', 'ramp_height', 'ramp_width', 'ramp_offset'])
def dimensions(num_pins):
    length = (num_pins - 1) * series_params.pitch + 2 * series_params.pin_inside_distance
    ramp_height = 11.7 - series_params.body_height
    if num_pins > series_params.ramp_split_breakpoint:
        ramp_width = series_params.pitch * 2
        ramp_offset = series_params.pitch * (num_pins - 5) / 2
    else:
        ramp_width = (num_pins - 1) * series_params.pitch / 2
        ramp_offset = 0
    return calcDim(length=length, ramp_height=ramp_height, ramp_width=ramp_width, ramp_offset=ramp_offset)
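# Behavior sketch for the split logic above (illustrative pin count): for
# dims = dimensions(8), 8 exceeds ramp_split_breakpoint, so the tab splits
# and dims.ramp_width == pitch * 2 while dims.ramp_offset == pitch * 1.5.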
def generate_straight_pin():
    pin_width = series_params.pin_width
    pin_depth = series_params.pin_depth
    pin_height = series_params.pin_height
    pin_inside_distance = series_params.pin_inside_distance
    chamfer_long = series_params.pin_chamfer_long
    chamfer_short = series_params.pin_chamfer_short
    pin = cq.Workplane("YZ").workplane(offset=-pin_width/2.0)\
        .moveTo(-pin_width/2.0, -pin_depth)\
        .rect(pin_width, pin_height, False)\
        .extrude(pin_width)
pin = pin.faces(">Z").edges(">X").chamfer(chamfer_short,chamfer_long)
pin = pin.faces(">Z").edges("<X").chamfer(chamfer_short,chamfer_long)
pin = pin.faces(">Z").edges(">Y").chamfer(chamfer_long,chamfer_short)
pin = pin.faces(">Z").edges("<Y").chamfer(chamfer_short,chamfer_long)
pin = pin.faces("<Z").edges(">X").chamfer(chamfer_short,chamfer_long)
pin = pin.faces("<Z").edges("<X").chamfer(chamfer_short,chamfer_long)
pin = pin.faces("<Z").edges(">Y").chamfer(chamfer_short,chamfer_long)
pin = pin.faces("<Z").edges("<Y").chamfer(chamfer_short,chamfer_long)
return pin
def generate_pins(num_pins):
    pitch = series_params.pitch
    pin = generate_straight_pin()
pins = pin
for i in range(0, num_pins):
pins = pins.union(pin.translate((i * pitch, 0, 0)))
return pins
def generate_body(num_pins, calc_dim):
pin_inside_distance = series_params.pin_inside_distance
pin_width = series_params.pin_width
pitch = series_params.pitch
body_len = calc_dim.length
body_width = series_params.body_width
body_height = series_params.body_height
body_channel_depth = series_params.body_channel_depth
body_channel_width = series_params.body_channel_width
body_cutout_length = series_params.body_cutout_length
body_cutout_width = series_params.body_cutout_width
# ramp_split_breakpoint = series_params.ramp_split_breakpoint
ramp_chamfer_x = series_params.ramp_chamfer_x
ramp_chamfer_y = series_params.ramp_chamfer_y
ramp_height = calc_dim.ramp_height
ramp_width = calc_dim.ramp_width
ramp_offset = calc_dim.ramp_offset
    body = cq.Workplane("XY").moveTo(((num_pins-1)*pitch)/2.0, 0).rect(body_len, body_width).extrude(body_height)  # .edges("|Z").fillet(0.05)
body = body.faces("<Z").workplane().rarray(pitch, 1, num_pins, 1)\
.rect(body_channel_width, body_width).cutBlind(-body_channel_depth)
body = body.faces(">Z").workplane().moveTo(((num_pins-1)*pitch)/2.0, 0).rarray(pitch, 1, num_pins-1, 1)\
.rect(body_cutout_width, body_cutout_length).cutThruAll(False)
ramp = cq.Workplane("YZ").workplane(offset=ramp_offset).moveTo(-body_width/2.0, body_height)\
.line(0,ramp_height)\
.line(1.0,0)\
.line(0,-3.8)\
.line(0.5,-0.9)\
.line(0,-1.0)\
.line(-0.5,-0.5)\
.line(0,-1.7)\
.threePointArc((-body_width/2.0 + 1 + (0.6 * (1-0.707)), body_height + (1 - 0.707)* 0.6), (-body_width/2.0 + 1 + 0.6, body_height))\
.close().extrude(ramp_width).faces(">X").edges(">Z").chamfer(ramp_chamfer_x, ramp_chamfer_y)
ramp_mirror = ramp.mirror("YZ")
ramp = ramp.union(ramp_mirror).translate(((num_pins - 1) * pitch / 2.0, 0, 0))
body = body.union(ramp)
return body, None
def generate_part(num_pins):
calc_dim = dimensions(num_pins)
pins = generate_pins(num_pins)
body, insert = generate_body(num_pins, calc_dim)
return (body, pins)
# opened from within freecad
if "module" in __name__:
part_to_build = 16
FreeCAD.Console.PrintMessage("Started from CadQuery: building " +
                                 str(part_to_build) + " pin variant\n")
(body, pins) = generate_part(part_to_build)
show(pins)
show(body)
|
axbaretto/beam
|
refs/heads/master
|
sdks/python/.tox/lint/lib/python2.7/site-packages/hamcrest/library/text/stringendswith.py
|
6
|
from hamcrest.library.text.substringmatcher import SubstringMatcher
from hamcrest.core.helpers.hasmethod import hasmethod
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
class StringEndsWith(SubstringMatcher):
def __init__(self, substring):
super(StringEndsWith, self).__init__(substring)
def _matches(self, item):
if not hasmethod(item, 'endswith'):
return False
return item.endswith(self.substring)
def relationship(self):
return 'ending with'
def ends_with(string):
"""Matches if object is a string ending with a given string.
:param string: The string to search for.
This matcher first checks whether the evaluated object is a string. If so,
it checks if ``string`` matches the ending characters of the evaluated
object.
Example::
ends_with("bar")
will match "foobar".
"""
return StringEndsWith(string)
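# Minimal usage sketch (assumes the top-level hamcrest package is available):
#
#     from hamcrest import assert_that
#     assert_that("foobar", ends_with("bar"))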
|
fmaguire/BayeHem
|
refs/heads/master
|
bayehem/rsem/detonate-1.11/ref-eval/boost/tools/build/v2/test/core_import_module.py
|
45
|
#!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(pass_toolset=0)
t.write("code", """\
module a
{
rule r1 ( )
{
ECHO R1 ;
}
local rule l1 ( )
{
ECHO A.L1 ;
}
}
module a2
{
rule r2 ( )
{
ECHO R2 ;
}
}
IMPORT a2 : r2 : : a2.r2 ;
rule a.l1 ( )
{
ECHO L1 ;
}
module b
{
IMPORT_MODULE a : b ;
rule test
{
# Call rule visible via IMPORT_MODULE
a.r1 ;
# Call rule in global scope
a2.r2 ;
# Call rule in global scope. Doesn't find local rule
a.l1 ;
# Make l1 visible
EXPORT a : l1 ;
a.l1 ;
}
}
IMPORT b : test : : test ;
test ;
module c
{
rule test
{
ECHO CTEST ;
}
}
IMPORT_MODULE c : ;
c.test ;
actions do-nothing { }
do-nothing all ;
""")
t.run_build_system(["-fcode"], stdout="""\
R1
R2
L1
A.L1
CTEST
""")
t.cleanup()
|
codenote/chromium-test
|
refs/heads/master
|
chrome/test/pyautolib/chromeos/__init__.py
|
12133432
| |
moshele/networking-mlnx
|
refs/heads/master
|
neutron/tests/unit/ml2/drivers/__init__.py
|
12133432
| |
vsajip/django
|
refs/heads/django3
|
tests/regressiontests/one_to_one_regress/models.py
|
9
|
from __future__ import unicode_literals
from django.db import models
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __unicode__(self):
return "%s the place" % self.name
class Restaurant(models.Model):
place = models.OneToOneField(Place)
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
def __unicode__(self):
return "%s the restaurant" % self.place.name
class Bar(models.Model):
place = models.OneToOneField(Place)
serves_cocktails = models.BooleanField()
def __unicode__(self):
return "%s the bar" % self.place.name
class UndergroundBar(models.Model):
place = models.OneToOneField(Place, null=True)
serves_cocktails = models.BooleanField()
class Favorites(models.Model):
    name = models.CharField(max_length=50)
restaurants = models.ManyToManyField(Restaurant)
def __unicode__(self):
return "Favorites for %s" % self.name
class Target(models.Model):
pass
class Pointer(models.Model):
other = models.OneToOneField(Target, primary_key=True)
class Pointer2(models.Model):
other = models.OneToOneField(Target)
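# Usage sketch (assumes a migrated test database; objects are illustrative):
# the OneToOneField exposes the reverse side as a plain attribute.
#
#     p = Place.objects.create(name="Demo", address="1 Main St")
#     r = Restaurant.objects.create(place=p, serves_hot_dogs=True,
#                                   serves_pizza=False)
#     assert p.restaurant == r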
|
wilvk/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/linode/linode.py
|
41
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: linode
short_description: Manage instances on the Linode Public Cloud
description:
    - Manage Linode Public Cloud instances and optionally wait for them to be 'running'.
version_added: "1.3"
options:
state:
description:
- Indicate desired state of the resource
choices: [ absent, active, deleted, present, restarted, started, stopped ]
default: present
api_key:
description:
- Linode API key
name:
description:
- Name to give the instance (alphanumeric, dashes, underscore).
- To keep sanity on the Linode Web Console, name is prepended with C(LinodeID_).
displaygroup:
description:
- Add the instance to a Display Group in Linode Manager.
version_added: "2.3"
linode_id:
description:
- Unique ID of a linode server
aliases: [ lid ]
additional_disks:
description:
- List of dictionaries for creating additional disks that are added to the Linode configuration settings.
- Dictionary takes Size, Label, Type. Size is in MB.
version_added: "2.3"
alert_bwin_enabled:
description:
- Set status of bandwidth in alerts.
type: bool
version_added: "2.3"
alert_bwin_threshold:
description:
- Set threshold in MB of bandwidth in alerts.
version_added: "2.3"
alert_bwout_enabled:
description:
- Set status of bandwidth out alerts.
type: bool
version_added: "2.3"
alert_bwout_threshold:
description:
- Set threshold in MB of bandwidth out alerts.
version_added: "2.3"
alert_bwquota_enabled:
description:
- Set status of bandwidth quota alerts as percentage of network transfer quota.
type: bool
version_added: "2.3"
alert_bwquota_threshold:
description:
- Set threshold in MB of bandwidth quota alerts.
version_added: "2.3"
alert_cpu_enabled:
description:
- Set status of receiving CPU usage alerts.
type: bool
version_added: "2.3"
alert_cpu_threshold:
description:
- Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
version_added: "2.3"
alert_diskio_enabled:
description:
- Set status of receiving disk IO alerts.
type: bool
version_added: "2.3"
alert_diskio_threshold:
description:
- Set threshold for average IO ops/sec over 2 hour period.
version_added: "2.3"
backupweeklyday:
description:
- Integer value for what day of the week to store weekly backups.
version_added: "2.3"
plan:
description:
- plan to use for the instance (Linode plan)
payment_term:
description:
- payment term to use for the instance (payment term in months)
default: 1
choices: [ 1, 12, 24 ]
password:
description:
- root password to apply to a new server (auto generated if missing)
private_ip:
description:
- Add private IPv4 address when Linode is created.
type: bool
default: "no"
version_added: "2.3"
ssh_pub_key:
description:
- SSH public key applied to root user
swap:
description:
- swap size in MB
default: 512
distribution:
description:
- distribution to use for the instance (Linode Distribution)
datacenter:
description:
- datacenter to create an instance in (Linode Datacenter)
kernel_id:
description:
- kernel to use for the instance (Linode Kernel)
version_added: "2.4"
wait:
description:
- wait for the instance to be in state C(running) before returning
type: bool
default: "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
watchdog:
description:
- Set status of Lassie watchdog.
type: bool
default: "True"
version_added: "2.2"
requirements:
- python >= 2.6
- linode-python
author:
- Vincent Viallet (@zbal)
notes:
- C(LINODE_API_KEY) env variable can be used instead.
'''
EXAMPLES = '''
- name: Create a server with a private IP Address
linode:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
private_ip: yes
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
delegate_to: localhost
- name: Fully configure new server
linode:
api_key: 'longStringFromLinodeApi'
name: linode-test1
plan: 4
datacenter: 2
distribution: 99
kernel_id: 138
password: 'superSecureRootPassword'
private_ip: yes
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
alert_bwquota_enabled: True
alert_bwquota_threshold: 80
alert_bwin_enabled: True
alert_bwin_threshold: 10
alert_cpu_enabled: True
alert_cpu_threshold: 210
alert_bwout_enabled: True
alert_bwout_threshold: 10
alert_diskio_enabled: True
alert_diskio_threshold: 10000
backupweeklyday: 1
backupwindow: 2
displaygroup: 'test'
additional_disks:
- {Label: 'disk1', Size: 2500, Type: 'raw'}
- {Label: 'newdisk', Size: 2000}
watchdog: True
delegate_to: localhost
- name: Ensure a running server (create if missing)
linode:
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
delegate_to: localhost
- name: Delete a server
linode:
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: absent
delegate_to: localhost
- name: Stop a server
linode:
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: stopped
delegate_to: localhost
- name: Reboot a server
linode:
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: restarted
delegate_to: localhost
'''
import os
import time
try:
from linode import api as linode_api
HAS_LINODE = True
except ImportError:
HAS_LINODE = False
from ansible.module_utils.basic import AnsibleModule
def randompass():
'''
    Generate a long random password that complies with Linode requirements
'''
# Linode API currently requires the following:
# It must contain at least two of these four character classes:
# lower case letters - upper case letters - numbers - punctuation
# we play it safe :)
import random
import string
# as of python 2.4, this reseeds the PRNG from urandom
random.seed()
lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
number = ''.join(random.choice(string.digits) for x in range(6))
punct = ''.join(random.choice(string.punctuation) for x in range(6))
p = lower + upper + number + punct
return ''.join(random.sample(p, len(p)))
def getInstanceDetails(api, server):
'''
Return the details of an instance, populating IPs, etc.
'''
instance = {'id': server['LINODEID'],
'name': server['LABEL'],
'public': [],
'private': []}
# Populate with ips
for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
if ip['ISPUBLIC'] and 'ipv4' not in instance:
instance['ipv4'] = ip['IPADDRESS']
instance['fqdn'] = ip['RDNS_NAME']
if ip['ISPUBLIC']:
instance['public'].append({'ipv4': ip['IPADDRESS'],
'fqdn': ip['RDNS_NAME'],
'ip_id': ip['IPADDRESSID']})
else:
instance['private'].append({'ipv4': ip['IPADDRESS'],
'fqdn': ip['RDNS_NAME'],
'ip_id': ip['IPADDRESSID']})
return instance
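# Example shape of the dict built above (illustrative values only):
#
#     {'id': 12345678, 'name': 'linode-test1',
#      'ipv4': '203.0.113.10', 'fqdn': 'li12345.members.linode.com',
#      'public': [{'ipv4': '203.0.113.10',
#                  'fqdn': 'li12345.members.linode.com', 'ip_id': 111}],
#      'private': []}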
def linodeServers(module, api, state, name,
displaygroup, plan, additional_disks, distribution,
datacenter, kernel_id, linode_id, payment_term, password,
private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
instances = []
changed = False
new_server = False
servers = []
disks = []
configs = []
jobs = []
    # See if we can match an existing server's details with the provided linode_id
    if linode_id:
        # For the moment we only use linode_id as the match criterion;
        # later we could use more (size, name, etc.) and update existing servers
servers = api.linode_list(LinodeId=linode_id)
# Attempt to fetch details about disks and configs only if servers are
# found with linode_id
if servers:
disks = api.linode_disk_list(LinodeId=linode_id)
configs = api.linode_config_list(LinodeId=linode_id)
# Act on the state
if state in ('active', 'present', 'started'):
# TODO: validate all the plan / distribution / datacenter are valid
        # Multi-step process/validation:
        #  - need linode_id (entity)
        #  - need disk_id for linode_id - create disk from distrib
        #  - need config_id for linode_id - create config (need kernel)
        # Any create step triggers a job that needs to be waited for.
if not servers:
            for arg_name, arg in (('name', name), ('plan', plan),
                                  ('distribution', distribution), ('datacenter', datacenter)):
                if not arg:
                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
# Create linode entity
new_server = True
# Get size of all individually listed disks to subtract from Distribution disk
used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks)
try:
res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
PaymentTerm=payment_term)
linode_id = res['LinodeID']
# Update linode Label to match name
api.linode_update(LinodeId=linode_id, Label='%s_%s' % (linode_id, name))
# Update Linode with Ansible configuration options
api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs)
# Save server
servers = api.linode_list(LinodeId=linode_id)
except Exception as e:
module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
# Add private IP to Linode
if private_ip:
try:
res = api.linode_ip_addprivate(LinodeID=linode_id)
except Exception as e:
module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
if not disks:
            for arg_name, arg in (('name', name), ('linode_id', linode_id),
                                  ('distribution', distribution)):
                if not arg:
                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
# Create disks (1 from distrib, 1 for SWAP)
new_server = True
try:
if not password:
# Password is required on creation, if not provided generate one
password = randompass()
if not swap:
swap = 512
# Create data disk
size = servers[0]['TOTALHD'] - used_disk_space - swap
if ssh_pub_key:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution,
rootPass=password, rootSSHKey=ssh_pub_key,
Label='%s data disk (lid: %s)' % (name, linode_id),
Size=size)
else:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution,
rootPass=password,
Label='%s data disk (lid: %s)' % (name, linode_id),
Size=size)
jobs.append(res['JobID'])
# Create SWAP disk
res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
Label='%s swap disk (lid: %s)' % (name, linode_id),
Size=swap)
# Create individually listed disks at specified size
if additional_disks:
for disk in additional_disks:
# If a disk Type is not passed in, default to ext4
if disk.get('Type') is None:
disk['Type'] = 'ext4'
res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type'])
jobs.append(res['JobID'])
except Exception as e:
# TODO: destroy linode ?
module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
if not configs:
            for arg_name, arg in (('name', name), ('linode_id', linode_id),
                                  ('distribution', distribution)):
                if not arg:
                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
            # Check architecture (default to 32-bit if the distribution is not found,
            # so arch is always defined below)
            arch = '32'
            for distrib in api.avail_distributions():
                if distrib['DISTRIBUTIONID'] != distribution:
                    continue
                if distrib['IS64BIT']:
                    arch = '64'
                break
# Get latest kernel matching arch if kernel_id is not specified
if not kernel_id:
for kernel in api.avail_kernels():
if not kernel['LABEL'].startswith('Latest %s' % arch):
continue
kernel_id = kernel['KERNELID']
break
# Get disk list
disks_id = []
for disk in api.linode_disk_list(LinodeId=linode_id):
if disk['TYPE'] == 'ext3':
disks_id.insert(0, str(disk['DISKID']))
continue
disks_id.append(str(disk['DISKID']))
            # The config Disklist parameter is a fixed-width, comma-separated
            # list of 9 device slots; pad the unused slots with empty strings,
            # e.g. '1234,5678,,,,,,,'
            while len(disks_id) < 9:
                disks_id.append('')
disks_list = ','.join(disks_id)
# Create config
new_server = True
try:
api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
Disklist=disks_list, Label='%s config' % name)
configs = api.linode_config_list(LinodeId=linode_id)
except Exception as e:
module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
# Start / Ensure servers are running
for server in servers:
# Refresh server state
server = api.linode_list(LinodeId=server['LINODEID'])[0]
# Ensure existing servers are up and running, boot if necessary
if server['STATUS'] != 1:
res = api.linode_boot(LinodeId=linode_id)
jobs.append(res['JobID'])
changed = True
# wait here until the instances are up
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
# refresh the server details
server = api.linode_list(LinodeId=server['LINODEID'])[0]
# status:
# -2: Boot failed
# 1: Running
if server['STATUS'] in (-2, 1):
break
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID']))
# Get a fresh copy of the server details
server = api.linode_list(LinodeId=server['LINODEID'])[0]
if server['STATUS'] == -2:
module.fail_json(msg='%s (lid: %s) failed to boot' %
(server['LABEL'], server['LINODEID']))
# From now on we know the task is a success
# Build instance report
instance = getInstanceDetails(api, server)
# depending on wait flag select the status
if wait:
instance['status'] = 'Running'
else:
instance['status'] = 'Starting'
# Return the root password if this is a new box and no SSH key
# has been provided
if new_server and not ssh_pub_key:
instance['password'] = password
instances.append(instance)
    elif state in ('stopped',):
if not linode_id:
module.fail_json(msg='linode_id is required for stopped state')
if not servers:
module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
for server in servers:
instance = getInstanceDetails(api, server)
if server['STATUS'] != 2:
try:
res = api.linode_shutdown(LinodeId=linode_id)
except Exception as e:
module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Stopping'
changed = True
else:
instance['status'] = 'Stopped'
instances.append(instance)
    elif state in ('restarted',):
if not linode_id:
module.fail_json(msg='linode_id is required for restarted state')
if not servers:
module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
for server in servers:
instance = getInstanceDetails(api, server)
try:
res = api.linode_reboot(LinodeId=server['LINODEID'])
except Exception as e:
module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Restarting'
changed = True
instances.append(instance)
elif state in ('absent', 'deleted'):
for server in servers:
instance = getInstanceDetails(api, server)
try:
api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
except Exception as e:
module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Deleting'
changed = True
instances.append(instance)
# Ease parsing if only 1 instance
if len(instances) == 1:
module.exit_json(changed=changed, instance=instances[0])
module.exit_json(changed=changed, instances=instances)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present',
choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']),
api_key=dict(type='str', no_log=True),
name=dict(type='str'),
alert_bwin_enabled=dict(type='bool'),
alert_bwin_threshold=dict(type='int'),
alert_bwout_enabled=dict(type='bool'),
alert_bwout_threshold=dict(type='int'),
alert_bwquota_enabled=dict(type='bool'),
alert_bwquota_threshold=dict(type='int'),
alert_cpu_enabled=dict(type='bool'),
alert_cpu_threshold=dict(type='int'),
alert_diskio_enabled=dict(type='bool'),
alert_diskio_threshold=dict(type='int'),
backupsenabled=dict(type='int'),
backupweeklyday=dict(type='int'),
backupwindow=dict(type='int'),
displaygroup=dict(type='str', default=''),
plan=dict(type='int'),
additional_disks=dict(type='list'),
distribution=dict(type='int'),
datacenter=dict(type='int'),
kernel_id=dict(type='int'),
linode_id=dict(type='int', aliases=['lid']),
payment_term=dict(type='int', default=1, choices=[1, 12, 24]),
password=dict(type='str', no_log=True),
private_ip=dict(type='bool'),
ssh_pub_key=dict(type='str'),
swap=dict(type='int', default=512),
wait=dict(type='bool', default=True),
wait_timeout=dict(default=300),
watchdog=dict(type='bool', default=True),
),
)
if not HAS_LINODE:
module.fail_json(msg='linode-python required for this module')
state = module.params.get('state')
api_key = module.params.get('api_key')
name = module.params.get('name')
alert_bwin_enabled = module.params.get('alert_bwin_enabled')
alert_bwin_threshold = module.params.get('alert_bwin_threshold')
alert_bwout_enabled = module.params.get('alert_bwout_enabled')
alert_bwout_threshold = module.params.get('alert_bwout_threshold')
alert_bwquota_enabled = module.params.get('alert_bwquota_enabled')
alert_bwquota_threshold = module.params.get('alert_bwquota_threshold')
alert_cpu_enabled = module.params.get('alert_cpu_enabled')
alert_cpu_threshold = module.params.get('alert_cpu_threshold')
alert_diskio_enabled = module.params.get('alert_diskio_enabled')
alert_diskio_threshold = module.params.get('alert_diskio_threshold')
backupsenabled = module.params.get('backupsenabled')
backupweeklyday = module.params.get('backupweeklyday')
backupwindow = module.params.get('backupwindow')
displaygroup = module.params.get('displaygroup')
plan = module.params.get('plan')
additional_disks = module.params.get('additional_disks')
distribution = module.params.get('distribution')
datacenter = module.params.get('datacenter')
kernel_id = module.params.get('kernel_id')
linode_id = module.params.get('linode_id')
payment_term = module.params.get('payment_term')
password = module.params.get('password')
private_ip = module.params.get('private_ip')
ssh_pub_key = module.params.get('ssh_pub_key')
swap = module.params.get('swap')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
watchdog = int(module.params.get('watchdog'))
kwargs = dict()
check_items = dict(
alert_bwin_enabled=alert_bwin_enabled,
alert_bwin_threshold=alert_bwin_threshold,
alert_bwout_enabled=alert_bwout_enabled,
alert_bwout_threshold=alert_bwout_threshold,
alert_bwquota_enabled=alert_bwquota_enabled,
alert_bwquota_threshold=alert_bwquota_threshold,
alert_cpu_enabled=alert_cpu_enabled,
alert_cpu_threshold=alert_cpu_threshold,
alert_diskio_enabled=alert_diskio_enabled,
alert_diskio_threshold=alert_diskio_threshold,
backupweeklyday=backupweeklyday,
backupwindow=backupwindow,
)
for key, value in check_items.items():
if value is not None:
kwargs[key] = value
# Setup the api_key
if not api_key:
try:
api_key = os.environ['LINODE_API_KEY']
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message)
# setup the auth
try:
api = linode_api.Api(api_key)
api.test_echo()
except Exception as e:
module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
linodeServers(module, api, state, name,
displaygroup, plan,
additional_disks, distribution, datacenter, kernel_id, linode_id,
payment_term, password, private_ip, ssh_pub_key, swap, wait,
wait_timeout, watchdog, **kwargs)
if __name__ == '__main__':
main()
|
SpaceKatt/CSPLN
|
refs/heads/master
|
apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/gluon/contrib/plural_rules/sl.py
|
44
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for sl (Slovenian)
nplurals=4 # Slovenian language has 4 forms:
# 1 singular and 3 plurals
# Determine plural_id for number *n* as sequence of positive
# integers: 0,1,...
# NOTE! For singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: (0 if n % 100 == 1 else
1 if n % 100 == 2 else
2 if n % 100 in (3,4) else
3)
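# Worked examples (derived from the rule above, illustration only):
#   get_plural_id(1) == 0;  get_plural_id(101) == 0  # n % 100 == 1
#   get_plural_id(2) == 1;  get_plural_id(102) == 1  # n % 100 == 2
#   get_plural_id(3) == 2;  get_plural_id(104) == 2  # n % 100 in (3, 4)
#   get_plural_id(5) == 3;  get_plural_id(11) == 3   # everything else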
# Construct and return plural form of *word* using
# *plural_id* (which ALWAYS>0). This function will be executed
# for words (or phrases) not found in plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
|
compiteing/flask-ponypermission
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py
|
203
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import logging
__version__ = '0.2.0'
class DistlibException(Exception):
pass
try:
from logging import NullHandler
except ImportError: # pragma: no cover
class NullHandler(logging.Handler):
def handle(self, record): pass
def emit(self, record): pass
def createLock(self): self.lock = None
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
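# Illustration only (not part of distlib): the NullHandler attached above
# silences the "No handlers could be found" warning for library users who
# have not configured logging; an application opts in to distlib's log
# output simply by configuring logging itself, e.g.:
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG)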
|
hip-odoo/odoo
|
refs/heads/10.0
|
addons/website_hr_recruitment/controllers/__init__.py
|
7372
|
import main
|
noelbk/neutron-juniper
|
refs/heads/master
|
neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py
|
20
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc.
from neutron.db import db_base_plugin_v2
class FakeL2Plugin(db_base_plugin_v2.NeutronDbPluginV2):
supported_extension_aliases = []
|
ChameleonCloud/horizon
|
refs/heads/chameleoncloud/train
|
openstack_dashboard/api/__init__.py
|
4
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2013 Big Switch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Methods and interface objects used to interact with external APIs.
API method calls return objects whose attributes in many cases map
directly to the data returned from the API HTTP call.
Unfortunately, these objects are also often constructed dynamically, making
it difficult to know what data is available from the API object. Because of
this, all API calls should wrap their returned object in one defined here,
using only explicitly defined attributes and/or methods.
In other words, Horizon developers not working on openstack_dashboard.api
shouldn't need to understand the finer details of APIs for
Keystone/Nova/Glance/Swift et al.
"""
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import glance
from openstack_dashboard.api import keystone
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.api import swift
__all__ = [
"base",
"cinder",
"glance",
"keystone",
"network",
"neutron",
"nova",
"swift",
]
|
leotrubach/sourceforge-allura
|
refs/heads/dev
|
ForgeSVN/forgesvn/model/svn.py
|
1
|
import os
import shutil
import string
import logging
import subprocess
from subprocess import Popen, PIPE
from hashlib import sha1
from cStringIO import StringIO
from datetime import datetime
import tg
import pysvn
import pylons
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pymongo.errors import DuplicateKeyError
from pylons import c
from ming.base import Object
from ming.orm import Mapper, FieldProperty, session
from ming.utils import LazyProperty
from allura import model as M
from allura.lib import helpers as h
from allura.model.repository import GitLikeTree
from allura.model.auth import User
log = logging.getLogger(__name__)
class Repository(M.Repository):
tool_name='SVN'
repo_id='svn'
type_s='SVN Repository'
class __mongometa__:
name='svn-repository'
branches = FieldProperty([dict(name=str,object_id=str)])
@LazyProperty
def _impl(self):
return SVNImplementation(self)
def _log(self, rev, skip, max_count):
ci = self.commit(rev)
if ci is None: return []
return ci.log(int(skip), int(max_count))
def clone_command(self, category, username=''):
'''Return a string suitable for copy/paste that would clone this repo locally
category is one of 'ro' (read-only), 'rw' (read/write), or 'https' (read/write via https)
'''
if not username and c.user not in (None, User.anonymous()):
username = c.user.username
tpl = string.Template(tg.config.get('scm.clone.%s.%s' % (category, self.tool)) or
tg.config.get('scm.clone.%s' % self.tool))
return tpl.substitute(dict(username=username,
source_url=self.clone_url(category, username)+c.app.config.options.get('checkout_url'),
dest_path=self.suggested_clone_dest_path()))
def compute_diffs(self): return
def count(self, *args, **kwargs):
return super(Repository, self).count(None)
def log(self, branch=None, offset=0, limit=10):
return list(self._log(rev=branch, skip=offset, max_count=limit))
def latest(self, branch=None):
if self._impl is None: return None
if not self.heads: return None
return self._impl.commit(self.heads[0].object_id)
class SVNCalledProcessError(Exception):
def __init__(self, cmd, returncode, stdout, stderr):
self.cmd = cmd
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return "Command: '%s' returned non-zero exit status %s\nSTDOUT: %s\nSTDERR: %s" % \
(self.cmd, self.returncode, self.stdout, self.stderr)
class SVNImplementation(M.RepositoryImplementation):
post_receive_template = string.Template(
'#!/bin/bash\n'
'# The following line is required for site integration, do not remove/modify\n'
'curl -s $url\n')
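    # For illustration (host and repo path are hypothetical): after
    # substitution in _setup_hooks() below, the generated post-commit
    # hook looks like:
    #
    #   #!/bin/bash
    #   # The following line is required for site integration, do not remove/modify
    #   curl -s http://localhost:8080/auth/refresh_repo/p/test/code/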
def __init__(self, repo):
self._repo = repo
@LazyProperty
def _svn(self):
return pysvn.Client()
@LazyProperty
def _url(self):
return 'file://%s%s' % (self._repo.fs_path, self._repo.name)
def shorthand_for_commit(self, oid):
return '[r%d]' % self._revno(oid)
def url_for_commit(self, commit):
if isinstance(commit, basestring):
object_id = commit
else:
object_id = commit._id
return '%s%d/' % (
self._repo.url(), self._revno(object_id))
def init(self, default_dirs=True, skip_special_files=False):
fullname = self._setup_paths()
log.info('svn init %s', fullname)
if os.path.exists(fullname):
shutil.rmtree(fullname)
subprocess.call(['svnadmin', 'create', self._repo.name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self._repo.fs_path)
if not skip_special_files:
self._setup_special_files()
self._repo.status = 'ready'
# make first commit with dir structure
if default_dirs:
self._repo._impl._svn.checkout('file://'+fullname, fullname+'/tmp')
os.mkdir(fullname+'/tmp/trunk')
os.mkdir(fullname+'/tmp/tags')
os.mkdir(fullname+'/tmp/branches')
self._repo._impl._svn.add(fullname+'/tmp/trunk')
self._repo._impl._svn.add(fullname+'/tmp/tags')
self._repo._impl._svn.add(fullname+'/tmp/branches')
self._repo._impl._svn.checkin([fullname+'/tmp/trunk',fullname+'/tmp/tags',fullname+'/tmp/branches'],'Initial commit')
shutil.rmtree(fullname+'/tmp')
def clone_from(self, source_url):
'''Initialize a repo as a clone of another using svnsync'''
self.init(default_dirs=False, skip_special_files=True)
self._repo.status = 'importing'
session(self._repo).flush(self._repo)
log.info('Initialize %r as a clone of %s',
self._repo, source_url)
# Need a pre-revprop-change hook for cloning
fn = os.path.join(self._repo.fs_path, self._repo.name,
'hooks', 'pre-revprop-change')
with open(fn, 'wb') as fp:
fp.write('#!/bin/sh\n')
os.chmod(fn, 0755)
def check_call(cmd):
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(input='p\n')
if p.returncode != 0:
raise SVNCalledProcessError(cmd, p.returncode, stdout, stderr)
check_call(['svnsync', 'init', self._url, source_url])
check_call(['svnsync', '--non-interactive', 'sync', self._url])
self._repo.status = 'analyzing'
session(self._repo).flush(self._repo)
log.info('... %r cloned, analyzing', self._repo)
self._repo.refresh(notify=False)
self._repo.status = 'ready'
log.info('... %s ready', self._repo)
session(self._repo).flush(self._repo)
self._setup_special_files()
def refresh_heads(self):
info = self._svn.info2(
self._url,
revision=pysvn.Revision(pysvn.opt_revision_kind.head),
recurse=False)[0][1]
oid = self._oid(info.rev.number)
self._repo.heads = [ Object(name=None, object_id=oid) ]
# Branches and tags aren't really supported in subversion
self._repo.branches = []
self._repo.repo_tags = []
session(self._repo).flush(self._repo)
def commit(self, rev):
if rev in ('HEAD', None):
if not self._repo.heads: return None
oid = self._repo.heads[0].object_id
elif isinstance(rev, int) or rev.isdigit():
oid = self._oid(rev)
else:
oid = rev
result = M.repo.Commit.query.get(_id=oid)
if result is None: return None
result.set_context(self._repo)
return result
def all_commit_ids(self):
"""Return a list of commit ids, starting with the root (first commit)
and ending with the most recent commit.
NB: The ForgeGit implementation returns commits in the opposite order.
"""
head_revno = self._revno(self._repo.heads[0].object_id)
return map(self._oid, range(1, head_revno+1))
def new_commits(self, all_commits=False):
head_revno = self._revno(self._repo.heads[0].object_id)
oids = [ self._oid(revno) for revno in range(1, head_revno+1) ]
if all_commits:
return oids
# Find max commit id -- everything greater than that will be "unknown"
prefix = self._oid('')
q = M.repo.Commit.query.find(
dict(
type='commit',
_id={'$gt':prefix},
),
dict(_id=True)
)
seen_oids = set()
for d in q.ming_cursor.cursor:
oid = d['_id']
if not oid.startswith(prefix): break
seen_oids.add(oid)
return [
oid for oid in oids if oid not in seen_oids ]
def refresh_commit_info(self, oid, seen_object_ids, lazy=True):
from allura.model.repo import CommitDoc, DiffInfoDoc
ci_doc = CommitDoc.m.get(_id=oid)
if ci_doc and lazy: return False
revno = self._revno(oid)
rev = self._revision(oid)
try:
log_entry = self._svn.log(
self._url,
revision_start=rev,
limit=1,
discover_changed_paths=True)[0]
except pysvn.ClientError:
log.info('ClientError processing %r %r, treating as empty', oid, self._repo, exc_info=True)
log_entry = Object(date='', message='', changed_paths=[])
log_date = None
if hasattr(log_entry, 'date'):
log_date = datetime.utcfromtimestamp(log_entry.date)
user = Object(
name=log_entry.get('author', '--none--'),
email='',
date=log_date)
args = dict(
tree_id=None,
committed=user,
authored=user,
message=log_entry.get("message", "--none--"),
parent_ids=[],
child_ids=[])
if revno > 1:
args['parent_ids'] = [ self._oid(revno-1) ]
if ci_doc:
ci_doc.update(**args)
ci_doc.m.save()
else:
ci_doc = CommitDoc(dict(args, _id=oid))
try:
ci_doc.m.insert(safe=True)
except DuplicateKeyError:
if lazy: return False
# Save diff info
di = DiffInfoDoc.make(dict(_id=ci_doc._id, differences=[]))
for path in log_entry.changed_paths:
if path.action in ('A', 'M', 'R'):
try:
rhs_info = self._svn.info2(
self._url + h.really_unicode(path.path),
revision=self._revision(ci_doc._id),
recurse=False)[0][1]
rhs_id = self._obj_oid(ci_doc._id, rhs_info)
except pysvn.ClientError, e:
# pysvn will sometimes misreport deleted files (D) as
# something else (like A), causing info2() to raise a
# ClientError since the file doesn't exist in this
                    # revision. Set rhs_id = None to treat like a deleted file
log.info('This error was handled gracefully and logged '
'for informational purposes only:\n' + str(e))
rhs_id = None
else:
rhs_id = None
if ci_doc.parent_ids and path.action in ('D', 'M', 'R'):
try:
lhs_info = self._svn.info2(
self._url + h.really_unicode(path.path),
revision=self._revision(ci_doc.parent_ids[0]),
recurse=False)[0][1]
lhs_id = self._obj_oid(ci_doc._id, lhs_info)
except pysvn.ClientError, e:
# pysvn will sometimes report new files as 'M'odified,
# causing info2() to raise ClientError since the file
# doesn't exist in the parent revision. Set lhs_id = None
# to treat like a newly added file.
log.info('This error was handled gracefully and logged '
'for informational purposes only:\n' + str(e))
lhs_id = None
else:
lhs_id = None
di.differences.append(dict(
name=h.really_unicode(path.path),
lhs_id=lhs_id,
rhs_id=rhs_id))
di.m.save()
return True
def compute_tree_new(self, commit, tree_path='/'):
from allura.model import repo as RM
tree_path = tree_path[:-1]
tree_id = self._tree_oid(commit._id, tree_path)
tree, isnew = RM.Tree.upsert(tree_id)
if not isnew: return tree_id
log.debug('Computing tree for %s: %s',
self._revno(commit._id), tree_path)
rev = self._revision(commit._id)
try:
infos = self._svn.info2(
self._url + tree_path,
revision=rev,
depth=pysvn.depth.immediates)
except pysvn.ClientError:
log.exception('Error computing tree for %s: %s(%s)',
self._repo, commit, tree_path)
tree.delete()
return None
log.debug('Compute tree for %d paths', len(infos))
for path, info in infos[1:]:
last_commit_id = self._oid(info['last_changed_rev'].number)
last_commit = M.repo.Commit.query.get(_id=last_commit_id)
M.repo_refresh.set_last_commit(
self._repo._id,
self._tree_oid(commit._id, path),
M.repo_refresh.get_commit_info(last_commit))
if info.kind == pysvn.node_kind.dir:
tree.tree_ids.append(Object(
id=self._tree_oid(commit._id, path),
name=path))
elif info.kind == pysvn.node_kind.file:
tree.blob_ids.append(Object(
id=self._tree_oid(commit._id, path),
name=path))
else:
assert False
session(tree).flush(tree)
return tree_id
def _tree_oid(self, commit_id, path):
data = 'tree\n%s\n%s' % (commit_id, h.really_unicode(path))
return sha1(data.encode('utf-8')).hexdigest()
def _blob_oid(self, commit_id, path):
data = 'blob\n%s\n%s' % (commit_id, h.really_unicode(path))
return sha1(data.encode('utf-8')).hexdigest()
def _obj_oid(self, commit_id, info):
path = info.URL[len(info.repos_root_URL):]
if info.kind == pysvn.node_kind.dir:
return self._tree_oid(commit_id, path)
else:
return self._blob_oid(commit_id, path)
def log(self, object_id, skip, count):
revno = self._revno(object_id)
result = []
while count and revno:
if skip == 0:
result.append(self._oid(revno))
count -= 1
else:
skip -= 1
revno -= 1
if revno:
return result, [ self._oid(revno) ]
else:
return result, []
def open_blob(self, blob):
data = self._svn.cat(
self._url + blob.path(),
revision=self._revision(blob.commit._id))
return StringIO(data)
def blob_size(self, blob):
try:
data = self._svn.list(
self._url + blob.path(),
revision=self._revision(blob.commit._id),
dirent_fields=pysvn.SVN_DIRENT_SIZE)
except pysvn.ClientError:
log.info('ClientError getting filesize %r %r, returning 0', blob.path(), self._repo, exc_info=True)
return 0
try:
size = data[0][0]['size']
except (IndexError, KeyError):
log.info('Error getting filesize: bad data from svn client %r %r, returning 0', blob.path(), self._repo, exc_info=True)
size = 0
return size
def _setup_hooks(self):
'Set up the post-commit and pre-revprop-change hooks'
text = self.post_receive_template.substitute(
url=tg.config.get('base_url', 'http://localhost:8080')
+ '/auth/refresh_repo' + self._repo.url())
fn = os.path.join(self._repo.fs_path, self._repo.name, 'hooks', 'post-commit')
with open(fn, 'wb') as fp:
fp.write(text)
os.chmod(fn, 0755)
fn = os.path.join(self._repo.fs_path, self._repo.name, 'hooks', 'pre-revprop-change')
with open(fn, 'wb') as fp:
fp.write('#!/bin/sh\n')
os.chmod(fn, 0755)
def _revno(self, oid):
return int(oid.split(':')[1])
def _revision(self, oid):
return pysvn.Revision(
pysvn.opt_revision_kind.number,
self._revno(oid))
def _oid(self, revno):
return '%s:%s' % (self._repo._id, revno)
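    # Sketch (not part of the original source): commit object ids in this
    # implementation are plain '<repo _id>:<revno>' strings, so _oid() and
    # _revno() invert each other for a given repository:
    #
    #     oid = self._oid(42)      # e.g. '4fb1...e2:42' (repo _id hypothetical)
    #     assert self._revno(oid) == 42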
Mapper.compile_all()
|
robwarm/gpaw-symm
|
refs/heads/master
|
doc/tutorials/hubbardu/n.py
|
1
|
from ase import Atoms
from gpaw import GPAW
n = Atoms('N', magmoms=[3])
n.center(vacuum=3.5)
# Calculation with no +U correction:
n.calc = GPAW(mode='lcao',
basis='dzp',
txt='no_u.txt',
xc='PBE')
e1 = n.get_potential_energy()
# Calculation with a correction U=6 eV normalized:
n.calc.set(setups={'N': ':p,6.0'}, txt='normalized_u.txt')
e2 = n.get_potential_energy()
# Calculation with a correction U=6 eV not normalized:
n.calc.set(setups={'N': ':p,6.0,0'}, txt='not_normalized_u.txt')
e3 = n.get_potential_energy()
|
doduytrung/odoo-8.0
|
refs/heads/master
|
addons/hr_attendance/wizard/hr_attendance_error.py
|
377
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_attendance_error(osv.osv_memory):
_name = 'hr.attendance.error'
_description = 'Print Error Attendance Report'
_columns = {
'init_date': fields.date('Starting Date', required=True),
'end_date': fields.date('Ending Date', required=True),
'max_delay': fields.integer('Max. Delay (Min)', required=True)
}
_defaults = {
'init_date': lambda *a: time.strftime('%Y-%m-%d'),
'end_date': lambda *a: time.strftime('%Y-%m-%d'),
'max_delay': 120,
}
def print_report(self, cr, uid, ids, context=None):
emp_ids = []
data_error = self.read(cr, uid, ids, context=context)[0]
date_from = data_error['init_date']
date_to = data_error['end_date']
cr.execute("SELECT id FROM hr_attendance WHERE employee_id IN %s AND to_char(name,'YYYY-mm-dd')<=%s AND to_char(name,'YYYY-mm-dd')>=%s AND action IN %s ORDER BY name" ,(tuple(context['active_ids']), date_to, date_from, tuple(['sign_in','sign_out'])))
attendance_ids = [x[0] for x in cr.fetchall()]
if not attendance_ids:
raise osv.except_osv(_('No Data Available!'), _('No records are found for your selection!'))
attendance_records = self.pool.get('hr.attendance').browse(cr, uid, attendance_ids, context=context)
for rec in attendance_records:
if rec.employee_id.id not in emp_ids:
emp_ids.append(rec.employee_id.id)
data_error['emp_ids'] = emp_ids
datas = {
'ids': [],
'model': 'hr.employee',
'form': data_error
}
return self.pool['report'].get_action(
cr, uid, [], 'hr_attendance.report_attendanceerrors', data=datas, context=context
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|