repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, ⌀ = nullable)
|---|---|---|---|---|
mingzhaodotname/teletraan
|
refs/heads/master
|
deploy-agent/deployd/client/base_client.py
|
6
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
class BaseClient:
"""This class plays a role as an interface defining methods for agent to
communicate with teletraan service.
"""
__metaclass__ = ABCMeta
@abstractmethod
def send_reports(self, env_reports=None):
"""Args:
env_reports: a dict with env name as key and DeployStatus as value.
Returns:
PingResponse describing next action for deploy agent.
"""
pass
|
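A hedged sketch of a concrete client for the interface above. The endpoint path, payload shape, `to_dict()` helper, and use of `requests` are illustrative assumptions, not Teletraan's actual wire protocol:

import json
import requests  # assumed HTTP client, not necessarily a project dependency

class RestfulClient(BaseClient):
    """Illustrative BaseClient that POSTs env reports to a Teletraan host."""
    def __init__(self, base_url):
        self.base_url = base_url

    def send_reports(self, env_reports=None):
        # Serialize each DeployStatus via an assumed to_dict() helper.
        payload = {name: status.to_dict()
                   for name, status in (env_reports or {}).items()}
        resp = requests.post('%s/v1/system/ping' % self.base_url,
                             data=json.dumps(payload),
                             headers={'Content-Type': 'application/json'})
        resp.raise_for_status()
        return resp.json()  # stands in for a parsed PingResponse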
hugs/django
|
refs/heads/master
|
django/contrib/gis/tests/test_gdal_envelope.py
|
9
|
import unittest
from django.contrib.gis.gdal import Envelope, OGRException
class EnvelopeTest(unittest.TestCase):
def test01_init(self):
"Testing Envelope initilization."
e1 = Envelope((0, 0, 5, 5))
e2 = Envelope(0, 0, 5, 5)
e3 = Envelope(0, '0', '5', 5) # Thanks to ww for this
e4 = Envelope(e1._envelope)
self.assertRaises(OGRException, Envelope, (5, 5, 0, 0))
self.assertRaises(OGRException, Envelope, 5, 5, 0, 0)
self.assertRaises(OGRException, Envelope, (0, 0, 5, 5, 3))
self.assertRaises(OGRException, Envelope, ())
self.assertRaises(ValueError, Envelope, 0, 'a', 5, 5)
self.assertRaises(TypeError, Envelope, u'foo')
def test02_properties(self):
"Testing Envelope properties."
e = Envelope(0, 0, 2, 3)
self.assertEqual(0, e.min_x)
self.assertEqual(0, e.min_y)
self.assertEqual(2, e.max_x)
self.assertEqual(3, e.max_y)
self.assertEqual((0, 0), e.ll)
self.assertEqual((2, 3), e.ur)
self.assertEqual((0, 0, 2, 3), e.tuple)
self.assertEqual('POLYGON((0.0 0.0,0.0 3.0,2.0 3.0,2.0 0.0,0.0 0.0))', e.wkt)
self.assertEqual('(0.0, 0.0, 2.0, 3.0)', str(e))
def test03_equivalence(self):
"Testing Envelope equivalence."
e1 = Envelope(0.523, 0.217, 253.23, 523.69)
e2 = Envelope((0.523, 0.217, 253.23, 523.69))
self.assertEqual(e1, e2)
self.assertEqual((0.523, 0.217, 253.23, 523.69), e1)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(EnvelopeTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
|
NexusIS/libcloud
|
refs/heads/trunk
|
libcloud/backup/drivers/gce.py
|
31
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'GCEBackupDriver'
]
from libcloud.utils.iso8601 import parse_date
from libcloud.backup.base import BackupDriver, BackupTargetRecoveryPoint,\
BackupTargetJob, BackupTarget
from libcloud.backup.types import BackupTargetType, BackupTargetJobStatusType
from libcloud.common.google import GoogleResponse, GoogleBaseConnection
API_VERSION = 'v1'
DEFAULT_TASK_COMPLETION_TIMEOUT = 180
class GCEResponse(GoogleResponse):
pass
class GCEConnection(GoogleBaseConnection):
"""
Connection class for the GCE driver.
    GCEConnection extends :class:`google.GoogleBaseConnection` for two reasons:
    1. Modify request_path for the GCE URI.
    2. Implement the gce_params functionality described below.
If the parameter gce_params is set to a dict prior to calling request(),
the URL parameters will be updated to include those key/values FOR A
SINGLE REQUEST. If the response contains a nextPageToken,
gce_params['pageToken'] will be set to its value. This can be used to
implement paging in list:
>>> params, more_results = {'maxResults': 2}, True
>>> while more_results:
... driver.connection.gce_params=params
... driver.ex_list_urlmaps()
... more_results = 'pageToken' in params
...
[<GCEUrlMap id="..." name="cli-map">, <GCEUrlMap id="..." name="lc-map">]
[<GCEUrlMap id="..." name="web-map">]
"""
host = 'www.googleapis.com'
responseCls = GCEResponse
def __init__(self, user_id, key, secure, auth_type=None,
credential_file=None, project=None, **kwargs):
super(GCEConnection, self).__init__(user_id, key, secure=secure,
auth_type=auth_type,
credential_file=credential_file,
**kwargs)
self.request_path = '/compute/%s/projects/%s' % (API_VERSION,
project)
self.gce_params = None
def pre_connect_hook(self, params, headers):
"""
Update URL parameters with values from self.gce_params.
@inherits: :class:`GoogleBaseConnection.pre_connect_hook`
"""
params, headers = super(GCEConnection, self).pre_connect_hook(params,
headers)
if self.gce_params:
params.update(self.gce_params)
return params, headers
def request(self, *args, **kwargs):
"""
Perform request then do GCE-specific processing of URL params.
@inherits: :class:`GoogleBaseConnection.request`
"""
response = super(GCEConnection, self).request(*args, **kwargs)
# If gce_params has been set, then update the pageToken with the
# nextPageToken so it can be used in the next request.
if self.gce_params:
if 'nextPageToken' in response.object:
self.gce_params['pageToken'] = response.object['nextPageToken']
elif 'pageToken' in self.gce_params:
del self.gce_params['pageToken']
self.gce_params = None
return response
class GCEBackupDriver(BackupDriver):
name = 'Google Compute Engine Backup Driver'
website = 'http://cloud.google.com/'
connectionCls = GCEConnection
def __init__(self, user_id, key=None, project=None,
auth_type=None, scopes=None, credential_file=None, **kwargs):
"""
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:keyword project: Your GCE project name. (required)
:type project: ``str``
:keyword auth_type: Accepted values are "SA" or "IA" or "GCE"
("Service Account" or "Installed Application" or
"GCE" if libcloud is being used on a GCE instance
with service account enabled).
If not supplied, auth_type will be guessed based
on value of user_id or if the code is being
executed in a GCE instance.
:type auth_type: ``str``
:keyword scopes: List of authorization URLs. Default is empty and
grants read/write to Compute, Storage, DNS.
:type scopes: ``list``
:keyword credential_file: Path to file for caching authentication
information used by GCEConnection.
:type credential_file: ``str``
"""
if not project:
raise ValueError('Project name must be specified using '
'"project" keyword.')
self.auth_type = auth_type
self.project = project
self.scopes = scopes
self.credential_file = credential_file or \
'~/.gce_libcloud_auth' + '.' + self.project
super(GCEBackupDriver, self).__init__(user_id, key, **kwargs)
        # Build the base request path for this project's Compute API.
self.base_path = '/compute/%s/projects/%s' % (API_VERSION,
self.project)
def get_supported_target_types(self):
"""
Get a list of backup target types this driver supports
:return: ``list`` of :class:``BackupTargetType``
"""
return [BackupTargetType.VOLUME]
def list_targets(self):
"""
        List all backup targets
:rtype: ``list`` of :class:`BackupTarget`
"""
raise NotImplementedError(
'list_targets not implemented for this driver')
def create_target(self, name, address,
type=BackupTargetType.VOLUME, extra=None):
"""
Creates a new backup target
:param name: Name of the target
:type name: ``str``
:param address: The volume ID.
:type address: ``str``
:param type: Backup target type (Physical, Virtual, ...).
:type type: :class:`BackupTargetType`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTarget`
"""
        # Does nothing since any volume can be snapshotted at any time.
return self.ex_get_target_by_source(address)
def create_target_from_node(self, node, type=BackupTargetType.VIRTUAL,
extra=None):
"""
Creates a new backup target from an existing node
:param node: The Node to backup
:type node: ``Node``
:param type: Backup target type (Physical, Virtual, ...).
:type type: :class:`BackupTargetType`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTarget`
"""
# Get the first persistent disk
disks = node.extra['disks']
if disks is not None:
return self.create_target(
name=node.name,
address=disks[0]['source'],
type=BackupTargetType.VOLUME,
extra=None)
else:
raise RuntimeError("Node does not have any block devices")
def create_target_from_container(self, container,
type=BackupTargetType.OBJECT,
extra=None):
"""
Creates a new backup target from an existing storage container
        :param container: The Container to backup
        :type container: ``Container``
:param type: Backup target type (Physical, Virtual, ...).
:type type: :class:`BackupTargetType`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTarget`
"""
raise NotImplementedError(
'create_target_from_container not implemented for this driver')
def update_target(self, target, name, address, extra):
"""
Update the properties of a backup target
:param target: Backup target to update
:type target: Instance of :class:`BackupTarget`
:param name: Name of the target
:type name: ``str``
:param address: Hostname, FQDN, IP, file path etc.
:type address: ``str``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTarget`
"""
        # Does nothing since any volume can be snapshotted at any time.
return self.ex_get_target_by_source(address)
def delete_target(self, target):
"""
Delete a backup target
:param target: Backup target to delete
:type target: Instance of :class:`BackupTarget`
"""
raise NotImplementedError(
'delete_target not implemented for this driver')
def list_recovery_points(self, target, start_date=None, end_date=None):
"""
List the recovery points available for a target
        :param target: Backup target to list recovery points for
        :type target: Instance of :class:`BackupTarget`
        :param start_date: The start date to show jobs between (optional)
        :type start_date: :class:`datetime.datetime`
        :param end_date: The end date to show jobs between (optional)
        :type end_date: :class:`datetime.datetime`
:rtype: ``list`` of :class:`BackupTargetRecoveryPoint`
"""
request = '/global/snapshots'
response = self.connection.request(request, method='GET').object
return self._to_recovery_points(response, target)
def recover_target(self, target, recovery_point, path=None):
"""
Recover a backup target to a recovery point
        :param target: Backup target to recover
        :type target: Instance of :class:`BackupTarget`
        :param recovery_point: Recovery point to restore from
        :type recovery_point: Instance of :class:`BackupTargetRecoveryPoint`
:param path: The part of the recovery point to recover (optional)
:type path: ``str``
:rtype: Instance of :class:`BackupTargetJob`
"""
raise NotImplementedError(
'recover_target not implemented for this driver')
def recover_target_out_of_place(self, target, recovery_point,
recovery_target, path=None):
"""
Recover a backup target to a recovery point out-of-place
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
        :param recovery_point: Recovery point to restore from
        :type recovery_point: Instance of :class:`BackupTargetRecoveryPoint`
        :param recovery_target: Backup target to recover the data to
        :type recovery_target: Instance of :class:`BackupTarget`
:param path: The part of the recovery point to recover (optional)
:type path: ``str``
:rtype: Instance of :class:`BackupTargetJob`
"""
raise NotImplementedError(
'recover_target_out_of_place not implemented for this driver')
def get_target_job(self, target, id):
"""
Get a specific backup job by ID
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
        :param id: ID of the backup job to fetch
        :type id: ``str``
:rtype: :class:`BackupTargetJob`
"""
jobs = self.list_target_jobs(target)
return list(filter(lambda x: x.id == id, jobs))[0]
def list_target_jobs(self, target):
"""
List the backup jobs on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:rtype: ``list`` of :class:`BackupTargetJob`
"""
return []
def create_target_job(self, target, extra=None):
"""
Create a new backup job on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTargetJob`
"""
name = target.name
request = '/zones/%s/disks/%s/createSnapshot' % (
target.extra['zone'].name, target.name)
snapshot_data = {
'source': target.extra['source']
}
self.connection.async_request(request, method='POST',
data=snapshot_data)
return self._to_job(self.ex_get_snapshot(name), target)
def resume_target_job(self, target, job):
"""
Resume a suspended backup job on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:param job: Backup target job to resume
:type job: Instance of :class:`BackupTargetJob`
:rtype: ``bool``
"""
raise NotImplementedError(
'resume_target_job not supported for this driver')
def suspend_target_job(self, target, job):
"""
Suspend a running backup job on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:param job: Backup target job to suspend
:type job: Instance of :class:`BackupTargetJob`
:rtype: ``bool``
"""
raise NotImplementedError(
'suspend_target_job not supported for this driver')
def cancel_target_job(self, target, job):
"""
Cancel a backup job on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:param job: Backup target job to cancel
:type job: Instance of :class:`BackupTargetJob`
:rtype: ``bool``
"""
raise NotImplementedError(
'cancel_target_job not supported for this driver')
def _to_recovery_points(self, data, target):
return [self._to_recovery_point(item, target)
for item in data.items]
def _to_recovery_point(self, item, target):
id = item.id
date = parse_date(item.creationTimestamp)
point = BackupTargetRecoveryPoint(
id=id,
date=date,
target=target,
driver=self.connection.driver,
extra={
'snapshot-id': id,
},
)
return point
def _to_jobs(self, data, target):
return [self._to_job(item, target)
for item in data.items]
def _to_job(self, item, target):
id = item.id
job = BackupTargetJob(
id=id,
status=BackupTargetJobStatusType.PENDING,
progress=0,
target=target,
driver=self.connection.driver,
extra={
},
)
return job
def ex_get_snapshot(self, name):
request = '/global/snapshots/%s' % (name)
response = self.connection.request(request, method='GET').object
return response
def ex_get_target_by_source(self, source):
return BackupTarget(
id=source,
name=source,
address=source,
type=BackupTargetType.VOLUME,
driver=self.connection.driver,
extra={
"source": source
}
)
|
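A short usage sketch for the driver above. The project name, credentials, and disk URL are placeholders; real calls require valid Google credentials and network access:

from libcloud.backup.drivers.gce import GCEBackupDriver

driver = GCEBackupDriver(
    'sa-account@my-project.iam.gserviceaccount.com',  # placeholder user_id
    key='/path/to/key.pem',                           # placeholder key file
    project='my-project')                             # required keyword

# create_target() simply wraps the volume's source URL in a BackupTarget.
target = driver.create_target(
    name='my-disk',
    address='https://www.googleapis.com/compute/v1/projects/my-project'
            '/zones/us-central1-a/disks/my-disk')

# list_recovery_points() maps the project's /global/snapshots onto
# BackupTargetRecoveryPoint objects for the given target.
for point in driver.list_recovery_points(target):
    print('%s %s' % (point.id, point.date))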
zefredzocohen1/maps-ci
|
refs/heads/master
|
public1/assets/plugins/ionicons/builder/generate.py
|
357
|
from subprocess import call
import os
import json
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(BUILDER_PATH, '..')
FONTS_FOLDER_PATH = os.path.join(ROOT_PATH, 'fonts')
CSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'css')
SCSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'scss')
LESS_FOLDER_PATH = os.path.join(ROOT_PATH, 'less')
def main():
generate_font_files()
data = get_build_data()
rename_svg_glyph_names(data)
generate_scss(data)
generate_less(data)
generate_cheatsheet(data)
generate_component_json(data)
generate_composer_json(data)
generate_bower_json(data)
def generate_font_files():
print "Generate Fonts"
cmd = "fontforge -script %s/scripts/generate_font.py" % (BUILDER_PATH)
call(cmd, shell=True)
def rename_svg_glyph_names(data):
# hacky and slow (but safe) way to rename glyph-name attributes
svg_path = os.path.join(FONTS_FOLDER_PATH, 'ionicons.svg')
svg_file = open(svg_path, 'r+')
svg_text = svg_file.read()
svg_file.seek(0)
for ionicon in data['icons']:
# uniF2CA
org_name = 'uni%s' % (ionicon['code'].replace('0x', '').upper())
ion_name = 'ion-%s' % (ionicon['name'])
svg_text = svg_text.replace(org_name, ion_name)
    svg_file.write(svg_text)
    svg_file.truncate()  # drop stale bytes in case the new text is shorter
    svg_file.close()
def generate_less(data):
print "Generate LESS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-variables.less')
icons_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-icons.less')
d = []
    d.append('/*!')
    d.append('Ionicons, v%s' % (font_version))
    d.append('Created by Ben Sperry for the Ionic Framework, http://ionicons.com/')
    d.append('https://twitter.com/benjsperry https://twitter.com/ionicframework')
    d.append('MIT License: https://github.com/driftyco/ionicons')
    d.append('*/')
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('@ionicons-font-path: "../fonts";')
d.append('@ionicons-font-family: "%s";' % (font_name) )
d.append('@ionicons-version: "%s";' % (font_version) )
d.append('@ionicons-prefix: %s;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('@ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.@{ionicons-prefix}%s:before' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' &:extend(.ion);')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.@{ionicons-prefix}%s:before { content: @ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
def generate_scss(data):
print "Generate SCSS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-variables.scss')
icons_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-icons.scss')
d = []
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('$ionicons-font-path: "../fonts" !default;')
d.append('$ionicons-font-family: "%s" !default;' % (font_name) )
d.append('$ionicons-version: "%s" !default;' % (font_version) )
d.append('$ionicons-prefix: %s !default;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('$ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.#{$ionicons-prefix}%s:before' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' @extend .ion;')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.#{$ionicons-prefix}%s:before { content: $ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
generate_css_from_scss(data)
def generate_css_from_scss(data):
print "Generate CSS From SCSS"
scss_file_path = os.path.join(SCSS_FOLDER_PATH, 'ionicons.scss')
css_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.css')
css_min_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.min.css')
cmd = "sass %s %s --style compact" % (scss_file_path, css_file_path)
call(cmd, shell=True)
print "Generate Minified CSS From SCSS"
cmd = "sass %s %s --style compressed" % (scss_file_path, css_min_file_path)
call(cmd, shell=True)
def generate_cheatsheet(data):
print "Generate Cheatsheet"
cheatsheet_file_path = os.path.join(ROOT_PATH, 'cheatsheet.html')
template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-row.html')
f = open(template_path, 'r')
template_html = f.read()
f.close()
f = open(icon_row_path, 'r')
icon_row_template = f.read()
f.close()
content = []
for ionicon in data['icons']:
css_code = ionicon['code'].replace('0x', '\\')
        escaped_html_code = ionicon['code'].replace('0x', '&amp;#x') + ';'
html_code = ionicon['code'].replace('0x', '&#x') + ';'
item_row = icon_row_template
item_row = item_row.replace('{{name}}', ionicon['name'])
item_row = item_row.replace('{{prefix}}', data['prefix'])
item_row = item_row.replace('{{css_code}}', css_code)
item_row = item_row.replace('{{escaped_html_code}}', escaped_html_code)
item_row = item_row.replace('{{html_code}}', html_code)
content.append(item_row)
template_html = template_html.replace("{{font_name}}", data["name"])
template_html = template_html.replace("{{font_version}}", data["version"])
template_html = template_html.replace("{{icon_count}}", str(len(data["icons"])) )
template_html = template_html.replace("{{content}}", '\n'.join(content) )
f = open(cheatsheet_file_path, 'w')
f.write(template_html)
f.close()
def generate_component_json(data):
print "Generate component.json"
d = {
"name": data['name'],
"repo": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"version": data['version'],
"keywords": [],
"dependencies": {},
"development": {},
"license": "MIT",
"styles": [
"css/%s.css" % (data['name'].lower())
],
"fonts": [
"fonts/%s.eot" % (data['name'].lower()),
"fonts/%s.svg" % (data['name'].lower()),
"fonts/%s.ttf" % (data['name'].lower()),
"fonts/%s.woff" % (data['name'].lower())
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
component_file_path = os.path.join(ROOT_PATH, 'component.json')
f = open(component_file_path, 'w')
f.write(txt)
f.close()
def generate_composer_json(data):
print "Generate composer.json"
d = {
"name": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"homepage": "http://ionicons.com/",
"authors": [
{
"name": "Ben Sperry",
"email": "ben@drifty.com",
"role": "Designer",
"homepage": "https://twitter.com/benjsperry"
},
{
"name": "Adam Bradley",
"email": "adam@drifty.com",
"role": "Developer",
"homepage": "https://twitter.com/adamdbradley"
},
{
"name": "Max Lynch",
"email": "max@drifty.com",
"role": "Developer",
"homepage": "https://twitter.com/maxlynch"
}
],
"extra": {},
"license": [ "MIT" ]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
composer_file_path = os.path.join(ROOT_PATH, 'composer.json')
f = open(composer_file_path, 'w')
f.write(txt)
f.close()
def generate_bower_json(data):
print "Generate bower.json"
d = {
"name": data['name'],
"version": data['version'],
"homepage": "https://github.com/driftyco/ionicons",
"authors": [
"Ben Sperry <ben@drifty.com>",
"Adam Bradley <adam@drifty.com>",
"Max Lynch <max@drifty.com>"
],
"description": "Ionicons - free and beautiful icons from the creators of Ionic Framework",
"main": [
"css/%s.css" % (data['name'].lower()),
"fonts/*"
],
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"license": "MIT",
"ignore": [
"**/.*",
"builder",
"node_modules",
"bower_components",
"test",
"tests"
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
bower_file_path = os.path.join(ROOT_PATH, 'bower.json')
f = open(bower_file_path, 'w')
f.write(txt)
f.close()
def get_build_data():
build_data_path = os.path.join(BUILDER_PATH, 'build_data.json')
f = open(build_data_path, 'r')
data = json.loads(f.read())
f.close()
return data
if __name__ == "__main__":
main()
|
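get_build_data() above loads builder/build_data.json. Below is a minimal example of the shape the generators consume, inferred from the fields they access (name, version, prefix, and per-icon name/code); the concrete values are placeholders, not real Ionicons metadata:

# build_data.json (illustrative), expressed as the Python dict it parses to:
example_build_data = {
    "name": "Ionicons",
    "version": "0.0.0",                      # placeholder version string
    "prefix": "ion-",                        # CSS class prefix
    "icons": [
        # The '0x' prefix is expected: the generators rewrite it to '\\'
        # for CSS content values and to '&#x' for the HTML cheatsheet.
        {"name": "alert", "code": "0xf101"},
        {"name": "archive", "code": "0xf102"},
    ],
}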
gengue/django
|
refs/heads/master
|
tests/forms_tests/tests/test_fields.py
|
5
|
# -*- coding: utf-8 -*-
"""
##########
# Fields #
##########
Each Field class does some sort of validation. Each Field has a clean() method,
which either raises django.forms.ValidationError or returns the "clean"
data -- usually a Unicode object, but, in some rare cases, a list.
Each Field's __init__() takes at least these parameters:
required -- Boolean that specifies whether the field is required.
True by default.
widget -- A Widget class, or instance of a Widget class, that should be
used for this Field when displaying it. Each Field has a default
Widget that it'll use if you don't specify this. In most cases,
the default widget is TextInput.
label -- A verbose name for this field, for use in displaying this field in
a form. By default, Django will use a "pretty" version of the form
field name, if the Field is part of a Form.
initial -- A value to use in this Field's initial display. This value is
*not* used as a fallback if data isn't given.
Other than that, the Field subclasses have class-specific options for
__init__(). For example, CharField has a max_length option.
"""
from __future__ import unicode_literals
import datetime
import os
import pickle
import re
import uuid
from decimal import Decimal
from unittest import skipIf
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, ComboField, DateField, DateTimeField,
DecimalField, DurationField, EmailField, Field, FileField, FilePathField,
FloatField, Form, GenericIPAddressField, HiddenInput, ImageField,
IntegerField, MultipleChoiceField, NullBooleanField, NumberInput,
PasswordInput, RadioSelect, RegexField, SlugField, SplitDateTimeField,
Textarea, TextInput, TimeField, TypedChoiceField, TypedMultipleChoiceField,
URLField, UUIDField, ValidationError, Widget, forms,
)
from django.test import SimpleTestCase, ignore_warnings
from django.utils import formats, six, translation
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.duration import duration_string
try:
from PIL import Image
except ImportError:
Image = None
def fix_os_paths(x):
if isinstance(x, six.string_types):
return x.replace('\\', '/')
elif isinstance(x, tuple):
return tuple(fix_os_paths(list(x)))
elif isinstance(x, list):
return [fix_os_paths(y) for y in x]
else:
return x
class FieldsTests(SimpleTestCase):
def assertWidgetRendersTo(self, field, to):
class _Form(Form):
f = field
self.assertHTMLEqual(str(_Form()['f']), to)
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A(object):
def __init__(self):
self.class_a_var = True
super(A, self).__init__()
class ComplexField(Field, A):
def __init__(self):
super(ComplexField, self).__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
# CharField ###################################################################
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '1234567890a')
self.assertEqual(f.max_length, 10)
self.assertEqual(f.min_length, None)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
        Ensure that setting min_length or max_length to something that is not a
        number raises an exception.
"""
self.assertRaises(ValueError, CharField, min_length='a')
self.assertRaises(ValueError, CharField, max_length='a')
self.assertRaises(ValueError, CharField, 'a')
def test_charfield_widget_attrs(self):
"""
Ensure that CharField.widget_attrs() always returns a dictionary.
Refs #15912
"""
# Return an empty dictionary if max_length is None
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Otherwise, return a maxlength attribute equal to max_length
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
def test_charfield_strip(self):
"""
Ensure that values have whitespace stripped and that strip=False works.
"""
f = CharField()
self.assertEqual(f.clean(' 1'), '1')
self.assertEqual(f.clean('1 '), '1')
f = CharField(strip=False)
self.assertEqual(f.clean(' 1'), ' 1')
self.assertEqual(f.clean('1 '), '1 ')
# IntegerField ################################################################
def test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(42, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_3(self):
f = IntegerField(max_value=10)
self.assertWidgetRendersTo(f, '<input max="10" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean(1))
self.assertEqual(10, f.clean(10))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, 11)
self.assertEqual(10, f.clean('10'))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, '11')
self.assertEqual(f.max_value, 10)
self.assertEqual(f.min_value, None)
def test_integerfield_4(self):
f = IntegerField(min_value=10)
self.assertWidgetRendersTo(f, '<input id="id_f" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, 10)
def test_integerfield_5(self):
f = IntegerField(min_value=10, max_value=20)
self.assertWidgetRendersTo(f, '<input id="id_f" max="20" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(20, f.clean(20))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 20.'", f.clean, 21)
self.assertEqual(f.max_value, 20)
self.assertEqual(f.min_value, 10)
def test_integerfield_localized(self):
"""
Make sure localized IntegerField's widget renders to a text input with
no number input specific attributes.
"""
f1 = IntegerField(localize=True)
self.assertWidgetRendersTo(f1, '<input id="id_f" name="f" type="text" />')
def test_integerfield_float(self):
f = IntegerField()
self.assertEqual(1, f.clean(1.0))
self.assertEqual(1, f.clean('1.0'))
self.assertEqual(1, f.clean(' 1.0 '))
self.assertEqual(1, f.clean('1.'))
self.assertEqual(1, f.clean(' 1. '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1.5')
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '…')
def test_integerfield_big_num(self):
f = IntegerField()
self.assertEqual(9223372036854775808, f.clean(9223372036854775808))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808'))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808.0'))
def test_integerfield_subclass(self):
"""
Test that class-defined widget is not overwritten by __init__ (#22245).
"""
class MyIntegerField(IntegerField):
widget = Textarea
f = MyIntegerField()
self.assertEqual(f.widget.__class__, Textarea)
f = MyIntegerField(localize=True)
self.assertEqual(f.widget.__class__, Textarea)
# FloatField ##################################################################
def test_floatfield_1(self):
f = FloatField()
self.assertWidgetRendersTo(f, '<input step="any" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1.0, f.clean('1'))
self.assertIsInstance(f.clean('1'), float)
self.assertEqual(23.0, f.clean('23'))
self.assertEqual(3.1400000000000001, f.clean('3.14'))
self.assertEqual(3.1400000000000001, f.clean(3.14))
self.assertEqual(42.0, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertEqual(1.0, f.clean('1.0 '))
self.assertEqual(1.0, f.clean(' 1.0'))
self.assertEqual(1.0, f.clean(' 1.0 '))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Infinity')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
def test_floatfield_2(self):
f = FloatField(required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(1.0, f.clean('1'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_floatfield_3(self):
f = FloatField(max_value=1.5, min_value=0.5)
self.assertWidgetRendersTo(f, '<input step="any" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(1.5, f.clean('1.5'))
self.assertEqual(0.5, f.clean('0.5'))
self.assertEqual(f.max_value, 1.5)
self.assertEqual(f.min_value, 0.5)
def test_floatfield_widget_attrs(self):
f = FloatField(widget=NumberInput(attrs={'step': 0.01, 'max': 1.0, 'min': 0.0}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.0" max="1.0" type="number" id="id_f" />')
def test_floatfield_localized(self):
"""
Make sure localized FloatField's widget renders to a text input with
no number input specific attributes.
"""
f = FloatField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_floatfield_changed(self):
f = FloatField()
n = 4.35
self.assertFalse(f.has_changed(n, '4.3500'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = FloatField(localize=True)
localized_n = formats.localize_input(n) # -> '4,35' in French
self.assertFalse(f.has_changed(n, localized_n))
# DecimalField ################################################################
def test_decimalfield_1(self):
f = DecimalField(max_digits=4, decimal_places=2)
self.assertWidgetRendersTo(f, '<input id="id_f" step="0.01" type="number" name="f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertIsInstance(f.clean('1'), Decimal)
self.assertEqual(f.clean('23'), Decimal("23"))
self.assertEqual(f.clean('3.14'), Decimal("3.14"))
self.assertEqual(f.clean(3.14), Decimal("3.14"))
self.assertEqual(f.clean(Decimal('3.14')), Decimal("3.14"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'łąść')
self.assertEqual(f.clean('1.0 '), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0'), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0 '), Decimal("1.0"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '123.45')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '1.234')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 digits before the decimal point.'", f.clean, '123.4')
self.assertEqual(f.clean('-12.34'), Decimal("-12.34"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-123.45')
self.assertEqual(f.clean('-.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-00.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-000.12'), Decimal("-0.12"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '-000.123')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-000.12345')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '--0.12')
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_2(self):
f = DecimalField(max_digits=4, decimal_places=2, required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_3(self):
f = DecimalField(max_digits=4, decimal_places=2, max_value=Decimal('1.5'), min_value=Decimal('0.5'))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(f.clean('1.5'), Decimal("1.5"))
self.assertEqual(f.clean('0.5'), Decimal("0.5"))
self.assertEqual(f.clean('.5'), Decimal("0.5"))
self.assertEqual(f.clean('00.50'), Decimal("0.50"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, Decimal('1.5'))
self.assertEqual(f.min_value, Decimal('0.5'))
def test_decimalfield_4(self):
f = DecimalField(decimal_places=2)
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '0.00000001')
def test_decimalfield_5(self):
f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('0000000.10'), Decimal("0.1"))
# But a leading 0 before the . doesn't count towards max_digits
self.assertEqual(f.clean('0000000.100'), Decimal("0.100"))
# Only leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('000000.02'), Decimal('0.02'))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 3 digits in total.'", f.clean, '000000.0002')
self.assertEqual(f.clean('.002'), Decimal("0.002"))
def test_decimalfield_6(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('.01'), Decimal(".01"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 0 digits before the decimal point.'", f.clean, '1.1')
def test_decimalfield_scientific(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('1E+2'), Decimal('1E+2'))
self.assertEqual(f.clean('1e+2'), Decimal('1E+2'))
with self.assertRaisesMessage(ValidationError, "Ensure that there are no more"):
f.clean('0.546e+2')
def test_decimalfield_widget_attrs(self):
f = DecimalField(max_digits=6, decimal_places=2)
self.assertEqual(f.widget_attrs(Widget()), {})
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '0.01'})
f = DecimalField(max_digits=10, decimal_places=0)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1'})
f = DecimalField(max_digits=19, decimal_places=19)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1e-19'})
f = DecimalField(max_digits=20)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': 'any'})
f = DecimalField(max_digits=6, widget=NumberInput(attrs={'step': '0.01'}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" type="number" id="id_f" />')
def test_decimalfield_localized(self):
"""
Make sure localized DecimalField's widget renders to a text input with
no number input specific attributes.
"""
f = DecimalField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_decimalfield_changed(self):
f = DecimalField(max_digits=2, decimal_places=2)
d = Decimal("0.1")
self.assertFalse(f.has_changed(d, '0.10'))
self.assertTrue(f.has_changed(d, '0.101'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = DecimalField(max_digits=2, decimal_places=2, localize=True)
localized_d = formats.localize_input(d) # -> '0,1' in French
self.assertFalse(f.has_changed(d, localized_d))
# DateField ###################################################################
def test_datefield_1(self):
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October, 2006'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-4-31')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '200a-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '25/10/06')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006 10 25'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/2006')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 25 October 2006 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, 'a\x00b')
@ignore_warnings(category=RemovedInDjango110Warning) # for _has_changed
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = datetime.date(2007, 9, 17)
self.assertFalse(f.has_changed(d, '17/09/2007'))
# Test for deprecated behavior _has_changed
self.assertFalse(f._has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""Test that field.strptime doesn't raise an UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
# TimeField ###################################################################
def test_timefield_1(self):
f = TimeField()
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(14, 25), f.clean('14:25'))
self.assertEqual(datetime.time(14, 25, 59), f.clean('14:25:59'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '1:24 p.m.')
def test_timefield_2(self):
f = TimeField(input_formats=['%I:%M %p'])
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(4, 25), f.clean('4:25 AM'))
self.assertEqual(datetime.time(16, 25), f.clean('4:25 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '14:30:45')
def test_timefield_3(self):
f = TimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.time(14, 25), f.clean(' 14:25 '))
self.assertEqual(datetime.time(14, 25, 59), f.clean(' 14:25:59 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ' ')
def test_timefield_changed(self):
t1 = datetime.time(12, 51, 34, 482548)
t2 = datetime.time(12, 51)
f = TimeField(input_formats=['%H:%M', '%H:%M %p'])
self.assertTrue(f.has_changed(t1, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51 PM'))
# DateTimeField ###############################################################
def test_datetimefield_1(self):
f = DateTimeField()
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.0002'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('2006-10-25 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('2006-10-25'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/2006 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/2006 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/2006'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/06 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/06 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/06'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 4:30 p.m.')
def test_datetimefield_2(self):
f = DateTimeField(input_formats=['%Y %m %d %I:%M %p'])
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006 10 25 2:30 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 14:30:45')
def test_datetimefield_3(self):
f = DateTimeField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datetimefield_4(self):
f = DateTimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 2006-10-25 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 2006-10-25 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/2006 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(' 10/25/2006 14:30 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/06 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/06 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, ' ')
def test_datetimefield_5(self):
f = DateTimeField(input_formats=['%Y.%m.%d %H:%M:%S.%f'])
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006.10.25 14:30:45.0002'))
def test_datetimefield_changed(self):
format = '%Y %m %d %I:%M %p'
f = DateTimeField(input_formats=[format])
d = datetime.datetime(2006, 9, 17, 14, 30, 0)
self.assertFalse(f.has_changed(d, '2006 09 17 2:30 PM'))
# DurationField ###########################################################
def test_durationfield_1(self):
f = DurationField()
self.assertEqual(datetime.timedelta(seconds=30), f.clean('30'))
self.assertEqual(
datetime.timedelta(minutes=15, seconds=30),
f.clean('15:30')
)
self.assertEqual(
datetime.timedelta(hours=1, minutes=15, seconds=30),
f.clean('1:15:30')
)
self.assertEqual(
datetime.timedelta(
days=1, hours=1, minutes=15, seconds=30, milliseconds=300),
f.clean('1 1:15:30.3')
)
def test_durationfield_2(self):
class DurationForm(Form):
duration = DurationField(initial=datetime.timedelta(hours=1))
f = DurationForm()
self.assertHTMLEqual(
'<input id="id_duration" type="text" name="duration" value="01:00:00">',
str(f['duration'])
)
def test_durationfield_prepare_value(self):
field = DurationField()
td = datetime.timedelta(minutes=15, seconds=30)
self.assertEqual(field.prepare_value(td), duration_string(td))
self.assertEqual(field.prepare_value('arbitrary'), 'arbitrary')
self.assertIsNone(field.prepare_value(None))
# RegexField ##################################################################
def test_regexfield_1(self):
f = RegexField('^[0-9][A-F][0-9]$')
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_regexfield_2(self):
f = RegexField('^[0-9][A-F][0-9]$', required=False)
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertEqual('', f.clean(''))
def test_regexfield_3(self):
f = RegexField(re.compile('^[0-9][A-F][0-9]$'))
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
@ignore_warnings(category=RemovedInDjango110Warning) # error_message deprecation
def test_regexfield_4(self):
f = RegexField('^[0-9][0-9][0-9][0-9]$', error_message='Enter a four-digit number.')
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, '123')
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, 'abcd')
def test_regexfield_5(self):
f = RegexField('^[0-9]+$', min_length=5, max_length=10)
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 5 characters (it has 3).'", f.clean, '123')
        six.assertRaisesRegex(self, ValidationError, r"'Ensure this value has at least 5 characters \(it has 3\)\.', u?'Enter a valid value\.'", f.clean, 'abc')
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '12345678901')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '12345a')
def test_regexfield_6(self):
"""
Ensure that it works with unicode characters.
Refs #.
"""
        f = RegexField(r'^\w+$')
self.assertEqual('éèøçÎÎ你好', f.clean('éèøçÎÎ你好'))
def test_change_regex_after_init(self):
f = RegexField('^[a-z]+$')
f.regex = '^[0-9]+$'
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, 'abcd')
# EmailField ##################################################################
# See also validators tests for validate_email specific tests
def test_emailfield_1(self):
f = EmailField()
self.assertWidgetRendersTo(f, '<input type="email" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('person@example.com', f.clean('person@example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
self.assertEqual('local@domain.with.idn.xyz\xe4\xf6\xfc\xdfabc.part.com',
f.clean('local@domain.with.idn.xyzäöüßabc.part.com'))
def test_email_regexp_for_performance(self):
f = EmailField()
        # Check for a runaway-regex security problem (catastrophic
        # backtracking). This will hang essentially forever if the security
        # fix isn't in place.
addr = 'viewx3dtextx26qx3d@yahoo.comx26latlngx3d15854521645943074058'
self.assertEqual(addr, f.clean(addr))
def test_emailfield_not_required(self):
f = EmailField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('person@example.com', f.clean('person@example.com'))
self.assertEqual('example@example.com', f.clean(' example@example.com \t \t '))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
def test_emailfield_min_max_length(self):
f = EmailField(min_length=10, max_length=15)
self.assertWidgetRendersTo(f, '<input id="id_f" type="email" name="f" maxlength="15" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 9).'", f.clean, 'a@foo.com')
self.assertEqual('alf@foo.com', f.clean('alf@foo.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 15 characters (it has 20).'", f.clean, 'alf123456788@foo.com')
# FileField ##################################################################
def test_filefield_1(self):
f = FileField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '', '')
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None, '')
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''), '')
self.assertEqual('files/test3.pdf', f.clean(None, 'files/test3.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, 'some content that is not a file')
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', None))
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', b''))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8')))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'), 'files/test4.pdf')))
def test_filefield_2(self):
f = FileField(max_length=5)
self.assertRaisesMessage(ValidationError, "'Ensure this filename has at most 5 characters (it has 18).'", f.clean, SimpleUploadedFile('test_maxlength.txt', b'hello world'))
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
def test_filefield_3(self):
f = FileField(allow_empty_file=True)
self.assertEqual(SimpleUploadedFile,
type(f.clean(SimpleUploadedFile('name', b''))))
def test_filefield_changed(self):
        """
        Test for the behavior of has_changed for FileField. The value of data
        will more than likely come from request.FILES. The value of initial
        data will likely be a filename stored in the database. Since its value
        is of no use to a FileField, it is ignored.
        """
f = FileField()
# No file was uploaded and no initial data.
self.assertFalse(f.has_changed('', None))
# A file was uploaded and no initial data.
self.assertTrue(f.has_changed('', {'filename': 'resume.txt', 'content': 'My resume'}))
# A file was not uploaded, but there is initial data
self.assertFalse(f.has_changed('resume.txt', None))
# A file was uploaded and there is initial data (file identity is not dealt
# with here)
self.assertTrue(f.has_changed('resume.txt', {'filename': 'resume.txt', 'content': 'My resume'}))
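    # A rough sketch of the semantics exercised above (illustrative, not the
    # verbatim implementation): FileField.has_changed only asks whether any
    # new data was submitted, never comparing file contents or names:
    #
    #   def has_changed(self, initial, data):
    #       return data is not None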
# ImageField ##################################################################
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_image_after_clean(self):
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.png'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.png', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('PNG', uploaded_file.image.format)
self.assertEqual('image/png', uploaded_file.content_type)
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_bitmap_image_after_clean(self):
"""
This also tests the situation when Pillow doesn't detect the MIME type
of the image (#24948).
"""
from PIL.BmpImagePlugin import BmpImageFile
try:
Image.register_mime(BmpImageFile.format, None)
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.bmp'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.bmp', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('BMP', uploaded_file.image.format)
self.assertIsNone(uploaded_file.content_type)
finally:
Image.register_mime(BmpImageFile.format, 'image/bmp')
# URLField ##################################################################
def test_urlfield_1(self):
f = URLField()
self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('http://localhost', f.clean('http://localhost'))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com.', f.clean('http://example.com.'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertEqual('http://www.example.com:8000/test', f.clean('http://www.example.com:8000/test'))
self.assertEqual('http://valid-with-hyphens.com', f.clean('valid-with-hyphens.com'))
self.assertEqual('http://subdomain.domain.com', f.clean('subdomain.domain.com'))
self.assertEqual('http://200.8.9.10', f.clean('http://200.8.9.10'))
self.assertEqual('http://200.8.9.10:8000/test', f.clean('http://200.8.9.10:8000/test'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'com.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://invalid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://-invalid.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.alid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.-alid.com')
self.assertEqual('http://valid-----hyphens.com', f.clean('http://valid-----hyphens.com'))
self.assertEqual('http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah', f.clean('http://some.idn.xyzäöüßabc.domain.com:123/blah'))
self.assertEqual('http://www.example.com/s/http://code.djangoproject.com/ticket/13804', f.clean('www.example.com/s/http://code.djangoproject.com/ticket/13804'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '[a')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://[a')
def test_url_regex_ticket11198(self):
f = URLField()
# hangs "forever" if catastrophic backtracking in ticket:#11198 not fixed
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 200,))
# a second test, to make sure the problem is really addressed, even on
# domains that don't fail the domain label length check in the regex
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 60,))
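    # Informally, the old catastrophic backtracking came from nested
    # quantifiers in the URL regex; on a non-matching input the engine tries
    # exponentially many ways to split the repeated characters between the
    # nested groups before failing. An illustrative (hypothetical) pattern
    # with the same blow-up shape, not the actual URLValidator regex:
    #
    #   re.match(r'^([a-zA-Z0-9]+-?)+$', 'X' * 30 + '!')  # effectively hangs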
def test_urlfield_2(self):
f = URLField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
def test_urlfield_5(self):
f = URLField(min_length=15, max_length=20)
self.assertWidgetRendersTo(f, '<input id="id_f" type="url" name="f" maxlength="20" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 15 characters (it has 12).'", f.clean, 'http://f.com')
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 37).'", f.clean, 'http://abcdefghijklmnopqrstuvwxyz.com')
def test_urlfield_6(self):
f = URLField(required=False)
self.assertEqual('http://example.com', f.clean('example.com'))
self.assertEqual('', f.clean(''))
self.assertEqual('https://example.com', f.clean('https://example.com'))
def test_urlfield_7(self):
f = URLField()
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com/test', f.clean('http://example.com/test'))
self.assertEqual('http://example.com?some_param=some_value',
f.clean('http://example.com?some_param=some_value'))
def test_urlfield_9(self):
f = URLField()
urls = (
'http://עברית.idn.icann.org/',
'http://sãopaulo.com/',
'http://sãopaulo.com.br/',
'http://пример.испытание/',
'http://مثال.إختبار/',
'http://例子.测试/',
'http://例子.測試/',
'http://उदाहरण.परीक्षा/',
'http://例え.テスト/',
'http://مثال.آزمایشی/',
'http://실례.테스트/',
'http://العربية.idn.icann.org/',
)
for url in urls:
# Valid IDN
self.assertEqual(url, f.clean(url))
def test_urlfield_10(self):
"""Test URLField correctly validates IPv6 (#18779)."""
f = URLField()
urls = (
'http://[12:34::3a53]/',
'http://[a34:9238::]:8080/',
)
for url in urls:
self.assertEqual(url, f.clean(url))
def test_urlfield_not_string(self):
f = URLField(required=False)
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 23)
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')
# BooleanField ################################################################
def test_booleanfield_1(self):
f = BooleanField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(True, f.clean(True))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, False)
self.assertEqual(True, f.clean(1))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 0)
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(True, f.clean('True'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 'False')
def test_booleanfield_2(self):
f = BooleanField(required=False)
self.assertEqual(False, f.clean(''))
self.assertEqual(False, f.clean(None))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertEqual(True, f.clean(1))
self.assertEqual(False, f.clean(0))
self.assertEqual(True, f.clean('1'))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(False, f.clean('False'))
self.assertEqual(False, f.clean('false'))
self.assertEqual(False, f.clean('FaLsE'))
def test_boolean_picklable(self):
self.assertIsInstance(pickle.loads(pickle.dumps(BooleanField())), BooleanField)
def test_booleanfield_changed(self):
f = BooleanField()
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed('', None))
self.assertFalse(f.has_changed('', ''))
self.assertTrue(f.has_changed(False, 'on'))
self.assertFalse(f.has_changed(True, 'on'))
self.assertTrue(f.has_changed(True, ''))
# Initial value may have mutated to a string due to show_hidden_initial (#19537)
self.assertTrue(f.has_changed('False', 'on'))
# ChoiceField #################################################################
def test_choicefield_1(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_2(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_3(self):
f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')])
self.assertEqual('J', f.clean('J'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. John is not one of the available choices.'", f.clean, 'John')
def test_choicefield_4(self):
f = ChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertEqual('3', f.clean(3))
self.assertEqual('3', f.clean('3'))
self.assertEqual('5', f.clean(5))
self.assertEqual('5', f.clean('5'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, '6')
def test_choicefield_callable(self):
choices = lambda: [('J', 'John'), ('P', 'Paul')]
f = ChoiceField(choices=choices)
self.assertEqual('J', f.clean('J'))
def test_choicefield_callable_may_evaluate_to_different_values(self):
choices = []
def choices_as_callable():
return choices
class ChoiceFieldForm(Form):
choicefield = ChoiceField(choices=choices_as_callable)
choices = [('J', 'John')]
form = ChoiceFieldForm()
self.assertEqual([('J', 'John')], list(form.fields['choicefield'].choices))
choices = [('P', 'Paul')]
form = ChoiceFieldForm()
self.assertEqual([('P', 'Paul')], list(form.fields['choicefield'].choices))
# TypedChoiceField ############################################################
# TypedChoiceField is just like ChoiceField, except that coerced types will
# be returned:
def test_typedchoicefield_1(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual(1, f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, '2')
def test_typedchoicefield_2(self):
# Different coercion, same validation.
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual(1.0, f.clean('1'))
def test_typedchoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual(True, f.clean('-1'))
def test_typedchoicefield_4(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, 'B')
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_typedchoicefield_5(self):
# Non-required fields aren't required
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual('', f.clean(''))
# If you want cleaning an empty value to return a different type, tell the field
def test_typedchoicefield_6(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean(''))
def test_typedchoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed(1, '1'))
self.assertFalse(f.has_changed('1', '1'))
def test_typedchoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedChoiceField(choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual(Decimal('1.2'), f.clean('2'))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, '3')
# NullBooleanField ############################################################
def test_nullbooleanfield_1(self):
f = NullBooleanField()
self.assertIsNone(f.clean(''))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertIsNone(f.clean(None))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('1'))
self.assertIsNone(f.clean('2'))
self.assertIsNone(f.clean('3'))
self.assertIsNone(f.clean('hello'))
self.assertEqual(True, f.clean('true'))
self.assertEqual(False, f.clean('false'))
def test_nullbooleanfield_2(self):
# Make sure that the internal value is preserved if using HiddenInput (#7753)
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm()
self.assertHTMLEqual('<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" /><input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />', str(f))
def test_nullbooleanfield_3(self):
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm({'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False'})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['hidden_nullbool1'])
self.assertEqual(False, f.cleaned_data['hidden_nullbool2'])
def test_nullbooleanfield_4(self):
# Make sure we're compatible with MySQL, which uses 0 and 1 for its boolean
# values. (#9609)
NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))
class MySQLNullBooleanForm(Form):
nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
f = MySQLNullBooleanForm({'nullbool0': '1', 'nullbool1': '0', 'nullbool2': ''})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['nullbool0'])
self.assertEqual(False, f.cleaned_data['nullbool1'])
self.assertIsNone(f.cleaned_data['nullbool2'])
def test_nullbooleanfield_changed(self):
f = NullBooleanField()
self.assertTrue(f.has_changed(False, None))
self.assertTrue(f.has_changed(None, False))
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(False, False))
self.assertTrue(f.has_changed(True, False))
self.assertTrue(f.has_changed(True, None))
# MultipleChoiceField #########################################################
def test_multiplechoicefield_1(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ())
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_2(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual([], f.clean(''))
self.assertEqual([], f.clean(None))
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertEqual([], f.clean([]))
self.assertEqual([], f.clean(()))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_3(self):
f = MultipleChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '5'], f.clean([1, 5]))
self.assertEqual(['1', '5'], f.clean([1, '5']))
self.assertEqual(['1', '5'], f.clean(['1', 5]))
self.assertEqual(['1', '5'], f.clean(['1', '5']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['6'])
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['1', '6'])
def test_multiplechoicefield_changed(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two'), ('3', 'Three')])
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed([], None))
self.assertTrue(f.has_changed(None, ['1']))
self.assertFalse(f.has_changed([1, 2], ['1', '2']))
self.assertFalse(f.has_changed([2, 1], ['1', '2']))
self.assertTrue(f.has_changed([1, 2], ['1']))
self.assertTrue(f.has_changed([1, 2], ['1', '3']))
# TypedMultipleChoiceField ############################################################
# TypedMultipleChoiceField is just like MultipleChoiceField, except that coerced types
# will be returned:
def test_typedmultiplechoicefield_1(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1], f.clean(['1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['2'])
def test_typedmultiplechoicefield_2(self):
# Different coercion, same validation.
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual([1.0], f.clean(['1']))
def test_typedmultiplechoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual([True], f.clean(['-1']))
def test_typedmultiplechoicefield_4(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1, -1], f.clean(['1', '-1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['1', '2'])
def test_typedmultiplechoicefield_5(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedMultipleChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, ['B'])
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
def test_typedmultiplechoicefield_6(self):
# Non-required fields aren't required
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual([], f.clean([]))
def test_typedmultiplechoicefield_7(self):
# If you want cleaning an empty value to return a different type, tell the field
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean([]))
def test_typedmultiplechoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
def test_typedmultiplechoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedMultipleChoiceField(
choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual([Decimal('1.2')], f.clean(['2']))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, ['3'])
# ComboField ##################################################################
def test_combofield_1(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()])
self.assertEqual('test@example.com', f.clean('test@example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, 'longemailaddress@example.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_combofield_2(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()], required=False)
self.assertEqual('test@example.com', f.clean('test@example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, 'longemailaddress@example.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
# FilePathField ###############################################################
def test_filepathfield_1(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
self.assertTrue(fix_os_paths(path).endswith('/django/forms/'))
def test_filepathfield_2(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path)
f.choices = [p for p in f.choices if p[0].endswith('.py')]
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. fields.py is not one of the available choices.'", f.clean, 'fields.py')
        self.assertTrue(fix_os_paths(f.clean(path + 'fields.py')).endswith('/django/forms/fields.py'))
def test_filepathfield_3(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
        f = FilePathField(path=path, match=r'^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_4(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
        f = FilePathField(path=path, recursive=True, match=r'^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/extras/__init__.py', 'extras/__init__.py'),
('/django/forms/extras/widgets.py', 'extras/widgets.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_folders(self):
path = os.path.dirname(upath(__file__)) + '/filepath_test_files/'
f = FilePathField(path=path, allow_folders=True, allow_files=False)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
f = FilePathField(path=path, allow_folders=True, allow_files=True)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/.dot-file', '.dot-file'),
('/tests/forms_tests/tests/filepath_test_files/1x1.bmp', '1x1.bmp'),
('/tests/forms_tests/tests/filepath_test_files/1x1.png', '1x1.png'),
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
('/tests/forms_tests/tests/filepath_test_files/fake-image.jpg', 'fake-image.jpg'),
('/tests/forms_tests/tests/filepath_test_files/real-text-file.txt', 'real-text-file.txt'),
]
actual = fix_os_paths(f.choices)
self.assertEqual(len(expected), len(actual))
for exp, got in zip(expected, actual):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
# SplitDateTimeField ##########################################################
def test_splitdatetimefield_1(self):
from django.forms.widgets import SplitDateTimeWidget
f = SplitDateTimeField()
self.assertIsInstance(f.widget, SplitDateTimeWidget)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
        six.assertRaisesRegex(self, ValidationError, r"'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
def test_splitdatetimefield_2(self):
f = SplitDateTimeField(required=False)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean(['2006-01-10', '07:30']))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(['']))
self.assertIsNone(f.clean(['', '']))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
        six.assertRaisesRegex(self, ValidationError, r"'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', ''])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['', '07:30'])
def test_splitdatetimefield_changed(self):
f = SplitDateTimeField(input_date_formats=['%d/%m/%Y'])
self.assertFalse(f.has_changed(['11/01/2012', '09:18:15'], ['11/01/2012', '09:18:15']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['2008-05-06', '12:40:00']))
self.assertFalse(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:40']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:41']))
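    # The False case above passes because has_changed round-trips the initial
    # datetime through the field's input formats: with input_date_formats set
    # to ['%d/%m/%Y'], datetime(2008, 5, 6, ...) and ['06/05/2008', '12:40']
    # compare as equal, while '12:41' (one minute off) does not.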
# GenericIPAddressField #######################################################
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
        # The edge cases of the IPv6 validation code are not deeply tested
        # here; they are covered in the tests for django.utils.ipv6.
f = GenericIPAddressField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_ipv4_only(self):
f = GenericIPAddressField(protocol="IPv4")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '256.125.1.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'fe80::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '2a02::223:6cff:fe8a:2e8a')
def test_generic_ipaddress_as_ipv6_only(self):
f = GenericIPAddressField(protocol="IPv6")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_generic_not_required(self):
f = GenericIPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_normalization(self):
# Test the normalizing code
f = GenericIPAddressField()
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
self.assertEqual(f.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
f = GenericIPAddressField(unpack_ipv4=True)
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
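    # The compression asserted above matches what the standard library's
    # ipaddress module produces (a reference sketch, assuming Python 3; the
    # field itself normalizes via django.utils.ipv6):
    #
    #   import ipaddress
    #   str(ipaddress.ip_address('2001:000:a:0000:0:fe:fe:beef'))
    #   # '2001:0:a::fe:fe:beef'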
# SlugField ###################################################################
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
# UUIDField ###################################################################
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
|
taaviteska/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex/3_squashed_5.py
|
266
|
from django.db import migrations
class Migration(migrations.Migration):
replaces = [
("migrations", "3_auto"),
("migrations", "4_auto"),
("migrations", "5_auto"),
]
dependencies = [("migrations", "2_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
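# Note on the fixture: `replaces` is what makes this a squashed migration.
# On a database where 3_auto, 4_auto and 5_auto are already applied, the
# loader records this migration as applied too; a fresh database runs only
# this file and skips the three originals.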
|
patochectp/navitia
|
refs/heads/dev
|
source/tyr/tests/tests_default_settings.py
|
5
|
import os
# Path to the directory where the configuration file of each ed instance is defined
INSTANCES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
# Validate the presence of an MX record on the domain
EMAIL_CHECK_MX = False
# Validate the email address by connecting to the SMTP server, without actually sending an email
EMAIL_CHECK_SMTP = False
|
axelkennedal/dissen
|
refs/heads/master
|
dissenEnv/lib/python3.5/site-packages/django/core/management/sql.py
|
399
|
from __future__ import unicode_literals
from django.apps import apps
from django.db import models
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True, include_views=False)
else:
tables = connection.introspection.table_names(include_views=False)
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
return statements
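# Illustrative call site (hypothetical; the real caller is the `flush`
# management command, which executes the statements inside a transaction):
#
#   from django.core.management.color import no_style
#   from django.db import connections
#   statements = sql_flush(no_style(), connections['default'], only_django=True)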
def emit_pre_migrate_signal(verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running pre-migrate handlers for application %s" % app_config.label)
models.signals.pre_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
def emit_post_migrate_signal(verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running post-migrate handlers for application %s" % app_config.label)
models.signals.post_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
|
sridevikoushik31/openstack
|
refs/heads/T11906
|
nova/tests/test_libvirt_vif.py
|
7
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Nicira, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.config import cfg
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests import fakelibvirt
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif
CONF = cfg.CONF
class LibvirtVifTestCase(test.TestCase):
net_bridge = {
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'br0',
'bridge_interface': 'eth0',
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8',
'id': 'network-id-xxx-yyy-zzz'
}
net_bridge_quantum = {
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge_interface': 'eth0',
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8',
'id': 'network-id-xxx-yyy-zzz'
}
mapping_bridge = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_bridge['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
'vif_type': network_model.VIF_TYPE_BRIDGE,
}
mapping_bridge_quantum = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_bridge['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
}
net_ovs = {
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'br0',
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8',
'id': 'network-id-xxx-yyy-zzz'
}
mapping_ovs = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_ovs['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
'vif_type': network_model.VIF_TYPE_OVS,
'ovs_interfaceid': 'aaa-bbb-ccc',
}
mapping_ovs_legacy = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_ovs['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
}
net_8021 = {
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'interface': 'eth0',
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8',
'id': 'network-id-xxx-yyy-zzz'
}
mapping_8021qbh = {
'mac': 'ca:fe:de:ad:be:ef',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
'vif_type': network_model.VIF_TYPE_802_QBH,
'qbh_params': network_model.VIF8021QbhParams(
profileid="xxx-yyy-zzz"),
}
mapping_8021qbg = {
'mac': 'ca:fe:de:ad:be:ef',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
'vif_type': network_model.VIF_TYPE_802_QBG,
'qbg_params': network_model.VIF8021QbgParams(
managerid="xxx-yyy-zzz",
typeid="aaa-bbb-ccc",
typeidversion="1",
instanceid="ddd-eee-fff")
}
mapping_none = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_bridge['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
}
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid'
}
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
self.flags(allow_same_net_traffic=True)
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
def _get_instance_xml(self, driver, net, mapping, image_meta=None):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
nic = driver.get_config(self.instance, net, mapping, image_meta)
conf.add_device(nic)
return conf.to_xml()
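    # Shared pattern for the tests below (comment added for clarity): build a
    # minimal guest config, let the VIF driver contribute one <interface>
    # device via get_config(), and assert on the serialized XML with lxml
    # rather than on driver internals.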
def test_multiple_nics(self):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
        # Tests multiple NIC configurations and that target_dev is
        # set for each one
nics = [{'net_type': 'bridge',
'mac_addr': '00:00:00:00:00:0b',
'source_dev': 'b_source_dev',
'target_dev': 'b_target_dev'},
{'net_type': 'ethernet',
'mac_addr': '00:00:00:00:00:0e',
'source_dev': 'e_source_dev',
'target_dev': 'e_target_dev'},
{'net_type': 'direct',
'mac_addr': '00:00:00:00:00:0d',
'source_dev': 'd_source_dev',
'target_dev': 'd_target_dev'}]
for nic in nics:
nic_conf = vconfig.LibvirtConfigGuestInterface()
nic_conf.net_type = nic['net_type']
nic_conf.target_dev = nic['target_dev']
nic_conf.mac_addr = nic['mac_addr']
nic_conf.source_dev = nic['source_dev']
conf.add_device(nic_conf)
xml = conf.to_xml()
doc = etree.fromstring(xml)
for nic in nics:
path = "./devices/interface/[@type='%s']" % nic['net_type']
node = doc.find(path)
self.assertEqual(nic['net_type'], node.get("type"))
self.assertEqual(nic['mac_addr'],
node.find("mac").get("address"))
self.assertEqual(nic['target_dev'],
node.find("target").get("dev"))
def test_model_novirtio(self):
self.flags(libvirt_use_virtio_for_bridges=False,
libvirt_type='kvm')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("model")
self.assertEqual(len(ret), 0)
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
def test_model_kvm(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='kvm')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
model = node.find("model").get("type")
self.assertEqual(model, "virtio")
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
def test_model_kvm_custom(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='kvm')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
image_meta = {'properties': {'hw_vif_model': 'e1000'}}
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge,
image_meta)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
model = node.find("model").get("type")
self.assertEqual(model, "e1000")
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
def test_model_kvm_bogus(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='kvm')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
image_meta = {'properties': {'hw_vif_model': 'acme'}}
self.assertRaises(exception.UnsupportedHardware,
self._get_instance_xml,
d,
self.net_bridge,
self.mapping_bridge,
image_meta)
def test_model_qemu(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='qemu')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
model = node.find("model").get("type")
self.assertEqual(model, "virtio")
driver = node.find("driver").get("name")
self.assertEqual(driver, "qemu")
def test_model_xen(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='xen')
def get_connection():
return fakelibvirt.Connection("xen:///system",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("model")
self.assertEqual(len(ret), 0)
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
def test_generic_driver_none(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
self.assertRaises(exception.NovaException,
self._get_instance_xml,
d,
self.net_bridge,
self.mapping_none)
def _check_bridge_driver(self, d, net, mapping, br_want):
xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("filterref")
self.assertEqual(len(ret), 1)
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, br_want)
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_bridge['mac'])
def test_bridge_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtBridgeDriver(get_connection)
self._check_bridge_driver(d,
self.net_bridge,
self.mapping_bridge,
self.net_bridge['bridge'])
def test_generic_driver_bridge(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
self._check_bridge_driver(d,
self.net_bridge,
self.mapping_bridge,
self.net_bridge['bridge'])
def test_quantum_bridge_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.QuantumLinuxBridgeVIFDriver(get_connection)
br_want = 'brq' + self.net_bridge_quantum['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_bridge_driver(d,
self.net_bridge_quantum,
self.mapping_bridge_quantum,
br_want)
def _check_ovs_ethernet_driver(self, d, net, mapping, dev_prefix):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("filterref")
self.assertEqual(len(ret), 0)
self.assertEqual(node.get("type"), "ethernet")
dev_name = node.find("target").get("dev")
self.assertTrue(dev_name.startswith(dev_prefix))
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_ovs['mac'])
script = node.find("script").get("path")
        self.assertEqual(script, "")
def test_ovs_ethernet_driver_legacy(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False,
9010)
d = vif.LibvirtOpenVswitchDriver(get_connection)
self._check_ovs_ethernet_driver(d,
self.net_ovs,
self.mapping_ovs_legacy,
"nic")
def test_ovs_ethernet_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False,
9010)
d = vif.LibvirtGenericVIFDriver(get_connection)
self._check_ovs_ethernet_driver(d,
self.net_ovs,
self.mapping_ovs,
"tap")
def _check_ovs_virtualport_driver(self, d, net, mapping, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("filterref")
self.assertEqual(len(ret), 0)
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, "br0")
mac = node.find("mac").get("address")
self.assertEqual(mac, mapping['mac'])
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "openvswitch")
iface_id_found = False
for p_elem in vp.findall("parameters"):
iface_id = p_elem.get("interfaceid", None)
if iface_id:
self.assertEqual(iface_id, want_iface_id)
iface_id_found = True
self.assertTrue(iface_id_found)
def test_ovs_virtualport_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False,
9011)
d = vif.LibvirtOpenVswitchVirtualPortDriver(get_connection)
want_iface_id = 'vif-xxx-yyy-zzz'
self._check_ovs_virtualport_driver(d,
self.net_ovs,
self.mapping_ovs_legacy,
want_iface_id)
def test_generic_ovs_virtualport_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False,
9011)
d = vif.LibvirtGenericVIFDriver(get_connection)
want_iface_id = self.mapping_ovs['ovs_interfaceid']
self._check_ovs_virtualport_driver(d,
self.net_ovs,
self.mapping_ovs,
want_iface_id)
def _check_quantum_hybrid_driver(self, d, net, mapping, br_want):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("filterref")
self.assertEqual(len(ret), 1)
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, br_want)
mac = node.find("mac").get("address")
self.assertEqual(mac, mapping['mac'])
def test_quantum_hybrid_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
br_want = "qbr" + self.mapping_ovs['vif_uuid']
br_want = br_want[:network_model.NIC_NAME_LEN]
d = vif.LibvirtHybridOVSBridgeDriver(get_connection)
self._check_quantum_hybrid_driver(d,
self.net_ovs,
self.mapping_ovs_legacy,
br_want)
def test_generic_hybrid_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
br_want = "qbr" + self.mapping_ovs['vif_uuid']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_quantum_hybrid_driver(d,
self.net_ovs,
self.mapping_ovs,
br_want)
def test_generic_8021qbh_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_8021,
self.mapping_8021qbh)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual(node.get("type"), "direct")
br_name = node.find("source").get("dev")
self.assertEqual(br_name, "eth0")
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_8021qbh['mac'])
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbh")
profile_id_found = False
for p_elem in vp.findall("parameters"):
wantparams = self.mapping_8021qbh['qbh_params']
profile_id = p_elem.get("profileid", None)
if profile_id:
self.assertEqual(profile_id,
wantparams['profileid'])
profile_id_found = True
self.assertTrue(profile_id_found)
def test_generic_8021qbg_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_8021,
self.mapping_8021qbg)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual(node.get("type"), "direct")
br_name = node.find("source").get("dev")
self.assertEqual(br_name, "eth0")
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_8021qbg['mac'])
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbg")
manager_id_found = False
type_id_found = False
typeversion_id_found = False
instance_id_found = False
for p_elem in vp.findall("parameters"):
wantparams = self.mapping_8021qbg['qbg_params']
manager_id = p_elem.get("managerid", None)
type_id = p_elem.get("typeid", None)
typeversion_id = p_elem.get("typeidversion", None)
instance_id = p_elem.get("instanceid", None)
if manager_id:
self.assertEqual(manager_id,
wantparams['managerid'])
manager_id_found = True
if type_id:
self.assertEqual(type_id,
wantparams['typeid'])
type_id_found = True
if typeversion_id:
self.assertEqual(typeversion_id,
wantparams['typeidversion'])
typeversion_id_found = True
if instance_id:
self.assertEqual(instance_id,
wantparams['instanceid'])
instance_id_found = True
self.assertTrue(manager_id_found)
self.assertTrue(type_id_found)
self.assertTrue(typeversion_id_found)
self.assertTrue(instance_id_found)
|
philipbl/home-assistant
|
refs/heads/dev
|
homeassistant/components/sensor/glances.py
|
4
|
"""
Support for gathering system information from hosts running Glances.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.glances/
"""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_PORT, STATE_UNKNOWN, CONF_NAME, CONF_RESOURCES)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'api/2/all'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Glances'
DEFAULT_PORT = '61208'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
SENSOR_TYPES = {
'disk_use_percent': ['Disk Use', '%'],
'disk_use': ['Disk Use', 'GiB'],
'disk_free': ['Disk Free', 'GiB'],
'memory_use_percent': ['RAM Use', '%'],
'memory_use': ['RAM Use', 'MiB'],
'memory_free': ['RAM Free', 'MiB'],
'swap_use_percent': ['Swap Use', '%'],
'swap_use': ['Swap Use', 'GiB'],
'swap_free': ['Swap Free', 'GiB'],
'processor_load': ['CPU Load', '15 min'],
'process_running': ['Running', 'Count'],
'process_total': ['Total', 'Count'],
'process_thread': ['Thread', 'Count'],
'process_sleeping': ['Sleeping', 'Count']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_RESOURCES, default=['disk_use']):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
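# A minimal sketch of a configuration.yaml entry for this platform; the
# host and resource values below are illustrative assumptions:
#
# sensor:
#   - platform: glances
#     host: 192.168.1.2
#     resources:
#       - disk_use_percent
#       - memory_use_percent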
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Glances sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
url = 'http://{}:{}/{}'.format(host, port, _RESOURCE)
var_conf = config.get(CONF_RESOURCES)
try:
response = requests.get(url, timeout=10)
if not response.ok:
_LOGGER.error("Response status is '%s'", response.status_code)
return False
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to resource/endpoint: %s", url)
return False
rest = GlancesData(url)
dev = []
for resource in var_conf:
dev.append(GlancesSensor(rest, name, resource))
add_devices(dev)
class GlancesSensor(Entity):
"""Implementation of a Glances sensor."""
def __init__(self, rest, name, sensor_type):
"""Initialize the sensor."""
self.rest = rest
self._name = name
self.type = sensor_type
self._state = STATE_UNKNOWN
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
"""The name of the sensor."""
if self._name is None:
return SENSOR_TYPES[self.type][0]
else:
return '{} {}'.format(self._name, SENSOR_TYPES[self.type][0])
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
# pylint: disable=too-many-return-statements
@property
def state(self):
"""Return the state of the resources."""
value = self.rest.data
if value is not None:
if self.type == 'disk_use_percent':
return value['fs'][0]['percent']
elif self.type == 'disk_use':
return round(value['fs'][0]['used'] / 1024**3, 1)
elif self.type == 'disk_free':
try:
return round(value['fs'][0]['free'] / 1024**3, 1)
except KeyError:
return round((value['fs'][0]['size'] -
value['fs'][0]['used']) / 1024**3, 1)
elif self.type == 'memory_use_percent':
return value['mem']['percent']
elif self.type == 'memory_use':
return round(value['mem']['used'] / 1024**2, 1)
elif self.type == 'memory_free':
return round(value['mem']['free'] / 1024**2, 1)
elif self.type == 'swap_use_percent':
return value['memswap']['percent']
elif self.type == 'swap_use':
return round(value['memswap']['used'] / 1024**3, 1)
elif self.type == 'swap_free':
return round(value['memswap']['free'] / 1024**3, 1)
elif self.type == 'processor_load':
return value['load']['min15']
elif self.type == 'process_running':
return value['processcount']['running']
elif self.type == 'process_total':
return value['processcount']['total']
elif self.type == 'process_thread':
return value['processcount']['thread']
elif self.type == 'process_sleeping':
return value['processcount']['sleeping']
def update(self):
"""Get the latest data from REST API."""
self.rest.update()
class GlancesData(object):
"""The class for handling the data retrieval."""
def __init__(self, resource):
"""Initialize the data object."""
self._resource = resource
self.data = dict()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from the Glances REST API."""
try:
response = requests.get(self._resource, timeout=10)
self.data = response.json()
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to host/endpoint: %s", self._resource)
self.data = None
|
marcelometal/thumbor
|
refs/heads/master
|
thumbor/detectors/local_detector.py
|
7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from os.path import join, dirname, abspath, isabs
import cv2
import numpy as np
from thumbor.point import FocalPoint
from thumbor.detectors import BaseDetector
class CascadeLoaderDetector(BaseDetector):
def load_cascade_file(self, module_path, cascade_file_path):
if not hasattr(self.__class__, 'cascade'):
if isabs(cascade_file_path):
cascade_file = cascade_file_path
else:
cascade_file = join(abspath(dirname(module_path)), cascade_file_path)
self.__class__.cascade = cv2.CascadeClassifier(cascade_file)
def get_min_size_for(self, size):
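        # Heuristic: the minimum detection window is 1/15 of the smaller
        # image dimension, clamped to at least 20px, so detectMultiScale
        # skips implausibly small faces.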
ratio = int(min(size) / 15)
ratio = max(20, ratio)
return (ratio, ratio)
def get_features(self):
engine = self.context.modules.engine
img = np.array(
engine.convert_to_grayscale(
update_image=False,
with_alpha=False
)
)
faces = self.__class__.cascade.detectMultiScale(
img,
1.2,
4,
minSize=self.get_min_size_for(engine.size)
)
faces_scaled = []
for (x, y, w, h) in faces:
faces_scaled.append((
(
x.item(),
y.item(),
w.item(),
h.item()
), 0)
)
return faces_scaled
def detect(self, callback):
features = self.get_features()
if features:
for square, neighbors in features:
self.context.request.focal_points.append(FocalPoint.from_square(*square))
callback()
else:
self.next(callback)
|
githubutilities/LeetCode
|
refs/heads/master
|
Python/plus-one.py
|
3
|
# Time: O(n)
# Space: O(1)
#
# Given a non-negative number represented as an array of digits, plus one to the number.
#
# The digits are stored such that the most significant digit is at the head of the list.
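#
# For example, plus one on [1, 2, 9] gives [1, 3, 0], and plus one on
# [9, 9] gives [1, 0, 0], since the carry propagates past the most
# significant digit.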
class Solution:
    def plusOne(self, digits):
        """
        :type digits: List[int]
        :rtype: List[int]
        """
carry = 1
for i in reversed(xrange(len(digits))):
digits[i] += carry
carry = digits[i] / 10
digits[i] %= 10
if carry:
digits = [1] + digits
return digits
def plusOne2(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
digits = [str(x) for x in digits]
num = int(''.join(digits)) + 1
return [int(x) for x in str(num)]
if __name__ == "__main__":
print Solution().plusOne([9, 9, 9, 9])
|
arenadata/bigtop
|
refs/heads/branch-adh-2.0
|
bigtop-packages/src/charm/hadoop/layer-hadoop-plugin/reactive/apache_bigtop_plugin.py
|
12
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms.reactive import is_state, remove_state, set_state, when, when_any, when_none, when_not
from charmhelpers.core import hookenv
from charms.layer.apache_bigtop_base import Bigtop, get_hadoop_version
@when('hadoop-plugin.joined')
@when_not('namenode.joined')
def blocked(principal):
hookenv.status_set('blocked', 'missing required namenode relation')
@when('bigtop.available', 'hadoop-plugin.joined', 'namenode.joined')
@when_not('apache-bigtop-plugin.hdfs.installed')
def install_hadoop_client_hdfs(principal, namenode):
"""Install if the namenode has sent its FQDN.
We only need the namenode FQDN to perform the plugin install, so poll for
namenodes() data whenever we have a namenode relation. This allows us to
install asap, even if 'namenode.ready' is not set yet.
"""
if namenode.namenodes():
hookenv.status_set('maintenance', 'installing plugin (hdfs)')
nn_host = namenode.namenodes()[0]
bigtop = Bigtop()
hosts = {'namenode': nn_host}
bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
bigtop.trigger_puppet()
set_state('apache-bigtop-plugin.hdfs.installed')
hookenv.application_version_set(get_hadoop_version())
hookenv.status_set('maintenance', 'plugin (hdfs) installed')
else:
hookenv.status_set('waiting', 'waiting for namenode fqdn')
@when('apache-bigtop-plugin.hdfs.installed')
@when('hadoop-plugin.joined', 'namenode.joined')
@when_not('namenode.ready')
def send_nn_spec(principal, namenode):
"""Send our plugin spec so the namenode can become ready."""
bigtop = Bigtop()
# Send plugin spec (must match NN spec for 'namenode.ready' to be set)
namenode.set_local_spec(bigtop.spec())
@when('apache-bigtop-plugin.hdfs.installed')
@when('hadoop-plugin.joined', 'namenode.ready')
@when_not('apache-bigtop-plugin.hdfs.ready')
def send_principal_hdfs_info(principal, namenode):
"""Send HDFS data when the namenode becomes ready."""
principal.set_installed(get_hadoop_version())
principal.set_hdfs_ready(namenode.namenodes(), namenode.port())
set_state('apache-bigtop-plugin.hdfs.ready')
@when('apache-bigtop-plugin.hdfs.ready')
@when('hadoop-plugin.joined')
@when_not('namenode.ready')
def clear_hdfs_ready(principal):
principal.clear_hdfs_ready()
remove_state('apache-bigtop-plugin.hdfs.ready')
remove_state('apache-bigtop-plugin.hdfs.installed')
@when('bigtop.available', 'hadoop-plugin.joined', 'namenode.joined', 'resourcemanager.joined')
@when_not('apache-bigtop-plugin.yarn.installed')
def install_hadoop_client_yarn(principal, namenode, resourcemanager):
if namenode.namenodes() and resourcemanager.resourcemanagers():
hookenv.status_set('maintenance', 'installing plugin (yarn)')
nn_host = namenode.namenodes()[0]
rm_host = resourcemanager.resourcemanagers()[0]
bigtop = Bigtop()
hosts = {'namenode': nn_host, 'resourcemanager': rm_host}
bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
bigtop.trigger_puppet()
set_state('apache-bigtop-plugin.yarn.installed')
hookenv.status_set('maintenance', 'plugin (yarn) installed')
else:
hookenv.status_set('waiting', 'waiting for master fqdns')
@when('apache-bigtop-plugin.yarn.installed')
@when('hadoop-plugin.joined', 'resourcemanager.joined')
@when_not('resourcemanager.ready')
def send_rm_spec(principal, resourcemanager):
"""Send our plugin spec so the resourcemanager can become ready."""
bigtop = Bigtop()
resourcemanager.set_local_spec(bigtop.spec())
@when('apache-bigtop-plugin.yarn.installed')
@when('hadoop-plugin.joined', 'resourcemanager.ready')
@when_not('apache-bigtop-plugin.yarn.ready')
def send_principal_yarn_info(principal, resourcemanager):
"""Send YARN data when the resourcemanager becomes ready."""
principal.set_installed(get_hadoop_version())
principal.set_yarn_ready(
resourcemanager.resourcemanagers(), resourcemanager.port(),
resourcemanager.hs_http(), resourcemanager.hs_ipc())
set_state('apache-bigtop-plugin.yarn.ready')
@when('apache-bigtop-plugin.yarn.ready')
@when('hadoop-plugin.joined')
@when_not('resourcemanager.ready')
def clear_yarn_ready(principal):
principal.clear_yarn_ready()
remove_state('apache-bigtop-plugin.yarn.ready')
remove_state('apache-bigtop-plugin.yarn.installed')
@when_any('apache-bigtop-plugin.hdfs.installed', 'apache-bigtop-plugin.yarn.installed')
@when('hadoop-plugin.joined')
@when_none('namenode.spec.mismatch', 'resourcemanager.spec.mismatch')
def update_status(principal):
hdfs_rel = is_state('namenode.joined')
yarn_rel = is_state('resourcemanager.joined')
hdfs_ready = is_state('namenode.ready')
yarn_ready = is_state('resourcemanager.ready')
if not (hdfs_rel or yarn_rel):
hookenv.status_set('blocked',
'missing namenode and/or resourcemanager relation')
elif hdfs_rel and not hdfs_ready:
hookenv.status_set('waiting', 'waiting for hdfs')
elif yarn_rel and not yarn_ready:
hookenv.status_set('waiting', 'waiting for yarn')
else:
ready = []
if hdfs_ready:
ready.append('hdfs')
if yarn_ready:
ready.append('yarn')
hookenv.status_set('active', 'ready ({})'.format(' & '.join(ready)))
|
technologiescollege/s2a_fr
|
refs/heads/portable
|
s2a/Python/Lib/test/test_atexit.py
|
135
|
import sys
import unittest
import StringIO
import atexit
from imp import reload
from test import test_support
class TestCase(unittest.TestCase):
def setUp(self):
s = StringIO.StringIO()
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
sys.stdout = sys.stderr = self.subst_io = s
self.save_handlers = atexit._exithandlers
atexit._exithandlers = []
def tearDown(self):
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
atexit._exithandlers = self.save_handlers
def test_args(self):
atexit.register(self.h1)
atexit.register(self.h4)
atexit.register(self.h4, 4, kw="abc")
atexit._run_exitfuncs()
self.assertEqual(self.subst_io.getvalue(),
"h4 (4,) {'kw': 'abc'}\nh4 () {}\nh1\n")
def test_badargs(self):
atexit.register(lambda: 1, 0, 0, (x for x in (1,2)), 0, 0)
self.assertRaises(TypeError, atexit._run_exitfuncs)
def test_order(self):
atexit.register(self.h1)
atexit.register(self.h2)
atexit.register(self.h3)
atexit._run_exitfuncs()
self.assertEqual(self.subst_io.getvalue(), "h3\nh2\nh1\n")
def test_sys_override(self):
# be sure a preset sys.exitfunc is handled properly
exfunc = sys.exitfunc
sys.exitfunc = self.h1
reload(atexit)
try:
atexit.register(self.h2)
atexit._run_exitfuncs()
finally:
sys.exitfunc = exfunc
self.assertEqual(self.subst_io.getvalue(), "h2\nh1\n")
def test_raise(self):
atexit.register(self.raise1)
atexit.register(self.raise2)
self.assertRaises(TypeError, atexit._run_exitfuncs)
### helpers
def h1(self):
print "h1"
def h2(self):
print "h2"
def h3(self):
print "h3"
def h4(self, *args, **kwargs):
print "h4", args, kwargs
def raise1(self):
raise TypeError
def raise2(self):
raise SystemError
def test_main():
test_support.run_unittest(TestCase)
if __name__ == "__main__":
test_main()
|
eonpatapon/neutron
|
refs/heads/master
|
neutron/plugins/embrane/plugins/embrane_ml2_plugin.py
|
59
|
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import extraroute_db
from neutron.db import l3_dvr_db
from neutron.db import l3_gwmode_db
from neutron.plugins.embrane import base_plugin as base
from neutron.plugins.embrane.l2base.ml2 import ml2_support
from neutron.plugins.ml2 import plugin as l2
class EmbraneMl2Plugin(base.EmbranePlugin, l2.Ml2Plugin,
l3_dvr_db.L3_NAT_with_dvr_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
extraroute_db.ExtraRoute_db_mixin):
'''EmbraneMl2Plugin.
This plugin uses Modular Layer 2 plugin for providing L2 networks
and the base EmbranePlugin for L3.
'''
_plugin_support = ml2_support.Ml2Support()
def __init__(self):
'''First run plugin specific initialization, then Embrane's.'''
self._supported_extension_aliases.extend(["router", "extraroute",
"ext-gw-mode"])
l2.Ml2Plugin.__init__(self)
self._run_embrane_config()
|
mmetak/streamlink
|
refs/heads/master
|
src/streamlink/packages/requests_file.py
|
6
|
"""
Copyright 2015 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from io import BytesIO
import sys
from requests.adapters import BaseAdapter
from requests.compat import urlparse, unquote, urljoin
from requests import Response, codes
import errno
import os
import os.path
import stat
import locale
import io
from streamlink.compat import is_win32, is_py3
class FileAdapter(BaseAdapter):
def send(self, request, **kwargs):
""" Wraps a file, described in request, in a Response object.
        :param request: The PreparedRequest being "sent".
:returns: a Response object containing the file
"""
# Check that the method makes sense. Only support GET
if request.method not in ("GET", "HEAD"):
raise ValueError("Invalid request method %s" % request.method)
# Parse the URL
url_parts = urlparse(request.url)
# Make the Windows URLs slightly nicer
if is_win32 and url_parts.netloc.endswith(":"):
url_parts = url_parts._replace(path="/" + url_parts.netloc + url_parts.path, netloc='')
# Reject URLs with a hostname component
if url_parts.netloc and url_parts.netloc not in ("localhost", ".", "..", "-"):
raise ValueError("file: URLs with hostname components are not permitted")
# If the path is relative update it to be absolute
if url_parts.netloc in (".", ".."):
pwd = os.path.abspath(url_parts.netloc).replace(os.sep, "/") + "/"
if is_win32:
# prefix the path with a / in Windows
pwd = "/" + pwd
url_parts = url_parts._replace(path=urljoin(pwd, url_parts.path.lstrip("/")))
resp = Response()
resp.url = request.url
# Open the file, translate certain errors into HTTP responses
# Use urllib's unquote to translate percent escapes into whatever
# they actually need to be
try:
# If the netloc is - then read from stdin
if url_parts.netloc == "-":
if is_py3:
resp.raw = sys.stdin.buffer
else:
resp.raw = sys.stdin
# make a fake response URL, the current directory
resp.url = "file://" + os.path.abspath(".").replace(os.sep, "/") + "/"
else:
# Split the path on / (the URL directory separator) and decode any
# % escapes in the parts
path_parts = [unquote(p) for p in url_parts.path.split('/')]
# Strip out the leading empty parts created from the leading /'s
while path_parts and not path_parts[0]:
path_parts.pop(0)
# If os.sep is in any of the parts, someone fed us some shenanigans.
                # Treat it like a missing file.
if any(os.sep in p for p in path_parts):
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT))
# Look for a drive component. If one is present, store it separately
# so that a directory separator can correctly be added to the real
# path, and remove any empty path parts between the drive and the path.
# Assume that a part ending with : or | (legacy) is a drive.
if path_parts and (path_parts[0].endswith('|') or
path_parts[0].endswith(':')):
path_drive = path_parts.pop(0)
if path_drive.endswith('|'):
path_drive = path_drive[:-1] + ':'
while path_parts and not path_parts[0]:
path_parts.pop(0)
else:
path_drive = ''
# Try to put the path back together
# Join the drive back in, and stick os.sep in front of the path to
# make it absolute.
path = path_drive + os.sep + os.path.join(*path_parts)
# Check if the drive assumptions above were correct. If path_drive
# is set, and os.path.splitdrive does not return a drive, it wasn't
                # really a drive. Put the path together again, treating path_drive
# as a normal path component.
if path_drive and not os.path.splitdrive(path):
path = os.sep + os.path.join(path_drive, *path_parts)
# Use io.open since we need to add a release_conn method, and
# methods can't be added to file objects in python 2.
resp.raw = io.open(path, "rb")
resp.raw.release_conn = resp.raw.close
except IOError as e:
if e.errno == errno.EACCES:
resp.status_code = codes.forbidden
elif e.errno == errno.ENOENT:
resp.status_code = codes.not_found
else:
resp.status_code = codes.bad_request
# Wrap the error message in a file-like object
# The error message will be localized, try to convert the string
# representation of the exception into a byte stream
resp_str = str(e).encode(locale.getpreferredencoding(False))
resp.raw = BytesIO(resp_str)
resp.headers['Content-Length'] = len(resp_str)
# Add release_conn to the BytesIO object
resp.raw.release_conn = resp.raw.close
else:
resp.status_code = codes.ok
# If it's a regular file, set the Content-Length
resp_stat = os.fstat(resp.raw.fileno())
if stat.S_ISREG(resp_stat.st_mode):
resp.headers['Content-Length'] = resp_stat.st_size
return resp
def close(self):
pass
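if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the adapter itself):
    # mount the adapter on a requests Session so that file:// URLs are
    # served by FileAdapter. The path below is a hypothetical example.
    import requests
    session = requests.Session()
    session.mount("file://", FileAdapter())
    response = session.get("file:///etc/hostname")
    print(response.status_code)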
|
humanitiesplusdesign/athanasius_old
|
refs/heads/master
|
wsgi-scripts/config_example.py
|
1
|
host = "localhost"
user = "ourbricks_user"
database = "rofl"
pw = "foobar"
|
edx/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tests/test_footer.py
|
4
|
"""
Tests related to the basic footer switching based on SITE_NAME to ensure
edx.org uses an edX footer while other instances use an Open edX footer.
"""
import unittest
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from openedx.core.djangoapps.theming.tests.test_util import with_comprehensive_theme
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestFooter(TestCase):
"""
Tests for edx and OpenEdX footer
"""
SOCIAL_MEDIA_NAMES = [
"facebook",
"instagram",
"twitter",
"linkedin",
"tumblr",
"meetup",
"reddit",
"youtube",
]
SOCIAL_MEDIA_URLS = {
"facebook": "http://www.facebook.com/",
"instagram": "https://instagram.com/",
"twitter": "https://twitter.com/",
"linkedin": "http://www.linkedin.com/",
"tumblr": "http://www.tumblr.com/",
"meetup": "http://www.meetup.com/",
"reddit": "http://www.reddit.com/",
"youtube": "https://www.youtube.com/"
}
@with_comprehensive_theme("edx.org")
def test_edx_footer(self):
"""
Verify that the homepage, when accessed at edx.org, has the edX footer
"""
resp = self.client.get('/')
assert resp.status_code == 200
self.assertContains(resp, 'footer-edx-v3')
def test_openedx_footer(self):
"""
Verify that the homepage, when accessed at something other than
edx.org, has the Open edX footer
"""
resp = self.client.get('/')
assert resp.status_code == 200
self.assertContains(resp, 'footer-openedx')
@with_comprehensive_theme("edx.org")
@override_settings(
SOCIAL_MEDIA_FOOTER_NAMES=SOCIAL_MEDIA_NAMES,
SOCIAL_MEDIA_FOOTER_URLS=SOCIAL_MEDIA_URLS
)
def test_edx_footer_social_links(self):
resp = self.client.get('/')
for name, url in self.SOCIAL_MEDIA_URLS.items():
self.assertContains(resp, url)
self.assertContains(resp, settings.SOCIAL_MEDIA_FOOTER_DISPLAY[name]['title'])
self.assertContains(resp, settings.SOCIAL_MEDIA_FOOTER_DISPLAY[name]['icon'])
|
kiwicopple/MyMDb
|
refs/heads/master
|
venv/Lib/site-packages/docutils/writers/odf_odt/pygmentsformatter.py
|
244
|
# $Id: pygmentsformatter.py 5853 2009-01-19 21:02:02Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Additional support for Pygments formatter.
"""
import pygments
import pygments.formatter
class OdtPygmentsFormatter(pygments.formatter.Formatter):
def __init__(self, rststyle_function, escape_function):
pygments.formatter.Formatter.__init__(self)
self.rststyle_function = rststyle_function
self.escape_function = escape_function
    def rststyle(self, name, parameters=()):
return self.rststyle_function(name, parameters)
class OdtPygmentsProgFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
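        # Map each recognized Pygments token type to an ODF <text:span>
        # carrying the matching 'codeblock-*' style; unrecognized token
        # types are written out unstyled.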
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Literal.String:
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (
tokenclass.Literal.Number.Integer,
tokenclass.Literal.Number.Integer.Long,
tokenclass.Literal.Number.Float,
tokenclass.Literal.Number.Hex,
tokenclass.Literal.Number.Oct,
tokenclass.Literal.Number,
):
s2 = self.rststyle('codeblock-number')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Operator:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Class:
s2 = self.rststyle('codeblock-classname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Function:
s2 = self.rststyle('codeblock-functionname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
class OdtPygmentsLaTeXFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (tokenclass.Literal.String,
tokenclass.Literal.String.Backtick,
):
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Attribute:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
if value[-1] == '\n':
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>\n' % \
(s2, value[:-1], )
else:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Builtin:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
|
macopedia/hr
|
refs/heads/8.0
|
__unported__/hr_expense_sequence/__init__.py
|
14
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# HR Expense Sequence module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import hr_expense_sequence
|
paran0ids0ul/infernal-twin
|
refs/heads/master
|
build/pillow/Tests/test_lib_pack.py
|
11
|
from helper import unittest, PillowTestCase, py3
from PIL import Image
class TestLibPack(PillowTestCase):
def pack(self):
pass # not yet
def test_pack(self):
def pack(mode, rawmode):
if len(mode) == 1:
im = Image.new(mode, (1, 1), 1)
else:
im = Image.new(mode, (1, 1), (1, 2, 3, 4)[:len(mode)])
if py3:
return list(im.tobytes("raw", rawmode))
else:
return [ord(c) for c in im.tobytes("raw", rawmode)]
order = 1 if Image._ENDIAN == '<' else -1
self.assertEqual(pack("1", "1"), [128])
self.assertEqual(pack("1", "1;I"), [0])
self.assertEqual(pack("1", "1;R"), [1])
self.assertEqual(pack("1", "1;IR"), [0])
self.assertEqual(pack("L", "L"), [1])
self.assertEqual(pack("I", "I"), [1, 0, 0, 0][::order])
self.assertEqual(pack("F", "F"), [0, 0, 128, 63][::order])
self.assertEqual(pack("LA", "LA"), [1, 2])
self.assertEqual(pack("RGB", "RGB"), [1, 2, 3])
self.assertEqual(pack("RGB", "RGB;L"), [1, 2, 3])
self.assertEqual(pack("RGB", "BGR"), [3, 2, 1])
self.assertEqual(pack("RGB", "RGBX"), [1, 2, 3, 255]) # 255?
self.assertEqual(pack("RGB", "BGRX"), [3, 2, 1, 0])
self.assertEqual(pack("RGB", "XRGB"), [0, 1, 2, 3])
self.assertEqual(pack("RGB", "XBGR"), [0, 3, 2, 1])
self.assertEqual(pack("RGBX", "RGBX"), [1, 2, 3, 4]) # 4->255?
self.assertEqual(pack("RGBA", "RGBA"), [1, 2, 3, 4])
self.assertEqual(pack("RGBa", "RGBa"), [1, 2, 3, 4])
self.assertEqual(pack("RGBa", "BGRa"), [3, 2, 1, 4])
self.assertEqual(pack("RGBa", "aBGR"), [4, 3, 2, 1])
self.assertEqual(pack("CMYK", "CMYK"), [1, 2, 3, 4])
self.assertEqual(pack("YCbCr", "YCbCr"), [1, 2, 3])
def test_unpack(self):
def unpack(mode, rawmode, bytes_):
im = None
if py3:
data = bytes(range(1, bytes_+1))
else:
data = ''.join(chr(i) for i in range(1, bytes_+1))
im = Image.frombytes(mode, (1, 1), data, "raw", rawmode, 0, 1)
return im.getpixel((0, 0))
def unpack_1(mode, rawmode, value):
assert mode == "1"
im = None
if py3:
im = Image.frombytes(
mode, (8, 1), bytes([value]), "raw", rawmode, 0, 1)
else:
im = Image.frombytes(
mode, (8, 1), chr(value), "raw", rawmode, 0, 1)
return tuple(im.getdata())
X = 255
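        # 170 == 0b10101010, so mode "1" unpacks it to alternating on/off
        # pixels; ";I" inverts the bits and ";R" reverses their order.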
self.assertEqual(unpack_1("1", "1", 1), (0, 0, 0, 0, 0, 0, 0, X))
self.assertEqual(unpack_1("1", "1;I", 1), (X, X, X, X, X, X, X, 0))
self.assertEqual(unpack_1("1", "1;R", 1), (X, 0, 0, 0, 0, 0, 0, 0))
self.assertEqual(unpack_1("1", "1;IR", 1), (0, X, X, X, X, X, X, X))
self.assertEqual(unpack_1("1", "1", 170), (X, 0, X, 0, X, 0, X, 0))
self.assertEqual(unpack_1("1", "1;I", 170), (0, X, 0, X, 0, X, 0, X))
self.assertEqual(unpack_1("1", "1;R", 170), (0, X, 0, X, 0, X, 0, X))
self.assertEqual(unpack_1("1", "1;IR", 170), (X, 0, X, 0, X, 0, X, 0))
self.assertEqual(unpack("L", "L;2", 1), 0)
self.assertEqual(unpack("L", "L;4", 1), 0)
self.assertEqual(unpack("L", "L", 1), 1)
self.assertEqual(unpack("L", "L;I", 1), 254)
self.assertEqual(unpack("L", "L;R", 1), 128)
self.assertEqual(unpack("L", "L;16", 2), 2) # little endian
self.assertEqual(unpack("L", "L;16B", 2), 1) # big endian
self.assertEqual(unpack("LA", "LA", 2), (1, 2))
self.assertEqual(unpack("LA", "LA;L", 2), (1, 2))
self.assertEqual(unpack("RGB", "RGB", 3), (1, 2, 3))
self.assertEqual(unpack("RGB", "RGB;L", 3), (1, 2, 3))
self.assertEqual(unpack("RGB", "RGB;R", 3), (128, 64, 192))
self.assertEqual(unpack("RGB", "RGB;16B", 6), (1, 3, 5)) # ?
self.assertEqual(unpack("RGB", "BGR", 3), (3, 2, 1))
self.assertEqual(unpack("RGB", "RGB;15", 2), (8, 131, 0))
self.assertEqual(unpack("RGB", "BGR;15", 2), (0, 131, 8))
self.assertEqual(unpack("RGB", "RGB;16", 2), (8, 64, 0))
self.assertEqual(unpack("RGB", "BGR;16", 2), (0, 64, 8))
self.assertEqual(unpack("RGB", "RGB;4B", 2), (17, 0, 34))
self.assertEqual(unpack("RGB", "RGBX", 4), (1, 2, 3))
self.assertEqual(unpack("RGB", "BGRX", 4), (3, 2, 1))
self.assertEqual(unpack("RGB", "XRGB", 4), (2, 3, 4))
self.assertEqual(unpack("RGB", "XBGR", 4), (4, 3, 2))
self.assertEqual(unpack("RGBA", "RGBA", 4), (1, 2, 3, 4))
self.assertEqual(unpack("RGBA", "BGRA", 4), (3, 2, 1, 4))
self.assertEqual(unpack("RGBA", "ARGB", 4), (2, 3, 4, 1))
self.assertEqual(unpack("RGBA", "ABGR", 4), (4, 3, 2, 1))
self.assertEqual(unpack("RGBA", "RGBA;15", 2), (8, 131, 0, 0))
self.assertEqual(unpack("RGBA", "BGRA;15", 2), (0, 131, 8, 0))
self.assertEqual(unpack("RGBA", "RGBA;4B", 2), (17, 0, 34, 0))
self.assertEqual(unpack("RGBa", "RGBa", 4), (1, 2, 3, 4))
self.assertEqual(unpack("RGBa", "BGRa", 4), (3, 2, 1, 4))
self.assertEqual(unpack("RGBa", "aRGB", 4), (2, 3, 4, 1))
self.assertEqual(unpack("RGBa", "aBGR", 4), (4, 3, 2, 1))
self.assertEqual(unpack("RGBX", "RGBX", 4), (1, 2, 3, 4)) # 4->255?
self.assertEqual(unpack("RGBX", "BGRX", 4), (3, 2, 1, 255))
self.assertEqual(unpack("RGBX", "XRGB", 4), (2, 3, 4, 255))
self.assertEqual(unpack("RGBX", "XBGR", 4), (4, 3, 2, 255))
self.assertEqual(unpack("RGBX", "RGB;15", 2), (8, 131, 0, 255))
self.assertEqual(unpack("RGBX", "BGR;15", 2), (0, 131, 8, 255))
self.assertEqual(unpack("RGBX", "RGB;4B", 2), (17, 0, 34, 255))
self.assertEqual(unpack("CMYK", "CMYK", 4), (1, 2, 3, 4))
self.assertEqual(unpack("CMYK", "CMYK;I", 4), (254, 253, 252, 251))
self.assertRaises(ValueError, lambda: unpack("L", "L", 0))
self.assertRaises(ValueError, lambda: unpack("RGB", "RGB", 2))
self.assertRaises(ValueError, lambda: unpack("CMYK", "CMYK", 2))
if __name__ == '__main__':
unittest.main()
# End of file
|
google/earthengine-api
|
refs/heads/master
|
python/ee/tests/apifunction_test.py
|
1
|
#!/usr/bin/env python
"""Test for the ee.apifunction module."""
import types
import unittest
import ee
from ee import apitestcase
class ApiFunctionTest(apitestcase.ApiTestCase):
def testAddFunctions(self):
"""Verifies that addition of static and instance API functions."""
# Check instance vs static functions, and trampling of
# existing functions.
class TestClass(object):
def pre_addBands(self): # pylint: disable=g-bad-name
pass
self.assertFalse(hasattr(TestClass, 'pre_load'))
self.assertFalse(hasattr(TestClass, 'select'))
self.assertFalse(hasattr(TestClass, 'pre_select'))
self.assertTrue(hasattr(TestClass, 'pre_addBands'))
self.assertFalse(hasattr(TestClass, '_pre_addBands'))
ee.ApiFunction.importApi(TestClass, 'Image', 'Image', 'pre_')
self.assertNotIsInstance(TestClass.pre_load, types.MethodType)
self.assertFalse(hasattr(TestClass, 'select'))
# Unbound methods are just functions in Python 3. Check both to maintain
# backward compatibility.
self.assertIsInstance(TestClass.pre_select,
(types.FunctionType, types.MethodType))
self.assertIsInstance(TestClass.pre_addBands,
(types.FunctionType, types.MethodType))
self.assertFalse(hasattr(TestClass, '_pre_addBands'))
ee.ApiFunction.clearApi(TestClass)
self.assertFalse(hasattr(TestClass, 'pre_load'))
self.assertFalse(hasattr(TestClass, 'select'))
self.assertFalse(hasattr(TestClass, 'pre_select'))
self.assertTrue(hasattr(TestClass, 'pre_addBands'))
self.assertFalse(hasattr(TestClass, '_pre_addBands'))
def testAddFunctions_Inherited(self):
"""Verifies that inherited non-client functions can be overridden."""
class Base(object):
def ClientOverride(self):
pass
class Child(Base):
pass
ee.ApiFunction.importApi(Base, 'Image', 'Image')
ee.ApiFunction.importApi(Child, 'Image', 'Image')
self.assertEqual(Base.ClientOverride, Child.ClientOverride)
self.assertNotEqual(Base.addBands, Child.addBands)
if __name__ == '__main__':
unittest.main()
|
anirudhSK/chromium
|
refs/heads/master
|
tools/perf/metrics/speedindex_unittest.py
|
3
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# These tests access private methods in the speedindex module.
# pylint: disable=W0212
import json
import os
import unittest
from telemetry.core import bitmap
from telemetry.core.backends.chrome import inspector_timeline_data
from telemetry.core.timeline import model
from metrics import speedindex
# Sample timeline data in the json format provided by devtools.
# The sample events will be used in several tests below.
_TEST_DIR = os.path.join(os.path.dirname(__file__), 'unittest_data')
with open(os.path.join(_TEST_DIR, 'sample_timeline.json')) as _f:
  _SAMPLE_DATA = json.load(_f)
_SAMPLE_TIMELINE_DATA = inspector_timeline_data.InspectorTimelineData(
_SAMPLE_DATA)
_SAMPLE_EVENTS = model.TimelineModel(
timeline_data=_SAMPLE_TIMELINE_DATA).GetAllEvents()
class FakeTimelineModel(object):
def __init__(self):
self._events = []
def SetAllEvents(self, events):
self._events = events
def GetAllEvents(self):
return self._events
class FakeBitmap(object):
def __init__(self, r, g, b):
self._histogram = bitmap.ColorHistogram(r, g, b, bitmap.WHITE)
# pylint: disable=W0613
def ColorHistogram(self, ignore_color=None, tolerance=None):
return self._histogram
class FakeTab(object):
def __init__(self, video_capture_result=None):
self._timeline_model = FakeTimelineModel()
self._javascript_result = None
self._video_capture_result = video_capture_result
@property
def timeline_model(self):
return self._timeline_model
@property
def video_capture_supported(self):
return self._video_capture_result is not None
def SetEvaluateJavaScriptResult(self, result):
self._javascript_result = result
def EvaluateJavaScript(self, _):
return self._javascript_result
def StartVideoCapture(self, min_bitrate_mbps=1):
assert self.video_capture_supported
assert min_bitrate_mbps > 0
def StopVideoCapture(self):
assert self.video_capture_supported
return self._video_capture_result
def Highlight(self, _):
pass
class IncludedPaintEventsTest(unittest.TestCase):
def testNumberPaintEvents(self):
impl = speedindex.PaintRectSpeedIndexImpl()
# In the sample data, there's one event that occurs before the layout event,
# and one paint event that's not a leaf paint event.
events = impl._IncludedPaintEvents(_SAMPLE_EVENTS)
self.assertEqual(len(events), 5)
class SpeedIndexImplTest(unittest.TestCase):
def testAdjustedAreaDict(self):
impl = speedindex.PaintRectSpeedIndexImpl()
paint_events = impl._IncludedPaintEvents(_SAMPLE_EVENTS)
viewport = 1000, 1000
time_area_dict = impl._TimeAreaDict(paint_events, viewport)
self.assertEqual(len(time_area_dict), 4)
# The event that ends at time 100 is a fullscreen; it's discounted by half.
self.assertEqual(time_area_dict[100], 500000)
self.assertEqual(time_area_dict[300], 100000)
self.assertEqual(time_area_dict[400], 200000)
self.assertEqual(time_area_dict[800], 200000)
def testVideoCompleteness(self):
frames = [
(0.0, FakeBitmap([ 0, 0, 0,10], [ 0, 0, 0,10], [ 0, 0, 0,10])),
(0.1, FakeBitmap([10, 0, 0, 0], [10, 0, 0, 0], [10, 0, 0, 0])),
(0.2, FakeBitmap([ 0, 0, 2, 8], [ 0, 0, 4, 6], [ 0, 0, 1, 9])),
(0.3, FakeBitmap([ 0, 3, 2, 5], [ 2, 1, 0, 7], [ 0, 3, 0, 7])),
(0.4, FakeBitmap([ 0, 0, 1, 0], [ 0, 0, 1, 0], [ 0, 0, 1, 0])),
(0.5, FakeBitmap([ 0, 4, 6, 0], [ 0, 4, 6, 0], [ 0, 4, 6, 0])),
]
max_distance = 42.
tab = FakeTab(frames)
impl = speedindex.VideoSpeedIndexImpl()
impl.Start(tab)
impl.Stop(tab)
time_completeness = impl.GetTimeCompletenessList(tab)
self.assertEqual(len(time_completeness), 6)
self.assertEqual(time_completeness[0], (0.0, 0))
self.assertTimeCompleteness(
time_completeness[1], 0.1, 1 - (16 + 16 + 16) / max_distance)
self.assertTimeCompleteness(
time_completeness[2], 0.2, 1 - (12 + 10 + 13) / max_distance)
self.assertTimeCompleteness(
time_completeness[3], 0.3, 1 - (6 + 10 + 8) / max_distance)
self.assertTimeCompleteness(
time_completeness[4], 0.4, 1 - (4 + 4 + 4) / max_distance)
self.assertEqual(time_completeness[5], (0.5, 1))
def testBlankPage(self):
frames = [
(0.0, FakeBitmap([0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1])),
(0.1, FakeBitmap([0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1])),
(0.2, FakeBitmap([1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1])),
(0.3, FakeBitmap([0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1])),
]
tab = FakeTab(frames)
impl = speedindex.VideoSpeedIndexImpl()
impl.Start(tab)
impl.Stop(tab)
time_completeness = impl.GetTimeCompletenessList(tab)
self.assertEqual(len(time_completeness), 4)
self.assertEqual(time_completeness[0], (0.0, 1.0))
self.assertEqual(time_completeness[1], (0.1, 1.0))
self.assertEqual(time_completeness[2], (0.2, 0.0))
self.assertEqual(time_completeness[3], (0.3, 1.0))
def assertTimeCompleteness(self, time_completeness, time, completeness):
self.assertEqual(time_completeness[0], time)
self.assertAlmostEqual(time_completeness[1], completeness)
class SpeedIndexTest(unittest.TestCase):
def testWithSampleData(self):
tab = FakeTab()
impl = speedindex.PaintRectSpeedIndexImpl()
viewport = 1000, 1000
# Add up the parts of the speed index for each time interval.
# Each part is the time interval multiplied by the proportion of the
# total area value that is not yet painted for that interval.
parts = []
parts.append(100 * 1.0)
parts.append(200 * 0.5)
parts.append(100 * 0.4)
parts.append(400 * 0.2)
    expected = sum(parts)  # 320.0
tab.timeline_model.SetAllEvents(_SAMPLE_EVENTS)
tab.SetEvaluateJavaScriptResult(viewport)
actual = impl.CalculateSpeedIndex(tab)
self.assertEqual(actual, expected)
class WPTComparisonTest(unittest.TestCase):
"""Compare the speed index results with results given by webpagetest.org.
  Given the same timeline data, this speedindex metric and webpagetest.org
  should return the same results. Fortunately, webpagetest.org also
provides timeline data in json format along with the speed index results.
"""
def _TestJsonTimelineExpectation(self, filename, viewport, expected):
"""Check whether the result for some timeline data is as expected.
Args:
      filename: Filename of a json file which contains a recorded
        timeline.
      viewport: The page viewport size, as a (width, height) tuple.
      expected: The speed index expected based on the WPT result.
"""
tab = FakeTab()
impl = speedindex.PaintRectSpeedIndexImpl()
file_path = os.path.join(_TEST_DIR, filename)
with open(file_path) as json_file:
raw_events = json.load(json_file)
timeline_data = inspector_timeline_data.InspectorTimelineData(raw_events)
tab.timeline_model.SetAllEvents(
model.TimelineModel(timeline_data=timeline_data).GetAllEvents())
tab.SetEvaluateJavaScriptResult(viewport)
actual = impl.CalculateSpeedIndex(tab)
# The result might differ by 1 or more milliseconds due to rounding,
# so compare to the nearest 10 milliseconds.
self.assertAlmostEqual(actual, expected, places=-1)
def testCern(self):
# Page: http://info.cern.ch/hypertext/WWW/TheProject.html
# This page has only one paint event.
self._TestJsonTimelineExpectation(
'cern_repeat_timeline.json', (1014, 650), 379.0)
def testBaidu(self):
# Page: http://www.baidu.com/
# This page has several paint events, but no nested paint events.
self._TestJsonTimelineExpectation(
'baidu_repeat_timeline.json', (1014, 650), 1761.43)
def test2ch(self):
# Page: http://2ch.net/
# This page has several paint events, including nested paint events.
self._TestJsonTimelineExpectation(
'2ch_repeat_timeline.json', (997, 650), 674.58)
if __name__ == "__main__":
unittest.main()
|
EasonYi/zulip
|
refs/heads/master
|
zerver/test_events.py
|
116
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from zerver.models import (
get_client, get_realm, get_stream, get_user_profile_by_email,
Message, Recipient,
)
from zerver.lib.actions import (
apply_events,
create_stream_if_needed,
do_add_alert_words,
do_add_realm_emoji,
do_add_realm_filter,
do_change_avatar_source,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_full_name,
do_change_is_admin,
do_change_stream_description,
do_create_user,
do_deactivate_user,
do_regenerate_api_key,
do_remove_alert_words,
do_remove_realm_emoji,
do_remove_realm_filter,
do_remove_subscription,
do_rename_stream,
do_set_muted_topics,
do_set_realm_name,
do_set_realm_restricted_to_domain,
do_set_realm_invite_required,
do_set_realm_invite_by_admins_only,
do_update_message,
do_update_pointer,
do_change_twenty_four_hour_time,
do_change_left_side_userlist,
fetch_initial_state_data,
)
from zerver.lib.event_queue import allocate_client_descriptor
from zerver.lib.test_helpers import AuthedTestCase, POSTRequestMock
from zerver.lib.validator import (
check_bool, check_dict, check_int, check_list, check_string,
equals, check_none_or
)
from zerver.views import _default_all_public_streams, _default_narrow
from zerver.tornadoviews import get_events_backend
from collections import OrderedDict
import ujson
class GetEventsTest(AuthedTestCase):
def tornado_call(self, view_func, user_profile, post_data,
callback=None):
request = POSTRequestMock(post_data, user_profile, callback)
return view_func(request, user_profile)
def test_get_events(self):
email = "hamlet@zulip.com"
recipient_email = "othello@zulip.com"
user_profile = get_user_profile_by_email(email)
recipient_user_profile = get_user_profile_by_email(recipient_email)
self.login(email)
result = self.tornado_call(get_events_backend, user_profile,
{"apply_markdown": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(result)
queue_id = ujson.loads(result.content)["queue_id"]
recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
{"apply_markdown": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(recipient_result)
recipient_queue_id = ujson.loads(recipient_result.content)["queue_id"]
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 0, True)
local_id = 10.01
self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1, True)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["sender_email"], email)
self.assertEqual(events[0]["local_message_id"], local_id)
self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)
last_event_id = events[0]["id"]
local_id += 0.01
self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": last_event_id,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1, True)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["sender_email"], email)
self.assertEqual(events[0]["local_message_id"], local_id)
# Test that the received message in the receiver's event queue
# exists and does not contain a local id
recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
{"queue_id": recipient_queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
recipient_events = ujson.loads(recipient_result.content)["events"]
self.assert_json_success(recipient_result)
self.assertEqual(len(recipient_events), 2)
self.assertEqual(recipient_events[0]["type"], "message")
self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
self.assertTrue("local_message_id" not in recipient_events[0])
self.assertEqual(recipient_events[1]["type"], "message")
self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
self.assertTrue("local_message_id" not in recipient_events[1])
def test_get_events_narrow(self):
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email(email)
self.login(email)
result = self.tornado_call(get_events_backend, user_profile,
{"apply_markdown": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"narrow": ujson.dumps([["stream", "denmark"]]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(result)
queue_id = ujson.loads(result.content)["queue_id"]
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 0, True)
self.send_message(email, "othello@zulip.com", Recipient.PERSONAL, "hello")
self.send_message(email, "Denmark", Recipient.STREAM, "hello")
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1, True)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["display_recipient"], "Denmark")
class EventsRegisterTest(AuthedTestCase):
maxDiff = None
user_profile = get_user_profile_by_email("hamlet@zulip.com")
bot = get_user_profile_by_email("welcome-bot@zulip.com")
def create_bot(self, email):
return do_create_user(email, '123',
get_realm('zulip.com'), 'Test Bot', 'test',
bot=True, bot_owner=self.user_profile)
def realm_bot_schema(self, field_name, check):
return check_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict([
('email', check_string),
(field_name, check),
])),
])
def do_test(self, action, event_types=None):
client = allocate_client_descriptor(self.user_profile.id, self.user_profile.realm.id,
event_types,
get_client("website"), True, False, 600, [])
# hybrid_state = initial fetch state + re-applying events triggered by our action
# normal_state = do action then fetch at the end (the "normal" code path)
hybrid_state = fetch_initial_state_data(self.user_profile, event_types, "")
action()
events = client.event_queue.contents()
self.assertTrue(len(events) > 0)
apply_events(hybrid_state, events, self.user_profile)
normal_state = fetch_initial_state_data(self.user_profile, event_types, "")
self.match_states(hybrid_state, normal_state)
return events
def assert_on_error(self, error):
if error:
raise AssertionError(error)
def match_states(self, state1, state2):
def normalize(state):
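            # Key users and subscriptions by email/name so that the
            # comparison below is insensitive to list ordering.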
state['realm_users'] = {u['email']: u for u in state['realm_users']}
state['subscriptions'] = {u['name']: u for u in state['subscriptions']}
state['unsubscribed'] = {u['name']: u for u in state['unsubscribed']}
if 'realm_bots' in state:
state['realm_bots'] = {u['email']: u for u in state['realm_bots']}
normalize(state1)
normalize(state2)
self.assertEqual(state1, state2)
def test_send_message_events(self):
schema_checker = check_dict([
('type', equals('message')),
('flags', check_list(None)),
('message', check_dict([
('avatar_url', check_string),
('client', check_string),
('content', check_string),
('content_type', equals('text/html')),
('display_recipient', check_string),
('gravatar_hash', check_string),
('id', check_int),
('recipient_id', check_int),
('sender_domain', check_string),
('sender_email', check_string),
('sender_full_name', check_string),
('sender_id', check_int),
('sender_short_name', check_string),
('subject', check_string),
('subject_links', check_list(None)),
('timestamp', check_int),
('type', check_string),
])),
])
events = self.do_test(lambda: self.send_message("hamlet@zulip.com", "Verona", Recipient.STREAM, "hello"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = check_dict([
('type', equals('update_message')),
('flags', check_list(None)),
('content', check_string),
('edit_timestamp', check_int),
('message_id', check_int),
('message_ids', check_list(check_int)),
('orig_content', check_string),
('orig_rendered_content', check_string),
('orig_subject', check_string),
('propagate_mode', check_string),
('rendered_content', check_string),
('sender', check_string),
('stream_id', check_int),
('subject', check_string),
('subject_links', check_list(None)),
# There is also a timestamp field in the event, but we ignore it, as
# it's kind of an unwanted but harmless side effect of calling log_event.
])
message_id = Message.objects.order_by('-id')[0].id
topic = 'new_topic'
propagate_mode = 'change_all'
content = 'new content'
events = self.do_test(lambda: do_update_message(self.user_profile, message_id, topic, propagate_mode, content))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_pointer_events(self):
schema_checker = check_dict([
('type', equals('pointer')),
('pointer', check_int)
])
events = self.do_test(lambda: do_update_pointer(self.user_profile, 1500))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_register_events(self):
realm_user_add_checker = check_dict([
('type', equals('realm_user')),
('op', equals('add')),
('person', check_dict([
('email', check_string),
('full_name', check_string),
('is_admin', check_bool),
('is_bot', check_bool),
])),
])
stream_create_checker = check_dict([
('type', equals('stream')),
('op', equals('create')),
('streams', check_list(check_dict([
('description', check_string),
('invite_only', check_bool),
('name', check_string),
('stream_id', check_int),
])))
])
events = self.do_test(lambda: self.register("test1", "test1"))
error = realm_user_add_checker('events[0]', events[0])
self.assert_on_error(error)
error = stream_create_checker('events[1]', events[1])
self.assert_on_error(error)
def test_alert_words_events(self):
alert_words_checker = check_dict([
('type', equals('alert_words')),
('alert_words', check_list(check_string)),
])
events = self.do_test(lambda: do_add_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
def test_muted_topics_events(self):
muted_topics_checker = check_dict([
('type', equals('muted_topics')),
('muted_topics', check_list(check_list(check_string, 2))),
])
events = self.do_test(lambda: do_set_muted_topics(self.user_profile, [["Denmark", "topic"]]))
error = muted_topics_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_full_name(self):
schema_checker = check_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict([
('email', check_string),
('full_name', check_string),
])),
])
events = self.do_test(lambda: do_change_full_name(self.user_profile, 'Sir Hamlet'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_name(self):
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('name')),
('value', check_string),
])
events = self.do_test(lambda: do_set_realm_name(self.user_profile.realm, 'New Realm Name'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_restricted_to_domain(self):
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('restricted_to_domain')),
('value', check_bool),
])
# The first True is probably a noop, then we get transitions in both directions.
for restricted_to_domain in (True, False, True):
events = self.do_test(lambda: do_set_realm_restricted_to_domain(self.user_profile.realm, restricted_to_domain))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_invite_required(self):
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('invite_required')),
('value', check_bool),
])
# The first False is probably a noop, then we get transitions in both directions.
for invite_required in (False, True, False):
events = self.do_test(lambda: do_set_realm_invite_required(self.user_profile.realm, invite_required))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_invite_by_admins_only(self):
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('invite_by_admins_only')),
('value', check_bool),
])
# The first False is probably a noop, then we get transitions in both directions.
for invite_by_admins_only in (False, True, False):
events = self.do_test(lambda: do_set_realm_invite_by_admins_only(self.user_profile.realm, invite_by_admins_only))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_is_admin(self):
schema_checker = check_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict([
('email', check_string),
('is_admin', check_bool),
])),
])
# The first False is probably a noop, then we get transitions in both directions.
for is_admin in [False, True, False]:
events = self.do_test(lambda: do_change_is_admin(self.user_profile, is_admin))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_twenty_four_hour_time(self):
schema_checker = check_dict([
('type', equals('update_display_settings')),
('setting_name', equals('twenty_four_hour_time')),
('user', check_string),
('setting', check_bool),
])
# The first False is probably a noop, then we get transitions in both directions.
for setting_value in [False, True, False]:
events = self.do_test(lambda: do_change_twenty_four_hour_time(self.user_profile, setting_value))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_left_side_userlist(self):
schema_checker = check_dict([
('type', equals('update_display_settings')),
('setting_name', equals('left_side_userlist')),
('user', check_string),
('setting', check_bool),
])
# The first False is probably a noop, then we get transitions in both directions.
for setting_value in [False, True, False]:
events = self.do_test(lambda: do_change_left_side_userlist(self.user_profile, setting_value))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_realm_emoji_events(self):
schema_checker = check_dict([
('type', equals('realm_emoji')),
('op', equals('update')),
('realm_emoji', check_dict([])),
])
events = self.do_test(lambda: do_add_realm_emoji(get_realm("zulip.com"), "my_emoji",
"https://realm.com/my_emoji"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_realm_emoji(get_realm("zulip.com"), "my_emoji"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_realm_filter_events(self):
schema_checker = check_dict([
('type', equals('realm_filters')),
('realm_filters', check_list(None)), # TODO: validate tuples in the list
])
events = self.do_test(lambda: do_add_realm_filter(get_realm("zulip.com"), "#[123]",
"https://realm.com/my_realm_filter/%(id)s"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
        events = self.do_test(lambda: do_remove_realm_filter(get_realm("zulip.com"), "#[123]"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_create_bot(self):
bot_created_checker = check_dict([
('type', equals('realm_bot')),
('op', equals('add')),
('bot', check_dict([
('email', check_string),
('full_name', check_string),
('api_key', check_string),
('default_sending_stream', check_none_or(check_string)),
('default_events_register_stream', check_none_or(check_string)),
('default_all_public_streams', check_bool),
('avatar_url', check_string),
])),
])
action = lambda: self.create_bot('test-bot@zulip.com')
events = self.do_test(action)
error = bot_created_checker('events[1]', events[1])
self.assert_on_error(error)
def test_change_bot_full_name(self):
action = lambda: do_change_full_name(self.bot, 'New Bot Name')
events = self.do_test(action)
error = self.realm_bot_schema('full_name', check_string)('events[1]', events[1])
self.assert_on_error(error)
def test_regenerate_bot_api_key(self):
action = lambda: do_regenerate_api_key(self.bot)
events = self.do_test(action)
error = self.realm_bot_schema('api_key', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_avatar_source(self):
action = lambda: do_change_avatar_source(self.bot, self.bot.AVATAR_FROM_USER)
events = self.do_test(action)
error = self.realm_bot_schema('avatar_url', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_all_public_streams(self):
action = lambda: do_change_default_all_public_streams(self.bot, True)
events = self.do_test(action)
error = self.realm_bot_schema('default_all_public_streams', check_bool)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_sending_stream(self):
stream = get_stream("Rome", self.bot.realm)
action = lambda: do_change_default_sending_stream(self.bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_sending_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_events_register_stream(self):
stream = get_stream("Rome", self.bot.realm)
action = lambda: do_change_default_events_register_stream(self.bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_events_register_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_do_deactivate_user(self):
bot_deactivate_checker = check_dict([
('type', equals('realm_bot')),
('op', equals('remove')),
('bot', check_dict([
('email', check_string),
('full_name', check_string),
])),
])
bot = self.create_bot('foo-bot@zulip.com')
action = lambda: do_deactivate_user(bot)
events = self.do_test(action)
error = bot_deactivate_checker('events[1]', events[1])
self.assert_on_error(error)
def test_rename_stream(self):
realm = get_realm('zulip.com')
stream, _ = create_stream_if_needed(realm, 'old_name')
new_name = u'stream with a brand new name'
self.subscribe_to_stream(self.user_profile.email, stream.name)
action = lambda: do_rename_stream(realm, stream.name, new_name)
events = self.do_test(action)
schema_checker = check_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('email_address')),
('value', check_string),
('name', equals('old_name')),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = check_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('name')),
('value', equals(new_name)),
('name', equals('old_name')),
])
error = schema_checker('events[1]', events[1])
self.assert_on_error(error)
def test_subscribe_events(self):
subscription_schema_checker = check_list(
check_dict([
('color', check_string),
('description', check_string),
('email_address', check_string),
('invite_only', check_bool),
('in_home_view', check_bool),
('name', check_string),
('desktop_notifications', check_bool),
('audible_notifications', check_bool),
('stream_id', check_int),
('subscribers', check_list(check_int)),
])
)
add_schema_checker = check_dict([
('type', equals('subscription')),
('op', equals('add')),
('subscriptions', subscription_schema_checker),
])
remove_schema_checker = check_dict([
('type', equals('subscription')),
('op', equals('remove')),
('subscriptions', check_list(
check_dict([
('name', equals('test_stream')),
('stream_id', check_int),
]),
)),
])
peer_add_schema_checker = check_dict([
('type', equals('subscription')),
('op', equals('peer_add')),
('user_email', check_string),
('subscriptions', check_list(check_string)),
])
peer_remove_schema_checker = check_dict([
('type', equals('subscription')),
('op', equals('peer_remove')),
('user_email', check_string),
('subscriptions', check_list(check_string)),
])
stream_update_schema_checker = check_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('description')),
('value', check_string),
('name', check_string),
])
action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream")
events = self.do_test(action, event_types=["subscription", "realm_user"])
error = add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
action = lambda: self.subscribe_to_stream("othello@zulip.com", "test_stream")
events = self.do_test(action)
error = peer_add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
stream = get_stream("test_stream", self.user_profile.realm)
action = lambda: do_remove_subscription(get_user_profile_by_email("othello@zulip.com"), stream)
events = self.do_test(action)
error = peer_remove_schema_checker('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_remove_subscription(get_user_profile_by_email("hamlet@zulip.com"), stream)
events = self.do_test(action)
error = remove_schema_checker('events[1]', events[1])
self.assert_on_error(error)
action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream")
events = self.do_test(action)
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
action = lambda: do_change_stream_description(get_realm('zulip.com'), 'test_stream', u'new description')
events = self.do_test(action)
error = stream_update_schema_checker('events[0]', events[0])
self.assert_on_error(error)
from zerver.lib.event_queue import EventQueue
class EventQueueTest(TestCase):
def test_one_event(self):
queue = EventQueue("1")
queue.push({"type": "pointer",
"pointer": 1,
"timestamp": "1"})
self.assertFalse(queue.empty())
self.assertEqual(queue.contents(),
[{'id': 0,
'type': 'pointer',
"pointer": 1,
"timestamp": "1"}])
def test_event_collapsing(self):
queue = EventQueue("1")
for pointer_val in xrange(1, 10):
queue.push({"type": "pointer",
"pointer": pointer_val,
"timestamp": str(pointer_val)})
self.assertEqual(queue.contents(),
[{'id': 8,
'type': 'pointer',
"pointer": 9,
"timestamp": "9"}])
queue = EventQueue("2")
for pointer_val in xrange(1, 10):
queue.push({"type": "pointer",
"pointer": pointer_val,
"timestamp": str(pointer_val)})
queue.push({"type": "unknown"})
queue.push({"type": "restart", "server_generation": "1"})
for pointer_val in xrange(11, 20):
queue.push({"type": "pointer",
"pointer": pointer_val,
"timestamp": str(pointer_val)})
queue.push({"type": "restart", "server_generation": "2"})
self.assertEqual(queue.contents(),
[{"type": "unknown",
"id": 9,},
{'id': 19,
'type': 'pointer',
"pointer": 19,
"timestamp": "19"},
{"id": 20,
"type": "restart",
"server_generation": "2"}])
for pointer_val in xrange(21, 23):
queue.push({"type": "pointer",
"pointer": pointer_val,
"timestamp": str(pointer_val)})
self.assertEqual(queue.contents(),
[{"type": "unknown",
"id": 9,},
{'id': 19,
'type': 'pointer',
"pointer": 19,
"timestamp": "19"},
{"id": 20,
"type": "restart",
"server_generation": "2"},
{'id': 22,
'type': 'pointer',
"pointer": 22,
"timestamp": "22"},
])
def test_flag_add_collapsing(self):
queue = EventQueue("1")
queue.push({"type": "update_message_flags",
"flag": "read",
"operation": "add",
"all": False,
"messages": [1, 2, 3, 4],
"timestamp": "1"})
queue.push({"type": "update_message_flags",
"flag": "read",
"all": False,
"operation": "add",
"messages": [5, 6],
"timestamp": "1"})
self.assertEqual(queue.contents(),
[{'id': 1,
'type': 'update_message_flags',
"all": False,
"flag": "read",
"operation": "add",
"messages": [1,2,3,4,5,6],
"timestamp": "1"}])
def test_flag_remove_collapsing(self):
queue = EventQueue("1")
queue.push({"type": "update_message_flags",
"flag": "collapsed",
"operation": "remove",
"all": False,
"messages": [1, 2, 3, 4],
"timestamp": "1"})
queue.push({"type": "update_message_flags",
"flag": "collapsed",
"all": False,
"operation": "remove",
"messages": [5, 6],
"timestamp": "1"})
self.assertEqual(queue.contents(),
[{'id': 1,
'type': 'update_message_flags',
"all": False,
"flag": "collapsed",
"operation": "remove",
"messages": [1,2,3,4,5,6],
"timestamp": "1"}])
def test_collapse_event(self):
queue = EventQueue("1")
queue.push({"type": "pointer",
"pointer": 1,
"timestamp": "1"})
queue.push({"type": "unknown",
"timestamp": "1"})
self.assertEqual(queue.contents(),
[{'id': 0,
'type': 'pointer',
"pointer": 1,
"timestamp": "1"},
{'id': 1,
'type': 'unknown',
"timestamp": "1"}])
class TestEventsRegisterAllPublicStreamsDefaults(TestCase):
def setUp(self):
self.email = 'hamlet@zulip.com'
self.user_profile = get_user_profile_by_email(self.email)
def test_use_passed_all_public_true_default_false(self):
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, True)
self.assertTrue(result)
def test_use_passed_all_public_true_default(self):
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, True)
self.assertTrue(result)
def test_use_passed_all_public_false_default_false(self):
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, False)
self.assertFalse(result)
def test_use_passed_all_public_false_default_true(self):
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, False)
self.assertFalse(result)
def test_use_true_default_for_none(self):
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, None)
self.assertTrue(result)
def test_use_false_default_for_none(self):
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, None)
self.assertFalse(result)
class TestEventsRegisterNarrowDefaults(TestCase):
def setUp(self):
self.email = 'hamlet@zulip.com'
self.user_profile = get_user_profile_by_email(self.email)
self.stream = get_stream('Verona', self.user_profile.realm)
def test_use_passed_narrow_no_default(self):
self.user_profile.default_events_register_stream_id = None
self.user_profile.save()
result = _default_narrow(self.user_profile, [('stream', 'my_stream')])
self.assertEqual(result, [('stream', 'my_stream')])
def test_use_passed_narrow_with_default(self):
self.user_profile.default_events_register_stream_id = self.stream.id
self.user_profile.save()
result = _default_narrow(self.user_profile, [('stream', 'my_stream')])
self.assertEqual(result, [('stream', 'my_stream')])
def test_use_default_if_narrow_is_empty(self):
self.user_profile.default_events_register_stream_id = self.stream.id
self.user_profile.save()
result = _default_narrow(self.user_profile, [])
self.assertEqual(result, [('stream', 'Verona')])
def test_use_narrow_if_default_is_none(self):
self.user_profile.default_events_register_stream_id = None
self.user_profile.save()
result = _default_narrow(self.user_profile, [])
self.assertEqual(result, [])
|
softmixt/grayscale
|
refs/heads/master
|
grayscale/__init__.py
|
1
|
from grayscale.core.processor import Processor
|
droolsjbpm/drools
|
refs/heads/master
|
drools-decisiontables/src/main/resources/python-dt/pydt_test.py
|
44
|
import pydt
test_fact = { "Age" : 42, "Risk" : "'HIGH'", "PolicyType" : "'COMPREHENSIVE'" }
test_table = {
"condition_headers" : [ ["A" , "Age"], ["B", "Risk =="], ["C", "PolicyType =="]],
"action_headers" : [ ["F","Premium"], ["G","Log"]],
"data" : [
{"row" : 2, "A" : "> 2", "B" : "'HIGH'", "C": "'COMPREHENSIVE'", "F" : "245"},
{"row" : 3, "A" : "< 25 ", "B" : "'LOW'", "F" : "390"}
]
}
#and now some crude test code
pydt.process_dt(test_fact, test_table)
print "RESULT: " + str(test_fact)
if not test_fact.has_key("Premium") :
print("ERROR: no premium was calculated")
if test_fact["Premium"] == '245' :
print("PASSED STEP 1")
else :
print("FAILED STEP 1: Premium was " + test_fact["Premium"])
#some simple test
tbl = pydt.load_xls("Example.xls")
if tbl['condition_headers'][0][1] == "Age" :
print "PASSED STEP 2"
else:
print "FAILED STEP 2"
#now test it all, end to end
test_fact = { "Age" : 42, "Risk" : "'HIGH'", "PolicyType" : "'COMPREHENSIVE'" }
pydt.process_dt(test_fact, tbl)
if not test_fact.has_key("Premium") :
print("ERROR: no premium was calculated")
premium = test_fact["Premium"]
if premium == 245 :
print("PASSED STEP 3")
else :
print("FAILED STEP 3: Premium was " + test_fact["Premium"])
|
ShassAro/ShassAro
|
refs/heads/master
|
Bl_project/blVirtualEnv/lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/storage.py
|
85
|
from datetime import datetime
from importlib import import_module
import os
import tempfile
from django.http import HttpRequest, HttpResponse
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.core.files.uploadedfile import SimpleUploadedFile
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
def get_request():
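    # Build a minimal request with a fresh, unsaved session -- just enough
    # for exercising the wizard storage backends.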
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore(None)
return request
class TestStorage(object):
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
def test_current_step(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
my_step = 2
self.assertEqual(storage.current_step, None)
storage.current_step = my_step
self.assertEqual(storage.current_step, my_step)
storage.reset()
self.assertEqual(storage.current_step, None)
storage.current_step = my_step
storage2 = self.get_storage()('wizard2', request, None)
self.assertEqual(storage2.current_step, None)
def test_step_data(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
step1 = 'start'
step_data1 = {'field1': 'data1',
'field2': 'data2',
'field3': datetime.now(),
'field4': self.testuser}
self.assertEqual(storage.get_step_data(step1), None)
storage.set_step_data(step1, step_data1)
self.assertEqual(storage.get_step_data(step1), step_data1)
storage.reset()
self.assertEqual(storage.get_step_data(step1), None)
storage.set_step_data(step1, step_data1)
storage2 = self.get_storage()('wizard2', request, None)
self.assertEqual(storage2.get_step_data(step1), None)
def test_extra_context(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
extra_context = {'key1': 'data1',
'key2': 'data2',
'key3': datetime.now(),
'key4': self.testuser}
self.assertEqual(storage.extra_data, {})
storage.extra_data = extra_context
self.assertEqual(storage.extra_data, extra_context)
storage.reset()
self.assertEqual(storage.extra_data, {})
storage.extra_data = extra_context
storage2 = self.get_storage()('wizard2', request, None)
self.assertEqual(storage2.extra_data, {})
def test_extra_context_key_persistence(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
self.assertFalse('test' in storage.extra_data)
storage.extra_data['test'] = True
self.assertTrue('test' in storage.extra_data)
def test_reset_deletes_tmp_files(self):
request = get_request()
storage = self.get_storage()('wizard1', request, temp_storage)
step = 'start'
file_ = SimpleUploadedFile('file.txt', b'content')
storage.set_step_files(step, {'file': file_})
with storage.get_step_files(step)['file'] as file:
tmp_name = file.name
self.assertTrue(storage.file_storage.exists(tmp_name))
storage.reset()
storage.update_response(HttpResponse())
self.assertFalse(storage.file_storage.exists(tmp_name))
|
mayk93/CLRS
|
refs/heads/master
|
Algorithms/QuickSelect-Example.py
|
1
|
'''
Taken from Rosseta - http://rosettacode.org/wiki/Quickselect_algorithm#Python
Not my code.
Educational purposes.
'''
import random
def partition(vector, left, right, pivotIndex):
pivotValue = vector[pivotIndex]
vector[pivotIndex], vector[right] = vector[right], vector[pivotIndex] # Move pivot to end
storeIndex = left
for i in range(left, right):
if vector[i] < pivotValue:
vector[storeIndex], vector[i] = vector[i], vector[storeIndex]
storeIndex += 1
vector[right], vector[storeIndex] = vector[storeIndex], vector[right] # Move pivot to its final place
return storeIndex
def _select(vector, left, right, k):
"Returns the k-th smallest, (k >= 0), element of vector within vector[left:right+1] inclusive."
while True:
pivotIndex = random.randint(left, right) # select pivotIndex between left and right
pivotNewIndex = partition(vector, left, right, pivotIndex)
pivotDist = pivotNewIndex - left
if pivotDist == k:
return vector[pivotNewIndex]
elif k < pivotDist:
right = pivotNewIndex - 1
else:
k -= pivotDist + 1
left = pivotNewIndex + 1
def select(vector, k, left=None, right=None):
"""\
Returns the k-th smallest, (k >= 0), element of vector within vector[left:right+1].
left, right default to (0, len(vector) - 1) if omitted
"""
if left is None:
left = 0
lv1 = len(vector) - 1
if right is None:
right = lv1
assert vector and k >= 0, "Either null vector or k < 0 "
assert 0 <= left <= lv1, "left is out of range"
assert left <= right <= lv1, "right is out of range"
return _select(vector, left, right, k)
if __name__ == '__main__':
v = [9, 8, 7, 6, 5, 0, 1, 2, 3, 4]
print([select(v, i) for i in range(10)])
|
gomyhr/elveg2osm
|
refs/heads/master
|
elveg2osm.py
|
1
|
#! /usr/bin/env python2
import sys
import os
import osmapis
import csv
import numpy as np
import geographiclib.geodesic as gg
import collections
# Add useful (for our purpose) methods to the osmapis.OSM class
class ElvegOSM(osmapis.OSM):
def __init__(self, items=()):
# First call the parent's __init__
super(ElvegOSM, self).__init__(items)
        # Generate dict with TRANSID as key and way id as value
self.wayid_dict = {}
for wayid,way in self.ways.iteritems():
transid = way.tags['TRANSID']
self.wayid_dict[transid] = wayid
def way_nodes_from_transid(self, transid):
wayid = self.wayid_dict[transid]
way = self.ways[wayid]
node_ids = way.nds
nodes = [osmobj.nodes[nid] for nid in node_ids]
return nodes
def distances_from_transid(self, transid):
nodes = self.way_nodes_from_transid(transid)
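        # Accumulate cumulative WGS84 geodesic distances (in meters) from the
        # first node to each node along the way.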
node_distances = []
distance_so_far = 0.
prev_lon = nodes[0].lon
prev_lat = nodes[0].lat
for i,nd in enumerate(nodes):
#az1,az2,d_from_previous = g.inv(prev_lon, prev_lat, nd.lon, nd.lat)
ggresults = gg.Geodesic.WGS84.Inverse(prev_lat, prev_lon, nd.lat, nd.lon)
d_from_previous = ggresults['s12']
if i != 0 and d_from_previous < 0.5:
# Report if very short distance
warn(u"Short distance ({2}) for transid {0} to node No. {1}".format(transid, i,d_from_previous))
distance_so_far += d_from_previous
node_distances.append(distance_so_far)
# Prepare previous coordinates for next round
prev_lon = nd.lon
prev_lat = nd.lat
return node_distances
class ElvegNode(osmapis.Node):
def __init__(self, attribs={}, tags={}):
osmapis.Node.__init__(self, attribs, tags)
# Make sure the class counter is as low as the lowest existing ID
# This should probably have been done in osmapis.Node
if self.id is not None:
self.__class__._counter = min(self.__class__._counter, self.id)
class ElvegWay(osmapis.Way):
def __init__(self, attribs={}, tags={}, nds=()):
osmapis.Way.__init__(self, attribs, tags, nds)
# Make sure the class counter is as low as the lowest existing ID
# This should probably have been done in osmapis.Way
if self.id is not None:
self.__class__._counter = min(self.__class__._counter, self.id)
# Override default classes in osmapis.py
osmapis.wrappers["osm"] = ElvegOSM
osmapis.wrappers["node"] = ElvegNode
osmapis.wrappers["way"] = ElvegWay
def warn(warning):
warning = warning.encode('utf-8')
sys.stderr.write(warning + '\n')
def waynode_from_coord(coord):
# This assumes that there is only one node for a given
# coordinates that is part of a way.
global way_node_ids
global node_lookup
way_nodes = [nid for nid in node_lookup[coord] if nid in way_node_ids]
if len(way_nodes) > 1:
sys.stderr.write('More than one way nodes at coordinate:\n')
sys.stderr.write(str(coord) + '\n')
elif len(way_nodes) == 0:
#sys.stderr.write('No way nodes at coordinate:\n')
#sys.stderr.write(str(coord) + '\n')
return None
return way_nodes[0]
def merge_nodes(node_id_list):
global osmobj
# Record the attributes of the first node
first_attr = osmobj.nodes[node_id_list[0]].attribs
del first_attr['id']
# Join the way_ids lists of the nodes
way_ids = set()
for node_id in node_id_list:
way_ids.update(osmobj.nodes[node_id].way_ids)
# Join the tags
merged_tags = {}
for node_id in node_id_list:
for key,value in osmobj.nodes[node_id].tags.iteritems():
if merged_tags.has_key(key):
# Potential conflict, but only if value is different
if merged_tags[key] != value:
# A conflict for real
msg = u"Conflict values when merging tag {0} from node {1}: {2} and {3}".format(
key, node_id, merged_tags[key], value)
warn(msg)
# No conflict, so copy tag
merged_tags[key] = value
# Delete the node
osmobj.nodes.pop(node_id)
# Create a new node
merged_node = ElvegNode(attribs=first_attr, tags=merged_tags)
merged_node.way_ids = way_ids
osmobj.add(merged_node)
# Replace deleted node_ids with new node_id in all affected ways
for way_id in way_ids:
way = osmobj.ways[way_id]
for i,way_node_id in enumerate(way.nds):
if way_node_id in node_id_list:
way.nds[i] = merged_node.id
def get_highwayclass(vegkategori, vegnummer):
    category2highwayclass = {'E': 'trunk',       # Europaveg (European route)
                             'R': 'trunk',       # Riksveg (national road)
                             'F': 'secondary',   # Fylkesveg (county road), could also be primary
                             'K': 'residential', # Kommunal veg (municipal road)
                             'P': 'service',     # Privat veg (private road)
                             'S': 'service'}     # Skogsbilveg (forestry road), possibly more info in the LBVKLASSE tag
highwayclass = category2highwayclass[vegkategori]
if vegkategori == 'F' and len(vegnummer) < 4:
highwayclass = 'primary'
return highwayclass
def create_osmtags(elveg_tags):
'''Create tags based on standard tags in ????Elveg_default.osm'''
roadOBJTYPEs = set([u'VegSenterlinje',
u'Svingekonnekteringslenke',
u'Kj\xf8refelt',
u'Kj\xf8rebane'])
osmtags = dict()
# Verify that the compulsory OBJTYPE tag is present
if not elveg_tags.has_key('OBJTYPE'):
warn(u"Missing OBJTYPE tag for TRANSID {TRANSID}".format(**elveg_tags))
return osmtags
# Roads and ferry routes share many tags, and are therefore
# treated together
if elveg_tags['OBJTYPE'] in roadOBJTYPEs.union([u'Bilferjestrekning']) :
# Split VNR tag
# The "vegnummer" tag is optional, but let's assume it is always present for now
# (i.e. fix it if it causes problems)
if elveg_tags.has_key('VNR'):
vegkategori,vegstatus,vegnummer = [s.strip(':;') for s in elveg_tags['VNR'].split()]
else:
warn(u"VNR missing for OBJTYPE {OBJTYPE} with TRANSID {TRANSID}".format(**elveg_tags))
return osmtags
# There are more vegstatus values than listed in https://wiki.openstreetmap.org/w/images/c/cc/Elveg_SOSI_4.0_2008.pdf
# There is a more complete list in chapter 7.3.11 in
# http://www.statkart.no/Documents/Standard/SOSI-standarden%20del%201%20og%202/SOSI%20standarden/Vegnett.pdf
if elveg_tags['OBJTYPE'] in roadOBJTYPEs:
# Set the road category
            if vegstatus in ['V','T','W']: # Existing road, road with temporary status, temporary road for more than a year
osmtags['highway'] = get_highwayclass(vegkategori, vegnummer)
elif vegstatus == 'A':
osmtags['highway'] = 'construction'
osmtags['construction'] = get_highwayclass(vegkategori, vegnummer)
elif vegstatus == 'G':
osmtags['FIXME'] = u'Veggrunn, ikke trafikkform\xe5l. Select appropriate road type.'
osmtags['highway'] = 'road'
elif vegstatus == 'M':
osmtags['FIXME'] = u'Finn riktig tag for m\xf8te og rasteplass'
            elif vegstatus in ['P','Q']: # Approved road, planned road (vedtatt veg, planlagt veg)
                osmtags['DEBUG'] = 'Vedtatt (P) eller planlagt (Q): ' + vegstatus
osmtags['action'] = 'delete'
else:
warn(u"Unknown vegstatus {0} for {2} with TRANSID {1}".format(vegstatus,elveg_tags['TRANSID'],elveg_tags['OBJTYPE']))
elif elveg_tags['OBJTYPE'] == u'Bilferjestrekning':
# Set the ferry for the ferry route
if vegstatus == 'S':
osmtags['route'] = 'ferry'
osmtags['ferry'] = get_highwayclass(vegkategori, vegnummer)
            elif vegstatus in ['E','F']: # Approved or planned ferry route (vedtatt/planlagt fergestrekning)
osmtags['DEBUG'] = 'Vedtatt fergestrekning, planlagt fergestrekning ' + vegstatus
osmtags['action'] = 'delete'
else:
warn(u"Ferry route with TRANSID {0} has unknown vegstatus {1}".format(elveg_tags['TRANSID'],vegstatus))
        # Add ref to road categories Europaveg, Riksveg and Fylkesveg
if vegkategori == 'E':
osmtags['ref'] = 'E ' + vegnummer
elif vegkategori in ['R', 'F']:
osmtags['ref'] = vegnummer
# Gang- og sykkelveg. Only a fraction of all of those are in the data.
# Nevertheless, include those that are.
elif elveg_tags['OBJTYPE'] == 'GangSykkelVegSenterlinje':
osmtags['highway'] = 'cycleway'
osmtags['foot'] = 'designated'
# Sykkelveg. Those were not present in 2015 data, but in 2016 they showed
# up in 12 municipalities, and in 2017 they are present in 32 municipalities.
elif elveg_tags['OBJTYPE'] == 'SykkelVegSenterlinje':
osmtags['highway'] = 'cycleway'
    # OBJTYPE=Fortau is sometimes used when a Gang- og sykkelveg turns
    # into a sidewalk for a while
# A sidewalk is usually best represented as a sidewalk=* on a road,
# but at least in the conversion we let it be a separate way.
elif elveg_tags['OBJTYPE'] == 'Fortau':
osmtags['highway'] = 'footway'
osmtags['footway'] = 'sidewalk'
osmtags['note'] = 'Consider adding sidewalk as a tag on the road'
# Import OBJTYPE=u'Frittst\xe5endeTrapp'
# There are many objects in Bergen (1201) and quite a few in
# Stavanger (1103) and Sandnes (1102) as well.
# They are often integrated with the network of footways.
# There seems to be no consistent direction of the ways,
# so do not set incline=up/down
elif elveg_tags['OBJTYPE'] == u'Frittst\xe5endeTrapp':
osmtags['highway'] = 'steps'
# OBJTYPE not handled - add deletion tag and return
else:
warn(u"Deleting unimplemented OBJTYPE {OBJTYPE} with TRANSID {TRANSID}".format(**elveg_tags))
osmtags['DEBUG'] = 'OBJTYPE not handled: ' + elveg_tags['OBJTYPE']
osmtags['action'] = 'delete'
return osmtags
### Finished switching between OBJTYPEs
### From now on we have one of the known OBJTYPEs above
# Add information about lanes from the VKJORFLT tag (oneway=*, lanes=*)
if elveg_tags.has_key('VKJORFLT'):
lane_code = elveg_tags['VKJORFLT']
if elveg_tags['OBJTYPE'] in roadOBJTYPEs:
lane_tags = parse_lanes(lane_code)
osmtags.update(lane_tags)
elif elveg_tags['OBJTYPE'] == 'GangSykkelVegSenterlinje':
if lane_code == '1#2':
                # This is the standard - add no tags
pass
elif lane_code == '1':
# In March 2015 only present in bicycle roundabouts in
# Stavanger (1103)
osmtags['oneway'] = 'yes'
elif lane_code == '2':
# In March 2015 only present in Vestre Torggaten in
# Bergen (1201)
# That one seems to be wrong (there is a one-way sign
# in the other direction)
osmtags['oneway'] = '-1'
else:
# This reacts to cycleways in Trondheim with lane_code 1#2#3S#4S
warn(u"Ignoring VKJORFLT tag for GangSykkelVegSenterlinje with TRANSID {TRANSID}: {VKJORFLT}".format(**elveg_tags))
else:
# Not road, not cycleway
if lane_code != '1#2': # No warning for default 1#2
warn(u"Ignoring VKJORFLT tag for OBJTYPE {OBJTYPE} for TRANSID {TRANSID}: {VKJORFLT}".format(**elveg_tags))
# Import GATENAVN for any type of way, although it would probably only exist for road objects
# There are some empty GATENAVN values in the data set - do not set a name for those
if elveg_tags.has_key('GATENAVN') and elveg_tags['GATENAVN'] != '':
osmtags['name'] = elveg_tags['GATENAVN']
# Add information about tunnels and bridges from MEDIUM tag
if elveg_tags.has_key('MEDIUM'):
# Give a warning if this tag is on a non-road object
if elveg_tags['OBJTYPE'] not in roadOBJTYPEs.union([u'GangSykkelVegSenterlinje']):
warn(u"Processing MEDIUM tag for OBJTYPE {OBJTYPE} for TRANSID {TRANSID}: {MEDIUM}".format(**elveg_tags))
if elveg_tags['MEDIUM'] == 'L':
osmtags['bridge'] = 'yes'
osmtags['layer'] = '1'
elif elveg_tags['MEDIUM'] == 'U':
osmtags['tunnel'] = 'yes'
osmtags['layer'] = '-1'
elif elveg_tags['MEDIUM'] == 'B':
# B means "through a building".
# This could be tagged with covered=yes (current tagging
# for Perleporten in Trondheim), but tunnel=building_passage
# seems to be preferred.
warn(u"Processing MEDIUM tag 'B' for OBJTYPE {OBJTYPE} for TRANSID {TRANSID}".format(**elveg_tags))
osmtags['tunnel'] = 'building_passage'
else:
# There should be no other possible values for MEDIUM
warn(u"Unknown MEDIUM value '{MEDIUM}' for OBJTYPE {OBJTYPE} for TRANSID {TRANSID}".format(**elveg_tags))
# Add the nvdb:id tag from the TRANSID tag
# All ways should have a TRANSID (will change to LOKALID with SOSI 4.5)
osmtags['nvdb:id'] = elveg_tags['TRANSID']
# Add date from NVDB
if elveg_tags.has_key('DATAFANGSTDATO'):
date = elveg_tags['DATAFANGSTDATO']
osmtags['nvdb:date'] = '%s-%s-%s' % (date[0:4],date[4:6],date[6:8])
return osmtags
def parse_lanes(lane_string):
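    # Lane codes are '#'-separated per-lane fields: odd lane numbers run along
    # the way direction, even numbers against it. Examples of the mapping below:
    #   '1#2' -> {}                             (default: one lane each way)
    #   '3'   -> {'oneway': 'yes'}
    #   '2#4' -> {'oneway': '-1', 'lanes': '2'}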
lane_tags = dict()
# Strip whitespace from lane_string
lane_string = lane_string.strip()
    # Early exit for the most common values
if lane_string == '1#2':
# Most common case - one lane in each direction - no special tags
pass
elif lane_string in ('1', '3'):
# One-way street along way direction
lane_tags['oneway'] = 'yes'
elif lane_string in ('2', '4'):
# One-way street opposite to way direction
lane_tags['oneway'] = '-1'
elif lane_string == '1#3':
# One-way street along way direction
lane_tags['oneway'] = 'yes'
lane_tags['lanes'] = '2'
elif lane_string == '2#4':
        # One-way street opposite to way direction
lane_tags['oneway'] = '-1'
lane_tags['lanes'] = '2'
elif lane_string == '1#3#5':
# One-way street along way direction
lane_tags['oneway'] = 'yes'
lane_tags['lanes'] = '3'
elif lane_string == '2#4#6':
        # One-way street opposite to way direction
lane_tags['oneway'] = '-1'
lane_tags['lanes'] = '3'
elif lane_string == '':
# Sometimes this tag is empty -- assume that this means nothing special
pass
# TURN LANES
elif lane_string in ('1V1', '1H1', '1V2','1H2'):
# Left/right turning lane - mark as one-way
lane_tags['oneway'] = 'yes'
elif lane_string in ('2V1','2H1', '2V2', '2H2'):
# Left/right turning lane - mark as one-way
lane_tags['oneway'] = '-1'
elif lane_string in ('1#1V1', '1#1H1', '1#1H1#1V1'):
# One lane and additional turn lane - ignore the turn lane
lane_tags['oneway'] = 'yes'
elif lane_string in ('2#2V1', '2#2H1', '2#2H1#2V1'):
# One lane and additional turn lane - ignore the turn lane
lane_tags['oneway'] = '-1'
elif lane_string in ('1#2#2H1', '1#1H1#2', '1#2#2V1', '1#1V1#2', '1#2#1H1', '1#2#1V1'):
# One lane each direction and additional turn lane - ignore the turn lane
pass
# BIKE LANES
elif lane_string == '1#2#3S#4S':
# Two lanes with bike lanes in both directions
lane_tags['cycleway'] = 'lane'
elif lane_string == '1#3S':
# One lane plus bike lane
lane_tags['oneway'] = 'yes'
lane_tags['cycleway'] = 'lane'
elif lane_string == '2#4S':
# One lane plus bike lane
lane_tags['oneway'] = '-1'
lane_tags['cycleway'] = 'lane'
# BUS LANES
elif lane_string == '1#3K':
# One lane plus bus lane
lane_tags['oneway'] = 'yes'
lane_tags['lanes'] = '2'
lane_tags['lanes:psv'] = '1'
elif lane_string == '2#4K':
# One lane plus bus lane
lane_tags['oneway'] = '-1'
lane_tags['lanes'] = '2'
lane_tags['lanes:psv'] = '1'
elif lane_string == '2#4#6K':
        # Two lanes plus bus lane, opposite to way direction
lane_tags['oneway'] = '-1'
lane_tags['lanes'] = '3'
lane_tags['lanes:psv'] = '1'
elif lane_string in ('1K', '3K'):
# Single bus lane
lane_tags['oneway'] = 'yes'
lane_tags['psv'] = 'designated'
else:
# TODO: Split lane string into individual lanes
# Postfix H1, H2, V1, V2 are for turning lanes,
# postfix K is for public service vehicles (PSV)
# postfix O is for "waiting lanes", e.g. at ferry terminals.
lane_tags['FIXME'] = "Consider adding lane tags based on Elveg data: {0}".format(lane_string)
#warn("Unhandled VKJORFLT: " + lane_string)
return lane_tags
def split_way(osmobj, way_id, split_points):
'''Split way at split points.
Return list of way ids for the split way. The first id is of the
original way.
'''
# Do not go through the hassle, if the way needs no splitting
if len(split_points) == 0:
return [way_id]
# Initialize a list of way id's of the new ways (to be returned)
# Since the last way is always split off first, the list will be
# in reverse order, and is turned around at the end.
splitway_id_list = []
# Get the way that is to be split
way = osmobj.ways[way_id]
transid = way.elveg_tags['TRANSID']
# Compute distances from start to each node of way
node_distances = osmobj.distances_from_transid(transid)
geo_length = node_distances[-1]
# Compute VPA length and normalize split_points to geographic length
if way.elveg_tags.has_key("VPA"):
vpa = [int(n.strip(':;')) for n in way.elveg_tags["VPA"].split()]
else:
# These roads are probably not split, so 1.0 is fine, but raise Exception for now
        #correction_factor = 1.0
raise KeyError("VPA Elveg tag not present")
vpa_length = vpa[2] - vpa[1]
normalization_factor = geo_length / float(vpa_length)
split_points_normalized = [normalization_factor * sp for sp in split_points]
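    # (VPA positions are road-database meters and generally differ slightly
    # from the geodesic length of the way geometry, hence this rescaling.)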
# Make sure the normalized split points are sorted
# (so that we can split off ways from the end of the list)
split_points_normalized.sort()
# Loop over the split points, splitting off the last way each time
while len(split_points_normalized) > 0:
current_split_point = split_points_normalized.pop()
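        # Find the first existing node at or beyond the split point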
upper_split_index = np.searchsorted(node_distances, current_split_point)
# Find the distance to the nearest nodes
# (for checking if a new node should be created)
distance_to_upper = node_distances[upper_split_index] - current_split_point
distance_to_lower = current_split_point - node_distances[upper_split_index - 1]
# Decide if a new node should be created
# Reuse node if closer than 0.5 m
if distance_to_upper < 0.5 or distance_to_lower < 0.5:
# Verify that we have no negative distances (which is a bug)
if distance_to_upper < 0. or distance_to_lower < 0.:
warn(u"Negative distances for TRANSID {0}".format(transid))
# Reuse closest node
if distance_to_upper < distance_to_lower:
split_index = upper_split_index
else:
split_index = upper_split_index - 1
# Create a new way from the split node to the end of the way
newway_nodes = way.nds[split_index:]
newway = ElvegWay(tags=way.tags, nds=newway_nodes)
newway.elveg_tags = way.elveg_tags
splitway_id_list.append(newway.id)
osmobj.ways[newway.id] = newway
# Remove the new way from the old way
# (the split_index should be included in both ways)
way.nds = way.nds[:split_index + 1]
else:
# Find the coordinates for the new split node
from_node_id = way.nds[upper_split_index - 1]
to_node_id = way.nds[upper_split_index]
from_node = osmobj.nodes[from_node_id]
to_node = osmobj.nodes[to_node_id]
ggresults = gg.Geodesic.WGS84.Inverse(from_node.lat, from_node.lon, to_node.lat, to_node.lon)
distance = ggresults['s12']
azi1 = ggresults['azi1']
dist_from_last_node = current_split_point - node_distances[upper_split_index - 1]
ggresults = gg.Geodesic.WGS84.Direct(from_node.lat, from_node.lon, azi1, dist_from_last_node)
newlon = ggresults['lon2']
newlat = ggresults['lat2']
# Create the new node
split_node = ElvegNode(attribs={"lon": newlon, "lat": newlat})
if osmobj.nodes.has_key(split_node.id):
# This should not happen if ElvegNode.__init__() does the right thing
raise Exception(u"Almost overwrote node {0}\n".format(split_node.id).encode('utf-8'))
osmobj.nodes[split_node.id] = split_node
# FOR DEBUGGING WAY SPLITTING
#osmobj.nodes[split_node.id].tags['newnode'] = 'yes'
# Create a new way from the split_point to the end of the way
newway_nodes = [split_node.id] + way.nds[upper_split_index:]
newway = ElvegWay(tags=way.tags, nds=newway_nodes)
newway.elveg_tags = way.elveg_tags
splitway_id_list.append(newway.id)
osmobj.ways[newway.id] = newway
# Remove nodes for the new way from the old way
way.nds = way.nds[:upper_split_index] + [split_node.id]
# Finally, add the original way, which is the first segment of the
# newly split way.
splitway_id_list.append(way_id)
# Reverse direction so that first way segment comes first
return splitway_id_list[::-1]
def merge_equal(osmobj, merge_list):
if len(merge_list) <= 1:
# There is nothing to merge
return
# Remove nvdb-tags
for way_id in merge_list:
way = osmobj.ways[way_id]
way.tags.pop('nvdb:id')
way.tags.pop('nvdb:date')
way.tags.pop('nvdb:id:part', None)
# Split the ways in merge_list into subsets with identical tags
eq_tags_dict = collections.defaultdict(list)
for way_id in merge_list:
frozen_tags = frozenset(osmobj.ways[way_id].tags.items())
eq_tags_dict[frozen_tags].append(way_id)
# Loop through the subsets with equal tags
for merge_sublist in eq_tags_dict.itervalues():
# Index based on start-nodes and end nodes
ways_from_start_node = collections.defaultdict(list)
ways_from_end_node = collections.defaultdict(list)
for way_id in merge_sublist:
way = osmobj.ways[way_id]
ways_from_start_node[way.nds[0]].append(way_id)
ways_from_end_node[way.nds[-1]].append(way_id)
# Start merging ways where start/end meet
merge_subset = set(merge_sublist)
# DEBUG
#print "Starting merge_sublist " + str(merge_sublist)
while len(merge_subset) > 0:
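            # Seed a chain with an arbitrary way, then extend it backwards and
            # forwards as long as exactly one mergeable way meets each end node.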
new_way_string = collections.deque([merge_subset.pop()])
# Add backwards
first_node = osmobj.ways[new_way_string[0]].nds[0]
while (first_node in ways_from_end_node and
len(ways_from_end_node[first_node]) == 1 and
ways_from_end_node[first_node][0] in merge_subset):
new_way_string.extendleft(ways_from_end_node[first_node])
merge_subset.remove(ways_from_end_node[first_node][0])
first_node = osmobj.ways[new_way_string[0]].nds[0]
# Add forwards
last_node = osmobj.ways[new_way_string[-1]].nds[-1]
while (last_node in ways_from_start_node and
len(ways_from_start_node[last_node]) == 1 and
ways_from_start_node[last_node][0] in merge_subset):
new_way_string.extend(ways_from_start_node[last_node])
merge_subset.remove(ways_from_start_node[last_node][0])
last_node = osmobj.ways[new_way_string[-1]].nds[-1]
# new_way_string is ready built, so convert it back to a list
# (since deque cannot be sliced)
new_way_string = list(new_way_string)
# DEBUG
#print " Merge string" + str(new_way_string)
# Generate node-string for merged way based on way_ids in new_way_string
first_way = osmobj.ways[new_way_string[0]]
for way_id in new_way_string[1:]:
way = osmobj.ways[way_id]
first_length = len(first_way.nds)
way_length = len(way.nds)
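                # The OSM API caps ways at 2000 nodes; only merge while the
                # combined way stays within that limit.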
if first_length + way_length - 1 <= 2000:
first_way.nds.extend(way.nds[1:])
osmobj.discard(way)
else:
first_way = way
###########################################################
# main #
###########################################################
# Read input arguments
directory = sys.argv[1]
if len(sys.argv) >= 3:
kommune_number = sys.argv[2]
else:
kommune_number = directory.strip('/')[-4:]
# Check that it is really a number
kommune_int = int(kommune_number)
# Find the names of the other files
osm_input = os.path.join(directory, kommune_number + 'Elveg_default.osm')
osm_output = os.path.join(directory, kommune_number + 'Elveg.osm')
elveg_fart = os.path.join(directory, kommune_number + 'Fart.txt')
elveg_hoyde = os.path.join(directory, kommune_number + 'Hoyde.txt')
osm_barrier_output = os.path.join(directory, kommune_number + 'detached_barriers.osm')
osm_deleted_output = os.path.join(directory, kommune_number + 'deleted_elements.osm')
# Loop over speed limits and tag the whole way where possible.
# Elsewhere, add split points to the split list.
roaddata = {}
with open(elveg_fart, 'rb') as ef:
# Read first four header lines
ef_header = ef.next()
ef_export_line = ef.next()
ef_some_number = ef.next()
ef_empty_line = ef.next()
# Then use csv module for reading data
reader = csv.DictReader(ef, delimiter=';')
for row in reader:
transid = row[' TransID']
fart_start = int(row['Fra'])
fart_stop = int(row[' Til'])
fart_length = fart_stop - fart_start
fart_limit = row[' Fart']
fart_felt = row['felt']
if not roaddata.has_key(transid):
roaddata[transid] = {}
if not roaddata[transid].has_key('maxspeed'):
roaddata[transid]['maxspeed'] = []
roaddata[transid]['maxspeed'].append({'maxspeed': fart_limit,
'lanes': fart_felt,
'start': fart_start,
'stop': fart_stop})
# Add height limits to roaddata (if the file exists)
if not os.path.isfile(elveg_hoyde):
warn(u"File {0} does not exist and is not used".format(elveg_hoyde))
else:
with open(elveg_hoyde, 'rb') as eh:
# Read first four header lines
eh_header = eh.next()
eh_export_line = eh.next()
eh_empty_line1 = eh.next()
eh_empty_line2 = eh.next()
# Then use csv module for reading data
reader = csv.DictReader(eh, delimiter=';')
for row in reader:
transid = row[' TransID']
height_start = int(row['Fra'])
height_stop = int(row[' Til'])
height_length = height_stop - height_start
height_limit = row['H\xf8yde']
height_felt = row['felt']
if not roaddata.has_key(transid):
roaddata[transid] = {}
if not roaddata[transid].has_key('maxheight'):
roaddata[transid]['maxheight'] = []
roaddata[transid]['maxheight'].append({'maxheight': height_limit,
'lanes': height_felt,
'start': height_start,
'stop': height_stop})
# TODO: Add information from XXXXAksel.txt to roaddata,
# and add relevant tagging.
# Read OSM file
osmobj = ElvegOSM.load(osm_input)
# Loop through all nodes and move tags to elveg_tags
for nid,node in osmobj.nodes.items():
node.elveg_tags = node.tags
node.tags = {}
# Loop through all ways in osmobj and
# - swap original tags with OSM tags.
# - extract the way length from the Elveg VPA tag and
# store in roaddata structure
# Important to use items() instead of iteritems() here as we are adding
# items to the osmobj.ways dictionary during the loop.
for wid,w in osmobj.ways.items():
# Add new tags (using the create_osmtags function)
w.elveg_tags = w.tags
osm_tags = create_osmtags(w.elveg_tags)
w.tags = osm_tags
# Check that way has VPA Elveg tag
if not w.elveg_tags.has_key('VPA'):
warn(u"VPA missing for OBJTYPE {OBJTYPE} with TRANSID {TRANSID}".format(**w.elveg_tags))
continue
    # Add way length as given by VPA to the roaddata structure
transid = w.elveg_tags['TRANSID']
vpa = [int(n.strip(':;')) for n in w.elveg_tags["VPA"].split()]
# We do not care about those ways where we have no data to add,
# so move to next if this is the case.
if not roaddata.has_key(transid):
continue
roaddata[transid]['length'] = vpa[2] - vpa[1]
# make a sorted list of meter values, including end
# points, where some roaddata may change
end_points = [0, roaddata[transid]['length']]
for restriction_type in ['maxspeed', 'maxheight']: # Add any new restrictions here
for endpoint_type in ['start', 'stop']:
end_points.extend([d[endpoint_type] for d in roaddata[transid].get(restriction_type, [])])
end_points = list(set(end_points))
end_points.sort()
# Handle the special case where a way has zero VPA length (i.e. vpa[1] == vpa[2])
    # This would come out as end_points == [0] in the previous block, but should be [0, 0] to be compatible with the following
if end_points == [0]:
end_points = [0, 0]
# Test endpoints from .txt files against VPA lengths
# There are several ways where the end point is outside the VPA meter range
# Remove those TRANSIDs from the roaddata structure and move on to next way
if end_points[-1] > roaddata[transid]['length']:
warntemplate = u"Warning: End point {0} m outside of VPA length of road ({1} m) for TRANSID {2}"
warnstring = warntemplate.format(end_points[-1], roaddata[transid]['length'], transid)
warn(warnstring)
del roaddata[transid]
continue
# Make a list of intervals, representing the new ways after a split
# For most ways, there will be only one interval, but whenever
# the speed limit changes on a way or a height restriction
# does not apply to the whole way, there will be more than one interval
interval_list = zip(end_points[:-1],end_points[1:])
# Make a list of tags (maxheight=*, maxspeed=*)
# with one list entry per new way interval
newway_tags = [{} for i in interval_list] # I.e. a list of empty dicts
for i,interval in enumerate(interval_list):
for restriction_type in ['maxspeed', 'maxheight']: # Add any new restrictions here
for j,restr in enumerate(roaddata[transid].get(restriction_type, [])):
if restr['start'] <= interval[0] and interval[1] <= restr['stop']:
#if not w.elveg_tags.has_key('VKJORFLT'):
# print w.elveg_tags
#warn(str((restr['lanes'], w.elveg_tags.get('VKJORFLT'))))
if restr['lanes'] == w.elveg_tags.get('VKJORFLT', ''):
# The restriction applies to all lanes of the road
newway_tags[i][restriction_type] = restr[restriction_type]
elif restr['lanes'] == '1' and w.elveg_tags.get('VKJORFLT') == "1#2":
#warn("DEBUG: Heeding different lanes for restriction {} on TRANSID {} with lanes {} and restriction lanes {}".format(restriction_type, w.elveg_tags.get('TRANSID'), w.elveg_tags.get('VKJORFLT'), restr['lanes']))
newway_tags[i][restriction_type + ':forward'] = restr[restriction_type]
elif restr['lanes'] == '2' and w.elveg_tags.get('VKJORFLT') == "1#2":
#warn("DEBUG: Heeding different lanes for restriction {} on TRANSID {} with lanes {} and restriction lanes {}".format(restriction_type, w.elveg_tags.get('TRANSID'), w.elveg_tags.get('VKJORFLT'), restr['lanes']))
newway_tags[i][restriction_type + ':backward'] = restr[restriction_type]
else:
warn("Warning: Not heeding different lanes for restriction {} on TRANSID {} with lanes {} and restriction lanes {}".format(restriction_type,
w.elveg_tags.get('TRANSID'),
w.elveg_tags.get('VKJORFLT'),
restr['lanes']))
newway_tags[i][restriction_type] = restr[restriction_type]
# DEBUG: Remove later
#print newway_tags
# Split the way in osmobj into the right number of segments
split_points = end_points[1:-1]
segment_ids = split_way(osmobj, w.id, split_points)
# Add nvdb:id:part subkey to each part if the Elveg segment has been split
if len(segment_ids) > 1:
for i,segment_id in enumerate(segment_ids):
osmobj.ways[segment_id].tags['nvdb:id:part'] = str(i)
# Add maxheight and maxspeed restrictions
for i,segment_id in enumerate(segment_ids):
osmobj.ways[segment_id].tags.update(newway_tags[i])
# Loop through all ways
# - make a set of those nodes that are part of a way
way_node_ids = set()
for way in osmobj.ways.values():
way_node_ids.update(way.nds)
# ... and those that are not part of a way
noway_node_ids = set(osmobj.nodes).difference(way_node_ids)
# DATA CHECKING: Check if any way nodes also have tags, or if all tags
# are on duplicate nodes
#for waynode_id in way_nodes:
# waynode = osmobj.nodes[waynode_id]
# if len(waynode.tags) > 0:
# print waynode.tags
# Create OSM object for manual merging of off-way barriers
osmobj_barriers = ElvegOSM()
# Loop through and process all single nodes
for nid in noway_node_ids:
noway_node = osmobj.nodes[nid]
coord = (noway_node.lat, noway_node.lon)
if noway_node.elveg_tags['OBJTYPE'] == 'Vegsperring':
# Tag the barrier with OSM tags
vegsperringtype = noway_node.elveg_tags['VEGSPERRINGTYPE']
if vegsperringtype == 'Betongkjegle':
noway_node.tags['barrier'] = 'block'
elif vegsperringtype == u'Bilsperre':
# This seems to be any type of barrier that has wide enough
# openings to only stop cars.
noway_node.tags['barrier'] = 'yes'
elif vegsperringtype == u'Bussluse':
noway_node.tags['barrier'] = 'bus_trap'
elif vegsperringtype == u'L\xe5st bom':
noway_node.tags['barrier'] = 'gate'
elif vegsperringtype == u'New Jersey':
noway_node.tags['barrier'] = 'jersey_barrier'
elif vegsperringtype == u'R\xf8rgelender':
# This describes the material more than the actual barrier
# Similar to barrier=fence, but usually it is possible to
# walk or bike around
noway_node.tags['barrier'] = 'yes'
elif vegsperringtype == u'Steinblokk':
noway_node.tags['barrier'] = 'block'
elif vegsperringtype == u'Trafikkavviser':
# It seems that roads with this kind of barrier are
# best tagged as footways in OSM.
# I suppose the barrier itself could be anything.
noway_node.tags['barrier'] = 'yes'
elif vegsperringtype == u'Ukjent':
noway_node.tags['barrier'] = 'yes'
else:
warn(u"Unknown barrier: {0}".format(vegsperringtype))
noway_node.tags['barrier'] = 'yes'
elif noway_node.elveg_tags['OBJTYPE'] == 'Kommunedele':
# We do not use this tag, mark this node for deletion
noway_node.tags['DEBUG'] = 'Kommunedele'
noway_node.tags['action'] = 'delete'
elif noway_node.elveg_tags['OBJTYPE'] == 'Ferjekai':
# These nodes are not connected to the road network
# In OSM, they should ideally be on the node between the road and the ferry route.
noway_node.tags['amenity'] = 'ferry_terminal'
# Remove all ways and non-way nodes with action=delete and delete unused nodes
# Loop through ways, collect ways with action=delete and
# id of nodes in ways
to_delete = set()
nodes_used = set()
for way in osmobj.ways.itervalues():
if "action" in way.tags and way.tags['action'] == 'delete':
to_delete.add(way)
elif "nvdb:id" in way.tags and len(way.tags) == 1:
to_delete.add(way)
elif len(way.tags) == 0:
to_delete.add(way)
else:
for n in way.nds:
nodes_used.add(n)
# Collect nodes which should be deleted
for node in osmobj.nodes.itervalues():
if "action" in node.tags and node.tags['action'] == 'delete':
to_delete.add(node)
elif (node.id not in nodes_used) and (len(node.tags)) == 0:
to_delete.add(node)
# Delete objects from output and add them to a separate file
osmobj_deleted = ElvegOSM()
for element in to_delete:
osmobj.discard(element)
osmobj_deleted.add(element)
if hasattr(element, 'elveg_tags'):
element.tags.update({'Elveg:' + k:v for k,v in element.elveg_tags.iteritems()})
# Copy nodes needed by ways in osmobj_deleted
for delway in osmobj_deleted.ways.itervalues():
for nid in delway.nds:
        if nid not in osmobj_deleted.nodes:
osmobj_deleted.add(osmobj.nodes[nid])
# Add a way_ids attribute to every node, holding the ids of all ways
# it is part of.
# This is to be used in the node merging and must therefore be done after
# ways have been deleted above.
for node in osmobj.nodes.itervalues():
node.way_ids = set()
for way in osmobj.ways.itervalues():
for node_id in way.nds:
node = osmobj.nodes[node_id]
node.way_ids.add(way.id)
# Build a lookup table keyed on node coordinates, in order to identify
# nodes with (exactly) the same coordinates
node_lookup = dict()
for id,node in osmobj.nodes.iteritems():
key = (node.lat, node.lon)
    if key in node_lookup:
node_lookup[key].append(id)
else:
node_lookup[key] = [id]
# Merge nodes in same location
for coord, node_id_list in node_lookup.items():
if len(node_id_list) == 1:
continue
else:
merge_nodes(node_id_list)
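# Note: keying on exact (lat, lon) equality only merges bit-identical
# duplicates; nearly-coincident nodes produced by rounding survive unmerged.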
# Speed limit cleanup
for id,way in osmobj.ways.iteritems():
# Remove maxspeed:forward or maxspeed:backward if maxspeed is present
# (i.e. inconsistent specification - either redundant or conflicting)
    if 'maxspeed:forward' in way.tags or 'maxspeed:backward' in way.tags:
        if 'maxspeed' in way.tags:
# Redundant or conflicting specification - report which before removing maxspeed
if (way.tags['maxspeed'] == way.tags.get('maxspeed:forward', way.tags['maxspeed']) and
way.tags['maxspeed'] == way.tags.get('maxspeed:backward', way.tags['maxspeed'])):
# Any maxspeed:forward and/or maxspeed:backward is compatible with maxspeed
# Remove those of maxspeed:forward and maxspeed:backward that exists
warn("Redundant maxspeed:forward and/or maxspeed:backward on TRANSID {}".format(way.elveg_tags['TRANSID']))
way.tags.pop('maxspeed:backward', None)
way.tags.pop('maxspeed:forward', None)
else:
# There are conflicts between maxspeed:forward/maxspeed:backward and maxspeed
# Delete all
warn("Inconsistent maxspeed/maxspeed:forward/maxspeed:backward on TRANSID {}: {}/{}/{}".format(
way.elveg_tags['TRANSID'],
way.tags['maxspeed'],
way.tags.get('maxspeed:forward'),
way.tags.get('maxspeed:backward')))
way.tags.pop('maxspeed', None)
way.tags.pop('maxspeed:backward', None)
way.tags.pop('maxspeed:forward', None)
# Join maxspeed:forward and maxspeed:backward if they are equal
# (this should have been unnecessary, but such limits are present in the data)
    if ('maxspeed:forward' in way.tags and
            'maxspeed:backward' in way.tags and
            way.tags['maxspeed:forward'] == way.tags['maxspeed:backward']):
way.tags['maxspeed'] = way.tags['maxspeed:forward']
del way.tags['maxspeed:forward']
del way.tags['maxspeed:backward']
# If there is only :forward or :backward, apply to whole way
    if 'maxspeed:forward' in way.tags and 'maxspeed:backward' not in way.tags:
way.tags['maxspeed'] = way.tags['maxspeed:forward']
del way.tags['maxspeed:forward']
    if 'maxspeed:backward' in way.tags and 'maxspeed:forward' not in way.tags:
way.tags['maxspeed'] = way.tags['maxspeed:backward']
del way.tags['maxspeed:backward']
    # Remove the default speed limit of 50, since it may only reflect
    # missing reporting
    if (way.tags.get('maxspeed', None) == '50' and
            way.tags.get('highway', None) not in ('trunk', 'primary', 'secondary')):
del way.tags['maxspeed']
# Remove speed limits for non-roads (footway, cycleway, etc.)
    if ('maxspeed' in way.tags and
            way.tags.get('highway', None) not in ('trunk', 'primary', 'secondary', 'road', 'unclassified', 'residential', 'service')):
del way.tags['maxspeed']
# Save barriers that are not merged to other nodes to a separate file
for id,node in osmobj.nodes.items():
if not node.way_ids:
osmobj_barriers.nodes[id] = node
del osmobj.nodes[id]
# -- Merge ways with same VNR/VPA and identical non-nvdb tags --
# Build index over roads with identical VNR/VPA
vpa_index = {}
for way_id,way in osmobj.ways.iteritems():
try:
vnr = way.elveg_tags['VNR']
vpa, start, stop = [s.strip(':;') for s in way.elveg_tags['VPA'].split()]
except KeyError:
continue
except AttributeError as e:
warn(str(e) + ' ' + str(way_id))
continue
    if (vnr, vpa) in vpa_index:
vpa_index[(vnr, vpa)].append(way_id)
else:
vpa_index[(vnr, vpa)] = [way_id]
# Loop through all "groups" in vpa_index and merge what is possible
for merge_list in vpa_index.itervalues():
merge_equal(osmobj, merge_list)
# Reverse ways with oneway=-1
for id,way in osmobj.ways.iteritems():
if way.tags.get('oneway', None) == '-1':
way.nds.reverse()
way.tags['oneway'] = 'yes'
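        # Caveat (worth verifying): any direction-dependent subkeys still
        # present (e.g. maxspeed:forward/:backward) would also need swapping
        # when the node order is reversed.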
# TODO: Add turn restrictions from XXXXSving.txt
osmobj.save(osm_output)
osmobj_barriers.save(osm_barrier_output)
osmobj_deleted.save(osm_deleted_output)
|
DavisDevelopment/app_engine_demo
|
refs/heads/master
|
lib/werkzeug/testsuite/routing.py
|
145
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.routing
~~~~~~~~~~~~~~~~~~~~~~~~~~
Routing tests.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import routing as r
from werkzeug.wrappers import Response
from werkzeug.datastructures import ImmutableDict
from werkzeug.test import create_environ
class RoutingTestCase(WerkzeugTestCase):
def test_basic_routing(self):
map = r.Map([
r.Rule('/', endpoint='index'),
r.Rule('/foo', endpoint='foo'),
r.Rule('/bar/', endpoint='bar')
])
adapter = map.bind('example.org', '/')
assert adapter.match('/') == ('index', {})
assert adapter.match('/foo') == ('foo', {})
assert adapter.match('/bar/') == ('bar', {})
self.assert_raises(r.RequestRedirect, lambda: adapter.match('/bar'))
self.assert_raises(r.NotFound, lambda: adapter.match('/blub'))
adapter = map.bind('example.org', '/test')
try:
adapter.match('/bar')
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/test/bar/'
else:
self.fail('Expected request redirect')
adapter = map.bind('example.org', '/')
try:
adapter.match('/bar')
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/bar/'
else:
self.fail('Expected request redirect')
adapter = map.bind('example.org', '/')
try:
adapter.match('/bar', query_args={'aha': 'muhaha'})
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/bar/?aha=muhaha'
else:
self.fail('Expected request redirect')
adapter = map.bind('example.org', '/')
try:
adapter.match('/bar', query_args='aha=muhaha')
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/bar/?aha=muhaha'
else:
self.fail('Expected request redirect')
adapter = map.bind_to_environ(create_environ('/bar?foo=bar',
'http://example.org/'))
try:
adapter.match()
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/bar/?foo=bar'
else:
self.fail('Expected request redirect')
def test_environ_defaults(self):
environ = create_environ("/foo")
self.assert_strict_equal(environ["PATH_INFO"], '/foo')
m = r.Map([r.Rule("/foo", endpoint="foo"), r.Rule("/bar", endpoint="bar")])
a = m.bind_to_environ(environ)
self.assert_strict_equal(a.match("/foo"), ('foo', {}))
self.assert_strict_equal(a.match(), ('foo', {}))
self.assert_strict_equal(a.match("/bar"), ('bar', {}))
self.assert_raises(r.NotFound, a.match, "/bars")
def test_environ_nonascii_pathinfo(self):
environ = create_environ(u'/лошадь')
m = r.Map([
r.Rule(u'/', endpoint='index'),
r.Rule(u'/лошадь', endpoint='horse')
])
a = m.bind_to_environ(environ)
self.assert_strict_equal(a.match(u'/'), ('index', {}))
self.assert_strict_equal(a.match(u'/лошадь'), ('horse', {}))
self.assert_raises(r.NotFound, a.match, u'/барсук')
def test_basic_building(self):
map = r.Map([
r.Rule('/', endpoint='index'),
r.Rule('/foo', endpoint='foo'),
r.Rule('/bar/<baz>', endpoint='bar'),
r.Rule('/bar/<int:bazi>', endpoint='bari'),
r.Rule('/bar/<float:bazf>', endpoint='barf'),
r.Rule('/bar/<path:bazp>', endpoint='barp'),
r.Rule('/hehe', endpoint='blah', subdomain='blah')
])
adapter = map.bind('example.org', '/', subdomain='blah')
assert adapter.build('index', {}) == 'http://example.org/'
assert adapter.build('foo', {}) == 'http://example.org/foo'
assert adapter.build('bar', {'baz': 'blub'}) == 'http://example.org/bar/blub'
assert adapter.build('bari', {'bazi': 50}) == 'http://example.org/bar/50'
assert adapter.build('barf', {'bazf': 0.815}) == 'http://example.org/bar/0.815'
assert adapter.build('barp', {'bazp': 'la/di'}) == 'http://example.org/bar/la/di'
assert adapter.build('blah', {}) == '/hehe'
self.assert_raises(r.BuildError, lambda: adapter.build('urks'))
adapter = map.bind('example.org', '/test', subdomain='blah')
assert adapter.build('index', {}) == 'http://example.org/test/'
assert adapter.build('foo', {}) == 'http://example.org/test/foo'
assert adapter.build('bar', {'baz': 'blub'}) == 'http://example.org/test/bar/blub'
assert adapter.build('bari', {'bazi': 50}) == 'http://example.org/test/bar/50'
assert adapter.build('barf', {'bazf': 0.815}) == 'http://example.org/test/bar/0.815'
assert adapter.build('barp', {'bazp': 'la/di'}) == 'http://example.org/test/bar/la/di'
assert adapter.build('blah', {}) == '/test/hehe'
def test_defaults(self):
map = r.Map([
r.Rule('/foo/', defaults={'page': 1}, endpoint='foo'),
r.Rule('/foo/<int:page>', endpoint='foo')
])
adapter = map.bind('example.org', '/')
assert adapter.match('/foo/') == ('foo', {'page': 1})
self.assert_raises(r.RequestRedirect, lambda: adapter.match('/foo/1'))
assert adapter.match('/foo/2') == ('foo', {'page': 2})
assert adapter.build('foo', {}) == '/foo/'
assert adapter.build('foo', {'page': 1}) == '/foo/'
assert adapter.build('foo', {'page': 2}) == '/foo/2'
def test_greedy(self):
map = r.Map([
r.Rule('/foo', endpoint='foo'),
r.Rule('/<path:bar>', endpoint='bar'),
r.Rule('/<path:bar>/<path:blub>', endpoint='bar')
])
adapter = map.bind('example.org', '/')
assert adapter.match('/foo') == ('foo', {})
assert adapter.match('/blub') == ('bar', {'bar': 'blub'})
assert adapter.match('/he/he') == ('bar', {'bar': 'he', 'blub': 'he'})
assert adapter.build('foo', {}) == '/foo'
assert adapter.build('bar', {'bar': 'blub'}) == '/blub'
assert adapter.build('bar', {'bar': 'blub', 'blub': 'bar'}) == '/blub/bar'
def test_path(self):
map = r.Map([
r.Rule('/', defaults={'name': 'FrontPage'}, endpoint='page'),
r.Rule('/Special', endpoint='special'),
r.Rule('/<int:year>', endpoint='year'),
r.Rule('/<path:name>', endpoint='page'),
r.Rule('/<path:name>/edit', endpoint='editpage'),
r.Rule('/<path:name>/silly/<path:name2>', endpoint='sillypage'),
r.Rule('/<path:name>/silly/<path:name2>/edit', endpoint='editsillypage'),
r.Rule('/Talk:<path:name>', endpoint='talk'),
r.Rule('/User:<username>', endpoint='user'),
r.Rule('/User:<username>/<path:name>', endpoint='userpage'),
r.Rule('/Files/<path:file>', endpoint='files'),
])
adapter = map.bind('example.org', '/')
assert adapter.match('/') == ('page', {'name':'FrontPage'})
self.assert_raises(r.RequestRedirect, lambda: adapter.match('/FrontPage'))
assert adapter.match('/Special') == ('special', {})
assert adapter.match('/2007') == ('year', {'year':2007})
assert adapter.match('/Some/Page') == ('page', {'name':'Some/Page'})
assert adapter.match('/Some/Page/edit') == ('editpage', {'name':'Some/Page'})
assert adapter.match('/Foo/silly/bar') == ('sillypage', {'name':'Foo', 'name2':'bar'})
assert adapter.match('/Foo/silly/bar/edit') == ('editsillypage', {'name':'Foo', 'name2':'bar'})
assert adapter.match('/Talk:Foo/Bar') == ('talk', {'name':'Foo/Bar'})
assert adapter.match('/User:thomas') == ('user', {'username':'thomas'})
assert adapter.match('/User:thomas/projects/werkzeug') == \
('userpage', {'username':'thomas', 'name':'projects/werkzeug'})
assert adapter.match('/Files/downloads/werkzeug/0.2.zip') == \
('files', {'file':'downloads/werkzeug/0.2.zip'})
def test_dispatch(self):
env = create_environ('/')
map = r.Map([
r.Rule('/', endpoint='root'),
r.Rule('/foo/', endpoint='foo')
])
adapter = map.bind_to_environ(env)
raise_this = None
def view_func(endpoint, values):
if raise_this is not None:
raise raise_this
return Response(repr((endpoint, values)))
dispatch = lambda p, q=False: Response.force_type(adapter.dispatch(view_func, p,
catch_http_exceptions=q), env)
assert dispatch('/').data == b"('root', {})"
assert dispatch('/foo').status_code == 301
raise_this = r.NotFound()
self.assert_raises(r.NotFound, lambda: dispatch('/bar'))
assert dispatch('/bar', True).status_code == 404
def test_http_host_before_server_name(self):
env = {
'HTTP_HOST': 'wiki.example.com',
'SERVER_NAME': 'web0.example.com',
'SERVER_PORT': '80',
'SCRIPT_NAME': '',
'PATH_INFO': '',
'REQUEST_METHOD': 'GET',
'wsgi.url_scheme': 'http'
}
map = r.Map([r.Rule('/', endpoint='index', subdomain='wiki')])
adapter = map.bind_to_environ(env, server_name='example.com')
assert adapter.match('/') == ('index', {})
assert adapter.build('index', force_external=True) == 'http://wiki.example.com/'
assert adapter.build('index') == '/'
env['HTTP_HOST'] = 'admin.example.com'
adapter = map.bind_to_environ(env, server_name='example.com')
assert adapter.build('index') == 'http://wiki.example.com/'
def test_adapter_url_parameter_sorting(self):
map = r.Map([r.Rule('/', endpoint='index')], sort_parameters=True,
sort_key=lambda x: x[1])
adapter = map.bind('localhost', '/')
assert adapter.build('index', {'x': 20, 'y': 10, 'z': 30},
force_external=True) == 'http://localhost/?y=10&x=20&z=30'
def test_request_direct_charset_bug(self):
map = r.Map([r.Rule(u'/öäü/')])
adapter = map.bind('localhost', '/')
try:
adapter.match(u'/öäü')
except r.RequestRedirect as e:
assert e.new_url == 'http://localhost/%C3%B6%C3%A4%C3%BC/'
else:
self.fail('expected request redirect exception')
def test_request_redirect_default(self):
map = r.Map([r.Rule(u'/foo', defaults={'bar': 42}),
r.Rule(u'/foo/<int:bar>')])
adapter = map.bind('localhost', '/')
try:
adapter.match(u'/foo/42')
except r.RequestRedirect as e:
assert e.new_url == 'http://localhost/foo'
else:
self.fail('expected request redirect exception')
def test_request_redirect_default_subdomain(self):
map = r.Map([r.Rule(u'/foo', defaults={'bar': 42}, subdomain='test'),
r.Rule(u'/foo/<int:bar>', subdomain='other')])
adapter = map.bind('localhost', '/', subdomain='other')
try:
adapter.match(u'/foo/42')
except r.RequestRedirect as e:
assert e.new_url == 'http://test.localhost/foo'
else:
self.fail('expected request redirect exception')
def test_adapter_match_return_rule(self):
rule = r.Rule('/foo/', endpoint='foo')
map = r.Map([rule])
adapter = map.bind('localhost', '/')
assert adapter.match('/foo/', return_rule=True) == (rule, {})
def test_server_name_interpolation(self):
server_name = 'example.invalid'
map = r.Map([r.Rule('/', endpoint='index'),
r.Rule('/', endpoint='alt', subdomain='alt')])
env = create_environ('/', 'http://%s/' % server_name)
adapter = map.bind_to_environ(env, server_name=server_name)
assert adapter.match() == ('index', {})
env = create_environ('/', 'http://alt.%s/' % server_name)
adapter = map.bind_to_environ(env, server_name=server_name)
assert adapter.match() == ('alt', {})
env = create_environ('/', 'http://%s/' % server_name)
adapter = map.bind_to_environ(env, server_name='foo')
assert adapter.subdomain == '<invalid>'
def test_rule_emptying(self):
rule = r.Rule('/foo', {'meh': 'muh'}, 'x', ['POST'],
False, 'x', True, None)
rule2 = rule.empty()
assert rule.__dict__ == rule2.__dict__
rule.methods.add('GET')
assert rule.__dict__ != rule2.__dict__
rule.methods.discard('GET')
rule.defaults['meh'] = 'aha'
assert rule.__dict__ != rule2.__dict__
def test_rule_templates(self):
testcase = r.RuleTemplate(
[ r.Submount('/test/$app',
[ r.Rule('/foo/', endpoint='handle_foo')
, r.Rule('/bar/', endpoint='handle_bar')
, r.Rule('/baz/', endpoint='handle_baz')
]),
r.EndpointPrefix('${app}',
[ r.Rule('/${app}-blah', endpoint='bar')
, r.Rule('/${app}-meh', endpoint='baz')
]),
r.Subdomain('$app',
[ r.Rule('/blah', endpoint='x_bar')
, r.Rule('/meh', endpoint='x_baz')
])
])
url_map = r.Map(
[ testcase(app='test1')
, testcase(app='test2')
, testcase(app='test3')
, testcase(app='test4')
])
out = sorted([(x.rule, x.subdomain, x.endpoint)
for x in url_map.iter_rules()])
assert out == ([
('/blah', 'test1', 'x_bar'),
('/blah', 'test2', 'x_bar'),
('/blah', 'test3', 'x_bar'),
('/blah', 'test4', 'x_bar'),
('/meh', 'test1', 'x_baz'),
('/meh', 'test2', 'x_baz'),
('/meh', 'test3', 'x_baz'),
('/meh', 'test4', 'x_baz'),
('/test/test1/bar/', '', 'handle_bar'),
('/test/test1/baz/', '', 'handle_baz'),
('/test/test1/foo/', '', 'handle_foo'),
('/test/test2/bar/', '', 'handle_bar'),
('/test/test2/baz/', '', 'handle_baz'),
('/test/test2/foo/', '', 'handle_foo'),
('/test/test3/bar/', '', 'handle_bar'),
('/test/test3/baz/', '', 'handle_baz'),
('/test/test3/foo/', '', 'handle_foo'),
('/test/test4/bar/', '', 'handle_bar'),
('/test/test4/baz/', '', 'handle_baz'),
('/test/test4/foo/', '', 'handle_foo'),
('/test1-blah', '', 'test1bar'),
('/test1-meh', '', 'test1baz'),
('/test2-blah', '', 'test2bar'),
('/test2-meh', '', 'test2baz'),
('/test3-blah', '', 'test3bar'),
('/test3-meh', '', 'test3baz'),
('/test4-blah', '', 'test4bar'),
('/test4-meh', '', 'test4baz')
])
def test_non_string_parts(self):
m = r.Map([
r.Rule('/<foo>', endpoint='foo')
])
a = m.bind('example.com')
self.assert_equal(a.build('foo', {'foo': 42}), '/42')
def test_complex_routing_rules(self):
m = r.Map([
r.Rule('/', endpoint='index'),
r.Rule('/<int:blub>', endpoint='an_int'),
r.Rule('/<blub>', endpoint='a_string'),
r.Rule('/foo/', endpoint='nested'),
r.Rule('/foobar/', endpoint='nestedbar'),
r.Rule('/foo/<path:testing>/', endpoint='nested_show'),
r.Rule('/foo/<path:testing>/edit', endpoint='nested_edit'),
r.Rule('/users/', endpoint='users', defaults={'page': 1}),
r.Rule('/users/page/<int:page>', endpoint='users'),
r.Rule('/foox', endpoint='foox'),
r.Rule('/<path:bar>/<path:blub>', endpoint='barx_path_path')
])
a = m.bind('example.com')
assert a.match('/') == ('index', {})
assert a.match('/42') == ('an_int', {'blub': 42})
assert a.match('/blub') == ('a_string', {'blub': 'blub'})
assert a.match('/foo/') == ('nested', {})
assert a.match('/foobar/') == ('nestedbar', {})
assert a.match('/foo/1/2/3/') == ('nested_show', {'testing': '1/2/3'})
assert a.match('/foo/1/2/3/edit') == ('nested_edit', {'testing': '1/2/3'})
assert a.match('/users/') == ('users', {'page': 1})
assert a.match('/users/page/2') == ('users', {'page': 2})
assert a.match('/foox') == ('foox', {})
assert a.match('/1/2/3') == ('barx_path_path', {'bar': '1', 'blub': '2/3'})
assert a.build('index') == '/'
assert a.build('an_int', {'blub': 42}) == '/42'
assert a.build('a_string', {'blub': 'test'}) == '/test'
assert a.build('nested') == '/foo/'
assert a.build('nestedbar') == '/foobar/'
assert a.build('nested_show', {'testing': '1/2/3'}) == '/foo/1/2/3/'
assert a.build('nested_edit', {'testing': '1/2/3'}) == '/foo/1/2/3/edit'
assert a.build('users', {'page': 1}) == '/users/'
assert a.build('users', {'page': 2}) == '/users/page/2'
assert a.build('foox') == '/foox'
assert a.build('barx_path_path', {'bar': '1', 'blub': '2/3'}) == '/1/2/3'
def test_default_converters(self):
class MyMap(r.Map):
default_converters = r.Map.default_converters.copy()
default_converters['foo'] = r.UnicodeConverter
assert isinstance(r.Map.default_converters, ImmutableDict)
m = MyMap([
r.Rule('/a/<foo:a>', endpoint='a'),
r.Rule('/b/<foo:b>', endpoint='b'),
r.Rule('/c/<c>', endpoint='c')
], converters={'bar': r.UnicodeConverter})
a = m.bind('example.org', '/')
assert a.match('/a/1') == ('a', {'a': '1'})
assert a.match('/b/2') == ('b', {'b': '2'})
assert a.match('/c/3') == ('c', {'c': '3'})
assert 'foo' not in r.Map.default_converters
def test_build_append_unknown(self):
map = r.Map([
r.Rule('/bar/<float:bazf>', endpoint='barf')
])
adapter = map.bind('example.org', '/', subdomain='blah')
assert adapter.build('barf', {'bazf': 0.815, 'bif' : 1.0}) == \
'http://example.org/bar/0.815?bif=1.0'
assert adapter.build('barf', {'bazf': 0.815, 'bif' : 1.0},
append_unknown=False) == 'http://example.org/bar/0.815'
def test_method_fallback(self):
map = r.Map([
r.Rule('/', endpoint='index', methods=['GET']),
r.Rule('/<name>', endpoint='hello_name', methods=['GET']),
r.Rule('/select', endpoint='hello_select', methods=['POST']),
r.Rule('/search_get', endpoint='search', methods=['GET']),
r.Rule('/search_post', endpoint='search', methods=['POST'])
])
adapter = map.bind('example.com')
assert adapter.build('index') == '/'
assert adapter.build('index', method='GET') == '/'
assert adapter.build('hello_name', {'name': 'foo'}) == '/foo'
assert adapter.build('hello_select') == '/select'
assert adapter.build('hello_select', method='POST') == '/select'
assert adapter.build('search') == '/search_get'
assert adapter.build('search', method='GET') == '/search_get'
assert adapter.build('search', method='POST') == '/search_post'
def test_implicit_head(self):
url_map = r.Map([
r.Rule('/get', methods=['GET'], endpoint='a'),
r.Rule('/post', methods=['POST'], endpoint='b')
])
adapter = url_map.bind('example.org')
assert adapter.match('/get', method='HEAD') == ('a', {})
self.assert_raises(r.MethodNotAllowed, adapter.match,
'/post', method='HEAD')
def test_protocol_joining_bug(self):
m = r.Map([r.Rule('/<foo>', endpoint='x')])
a = m.bind('example.org')
assert a.build('x', {'foo': 'x:y'}) == '/x:y'
assert a.build('x', {'foo': 'x:y'}, force_external=True) == \
'http://example.org/x:y'
def test_allowed_methods_querying(self):
m = r.Map([r.Rule('/<foo>', methods=['GET', 'HEAD']),
r.Rule('/foo', methods=['POST'])])
a = m.bind('example.org')
assert sorted(a.allowed_methods('/foo')) == ['GET', 'HEAD', 'POST']
def test_external_building_with_port(self):
map = r.Map([
r.Rule('/', endpoint='index'),
])
adapter = map.bind('example.org:5000', '/')
built_url = adapter.build('index', {}, force_external=True)
assert built_url == 'http://example.org:5000/', built_url
def test_external_building_with_port_bind_to_environ(self):
map = r.Map([
r.Rule('/', endpoint='index'),
])
adapter = map.bind_to_environ(
create_environ('/', 'http://example.org:5000/'),
server_name="example.org:5000"
)
built_url = adapter.build('index', {}, force_external=True)
assert built_url == 'http://example.org:5000/', built_url
def test_external_building_with_port_bind_to_environ_wrong_servername(self):
map = r.Map([
r.Rule('/', endpoint='index'),
])
environ = create_environ('/', 'http://example.org:5000/')
adapter = map.bind_to_environ(environ, server_name="example.org")
assert adapter.subdomain == '<invalid>'
def test_converter_parser(self):
args, kwargs = r.parse_converter_args(u'test, a=1, b=3.0')
assert args == ('test',)
assert kwargs == {'a': 1, 'b': 3.0 }
args, kwargs = r.parse_converter_args('')
assert not args and not kwargs
args, kwargs = r.parse_converter_args('a, b, c,')
assert args == ('a', 'b', 'c')
assert not kwargs
args, kwargs = r.parse_converter_args('True, False, None')
assert args == (True, False, None)
args, kwargs = r.parse_converter_args('"foo", u"bar"')
assert args == ('foo', 'bar')
def test_alias_redirects(self):
m = r.Map([
r.Rule('/', endpoint='index'),
r.Rule('/index.html', endpoint='index', alias=True),
r.Rule('/users/', defaults={'page': 1}, endpoint='users'),
r.Rule('/users/index.html', defaults={'page': 1}, alias=True,
endpoint='users'),
r.Rule('/users/page/<int:page>', endpoint='users'),
r.Rule('/users/page-<int:page>.html', alias=True, endpoint='users'),
])
a = m.bind('example.com')
def ensure_redirect(path, new_url, args=None):
try:
a.match(path, query_args=args)
except r.RequestRedirect as e:
assert e.new_url == 'http://example.com' + new_url
else:
assert False, 'expected redirect'
ensure_redirect('/index.html', '/')
ensure_redirect('/users/index.html', '/users/')
ensure_redirect('/users/page-2.html', '/users/page/2')
ensure_redirect('/users/page-1.html', '/users/')
ensure_redirect('/users/page-1.html', '/users/?foo=bar', {'foo': 'bar'})
assert a.build('index') == '/'
assert a.build('users', {'page': 1}) == '/users/'
assert a.build('users', {'page': 2}) == '/users/page/2'
def test_double_defaults(self):
for prefix in '', '/aaa':
m = r.Map([
r.Rule(prefix + '/', defaults={'foo': 1, 'bar': False}, endpoint='x'),
r.Rule(prefix + '/<int:foo>', defaults={'bar': False}, endpoint='x'),
r.Rule(prefix + '/bar/', defaults={'foo': 1, 'bar': True}, endpoint='x'),
r.Rule(prefix + '/bar/<int:foo>', defaults={'bar': True}, endpoint='x')
])
a = m.bind('example.com')
assert a.match(prefix + '/') == ('x', {'foo': 1, 'bar': False})
assert a.match(prefix + '/2') == ('x', {'foo': 2, 'bar': False})
assert a.match(prefix + '/bar/') == ('x', {'foo': 1, 'bar': True})
assert a.match(prefix + '/bar/2') == ('x', {'foo': 2, 'bar': True})
assert a.build('x', {'foo': 1, 'bar': False}) == prefix + '/'
assert a.build('x', {'foo': 2, 'bar': False}) == prefix + '/2'
assert a.build('x', {'bar': False}) == prefix + '/'
assert a.build('x', {'foo': 1, 'bar': True}) == prefix + '/bar/'
assert a.build('x', {'foo': 2, 'bar': True}) == prefix + '/bar/2'
assert a.build('x', {'bar': True}) == prefix + '/bar/'
def test_host_matching(self):
m = r.Map([
r.Rule('/', endpoint='index', host='www.<domain>'),
r.Rule('/', endpoint='files', host='files.<domain>'),
r.Rule('/foo/', defaults={'page': 1}, host='www.<domain>', endpoint='x'),
r.Rule('/<int:page>', host='files.<domain>', endpoint='x')
], host_matching=True)
a = m.bind('www.example.com')
assert a.match('/') == ('index', {'domain': 'example.com'})
assert a.match('/foo/') == ('x', {'domain': 'example.com', 'page': 1})
try:
a.match('/foo')
except r.RequestRedirect as e:
assert e.new_url == 'http://www.example.com/foo/'
else:
assert False, 'expected redirect'
a = m.bind('files.example.com')
assert a.match('/') == ('files', {'domain': 'example.com'})
assert a.match('/2') == ('x', {'domain': 'example.com', 'page': 2})
try:
a.match('/1')
except r.RequestRedirect as e:
assert e.new_url == 'http://www.example.com/foo/'
else:
assert False, 'expected redirect'
def test_server_name_casing(self):
m = r.Map([
r.Rule('/', endpoint='index', subdomain='foo')
])
env = create_environ()
env['SERVER_NAME'] = env['HTTP_HOST'] = 'FOO.EXAMPLE.COM'
a = m.bind_to_environ(env, server_name='example.com')
assert a.match('/') == ('index', {})
env = create_environ()
env['SERVER_NAME'] = '127.0.0.1'
env['SERVER_PORT'] = '5000'
del env['HTTP_HOST']
a = m.bind_to_environ(env, server_name='example.com')
try:
a.match()
except r.NotFound:
pass
else:
assert False, 'Expected not found exception'
def test_redirect_request_exception_code(self):
exc = r.RequestRedirect('http://www.google.com/')
exc.code = 307
env = create_environ()
self.assert_strict_equal(exc.get_response(env).status_code, exc.code)
def test_redirect_path_quoting(self):
url_map = r.Map([
r.Rule('/<category>', defaults={'page': 1}, endpoint='category'),
r.Rule('/<category>/page/<int:page>', endpoint='category')
])
adapter = url_map.bind('example.com')
try:
adapter.match('/foo bar/page/1')
except r.RequestRedirect as e:
response = e.get_response({})
self.assert_strict_equal(response.headers['location'],
u'http://example.com/foo%20bar')
else:
self.fail('Expected redirect')
def test_unicode_rules(self):
m = r.Map([
r.Rule(u'/войти/', endpoint='enter'),
r.Rule(u'/foo+bar/', endpoint='foobar')
])
a = m.bind(u'☃.example.com')
try:
a.match(u'/войти')
except r.RequestRedirect as e:
self.assert_strict_equal(e.new_url, 'http://xn--n3h.example.com/'
'%D0%B2%D0%BE%D0%B9%D1%82%D0%B8/')
endpoint, values = a.match(u'/войти/')
self.assert_strict_equal(endpoint, 'enter')
self.assert_strict_equal(values, {})
try:
a.match(u'/foo+bar')
except r.RequestRedirect as e:
self.assert_strict_equal(e.new_url, 'http://xn--n3h.example.com/'
'foo+bar/')
endpoint, values = a.match(u'/foo+bar/')
self.assert_strict_equal(endpoint, 'foobar')
self.assert_strict_equal(values, {})
url = a.build('enter', {}, force_external=True)
self.assert_strict_equal(url, 'http://xn--n3h.example.com/%D0%B2%D0%BE%D0%B9%D1%82%D0%B8/')
url = a.build('foobar', {}, force_external=True)
self.assert_strict_equal(url, 'http://xn--n3h.example.com/foo+bar/')
def test_map_repr(self):
m = r.Map([
r.Rule(u'/wat', endpoint='enter'),
r.Rule(u'/woop', endpoint='foobar')
])
rv = repr(m)
self.assert_strict_equal(rv,
"Map([<Rule '/woop' -> foobar>, <Rule '/wat' -> enter>])")
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(RoutingTestCase))
return suite
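# Not part of the original module: a convenience entry point, assuming this
# file is run directly rather than through the package's test collector.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')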
|
weaver-viii/h2o-3
|
refs/heads/master
|
h2o-py/tests/testdir_algos/gbm/pyunit_irisGBM.py
|
3
|
import sys
sys.path.insert(1, "../../../")
import h2o
######################################################
#
# Sample Running GBM on iris_wheader.csv
def irisGBM(ip,port):
# Connect to a pre-existing cluster
# connect to localhost:54321
# Import training data
train = h2o.import_file(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
train.describe()
# Run GBM
my_gbm = h2o.gbm( y=train["class"],
validation_y=train["class"],
x=train[1:4],
validation_x=train[1:4],
ntrees=50,
learn_rate=0.1,
distribution="multinomial")
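    # For reference (standard GBM semantics, not specific to this script):
    # ntrees=50 grows 50 boosting iterations, learn_rate=0.1 shrinks each
    # tree's contribution, and distribution="multinomial" handles the
    # three-class iris target.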
my_gbm.show()
my_gbm_metrics = my_gbm.model_performance(train)
my_gbm_metrics.show()
my_gbm_metrics #.show(criterion=my_gbm_metrics.theCriteria.PRECISION)
if __name__ == "__main__":
h2o.run_test(sys.argv, irisGBM)
|
FatBumbleee/namebench
|
refs/heads/master
|
nb_third_party/dns/rrset.py
|
215
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS RRsets (an RRset is a named rdataset)"""
import dns.name
import dns.rdata
import dns.rdataclass
import dns.rdataset
import dns.rdatatype
import dns.renderer
class RRset(dns.rdataset.Rdataset):
"""A DNS RRset (named rdataset).
RRset inherits from Rdataset, and RRsets can be treated as
Rdatasets in most cases. There are, however, a few notable
exceptions. RRsets have different to_wire() and to_text() method
arguments, reflecting the fact that RRsets always have an owner
name.
"""
__slots__ = ['name', 'deleting']
def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE,
deleting=None):
"""Create a new RRset."""
super(RRset, self).__init__(rdclass, rdtype)
self.name = name
self.deleting = deleting
def _clone(self):
obj = super(RRset, self)._clone()
obj.name = self.name
obj.deleting = self.deleting
return obj
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        if self.deleting is not None:
dtext = ' delete=' + dns.rdataclass.to_text(self.deleting)
else:
dtext = ''
return '<DNS ' + str(self.name) + ' ' + \
dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
"""Two RRsets are equal if they have the same name and the same
rdataset
@rtype: bool"""
if not isinstance(other, RRset):
return False
if self.name != other.name:
return False
return super(RRset, self).__eq__(other)
def match(self, name, rdclass, rdtype, covers, deleting=None):
"""Returns True if this rrset matches the specified class, type,
covers, and deletion state."""
if not super(RRset, self).match(rdclass, rdtype, covers):
return False
if self.name != name or self.deleting != deleting:
return False
return True
def to_text(self, origin=None, relativize=True, **kw):
"""Convert the RRset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
        @param relativize: True if names should be relativized
@type relativize: bool"""
return super(RRset, self).to_text(self.name, origin, relativize,
self.deleting, **kw)
def to_wire(self, file, compress=None, origin=None, **kw):
"""Convert the RRset to wire format."""
return super(RRset, self).to_wire(self.name, file, compress, origin,
self.deleting, **kw)
def to_rdataset(self):
"""Convert an RRset into an Rdataset.
@rtype: dns.rdataset.Rdataset object
"""
return dns.rdataset.from_rdata_list(self.ttl, list(self))
def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
"""Create an RRset with the specified name, TTL, class, and type, and with
the specified list of rdatas in text format.
@rtype: dns.rrset.RRset object
"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
if isinstance(rdclass, str):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
r = RRset(name, rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
"""Create an RRset with the specified name, TTL, class, and type and with
the specified rdatas in text format.
@rtype: dns.rrset.RRset object
"""
return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)
def from_rdata_list(name, ttl, rdatas):
"""Create an RRset with the specified name and TTL, and with
the specified list of rdata objects.
@rtype: dns.rrset.RRset object
"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
if len(rdatas) == 0:
raise ValueError("rdata list must not be empty")
r = None
for rd in rdatas:
if r is None:
r = RRset(name, rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
r.add(rd)
return r
def from_rdata(name, ttl, *rdatas):
"""Create an RRset with the specified name and TTL, and with
the specified rdata objects.
@rtype: dns.rrset.RRset object
"""
return from_rdata_list(name, ttl, rdatas)
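# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, with made-up record data:
#
#   rrs = from_text('www.example.', 300, 'IN', 'A', '10.0.0.1', '10.0.0.2')
#   assert len(rrs) == 2 and rrs.ttl == 300
#   assert rrs.match(dns.name.from_text('www.example.'),
#                    dns.rdataclass.IN, dns.rdatatype.A,
#                    dns.rdatatype.NONE)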
|
herilalaina/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/logistic.py
|
2
|
"""
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
# Arthur Mensch <arthur.mensch@m4x.org>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from scipy.special import expit
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (log_logistic, safe_sparse_dot, softmax,
squared_norm)
from ..utils.extmath import row_norms
from ..utils.fixes import logsumexp
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import NotFittedError
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import get_scorer
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray, shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Unchanged.
yz : float
y * np.dot(X, w).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
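# With labels y in {-1, +1}, yz_i = y_i * (x_i . w + c) is positive exactly
# when sample i is correctly classified; the losses below are functions of yz.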
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
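    # z0_i = s_i * (sigma(yz_i) - 1) * y_i is dL_i/dz_i, so the data term of
    # the gradient is simply X^T z0 (and z0.sum() for the intercept).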
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
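    # The Hessian of the L2-regularized logistic loss is
    #   H = X^T diag(d) X + alpha * I,  d_i = s_i * z_i * (1 - z_i),
    # (plus intercept rows when fitted); Hs below returns H @ s without
    # materializing H.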
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
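    # p now holds log-softmax scores; subtracting logsumexp keeps the
    # exponentiation below numerically stable.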
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)),
dtype=X.dtype)
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
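    # Concretely, for a direction v the code below forms
    #   r = s * P * ((X v + b_v) - rowsum(P * (X v + b_v)))
    # and returns X^T r + alpha * v (plus intercept rows when fitted),
    # i.e. a Hessian-vector product without materializing the Hessian.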
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga']:
raise ValueError("Logistic Regression supports only liblinear, "
"newton-cg, lbfgs, sag and saga solvers, got %s"
% solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver not in ['liblinear', 'saga']:
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if solver != 'liblinear':
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs',
        'newton-cg', 'sag' and 'saga' solvers.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver not in ['sag', 'saga']:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
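            # (LabelBinarizer yields a single column for binary targets; the
            # hstack above expands it to an explicit two-column one-hot.)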
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except KeyError:
# older scipy versions do not report 'nit'
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
if penalty == 'l1':
alpha = 0.
beta = 1. / C
else:
alpha = 1. / C
beta = 0.
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
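# A minimal, hypothetical sketch of how the path function above is typically
# called (doctest style, not executed on import; the toy X/y are illustrative
# only):
#
#     >>> import numpy as np
#     >>> X = np.array([[0.], [1.], [2.], [3.]])
#     >>> y = np.array([0, 0, 1, 1])
#     >>> coefs, Cs, n_iter = logistic_regression_path(
#     ...     X, y, Cs=[0.1, 1., 10.], solver='lbfgs')
#     >>> len(coefs) == len(Cs) == len(n_iter) == 3
#     True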
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs is used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Does not work for
liblinear solver.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
scores = list()
if isinstance(scoring, six.string_types):
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
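# A hedged sketch of how the scoring helper above is driven for one CV fold
# (hypothetical train/test indices, reusing placeholder X/y; not executed on
# import):
#
#     >>> train, test = np.arange(80), np.arange(80, 100)
#     >>> coefs, Cs, scores, n_iter = _log_reg_scoring_path(
#     ...     X, y, train, test, Cs=[1.0], solver='lbfgs')
#     >>> scores.shape
#     (1,)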
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
.. versionadded:: 0.19
l1 penalty with SAGA solver (allowing 'multinomial' + L1)
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
tol : float, default: 1e-4
Tolerance for stopping criteria.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
random_state : int, RandomState instance or None, optional, default: None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'},
default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty, whereas
'liblinear' and 'saga' handle L1 penalty.
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can
preprocess the data with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Does not work for liblinear
solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
n_jobs : int, default: 1
Number of CPU cores used when parallelizing over classes if
multi_class='ovr'". This parameter is ignored when the ``solver``is set
to 'liblinear' regardless of whether 'multi_class' is specified or
not. If given a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
LogisticRegressionCV : Logistic regression with built-in cross validation
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if self.solver in ['newton-cg']:
_dtype = [np.float64, np.float32]
else:
_dtype = np.float64
X, y = check_X_y(X, y, accept_sparse='csr', dtype=_dtype,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
if self.n_jobs != 1:
warnings.warn("'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = {}.".format(self.n_jobs))
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ['sag', 'saga']:
backend = 'threading'
else:
backend = 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
penalty=self.penalty,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e. calculate the probability
of each class assuming it to be positive using the logistic function,
and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
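# A minimal usage sketch for the estimator above (hypothetical data; the
# predicted values shown are illustrative, not a guaranteed output):
#
#     >>> import numpy as np
#     >>> X = np.array([[0.], [1.], [2.], [3.]])
#     >>> y = np.array([0, 0, 1, 1])
#     >>> clf = LogisticRegression(C=1.0, solver='liblinear').fit(X, y)
#     >>> clf.predict(X[:1])
#     array([0])
#     >>> clf.predict_proba(X[:1]).shape
#     (1, 2)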
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path i.e guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
scoring : string, callable, or None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'},
default: 'lbfgs'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty, whereas
'liblinear' and 'saga' handle L1 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can preprocess the data
with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag', 'saga' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
random_state : int, RandomState instance or None, optional, default None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since the score is computed from the shared multinomial loss.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
`C_` is of shape(n_classes,) when the problem is binary.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = dict((label_encoder.transform([cls])[0], v)
for cls, v in class_weight.items())
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
if self.solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if self.multi_class == 'multinomial':
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(class_weight,
np.arange(len(self.classes_)),
y)
class_weight = dict(enumerate(class_weight))
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ['sag', 'saga']:
backend = 'threading'
else:
backend = 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_encoded_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)):
if self.multi_class == 'ovr':
# The scores_ / coefs_paths_ dict have unencoded class
# labels as their keys
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = logistic_regression_path(
X, y, pos_class=encoded_label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
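# A hedged usage sketch for the CV estimator above (X, y stand for some
# training data with a binary target; grid and fold count are illustrative):
#
#     >>> clf = LogisticRegressionCV(Cs=[0.1, 1., 10.], cv=3).fit(X, y)
#     >>> clf.C_.shape    # one best C per class (a single entry if binary)
#     (1,)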
|
jrief/django-filer
|
refs/heads/serialize-payload
|
filer/utils/files.py
|
10
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.utils.text import get_valid_filename as get_valid_filename_django
from django.template.defaultfilters import slugify
from django.core.files.uploadedfile import SimpleUploadedFile
class UploadException(Exception):
pass
def handle_upload(request):
if not request.method == "POST":
raise UploadException("AJAX request not valid: must be POST")
if request.is_ajax():
# the file is stored raw in the request
is_raw = True
filename = request.GET.get('qqfile', False) or request.GET.get('filename', False) or ''
upload = SimpleUploadedFile(name=filename, content=request.body)
else:
if len(request.FILES) == 1:
# FILES is a dictionary in Django but Ajax Upload gives the uploaded file an
# ID based on a random number, so it cannot be guessed here in the code.
# Rather than editing Ajax Upload to pass the ID in the querystring, note that
# each upload is a separate request so FILES should only have one entry.
# Thus, we can just grab the first (and only) value in the dict.
is_raw = False
upload = list(request.FILES.values())[0]
filename = upload.name
else:
raise UploadException("AJAX request not valid: Bad Upload")
return upload, filename, is_raw
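# In a Django upload view this helper is typically used along these lines
# (hypothetical sketch):
#
#     upload, filename, is_raw = handle_upload(request)
#     # ... then persist `upload` under a sanitized `filename`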
def get_valid_filename(s):
"""
like the regular get_valid_filename, but also slugifies away
umlauts and stuff.
"""
s = get_valid_filename_django(s)
filename, ext = os.path.splitext(s)
filename = slugify(filename)
ext = slugify(ext)
if ext:
return "%s.%s" % (filename, ext)
else:
return "%s" % (filename,)
|
TeamWin/kernel_samsung_lt02ltetmo
|
refs/heads/android-4.4
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
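# This script is normally driven by the perf build system, which exports the
# two build directories before invoking it, roughly as follows (hypothetical
# paths; the exact Makefile rule may differ):
#
#     PYTHON_EXTBUILD_LIB=/path/to/python_ext_build/lib \
#     PYTHON_EXTBUILD_TMP=/path/to/python_ext_build/tmp \
#         python2 util/setup.py --quiet build_ext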
|
talishte/ctigre
|
refs/heads/master
|
env/lib/python2.7/site-packages/mezzanine/pages/defaults.py
|
56
|
"""
Default settings for the ``mezzanine.pages`` app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate - for example settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import register_setting
register_setting(
name="ADD_PAGE_ORDER",
description=_("A sequence of ``Page`` subclasses in the format "
"``app_label.model_name``, that controls the ordering of items "
"in the select drop-down for adding new pages within the admin "
"page tree interface."),
editable=False,
default=("pages.RichTextPage",),
)
register_setting(
name="PAGE_MENU_TEMPLATES",
description=_("A sequence of templates used by the ``page_menu`` "
"template tag. Each item in the sequence is a three item sequence, "
"containing a unique ID for the template, a label for the template, "
"and the template path. These templates are then available for "
"selection when editing which menus a page should appear in. Note "
"that if a menu template is used that doesn't appear in this "
"setting, all pages will appear in it."),
editable=False,
default=(
(1, _("Top navigation bar"), "pages/menus/dropdown.html"),
(2, _("Left-hand tree"), "pages/menus/tree.html"),
(3, _("Footer"), "pages/menus/footer.html"),
),
)
register_setting(
name="PAGE_MENU_TEMPLATES_DEFAULT",
description=_("A sequence of IDs from the ``PAGE_MENU_TEMPLATES`` "
"setting that defines the default menu templates selected when "
"creating new pages. By default all menu templates are selected. "
"Set this setting to an empty sequence to have no templates "
"selected by default."),
editable=False,
default=None,
)
register_setting(
name="PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED",
description=_("If ``True``, pages with ``login_required`` checked will "
"still be listed in menus and search results, for unauthenticated "
"users. Regardless of this setting, when an unauthenticated user "
"accesses a page with ``login_required`` checked, they'll be "
"redirected to the login page."),
editable=False,
default=False,
)
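# Any of the settings above can be overridden in a project's settings module,
# e.g. (hypothetical project settings):
#
#     PAGE_MENU_TEMPLATES = (
#         (1, "Top navigation bar", "pages/menus/dropdown.html"),
#     )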
|
Donkyhotay/MoonPy
|
refs/heads/master
|
zope/publisher/tests/basetestiapplicationrequest.py
|
1
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""IApplicationRequest Base Test
$Id: basetestiapplicationrequest.py 38967 2005-10-08 16:27:57Z torsti $
"""
from zope.interface.verify import verifyObject
from zope.publisher.interfaces import IApplicationRequest
from zope.interface.common.tests.basemapping import BaseTestIEnumerableMapping
from zope.interface.common.tests.basemapping import testIReadMapping
class BaseTestIApplicationRequest(BaseTestIEnumerableMapping):
def testVerifyIApplicationRequest(self):
verifyObject(IApplicationRequest, self._Test__new())
def testHaveCustomTestsForIApplicationRequest(self):
# Make sure that tests are defined for things we can't test here
self.test_IApplicationRequest_bodyStream
def testEnvironment(self):
request = self._Test__new(foo='Foo', bar='Bar')
try:
request.environment = {}
except AttributeError:
pass
else:
raise "Shouldn't be able to set environment"
environment = request.environment
testIReadMapping(self, environment,
{'foo': 'Foo', 'bar': 'Bar'},
['splat'])
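# Concrete request test cases mix this base class in and supply the
# name-mangled factory it calls, roughly as follows (hypothetical sketch):
#
#     class TestMyRequest(BaseTestIApplicationRequest, unittest.TestCase):
#         def _Test__new(self, **kw):
#             return MyRequest(kw)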
|
elsigh/browserscope
|
refs/heads/master
|
third_party/mox/mox.py
|
9
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import inspect
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
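# The three module-level helpers above mirror Mox.ReplayAll/VerifyAll/ResetAll
# for mocks created directly, e.g. (hypothetical sketch, not executed on
# import):
#
#     >>> mock = MockAnything()
#     >>> _ = mock.Foo().AndReturn(42)
#     >>> Replay(mock)
#     >>> mock.Foo()
#     42
#     >>> Verify(mock)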
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __str__(self):
return "<MockAnything instance at %s>" % id(self)
def __repr__(self):
return self.__str__()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method_name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name, method_to_mock=None):
"""Create a new mock method call and return it.
Args:
# method_name: the name of the method being called.
# method_to_mock: The actual method being mocked, used for introspection.
method_name: str
method_to_mock: a method object
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode, method_to_mock=method_to_mock)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(
name,
method_to_mock=getattr(self._class_to_mock, name))
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
# Verify the class is subscriptable.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __contains__(self, key):
"""Provide custom logic for mocking classes that contain items.
Args:
key: Key to look in container for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__contains__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not implement __contains__
UnexpectedMethodCallError if the object does not expect the call to
__contains__.
"""
contains = self._class_to_mock.__dict__.get('__contains__', None)
if contains is None:
raise TypeError('object does not support membership testing')
if self._replay_mode:
return MockMethod('__contains__', self._expected_calls_queue,
self._replay_mode)(key)
return self._CreateMockMethod('__contains__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
callable = self._class_to_mock.__dict__.get('__call__', None)
if callable is None:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
class MethodCallChecker(object):
"""Ensures that methods are called correctly."""
_NEEDED, _DEFAULT, _GIVEN = range(3)
def __init__(self, method):
"""Creates a checker.
Args:
# method: A method to check.
method: function
Raises:
ValueError: method could not be inspected, so checks aren't possible.
Some methods and functions like built-ins can't be inspected.
"""
try:
self._args, varargs, varkw, defaults = inspect.getargspec(method)
except TypeError:
raise ValueError('Could not get argument specification for %r'
% (method,))
if inspect.ismethod(method):
self._args = self._args[1:] # Skip 'self'.
self._method = method
self._has_varargs = varargs is not None
self._has_varkw = varkw is not None
if defaults is None:
self._required_args = self._args
self._default_args = []
else:
self._required_args = self._args[:-len(defaults)]
self._default_args = self._args[-len(defaults):]
def _RecordArgumentGiven(self, arg_name, arg_status):
"""Mark an argument as being given.
Args:
# arg_name: The name of the argument to mark in arg_status.
# arg_status: Maps argument names to one of _NEEDED, _DEFAULT, _GIVEN.
arg_name: string
arg_status: dict
Raises:
AttributeError: arg_name is already marked as _GIVEN.
"""
if arg_status.get(arg_name, None) == MethodCallChecker._GIVEN:
raise AttributeError('%s provided more than once' % (arg_name,))
arg_status[arg_name] = MethodCallChecker._GIVEN
def Check(self, params, named_params):
"""Ensures that the parameters used while recording a call are valid.
Args:
# params: A list of positional parameters.
# named_params: A dict of named parameters.
params: list
named_params: dict
Raises:
AttributeError: the given parameters don't work with the given method.
"""
arg_status = dict((a, MethodCallChecker._NEEDED)
for a in self._required_args)
for arg in self._default_args:
arg_status[arg] = MethodCallChecker._DEFAULT
# Check that each positional param is valid.
for i in range(len(params)):
try:
arg_name = self._args[i]
except IndexError:
if not self._has_varargs:
raise AttributeError('%s does not take %d or more positional '
'arguments' % (self._method.__name__, i))
else:
self._RecordArgumentGiven(arg_name, arg_status)
# Check each keyword argument.
for arg_name in named_params:
if arg_name not in arg_status and not self._has_varkw:
raise AttributeError('%s is not expecting keyword argument %s'
% (self._method.__name__, arg_name))
self._RecordArgumentGiven(arg_name, arg_status)
# Ensure all the required arguments have been given.
still_needed = [k for k, v in arg_status.iteritems()
if v == MethodCallChecker._NEEDED]
if still_needed:
raise AttributeError('No values given for arguments %s'
% (' '.join(sorted(still_needed))))
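# Illustrative behaviour sketch (not part of the original module): for a
# method defined as def f(self, a, b=1), MethodCallChecker(f).Check(('x',), {})
# passes because 'a' is given and 'b' has a default, while Check((), {})
# raises AttributeError since the required argument 'a' was never given.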
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode, method_to_mock=None):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
# method_to_mock: The actual method being mocked, used for introspection.
method_name: str
call_queue: list or deque
replay_mode: bool
method_to_mock: a method object
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
try:
self._checker = MethodCallChecker(method_to_mock)
except ValueError:
self._checker = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
if self._checker is not None:
self._checker.Check(params, named_params)
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
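# Illustrative usage sketch (m, mock_dao and Dao are hypothetical names):
#   mock_dao = m.CreateMock(Dao)
#   mock_dao.Insert('a').InAnyOrder()
#   mock_dao.Insert('b').InAnyOrder()
#   m.ReplayAll()
#   mock_dao.Insert('b')  # succeeds even though 'a' was recorded first
#   mock_dao.Insert('a')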
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
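# Illustrative usage sketch (m and mock_conn are hypothetical names):
#   mock_conn.Read().MultipleTimes().AndReturn('chunk')
#   m.ReplayAll()
#   mock_conn.Read()  # may now be called any number of times >= 1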
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
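# Illustrative usage sketch: the side-effect callable receives the same
# arguments as the mocked call and can mutate them, e.g.:
#   def fill(results):
#     results.append('row')
#   mock_dao.Fetch(IsA(list)).WithSideEffects(fill).AndReturn(True)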
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError('method must be implemented by a subclass.')
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example: mock_dao.SetTimeout(IsAlmost(3.9))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
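Example:
mock_dao.RunQuery(Regex(r'SELECT \* FROM \w+')).AndReturn(mock_result)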
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is any thing that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class Not(Comparator):
"""Checks whether a predicates is False.
Example:
mock_dao.UpdateUsers(Not(ContainsKeyValue('stevepm', stevepm_user_info)))
"""
def __init__(self, predicate):
"""Initialize.
Args:
# predicate: a Comparator instance.
"""
assert isinstance(predicate, Comparator), ("predicate %r must be a"
" Comparator." % predicate)
self._predicate = predicate
def equals(self, rhs):
"""Check to see whether the predicate is False.
Args:
rhs: A value that will be given in argument of the predicate.
Returns:
bool
"""
return not self._predicate.equals(rhs)
def __repr__(self):
return '<not \'%s\'>' % self._predicate
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), True)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
super(MoxTestBase, self).setUp()
self.mox = Mox()
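# Illustrative end-to-end sketch (MyDao and its Query method are
# hypothetical; cleanup and verification run automatically via
# MoxMetaTestBase):
#   class MyTest(MoxTestBase):
#     def test_query(self):
#       mock_dao = self.mox.CreateMock(MyDao)
#       mock_dao.Query(StrContains('SELECT')).AndReturn(['row'])
#       self.mox.ReplayAll()
#       self.assertEqual(['row'], mock_dao.Query('SELECT * FROM t'))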
|
relekang/accio
|
refs/heads/master
|
accio/deployments/apps.py
|
1
|
from django.apps import AppConfig
class DeploymentsConfig(AppConfig):
name = 'accio.deployments'
|
JoelBondurant/RandomCodeSamples
|
refs/heads/master
|
python/routeCounter.py
|
1
|
f = open("routes.txt","rt")
maxLen = 0
for x in f:
rtes = x.split("\t")[2]
a = len(rtes)
b = len(rtes.replace(",",""))
c = a - b
if (c > maxLen):
maxLen = c
print(maxLen)
f.close()
|
jnerin/ansible
|
refs/heads/devel
|
lib/ansible/modules/clustering/k8s/k8s_scale.py
|
12
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_scale
short_description: Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job.
version_added: "2.5"
author: "Chris Houseknecht (@chouseknecht)"
description:
- Similar to the kubectl scale command. Use to set the number of replicas for a Deployment, ReplicaSet,
or Replication Controller, or the parallelism attribute of a Job. Supports check mode.
extends_documentation_fragment:
- k8s_name_options
- k8s_auth_options
- k8s_resource_options
- k8s_scale_options
requirements:
- "python >= 2.7"
- "openshift >= 0.3"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Scale deployment up, and extend timeout
k8s_scale:
api_version: v1
kind: Deployment
name: elastic
namespace: myproject
replicas: 3
wait_timeout: 60
- name: Scale deployment down when current replicas match
k8s_scale:
api_version: v1
kind: Deployment
name: elastic
namespace: myproject
current_replicas: 3
replicas: 2
- name: Increase job parallelism
k8s_scale:
api_version: batch/v1
kind: job
name: pi-with-timeout
namespace: testing
replicas: 2
# Match object using local file or inline definition
- name: Scale deployment based on a file from the local filesystem
k8s_scale:
src: /myproject/elastic_deployment.yml
replicas: 3
wait: no
- name: Scale deployment based on a template output
k8s_scale:
resource_definition: "{{ lookup('template', '/myproject/elastic_deployment.yml') | from_yaml }}"
replicas: 3
wait: no
- name: Scale deployment based on a file from the Ansible controller filesystem
k8s_scale:
resource_definition: "{{ lookup('file', '/myproject/elastic_deployment.yml') | from_yaml }}"
replicas: 3
wait: no
'''
RETURN = '''
result:
description:
- If a change was made, will return the patched object, otherwise returns the existing object.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
'''
from ansible.module_utils.k8s.scale import KubernetesAnsibleScaleModule
def main():
KubernetesAnsibleScaleModule().execute_module()
if __name__ == '__main__':
main()
|
BadlybadGames/RPGame-3.0
|
refs/heads/master
|
src/entity/projectiles.py
|
1
|
import math
import constants
import entity
from cocos import collision_model as cm
from cocos.euclid import Vector2
import game
import util
class Projectile(entity.WorldEntity):
image = "arrow.png"
etype = "projectile"
name = "Projectile"
friendly = False
mask_collision = 0b000
mask_event = 0b101
def __init__(self, **kwargs):
super(Projectile, self).__init__()
def init_physics(self, world):
_ud = {"type": "projectile",
"entity": self.eid,
"mask_collision": self.mask_collision,
"mask_event": self.mask_event,
"friendly": self.friendly} # TODO: keeping a reference to the actual entity might be harmful in multiplayer environment.
self.body = world.CreateDynamicBody(position=self.position.copy(), linearDamping=4.0,
userData=_ud)
self.body.CreateCircleFixture(radius=(float(self.size) / constants.PIXEL_TO_METER) / 2, restitution=0)
def update(self, t):
super(Projectile, self).update(t)
self.duration -= t
if self.duration <= 0:
self.die()
def update_movement(self, t):
super(Projectile, self).update_movement(t)
def update_collision(self):
return cm.CircleShape(center=self.position, r=self.size)
def on_collision(self, other, typ):
if typ == "wall":
self.die()
return
success = False
owner = game.Game.get_entity(self.controlled_by)
if not owner:
return
if other is owner:
return
self.on_hit(other)
def on_hit(self, other):
"""Called when a collision occurs"""
other.take_damage(self.damage)
self.die()
def get_real_owner(self):
return game.Game.get_entity(self.controlled_by)
class MeleeWeaponEntity(Projectile):
"""
Important parameters:
Offset: length from center of player to center of collision detection
Arc: Swing arc
Size: Size used for collision detection
"""
name = "MeleeWeaponEntity"
etype = "Projectile"
def __init__(self, **kwargs):
self.image = "sword.png"
super(MeleeWeaponEntity, self).__init__()
self.controlled_by = None # set after initialization
self._already_hit = []
def init_physics(self, world):
owner = self.get_real_owner()
_ud = {"type": "projectile",
"entity": self.eid,
"mask_collision": self.mask_collision,
"mask_event": self.mask_event,
"friendly": self.friendly} # TODO: keeping a reference to the actual entity might be harmful in multiplayer environment.
self.body = world.CreateDynamicBody(position=self.position.copy(), linearDamping=0.0,
userData=_ud)
t = owner.rotation
fixture = self.body.CreatePolygonFixture(box=(self.width, self.length), density=0.005, friction=0.0)
self.body.angle = t
world.CreateRevoluteJoint(bodyA=self.body, bodyB=owner.body, anchor=owner.body.worldCenter, enableMotor=True,
motorSpeed=self.swing_speed*5000, maxMotorTorque=self.swing_speed*1000)
def _init_sprite(self, sprite): # TODO: Sprite still isn't centered on player
sprite.image_anchor = (sprite.image.width/2, 0) #(sprite.image.width/2, sprite.image.height/2)
def update_sprite(self, t):
wielder = game.Game.get_entity(self.wielder)
s = (wielder.size/2.0)
self.sprite.position = ((self.position.copy() * constants.PIXEL_TO_METER + (s, s)))
self.sprite.rotation = self.body.angle
def update(self, t):
super(MeleeWeaponEntity, self).update(t)
wielder = game.Game.get_entity(self.wielder)
#print(self.joint.angle)
self.rotation = self.body.angle # FIXME: Super dirty
#swing_v = float(self.arc) / self.duration
#self.rotation_off = self.rotation_off + swing_v * t
#self.rotation = wielder.rotation + self.rotation_off
#self.duration_left -= t
#if self.duration_left <= 0:
# self.die()
def update_collision(self):
center = Vector2(*self.position) + util.rot_to_vec(self.rotation) * self.offset
#print center, self.size
return cm.CircleShape(center=center, r=self.size)
def on_collision(self, other, typ):
if typ == "wall":
return
success = False
owner = game.Game.get_entity(self.controlled_by)
if not owner:
return
if other is owner:
return
if not other.eid in self._already_hit:
self._already_hit.append(other.eid)
self.on_hit(other)
def on_hit(self, other):
other.take_damage(self.damage)
entity.new_entity(MeleeWeaponEntity)
entity.new_entity(Projectile)
|
gtema/homeautomation-backend
|
refs/heads/master
|
homeautomation/schemas.py
|
1
|
from marshmallow import fields, ValidationError
from flask_marshmallow import Marshmallow
from .models import User, Role, StockProductCategory, StockProduct,\
StockProductItem
ma = Marshmallow()
def must_not_be_blank(data):
if not data:
raise ValidationError('Data not provided')
class UserSchema(ma.ModelSchema):
'''
User schema
'''
class Meta:
model = User
id = fields.Int(dump_only=True)
username = fields.Str()
class RoleSchema(ma.ModelSchema):
'''
Role schema
'''
class Meta:
model = Role
# Validates for the different fields
id = fields.Integer(dump_only=True)
name = fields.String(validate=must_not_be_blank)
description = fields.String(validate=must_not_be_blank)
class CategorySchema(ma.ModelSchema):
'''
Stock category SER/DE
'''
class Meta:
model = StockProductCategory
id = fields.Int(dump_only=True)
parent_id = fields.Int()
name = fields.Str(required=True)
prio = fields.Int()
class ProductSchema(ma.ModelSchema):
'''
Stock product SER/DE
'''
class Meta:
model = StockProduct
id = fields.Int(dump_only=True)
category_id = fields.Int(required=True, validate=must_not_be_blank)
name = fields.Str(required=True)
volume = fields.Str()
sum_amounts = fields.Boolean()
amount = fields.Int(dump_only=True)
first_started_id = fields.Int(dump_only=True)
first_started_ed = fields.Date(dump_only=True)
class ProductItemSchema(ma.ModelSchema):
'''
Stock product item SER/DE
'''
class Meta:
model = StockProductItem
id = fields.Int(dump_only=True)
product_id = fields.Int(required=True, validate=must_not_be_blank)
is_started = fields.Boolean()
is_disposed = fields.Boolean()
expiry_date = fields.Date()
is_valid = fields.Boolean(dump_only=True)
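# Illustrative usage sketch (`category` is a hypothetical
# StockProductCategory instance; marshmallow 2.x dump() unpacks to
# a (data, errors) pair):
#   schema = CategorySchema()
#   data, errors = schema.dump(category)
#   # `data` is a dict such as {'id': 1, 'parent_id': 0, 'name': '...', 'prio': 0}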
|
lgscofield/odoo
|
refs/heads/8.0
|
openerp/tests/addons/test_translation_import/tests/test_term_count.py
|
323
|
# -*- coding: utf-8 -*-
import openerp
from openerp.tests import common
class TestTermCount(common.TransactionCase):
def test_count_term(self):
"""
Just make sure we have as many translation entries as we wanted.
"""
openerp.tools.trans_load(self.cr, 'test_translation_import/i18n/fr.po', 'fr_FR', verbose=False)
ids = self.registry('ir.translation').search(self.cr, self.uid,
[('src', '=', '1XBUO5PUYH2RYZSA1FTLRYS8SPCNU1UYXMEYMM25ASV7JC2KTJZQESZYRV9L8CGB')])
self.assertEqual(len(ids), 2)
|
clouserw/zamboni
|
refs/heads/master
|
mkt/translations/tests/test_widgets.py
|
19
|
from nose.tools import eq_
from pyquery import PyQuery as pq
import mkt.site.tests
from mkt.translations import models, widgets
class TestWidget(mkt.site.tests.TestCase):
def test_avoid_purified_translation(self):
# Even if we pass in a LinkifiedTranslation the widget switches to a
# normal Translation before rendering.
w = widgets.TransTextarea.widget()
link = models.LinkifiedTranslation(localized_string='<b>yum yum</b>',
locale='fr', id=10)
link.clean()
widget = w.render('name', link)
eq_(pq(widget).html().strip(), '<b>yum yum</b>')
def test_default_locale(self):
w = widgets.TransTextarea()
result = w.render('name', '')
eq_(pq(result)('textarea:not([lang=init])').attr('lang'), 'en-us')
w.default_locale = 'pl'
result = w.render('name', '')
eq_(pq(result)('textarea:not([lang=init])').attr('lang'), 'pl')
|
tiexinliu/odoo_addons
|
refs/heads/8.0
|
smile_scm/tools/osutil.py
|
5
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
class cd:
"""Context manager for changing the current working directory
(http://stackoverflow.com/questions/431684/how-do-i-cd-in-python)"""
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
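# Illustrative usage (a sketch; the path is hypothetical):
#   with cd('/some/path'):
#       ...  # work relative to /some/path
#   # the previous working directory is restored on exit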
|
alogg/dolfin
|
refs/heads/master
|
demo/pde/subdomains-poisson/python/demo_subdomains-poisson.py
|
3
|
# Copyright (C) 2011 Marie E. Rognes
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2011-11-09
# Last changed: 2012-08-06
# Begin demo
from dolfin import *
# Create classes for defining parts of the boundaries and the interior
# of the domain
class Left(SubDomain):
def inside(self, x, on_boundary):
return near(x[0], 0.0)
class Right(SubDomain):
def inside(self, x, on_boundary):
return near(x[0], 1.0)
class Bottom(SubDomain):
def inside(self, x, on_boundary):
return near(x[1], 0.0)
class Top(SubDomain):
def inside(self, x, on_boundary):
return near(x[1], 1.0)
class Obstacle(SubDomain):
def inside(self, x, on_boundary):
return (between(x[1], (0.5, 0.7)) and between(x[0], (0.2, 1.0)))
# Initialize sub-domain instances
left = Left()
top = Top()
right = Right()
bottom = Bottom()
obstacle = Obstacle()
# Define mesh
mesh = UnitSquareMesh(64, 64)
# Initialize mesh function for interior domains
domains = CellFunction("size_t", mesh)
domains.set_all(0)
obstacle.mark(domains, 1)
# Initialize mesh function for boundary domains
boundaries = FacetFunction("size_t", mesh)
boundaries.set_all(0)
left.mark(boundaries, 1)
top.mark(boundaries, 2)
right.mark(boundaries, 3)
bottom.mark(boundaries, 4)
# Define input data
a0 = Constant(1.0)
a1 = Constant(0.01)
g_L = Expression("- 10*exp(- pow(x[1] - 0.5, 2))")
g_R = Constant("1.0")
f = Constant(1.0)
# Define function space and basis functions
V = FunctionSpace(mesh, "CG", 2)
u = TrialFunction(V)
v = TestFunction(V)
# Define Dirichlet boundary conditions at top and bottom boundaries
bcs = [DirichletBC(V, 5.0, boundaries, 2),
DirichletBC(V, 0.0, boundaries, 4)]
# Define new measures associated with the interior domains and
# exterior boundaries
dx = Measure("dx")[domains]
ds = Measure("ds")[boundaries]
# Define variational form
F = (inner(a0*grad(u), grad(v))*dx(0) + inner(a1*grad(u), grad(v))*dx(1)
- g_L*v*ds(1) - g_R*v*ds(3)
- f*v*dx(0) - f*v*dx(1))
# Separate left and right hand sides of equation
a, L = lhs(F), rhs(F)
# Solve problem
u = Function(V)
solve(a == L, u, bcs)
# Evaluate integral of normal gradient over top boundary
n = FacetNormal(mesh)
m1 = dot(grad(u), n)*ds(2)
v1 = assemble(m1)
print "\int grad(u) * n ds(2) = ", v1
# Evaluate integral of u over the obstacle
m2 = u*dx(1)
v2 = assemble(m2)
print "\int u dx(1) = ", v2
# Plot solution and gradient
plot(u, title="u")
plot(grad(u), title="Projected grad(u)")
interactive()
|
robbiet480/home-assistant
|
refs/heads/dev
|
tests/components/ssdp/test_init.py
|
5
|
"""Test the SSDP integration."""
import asyncio
from unittest.mock import Mock, patch
import aiohttp
import pytest
from homeassistant.components import ssdp
from homeassistant.generated import ssdp as gn_ssdp
from tests.common import mock_coro
async def test_scan_match_st(hass):
"""Test matching based on ST."""
scanner = ssdp.Scanner(hass)
with patch(
"netdisco.ssdp.scan", return_value=[Mock(st="mock-st", location=None)]
), patch.dict(gn_ssdp.SSDP, {"mock-domain": [{"st": "mock-st"}]}), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {"source": "ssdp"}
@pytest.mark.parametrize(
"key", (ssdp.ATTR_UPNP_MANUFACTURER, ssdp.ATTR_UPNP_DEVICE_TYPE)
)
async def test_scan_match_upnp_devicedesc(hass, aioclient_mock, key):
"""Test matching based on UPnP device description data."""
aioclient_mock.get(
"http://1.1.1.1",
text=f"""
<root>
<device>
<{key}>Paulus</{key}>
</device>
</root>
""",
)
scanner = ssdp.Scanner(hass)
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1")],
), patch.dict(gn_ssdp.SSDP, {"mock-domain": [{key: "Paulus"}]}), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {"source": "ssdp"}
async def test_scan_not_all_present(hass, aioclient_mock):
"""Test match fails if some specified attributes are not present."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
</device>
</root>
""",
)
scanner = ssdp.Scanner(hass)
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1")],
), patch.dict(
gn_ssdp.SSDP,
{
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Paulus",
}
]
},
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert not mock_init.mock_calls
async def test_scan_not_all_match(hass, aioclient_mock):
"""Test match fails if some specified attribute values differ."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
<manufacturer>Paulus</manufacturer>
</device>
</root>
""",
)
scanner = ssdp.Scanner(hass)
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1")],
), patch.dict(
gn_ssdp.SSDP,
{
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Not-Paulus",
}
]
},
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert not mock_init.mock_calls
@pytest.mark.parametrize("exc", [asyncio.TimeoutError, aiohttp.ClientError])
async def test_scan_description_fetch_fail(hass, aioclient_mock, exc):
"""Test failing to fetch description."""
aioclient_mock.get("http://1.1.1.1", exc=exc)
scanner = ssdp.Scanner(hass)
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1")],
):
await scanner.async_scan(None)
async def test_scan_description_parse_fail(hass, aioclient_mock):
"""Test invalid XML."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>INVALIDXML
""",
)
scanner = ssdp.Scanner(hass)
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1")],
):
await scanner.async_scan(None)
|
Vassy/odoo
|
refs/heads/master
|
addons/purchase/wizard/__init__.py
|
439
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_order_group
import purchase_line_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tadebayo/myedge
|
refs/heads/master
|
myvenv/Lib/site-packages/packaging/requirements.py
|
140
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pyparsing import Literal as L # noqa
from six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
"""
An invalid requirement was found, users should refer to PEP 508.
"""
ALPHANUM = Word(string.ascii_letters + string.digits)
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
NAMED_REQUIREMENT = \
NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
class Requirement(object):
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string):
try:
req = REQUIREMENT.parseString(requirement_string)
except ParseException as e:
raise InvalidRequirement(
"Invalid requirement, parse error at \"{0!r}\"".format(
requirement_string[e.loc:e.loc + 8]))
self.name = req.name
if req.url:
parsed_url = urlparse.urlparse(req.url)
if not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc):
raise InvalidRequirement("Invalid URL given")
self.url = req.url
else:
self.url = None
self.extras = set(req.extras.asList() if req.extras else [])
self.specifier = SpecifierSet(req.specifier)
self.marker = req.marker if req.marker else None
def __str__(self):
parts = [self.name]
if self.extras:
parts.append("[{0}]".format(",".join(sorted(self.extras))))
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append("@ {0}".format(self.url))
if self.marker:
parts.append("; {0}".format(self.marker))
return "".join(parts)
def __repr__(self):
return "<Requirement({0!r})>".format(str(self))
|
ngageoint/scale
|
refs/heads/master
|
scale/metrics/migrations/0009_revert_datetime.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2019-07-25 14:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metrics', '0008_occurred_datetime'),
]
operations = [
migrations.AlterField(
model_name='metricserror',
name='occurred',
field=models.DateField(db_index=True),
),
migrations.AlterField(
model_name='metricsingest',
name='occurred',
field=models.DateField(db_index=True),
),
migrations.AlterField(
model_name='metricsjobtype',
name='occurred',
field=models.DateField(db_index=True),
),
]
|
gibxxi/nzbToMedia
|
refs/heads/master
|
libs/requests/packages/urllib3/fields.py
|
1007
|
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
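# Illustrative sketch: format_header_param('filename', u'r\xe9sum\xe9.txt')
# returns "filename*=utf-8''r%C3%A9sum%C3%A9.txt" via the RFC 2231 path,
# while a plain ASCII value like 'report.txt' is emitted unchanged as
# 'filename="report.txt"'.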
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
|
sunlianqiang/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/buffer_tests.py
|
91
|
# Tests that work for both bytes and buffer objects.
# See PEP 3137.
import struct
import sys
class MixinBytesBufferCommonTests(object):
"""Tests that work for both bytes and buffer objects.
See PEP 3137.
"""
def marshal(self, x):
"""Convert x into the appropriate type for these tests."""
raise RuntimeError('test class must provide a marshal method')
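# Illustrative subclass sketch (an assumption, not from the original
# file): a bytes-based suite would simply provide
#   def marshal(self, x): return bytes(x)
# so every test below exercises the bytes type.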
def test_islower(self):
self.assertFalse(self.marshal(b'').islower())
self.assertTrue(self.marshal(b'a').islower())
self.assertFalse(self.marshal(b'A').islower())
self.assertFalse(self.marshal(b'\n').islower())
self.assertTrue(self.marshal(b'abc').islower())
self.assertFalse(self.marshal(b'aBc').islower())
self.assertTrue(self.marshal(b'abc\n').islower())
self.assertRaises(TypeError, self.marshal(b'abc').islower, 42)
def test_isupper(self):
self.assertFalse(self.marshal(b'').isupper())
self.assertFalse(self.marshal(b'a').isupper())
self.assertTrue(self.marshal(b'A').isupper())
self.assertFalse(self.marshal(b'\n').isupper())
self.assertTrue(self.marshal(b'ABC').isupper())
self.assertFalse(self.marshal(b'AbC').isupper())
self.assertTrue(self.marshal(b'ABC\n').isupper())
self.assertRaises(TypeError, self.marshal(b'abc').isupper, 42)
def test_istitle(self):
self.assertFalse(self.marshal(b'').istitle())
self.assertFalse(self.marshal(b'a').istitle())
self.assertTrue(self.marshal(b'A').istitle())
self.assertFalse(self.marshal(b'\n').istitle())
self.assertTrue(self.marshal(b'A Titlecased Line').istitle())
self.assertTrue(self.marshal(b'A\nTitlecased Line').istitle())
self.assertTrue(self.marshal(b'A Titlecased, Line').istitle())
self.assertFalse(self.marshal(b'Not a capitalized String').istitle())
self.assertFalse(self.marshal(b'Not\ta Titlecase String').istitle())
self.assertFalse(self.marshal(b'Not--a Titlecase String').istitle())
self.assertFalse(self.marshal(b'NOT').istitle())
self.assertRaises(TypeError, self.marshal(b'abc').istitle, 42)
def test_isspace(self):
self.assertFalse(self.marshal(b'').isspace())
self.assertFalse(self.marshal(b'a').isspace())
self.assertTrue(self.marshal(b' ').isspace())
self.assertTrue(self.marshal(b'\t').isspace())
self.assertTrue(self.marshal(b'\r').isspace())
self.assertTrue(self.marshal(b'\n').isspace())
self.assertTrue(self.marshal(b' \t\r\n').isspace())
self.assertFalse(self.marshal(b' \t\r\na').isspace())
self.assertRaises(TypeError, self.marshal(b'abc').isspace, 42)
def test_isalpha(self):
self.assertFalse(self.marshal(b'').isalpha())
self.assertTrue(self.marshal(b'a').isalpha())
self.assertTrue(self.marshal(b'A').isalpha())
self.assertFalse(self.marshal(b'\n').isalpha())
self.assertTrue(self.marshal(b'abc').isalpha())
self.assertFalse(self.marshal(b'aBc123').isalpha())
self.assertFalse(self.marshal(b'abc\n').isalpha())
self.assertRaises(TypeError, self.marshal(b'abc').isalpha, 42)
def test_isalnum(self):
self.assertFalse(self.marshal(b'').isalnum())
self.assertTrue(self.marshal(b'a').isalnum())
self.assertTrue(self.marshal(b'A').isalnum())
self.assertFalse(self.marshal(b'\n').isalnum())
self.assertTrue(self.marshal(b'123abc456').isalnum())
self.assertTrue(self.marshal(b'a1b3c').isalnum())
self.assertFalse(self.marshal(b'aBc000 ').isalnum())
self.assertFalse(self.marshal(b'abc\n').isalnum())
self.assertRaises(TypeError, self.marshal(b'abc').isalnum, 42)
def test_isdigit(self):
self.assertFalse(self.marshal(b'').isdigit())
self.assertFalse(self.marshal(b'a').isdigit())
self.assertTrue(self.marshal(b'0').isdigit())
self.assertTrue(self.marshal(b'0123456789').isdigit())
self.assertFalse(self.marshal(b'0123456789a').isdigit())
self.assertRaises(TypeError, self.marshal(b'abc').isdigit, 42)
def test_lower(self):
self.assertEqual(b'hello', self.marshal(b'HeLLo').lower())
self.assertEqual(b'hello', self.marshal(b'hello').lower())
self.assertRaises(TypeError, self.marshal(b'hello').lower, 42)
def test_upper(self):
self.assertEqual(b'HELLO', self.marshal(b'HeLLo').upper())
self.assertEqual(b'HELLO', self.marshal(b'HELLO').upper())
self.assertRaises(TypeError, self.marshal(b'hello').upper, 42)
def test_capitalize(self):
self.assertEqual(b' hello ', self.marshal(b' hello ').capitalize())
self.assertEqual(b'Hello ', self.marshal(b'Hello ').capitalize())
self.assertEqual(b'Hello ', self.marshal(b'hello ').capitalize())
self.assertEqual(b'Aaaa', self.marshal(b'aaaa').capitalize())
self.assertEqual(b'Aaaa', self.marshal(b'AaAa').capitalize())
self.assertRaises(TypeError, self.marshal(b'hello').capitalize, 42)
def test_ljust(self):
self.assertEqual(b'abc ', self.marshal(b'abc').ljust(10))
self.assertEqual(b'abc ', self.marshal(b'abc').ljust(6))
self.assertEqual(b'abc', self.marshal(b'abc').ljust(3))
self.assertEqual(b'abc', self.marshal(b'abc').ljust(2))
self.assertEqual(b'abc*******', self.marshal(b'abc').ljust(10, b'*'))
self.assertRaises(TypeError, self.marshal(b'abc').ljust)
def test_rjust(self):
self.assertEqual(b' abc', self.marshal(b'abc').rjust(10))
self.assertEqual(b' abc', self.marshal(b'abc').rjust(6))
self.assertEqual(b'abc', self.marshal(b'abc').rjust(3))
self.assertEqual(b'abc', self.marshal(b'abc').rjust(2))
self.assertEqual(b'*******abc', self.marshal(b'abc').rjust(10, b'*'))
self.assertRaises(TypeError, self.marshal(b'abc').rjust)
def test_center(self):
self.assertEqual(b' abc ', self.marshal(b'abc').center(10))
self.assertEqual(b' abc ', self.marshal(b'abc').center(6))
self.assertEqual(b'abc', self.marshal(b'abc').center(3))
self.assertEqual(b'abc', self.marshal(b'abc').center(2))
self.assertEqual(b'***abc****', self.marshal(b'abc').center(10, b'*'))
self.assertRaises(TypeError, self.marshal(b'abc').center)
def test_swapcase(self):
self.assertEqual(b'hEllO CoMPuTErS',
self.marshal(b'HeLLo cOmpUteRs').swapcase())
self.assertRaises(TypeError, self.marshal(b'hello').swapcase, 42)
def test_zfill(self):
self.assertEqual(b'123', self.marshal(b'123').zfill(2))
self.assertEqual(b'123', self.marshal(b'123').zfill(3))
self.assertEqual(b'0123', self.marshal(b'123').zfill(4))
self.assertEqual(b'+123', self.marshal(b'+123').zfill(3))
self.assertEqual(b'+123', self.marshal(b'+123').zfill(4))
self.assertEqual(b'+0123', self.marshal(b'+123').zfill(5))
self.assertEqual(b'-123', self.marshal(b'-123').zfill(3))
self.assertEqual(b'-123', self.marshal(b'-123').zfill(4))
self.assertEqual(b'-0123', self.marshal(b'-123').zfill(5))
self.assertEqual(b'000', self.marshal(b'').zfill(3))
self.assertEqual(b'34', self.marshal(b'34').zfill(1))
self.assertEqual(b'0034', self.marshal(b'34').zfill(4))
self.assertRaises(TypeError, self.marshal(b'123').zfill)
def test_expandtabs(self):
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs())
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(8))
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(4))
self.assertEqual(b'abc\r\nab def\ng hi',
self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs())
self.assertEqual(b'abc\r\nab def\ng hi',
self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(8))
self.assertEqual(b'abc\r\nab def\ng hi',
self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(4))
self.assertEqual(b'abc\r\nab\r\ndef\ng\r\nhi',
self.marshal(b'abc\r\nab\r\ndef\ng\r\nhi').expandtabs(4))
# check keyword args
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=8))
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=4))
self.assertEqual(b' a\n b', self.marshal(b' \ta\n\tb').expandtabs(1))
self.assertRaises(TypeError, self.marshal(b'hello').expandtabs, 42, 42)
# This test is only valid when sizeof(int) == sizeof(void*) == 4.
if sys.maxsize < (1 << 32) and struct.calcsize('P') == 4:
self.assertRaises(OverflowError,
self.marshal(b'\ta\n\tb').expandtabs, sys.maxsize)
def test_title(self):
self.assertEqual(b' Hello ', self.marshal(b' hello ').title())
self.assertEqual(b'Hello ', self.marshal(b'hello ').title())
self.assertEqual(b'Hello ', self.marshal(b'Hello ').title())
self.assertEqual(b'Format This As Title String',
self.marshal(b'fOrMaT thIs aS titLe String').title())
self.assertEqual(b'Format,This-As*Title;String',
self.marshal(b'fOrMaT,thIs-aS*titLe;String').title())
self.assertEqual(b'Getint', self.marshal(b'getInt').title())
self.assertRaises(TypeError, self.marshal(b'hello').title, 42)
def test_splitlines(self):
self.assertEqual([b'abc', b'def', b'', b'ghi'],
self.marshal(b'abc\ndef\n\rghi').splitlines())
self.assertEqual([b'abc', b'def', b'', b'ghi'],
self.marshal(b'abc\ndef\n\r\nghi').splitlines())
self.assertEqual([b'abc', b'def', b'ghi'],
self.marshal(b'abc\ndef\r\nghi').splitlines())
self.assertEqual([b'abc', b'def', b'ghi'],
self.marshal(b'abc\ndef\r\nghi\n').splitlines())
self.assertEqual([b'abc', b'def', b'ghi', b''],
self.marshal(b'abc\ndef\r\nghi\n\r').splitlines())
self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines())
self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(False))
self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(True))
self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=False))
self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=True))
self.assertRaises(TypeError, self.marshal(b'abc').splitlines, 42, 42)
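# Illustrative only (not part of the original file): a concrete suite plugs
# into this mixin by inheriting it alongside unittest.TestCase and supplying
# marshal(); CPython's real bytes/bytearray test suites do exactly that.
import unittest
class BytesMixinDemo(MixinBytesBufferCommonTests, unittest.TestCase):
    def marshal(self, x):
        # For immutable bytes the marshal step is simply the identity.
        return bytes(x)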
|
xhochy/arrow
|
refs/heads/master
|
dev/archery/archery/benchmark/google.py
|
2
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from itertools import filterfalse, groupby, tee
import json
import subprocess
from tempfile import NamedTemporaryFile
from .core import Benchmark
from ..utils.command import Command
def partition(pred, iterable):
# adapted from python's examples
t1, t2 = tee(iterable)
return list(filter(pred, t1)), list(filterfalse(pred, t2))
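# Worked example (added for illustration, not upstream code): partition
# splits an iterable into (matching, non-matching) lists via two tees, e.g.
# partition(lambda x: x % 2 == 0, range(5)) == ([0, 2, 4], [1, 3])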
class GoogleBenchmarkCommand(Command):
""" Run a google benchmark binary.
This assumes the binary supports the standard command line options,
notably `--benchmark_filter`, `--benchmark_format`, etc...
"""
def __init__(self, benchmark_bin, benchmark_filter=None):
self.bin = benchmark_bin
self.benchmark_filter = benchmark_filter
def list_benchmarks(self):
argv = ["--benchmark_list_tests"]
if self.benchmark_filter:
argv.append("--benchmark_filter={}".format(self.benchmark_filter))
result = self.run(*argv, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return str.splitlines(result.stdout.decode("utf-8"))
def results(self, repetitions=1):
with NamedTemporaryFile() as out:
argv = ["--benchmark_repetitions={}".format(repetitions),
"--benchmark_out={}".format(out.name),
"--benchmark_out_format=json"]
if self.benchmark_filter:
argv.append(
"--benchmark_filter={}".format(self.benchmark_filter)
)
self.run(*argv, check=True)
return json.load(out)
class GoogleBenchmarkObservation:
""" Represents one run of a single (google c++) benchmark.
Observations are found when running with `--benchmark_repetitions`. Sadly,
the format mixes values and aggregates, e.g.
RegressionSumKernel/32768/0 1 us 1 us 25.8077GB/s
RegressionSumKernel/32768/0 1 us 1 us 25.7066GB/s
RegressionSumKernel/32768/0 1 us 1 us 25.1481GB/s
RegressionSumKernel/32768/0 1 us 1 us 25.846GB/s
RegressionSumKernel/32768/0 1 us 1 us 25.6453GB/s
RegressionSumKernel/32768/0_mean 1 us 1 us 25.6307GB/s
RegressionSumKernel/32768/0_median 1 us 1 us 25.7066GB/s
RegressionSumKernel/32768/0_stddev 0 us 0 us 288.046MB/s
As of benchmark v1.4.1 (2019-04-24), the only way to differentiate an
actual run from the aggregates is to match on the benchmark name. The
aggregates will be appended with `_$agg_name`.
This class encapsulates the logic to separate runs from aggregates. This is
hopefully avoided in benchmark's master version with a separate json
attribute.
"""
def __init__(self, name, real_time, cpu_time, time_unit, size=None,
bytes_per_second=None, items_per_second=None, **counters):
self._name = name
self.real_time = real_time
self.cpu_time = cpu_time
self.time_unit = time_unit
self.size = size
self.bytes_per_second = bytes_per_second
self.items_per_second = items_per_second
self.counters = counters
@property
def is_agg(self):
""" Indicate if the observation is a run or an aggregate. """
suffixes = ["_mean", "_median", "_stddev"]
return any(map(lambda x: self._name.endswith(x), suffixes))
@property
def is_realtime(self):
""" Indicate if the preferred value is realtime instead of cputime. """
return self.name.find("/realtime") != -1
@property
def name(self):
name = self._name
return name.rsplit("_", maxsplit=1)[0] if self.is_agg else name
@property
def time(self):
return self.real_time if self.is_realtime else self.cpu_time
@property
def value(self):
""" Return the benchmark value."""
return self.bytes_per_second or self.items_per_second or self.time
@property
def unit(self):
if self.bytes_per_second:
return "bytes_per_second"
elif self.items_per_second:
return "items_per_second"
else:
return self.time_unit
def __repr__(self):
return str(self.value)
class GoogleBenchmark(Benchmark):
""" A set of GoogleBenchmarkObservations. """
def __init__(self, name, runs):
""" Initialize a GoogleBenchmark.
Parameters
----------
name: str
Name of the benchmark
runs: list(GoogleBenchmarkObservation)
Repetitions of GoogleBenchmarkObservation run.
"""
self.name = name
# exclude google benchmark aggregate artifacts
_, runs = partition(lambda b: b.is_agg, runs)
self.runs = sorted(runs, key=lambda b: b.value)
unit = self.runs[0].unit
less_is_better = not unit.endswith("per_second")
values = [b.value for b in self.runs]
# Slight kludge to extract the UserCounters for each benchmark
self.counters = self.runs[0].counters
super().__init__(name, unit, less_is_better, values)
def __repr__(self):
return "GoogleBenchmark[name={},runs={}]".format(self.names, self.runs)
@classmethod
def from_json(cls, payload):
def group_key(x):
return x.name
benchmarks = map(lambda x: GoogleBenchmarkObservation(**x), payload)
groups = groupby(sorted(benchmarks, key=group_key), group_key)
return [cls(k, list(bs)) for k, bs in groups]
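# End-to-end sketch (illustrative; the binary name and the 'benchmarks' JSON
# key are assumptions based on standard google-benchmark output, not code
# from this module):
#
#     cmd = GoogleBenchmarkCommand('arrow-compute-scalar-benchmark')
#     raw = cmd.results(repetitions=3)
#     suites = GoogleBenchmark.from_json(raw['benchmarks'])
#     for suite in suites:
#         print(suite.name, suite.unit, [r.value for r in suite.runs])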
|
t-abe/chainer
|
refs/heads/master
|
chainer/function_set.py
|
5
|
import numpy
import warnings
from chainer import cuda
from chainer import link
class FunctionSet(link.Chain):
"""Set of links (as "parameterized functions").
FunctionSet is a subclass of :class:`~chainer.Chain`. Function
registration is done just by adding an attribute to a
:class:`FunctionSet` object.
.. deprecated:: v1.5
Use :class:`~chainer.Chain` instead.
.. note::
FunctionSet was used for manipulation of one or more parameterized
functions. The concept of parameterized function is gone, and it has
been replaced by :class:`~chainer.Link` and :class:`~chainer.Chain`.
"""
def __init__(self, **links):
super(FunctionSet, self).__init__(**links)
warnings.warn('FunctionSet is deprecated. Use Chain instead.',
DeprecationWarning)
def __setattr__(self, key, value):
d = self.__dict__
if isinstance(value, link.Link):
# we cannot use add_link here since add_link calls setattr, and we
# should allow overwriting for backward compatibility
if value.name is not None:
raise ValueError(
'given link is already registered to another chain by name'
' %s' % value.name)
if key in d:
d[key].name = None
del d[key]
else:
d['_children'].append(key)
value.name = key
# deal with properties
prop = getattr(self.__class__, key, None)
if isinstance(prop, property) and prop.fset is not None:
prop.fset(self, value)
else:
super(FunctionSet, self).__setattr__(key, value)
def collect_parameters(self):
"""Returns a tuple of parameters and gradients.
Returns:
Tuple (pair) of two tuples. The first element is a tuple of
parameter arrays, and the second is a tuple of gradient arrays.
"""
msg = ("'collect_parameters' is deprecated. "
"You can pass FunctionSet itself to 'optimizer.setup'")
warnings.warn(msg, FutureWarning)
return self
def __getitem__(self, key):
"""Returns an attribute by name.
Args:
key (str): Name of the attribute.
Returns:
Attribute.
.. admonition:: Example
>>> model = FunctionSet(l1=L.Linear(10, 10), l2=L.Linear(10, 10))
>>> l1 = model['l1'] # equivalent to l1 = model.l1
"""
return getattr(self, key)
def copy_parameters_from(self, params):
"""Copies parameters from another source without reallocation.
Args:
params (Iterable): Iterable of parameter arrays.
"""
for dst, src in zip(self.parameters, params):
if isinstance(dst, numpy.ndarray):
if isinstance(src, numpy.ndarray):
numpy.copyto(dst, src)
else:
dst[:] = src.get()
elif isinstance(src, numpy.ndarray):
dst.set(src)
else:
cuda.copy(src, out=dst)
@property
def parameters(self):
"""Tuple of parameter arrays of all registered functions.
The order of parameters is consistent with :meth:`gradients` property.
"""
return tuple(param.data for param in self.params())
@parameters.setter
def parameters(self, params):
assert len(params) == len([_ for _ in self.params()])
for dst, src in zip(self.params(), params):
dst.data = src
@property
def gradients(self):
"""Tuple of gradient arrays of all registered functions.
The order of gradients is consistent with :meth:`parameters` property.
"""
return tuple(param.grad for param in self.params())
@gradients.setter
def gradients(self, grads):
assert len(grads) == len([_ for _ in self.params()])
for dst, src in zip(self.params(), grads):
dst.grad = src
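# Deprecated-usage sketch (illustrative, not upstream code; assumes
# chainer.links provides Linear as in chainer v1.5):
#
#     import chainer.links as L
#     model = FunctionSet(l1=L.Linear(4, 3), l2=L.Linear(3, 2))
#     params = model.parameters           # tuple of parameter arrays
#     model.copy_parameters_from(params)  # round-trips without reallocation
#     assert model['l1'] is model.l1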
|
mozilla/kuma
|
refs/heads/master
|
kuma/wiki/tests/test_models.py
|
1
|
import time
from datetime import date, datetime, timedelta
from unittest import mock
from urllib.parse import urlparse
from xml.sax.saxutils import escape
import pytest
from constance import config
from constance.test import override_config
from django.conf import settings
from django.core.exceptions import ValidationError
from kuma.attachments.models import Attachment, AttachmentRevision
from kuma.core.exceptions import ProgrammingError
from kuma.core.tests import get_user
from kuma.core.urlresolvers import reverse
from kuma.users.tests import UserTestCase
from . import create_document_tree, document, revision
from .. import tasks
from ..constants import EXPERIMENT_TITLE_PREFIX, REDIRECT_CONTENT
from ..events import EditDocumentInTreeEvent
from ..exceptions import (DocumentRenderedContentNotAvailable,
DocumentRenderingInProgress, PageMoveError)
from ..models import (Document, DocumentTag, Revision, RevisionIP,
TaggedDocument)
from ..utils import tidy_content
def test_clean_current_revision_with_no_change(root_doc, wiki_user_2):
assert root_doc.clean_current_revision(wiki_user_2) is None
def test_clean_current_revision_with_no_current(root_doc, wiki_user_2):
root_doc.current_revision = None
assert root_doc.clean_current_revision(wiki_user_2) is None
@pytest.mark.parametrize('is_approved', (True, False))
@pytest.mark.parametrize('doc_case', ('default-language', 'translation'))
def test_clean_current_revision(root_doc, trans_doc, wiki_user_2, doc_case,
is_approved):
doc = trans_doc if doc_case == 'translation' else root_doc
original_doc_slug = doc.slug
original_doc_title = doc.title
current_rev = doc.current_revision
current_rev.content = (
'<div onclick="alert(\'hacked!\')">click me</div>'
)
current_rev.tidied_content = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
<html>
<head>
<title></title>
</head>
<body>
<div onclick="alert('hacked!')">
click me
</div>
</body>
</html>
"""
tags = '"Banana" "Orange" "Apple"'
l10n_tags = {'inprogress'}
review_tags = {'editorial', 'technical'}
current_rev.tags = tags
# Let's make the revision's slug and title different from the document
# to ensure that they're corrected in the end.
current_rev.slug = original_doc_slug + 's'
current_rev.title = original_doc_title + 's'
current_rev.is_approved = is_approved
current_rev.localization_tags.set(*l10n_tags)
current_rev.review_tags.set(*review_tags)
prior_pk = current_rev.pk
prior_creator = current_rev.creator
prior_created = current_rev.created
if doc_case == 'translation':
expected_based_on_pk = current_rev.based_on.pk
else:
expected_based_on_pk = current_rev.pk
rev = doc.clean_current_revision(wiki_user_2)
assert rev
assert rev.pk != prior_pk
assert rev.creator != prior_creator
assert rev.creator == wiki_user_2
assert rev.created > prior_created
assert rev.based_on.pk == expected_based_on_pk
assert rev.content == '<div>click me</div>'
assert rev.tidied_content == (
'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">\n'
'<html>\n'
' <head>\n'
' <title></title>\n'
' </head>\n'
' <body>\n'
' <div>\n'
' click me\n'
' </div>\n'
' </body>\n'
'</html>\n'
)
assert rev.tags == tags
assert set(t.name for t in rev.localization_tags.all()) == l10n_tags
assert set(t.name for t in rev.review_tags.all()) == review_tags
assert rev.comment == 'Clean prior revision of {} by {}'.format(
prior_created, prior_creator)
assert rev.slug == original_doc_slug
assert rev.title == original_doc_title
assert doc.current_revision.pk == rev.pk
def test_document_is_not_experiment():
"""A document without the experiment prefix is not an experiment."""
doc = Document(slug='test')
assert not doc.is_experiment
def test_document_is_experiment():
"""A document with the experiment prefix is an experiment."""
doc = Document(slug=EXPERIMENT_TITLE_PREFIX + 'test')
assert doc.is_experiment
@pytest.mark.parametrize('slug,legacy', [
# See LEGACY_MINDTOUCH_NAMESPACES in ../constants.py
('Help:Login', True),
('Help_talk:Login', True),
('Project:MDN', True),
('Project_talk:MDN', True),
('Special:easter_egg', True),
('Talk:Web:CSS', True),
('Template:domxref', True),
('Template_talk:domxref', True),
('User:jezdez', True),
('User_talk:jezdez', True),
# Experiments aren't legacy yet
('Experiment:Blue', False),
# Slugs without colons don't have namespaces
('CSS', False),
# Slugs with colons might not be legacy
(':hover', False)
])
def test_document_has_legacy_namespace(slug, legacy):
"""Excluded slugs should not update the search index."""
assert Document(slug=slug).has_legacy_namespace == legacy
def test_document_delete_removes_tag_relationship(root_doc):
"""Deleting a tagged document also deletes the tag relationship."""
root_doc.tags.add('grape')
assert TaggedDocument.objects.count() == 1
root_doc.delete()
assert TaggedDocument.objects.count() == 0
def test_document_raises_error_when_translating_non_localizable(root_doc):
"""Adding a translation of a non-localizable document raises an error."""
root_doc.is_localizable = False
root_doc.save()
de_doc = Document(parent=root_doc, slug='Rübe', locale='de')
with pytest.raises(ValidationError):
de_doc.save()
def test_document_raises_error_setting_non_loc_for_trans_doc(trans_doc):
"""Setting is_localizable for a translated document raises an error."""
en_doc = trans_doc.parent
en_doc.is_localizable = False
with pytest.raises(ValidationError):
en_doc.save()
def test_document_non_english_implies_non_localizable(db):
"""All non-English documents are set non-localizable."""
es_doc = Document.objects.create(locale='es', slug='Tubérculos')
assert not es_doc.is_localizable
def test_document_translations(trans_doc):
"""other_translations lists other translations, English first."""
en_doc = trans_doc.parent
ar_doc = Document.objects.create(locale='ar', slug='جذور الخضروات',
parent=en_doc)
# Translations are returned English first, then ordered, and omit self
assert ar_doc.locale < en_doc.locale < trans_doc.locale
assert en_doc.other_translations == [ar_doc, trans_doc]
assert trans_doc.other_translations == [en_doc, ar_doc]
assert ar_doc.other_translations == [en_doc, trans_doc]
def test_document_parents(root_doc):
"""Document.parents gives the document hierarchy."""
assert root_doc.parents == []
child_doc = Document.objects.create(parent_topic=root_doc,
slug=root_doc.slug + '/Child')
assert child_doc.parents == [root_doc]
gchild_doc = Document.objects.create(parent_topic=child_doc,
slug=child_doc.slug + '/GrandChild')
assert gchild_doc.parents == [root_doc, child_doc]
@pytest.mark.parametrize('url',
(settings.SITE_URL + '/en-US/Mozilla',
'/en-US/Mozilla',
'/',
))
def test_document_redirect_allows_valid_url(db, url):
"""get_redirect_url returns valid URLs."""
title = 'Mozilla'
html = REDIRECT_CONTENT % {'href': url, 'title': title}
doc = Document.objects.create(locale='en-US', slug='Redirect',
is_redirect=True, html=html)
parsed = urlparse(url)
assert doc.get_redirect_url() == parsed.path
@pytest.mark.parametrize('url',
('//evilsite.com',
'https://example.com/foreign_url',
))
def test_document_redirect_rejects_invalid_url(db, url):
"""get_redirect_url returns None for invalid URLs."""
html = REDIRECT_CONTENT % {'href': url, 'title': 'Invalid URL'}
doc = Document.objects.create(locale='en-US', slug='Redirect',
is_redirect=True, html=html)
assert doc.get_redirect_url() is None
def test_document_get_full_url(root_doc):
"""get_full_url returns full URLs."""
assert root_doc.get_full_url() == settings.SITE_URL + '/en-US/docs/Root'
def test_document_from_url(root_doc):
"""from_url returns the document for an absolute URL."""
doc = Document.from_url(root_doc.get_absolute_url())
assert doc == root_doc
def test_document_from_url_locale_matches_translation(trans_doc):
"""from_url matches translation with locale plus English slug."""
en_doc = trans_doc.parent
url = reverse('wiki.document', locale=trans_doc.locale, args=[en_doc.slug])
doc = Document.from_url(url)
assert doc == trans_doc
def test_document_from_url_bad_slug_returns_none(trans_doc):
"""from_url returns None for an invalid slug."""
en_doc = trans_doc.parent
url = reverse('wiki.document', locale=trans_doc.locale,
args=[en_doc.slug + '_bad_slug'])
doc = Document.from_url(url)
assert doc is None
def test_document_from_url_revision_url_returns_none(create_revision):
"""from_url returns None for a revision URL."""
doc = Document.from_url(create_revision.get_absolute_url())
assert doc is None
def test_document_from_url_full_url_returns_doc(root_doc):
"""from_url returns the document for a full URL."""
url = root_doc.get_full_url()
assert Document.from_url(url) == root_doc
def test_document_from_url_other_url_returns_none(root_doc):
"""from_url returns None for a different domain."""
assert settings.SITE_URL != 'https://example.com'
url = 'https://example.com' + root_doc.get_absolute_url()
assert Document.from_url(url) is None
def test_document_get_redirect_document(root_doc):
"""get_redirect_document returns the destination document."""
old_slug = root_doc.slug
root_doc._move_tree(new_slug='Moved')
old_doc = Document.objects.get(slug=old_slug)
assert old_doc.get_redirect_document() == root_doc
@pytest.mark.parametrize('invalidate_cdn_cache', (True, False))
@mock.patch('kuma.wiki.models.render_done')
def test_document_render_invalidate_cdn_cache(mock_render_done, root_doc,
invalidate_cdn_cache):
"""
The "invalidate_cdn_cache" argument to render is passed through
as one of the arguments that the "render_done" signal provides.
"""
root_doc.render(invalidate_cdn_cache=invalidate_cdn_cache)
mock_render_done.send.assert_called_once_with(
sender=root_doc.__class__,
instance=root_doc,
invalidate_cdn_cache=invalidate_cdn_cache
)
class UserDocumentTests(UserTestCase):
"""Document tests which need the users fixture"""
def test_default_topic_parents_for_translation(self):
"""A translated document with no topic parent should by default use
the translation of its translation parent's topic parent."""
orig_pt = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='test section',
save=True)
orig = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test',
parent_topic=orig_pt, save=True)
trans_pt = document(locale='fr', title='le test section',
parent=orig_pt, save=True)
trans = document(locale='fr', title='le test',
parent=orig, save=True)
assert trans.parent_topic
assert trans_pt.pk == trans.parent_topic.pk
def test_default_topic_with_stub_creation(self):
orig_pt = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='test section',
save=True)
orig = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test',
parent_topic=orig_pt, save=True)
trans = document(locale='fr', title='le test',
parent=orig, save=True)
# There should be a translation topic parent
trans_pt = trans.parent_topic
assert trans_pt
# The locale of the topic parent should match the new translation
assert trans_pt.locale == trans.locale
# But, the translation's topic parent must *not* be the translation
# parent's topic parent
assert trans_pt.pk != orig_pt.pk
# Still, since the topic parent is an autocreated stub, it shares its
# title with the original.
assert trans_pt.title == orig_pt.title
# Oh, and it should point to the original parent topic as its
# translation parent
assert trans_pt.parent == orig_pt
def test_default_topic_with_path_gaps(self):
# Build a path of docs in en-US
orig_path = ('MDN', 'web', 'CSS', 'properties', 'banana', 'leaf')
docs, doc = [], None
for title in orig_path:
doc = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title=title,
parent_topic=doc, save=True)
revision(document=doc, title=title, save=True)
docs.append(doc)
# Translate, but leave gaps for stubs
trans_0 = document(locale='fr', title='le MDN',
parent=docs[0], save=True)
revision(document=trans_0, title='le MDN', tags="LeTest!", save=True)
trans_2 = document(locale='fr', title='le CSS',
parent=docs[2], save=True)
revision(document=trans_2, title='le CSS', tags="LeTest!", save=True)
trans_5 = document(locale='fr', title='le leaf',
parent=docs[5], save=True)
revision(document=trans_5, title='le leaf', tags="LeTest!", save=True)
# Make sure trans_2 got the right parent
assert trans_2.parents[0].pk == trans_0.pk
# Ensure the translated parents and stubs appear properly in the path
parents_5 = trans_5.parents
assert parents_5[0].pk == trans_0.pk
assert parents_5[1].locale == trans_5.locale
assert parents_5[1].title == docs[1].title
assert parents_5[1].current_revision.pk != docs[1].current_revision.pk
assert parents_5[2].pk == trans_2.pk
assert parents_5[3].locale == trans_5.locale
assert parents_5[3].title == docs[3].title
assert parents_5[3].current_revision.pk != docs[3].current_revision.pk
assert parents_5[4].locale == trans_5.locale
assert parents_5[4].title == docs[4].title
assert parents_5[4].current_revision.pk != docs[4].current_revision.pk
for p in parents_5:
assert p.current_revision
if p.pk not in (trans_0.pk, trans_2.pk, trans_5.pk):
assert 'NeedsTranslation' in p.current_revision.tags
assert 'TopicStub' in p.current_revision.tags
assert p.current_revision.localization_in_progress
def test_repair_breadcrumbs(self):
english_top = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English top',
save=True)
english_mid = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English mid',
parent_topic=english_top,
save=True)
english_bottom = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English bottom',
parent_topic=english_mid,
save=True)
french_top = document(locale='fr',
title='French top',
parent=english_top,
save=True)
french_mid = document(locale='fr',
parent=english_mid,
parent_topic=english_mid,
save=True)
french_bottom = document(locale='fr',
parent=english_bottom,
parent_topic=english_bottom,
save=True)
french_bottom.repair_breadcrumbs()
french_bottom_fixed = Document.objects.get(locale='fr',
title=french_bottom.title)
assert french_mid.id == french_bottom_fixed.parent_topic.id
assert french_top.id == french_bottom_fixed.parent_topic.parent_topic.id
def test_code_sample_extraction(self):
"""Make sure sample extraction works from the model.
This is a smaller version of the test from test_content.py"""
sample_html = '<p class="foo">Hello world!</p>'
sample_css = '.foo p { color: red; }'
sample_js = 'window.alert("Hi there!");'
doc_src = """
<p>This is a page. Deal with it.</p>
<ul id="s2" class="code-sample">
<li><pre class="brush: html">%s</pre></li>
<li><pre class="brush: css">%s</pre></li>
<li><pre class="brush: js">%s</pre></li>
</ul>
<p>More content shows up here.</p>
""" % (escape(sample_html), escape(sample_css), escape(sample_js))
rev = revision(is_approved=True, save=True, content=doc_src)
result = rev.document.extract.code_sample('s2')
assert sample_html.strip() == result['html'].strip()
assert sample_css.strip() == result['css'].strip()
assert sample_js.strip() == result['js'].strip()
def test_tree_is_watched_by(self):
rev = revision()
testuser2 = get_user(username='testuser2')
EditDocumentInTreeEvent.notify(testuser2, rev.document)
assert rev.document.tree_is_watched_by(testuser2)
def test_parent_trees_watched_by(self):
root_doc, child_doc, grandchild_doc = create_document_tree()
testuser2 = get_user(username='testuser2')
EditDocumentInTreeEvent.notify(testuser2, root_doc)
EditDocumentInTreeEvent.notify(testuser2, child_doc)
assert 2 == len(grandchild_doc.parent_trees_watched_by(testuser2))
@pytest.mark.tags
class TaggedDocumentTests(UserTestCase):
"""Tests for tags in Documents and Revisions"""
def test_revision_tags(self):
"""Change tags on Document by creating Revisions"""
rev = revision(is_approved=True, save=True, content='Sample document')
assert 0 == Document.objects.filter(tags__name='foo').count()
assert 0 == Document.objects.filter(tags__name='alpha').count()
r = revision(document=rev.document, content='Update to document',
is_approved=True, tags="foo, bar, baz")
r.save()
assert 1 == Document.objects.filter(tags__name='foo').count()
assert 0 == Document.objects.filter(tags__name='alpha').count()
r = revision(document=rev.document, content='Another update',
is_approved=True, tags="alpha, beta, gamma")
r.save()
assert 0 == Document.objects.filter(tags__name='foo').count()
assert 1 == Document.objects.filter(tags__name='alpha').count()
def test_duplicate_tags_with_creation(self):
rev = revision(
is_approved=True, save=True, content='Sample document',
tags="test Test")
assert rev.document.tags.count() == 1
tag = rev.document.tags.get()
assert tag.name in ('test', 'Test')
def test_duplicate_tags_with_existing(self):
dt = DocumentTag.objects.create(name='Test')
rev = revision(
is_approved=True, save=True, content='Sample document',
tags="test Test")
assert rev.document.tags.count() == 1
tag = rev.document.tags.get()
assert tag == dt
class RevisionTests(UserTestCase):
"""Tests for the Revision model"""
def test_approved_revision_updates_html(self):
"""Creating an approved revision updates document.html"""
rev = revision(is_approved=True, save=True,
content='Replace document html')
assert 'Replace document html' in rev.document.html, \
'"Replace document html" not in %s' % rev.document.html
# Creating another approved revision replaces it again
r = revision(document=rev.document, content='Replace html again',
is_approved=True)
r.save()
assert 'Replace html again' in rev.document.html, \
'"Replace html again" not in %s' % rev.document.html
def test_unapproved_revision_not_updates_html(self):
"""Creating an unapproved revision does not update document.html"""
rev = revision(is_approved=True, save=True, content='Here to stay')
assert 'Here to stay' in rev.document.html, \
'"Here to stay" not in %s' % rev.document.html
# Creating another approved revision keeps initial content
r = revision(document=rev.document, content='Fail to replace html',
is_approved=False)
r.save()
assert 'Here to stay' in rev.document.html, \
'"Here to stay" not in %s' % rev.document.html
def test_revision_unicode(self):
"""Revision containing unicode characters is saved successfully."""
content = 'Firefox informa\xe7\xf5es \u30d8\u30eb'
rev = revision(is_approved=True, save=True, content=content)
assert content == rev.content
def test_save_bad_based_on(self):
"""Saving a Revision with a bad based_on value raises an error."""
r = revision()
r.based_on = revision() # Revision of some other unrelated Document
self.assertRaises(ProgrammingError, r.save)
def test_correct_based_on_to_none(self):
"""Assure Revision.clean() changes a bad based_on value to None when
there is no current_revision of the English document."""
r = revision()
r.based_on = revision() # Revision of some other unrelated Document
self.assertRaises(ValidationError, r.clean)
assert r.based_on is None
def test_correct_based_on_to_current_revision(self):
"""Assure Revision.clean() defaults based_on value to the English
doc's current_revision when there is one."""
# Make English rev:
en_rev = revision(is_approved=True)
en_rev.save()
# Make Deutsch translation:
de_doc = document(parent=en_rev.document, locale='de')
de_doc.save()
de_rev = revision(document=de_doc)
# Set based_on to a de rev to simulate fixing broken translation source
de_rev.based_on = de_rev
de_rev.clean()
assert en_rev.document.current_revision == de_rev.based_on
def test_previous(self):
"""Revision.previous should return this revision's document's
most recent approved revision."""
rev = revision(is_approved=True, created=datetime(2017, 4, 15, 9, 23),
save=True)
next_rev = revision(document=rev.document, content="Updated",
is_approved=True,
created=datetime(2017, 4, 15, 9, 24), save=True)
last_rev = revision(document=rev.document, content="Finally",
is_approved=True,
created=datetime(2017, 4, 15, 9, 25), save=True)
trans = Document.objects.create(parent=rev.document, locale='fr',
title='In French')
trans_rev = revision(document=trans, is_approved=True,
based_on=last_rev,
created=datetime(2017, 4, 15, 9, 56), save=True)
assert rev.previous is None
assert next_rev.previous == rev
assert last_rev.previous == next_rev
assert trans_rev.previous == last_rev
@pytest.mark.toc
def test_show_toc(self):
"""Setting toc_depth appropriately affects the Document's
show_toc property."""
rev = revision(is_approved=True, save=True,
content='Toggle table of contents.')
assert (rev.toc_depth != 0)
assert rev.document.show_toc
r = revision(document=rev.document, content=rev.content, toc_depth=0,
is_approved=True)
r.save()
assert not rev.document.show_toc
r = revision(document=rev.document, content=r.content, toc_depth=1,
is_approved=True)
r.save()
assert rev.document.show_toc
def test_revert(self):
"""Reverting to a specific revision."""
rev = revision(is_approved=True, save=True, content='Test reverting')
old_id = rev.id
revision(document=rev.document,
title='Test reverting',
content='An edit to revert',
comment='This edit gets reverted',
is_approved=True)
rev.save()
reverted = rev.document.revert(rev, rev.creator)
assert 'Revert to' in reverted.comment
assert 'Test reverting' == reverted.content
assert old_id != reverted.id
def test_revert_review_tags(self):
rev = revision(is_approved=True, save=True,
content='Test reverting with review tags')
rev.review_tags.set('technical')
r2 = revision(document=rev.document,
title='Test reverting with review tags',
content='An edit to revert',
comment='This edit gets reverted',
is_approved=True)
r2.save()
r2.review_tags.set('editorial')
reverted = rev.document.revert(rev, rev.creator)
reverted_tags = [t.name for t in reverted.review_tags.all()]
assert 'technical' in reverted_tags
assert 'editorial' not in reverted_tags
def test_get_tidied_content_uses_model_field_first(self):
content = '<h1> Test get_tidied_content. </h1>'
fake_tidied = '<h1> Fake tidied. </h1>'
rev = revision(is_approved=True, save=True, content=content,
tidied_content=fake_tidied)
assert fake_tidied == rev.get_tidied_content()
def test_get_tidied_content_tidies_in_process_by_default(self):
content = '<h1> Test get_tidied_content </h1>'
rev = revision(is_approved=True, save=True, content=content)
tidied_content, errors = tidy_content(
'<h1 id="Test_get_tidied_content"> Test get_tidied_content </h1>'
)
assert tidied_content == rev.get_tidied_content()
def test_get_tidied_content_returns_none_on_allow_none(self):
rev = revision(is_approved=True, save=True,
content='Test get_tidied_content can return None.')
assert rev.get_tidied_content(allow_none=True) is None
class GetCurrentOrLatestRevisionTests(UserTestCase):
"""Tests for current_or_latest_revision."""
def test_single_approved(self):
"""Get approved revision."""
rev = revision(is_approved=True, save=True)
assert rev == rev.document.current_or_latest_revision()
def test_multiple_approved(self):
"""When multiple approved revisions exist, return the most recent."""
r1 = revision(is_approved=True, save=True)
r2 = revision(is_approved=True, save=True, document=r1.document)
assert r2 == r2.document.current_or_latest_revision()
def test_latest(self):
"""Return latest revision when no current exists."""
r1 = revision(is_approved=False, save=True,
created=datetime.now() - timedelta(days=1))
r2 = revision(is_approved=False, save=True, document=r1.document)
assert r2 == r1.document.current_or_latest_revision()
@override_config(
KUMA_DOCUMENT_RENDER_TIMEOUT=600.0,
KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT=7.0)
class DeferredRenderingTests(UserTestCase):
def setUp(self):
super(DeferredRenderingTests, self).setUp()
self.rendered_content = 'THIS IS RENDERED'
self.raw_content = 'THIS IS NOT RENDERED CONTENT'
self.r1 = revision(is_approved=True, save=True, content='Doc 1')
self.d1 = self.r1.document
config.KUMA_DOCUMENT_RENDER_TIMEOUT = 600.0
config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT = 7.0
def tearDown(self):
super(DeferredRenderingTests, self).tearDown()
self.d1.delete()
def test_rendering_fields(self):
"""Defaults for model fields related to rendering should work as
expected"""
assert not self.d1.rendered_html
assert not self.d1.defer_rendering
assert not self.d1.is_rendering_scheduled
assert not self.d1.is_rendering_in_progress
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_get_rendered(self, mock_kumascript_get):
"""get_rendered() should return rendered content when available,
attempt a render() when it's not"""
mock_kumascript_get.return_value = (self.rendered_content, None)
# First, try getting the rendered version of a document. It should
# trigger a call to kumascript.
assert not self.d1.rendered_html
assert not self.d1.render_started_at
assert not self.d1.last_rendered_at
result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/')
assert mock_kumascript_get.called
assert self.rendered_content == result_rendered
assert self.rendered_content == self.d1.rendered_html
# Next, get a fresh copy of the document and try getting a rendering.
# It should *not* call out to kumascript, because the rendered content
# should be in the DB.
d1_fresh = Document.objects.get(pk=self.d1.pk)
assert self.rendered_content == d1_fresh.rendered_html
assert d1_fresh.render_started_at
assert d1_fresh.last_rendered_at
mock_kumascript_get.called = False
result_rendered, _ = d1_fresh.get_rendered(None, 'http://testserver/')
assert not mock_kumascript_get.called
assert self.rendered_content == result_rendered
@mock.patch('kuma.wiki.models.render_done')
def test_build_json_on_render(self, mock_render_done):
"""
A document's json field is refreshed on render(), but not on save()
bug 875349
"""
self.d1.save()
assert not mock_render_done.send.called
mock_render_done.reset_mock()
self.d1.render()
assert mock_render_done.send.called
@mock.patch('kuma.wiki.kumascript.get')
@override_config(KUMASCRIPT_TIMEOUT=1.0)
def test_get_summary(self, mock_kumascript_get):
"""
get_summary() should attempt to use rendered
"""
mock_kumascript_get.return_value = ('<p>summary!</p>', None)
assert not self.d1.rendered_html
result_summary = self.d1.get_summary()
assert not mock_kumascript_get.called
assert not self.d1.rendered_html
self.d1.render()
assert self.d1.rendered_html
assert mock_kumascript_get.called
result_summary = self.d1.get_summary()
assert 'summary!' == result_summary
@mock.patch('kuma.wiki.kumascript.get')
def test_one_render_at_a_time(self, mock_kumascript_get):
"""Only one in-progress rendering should be allowed for a Document"""
mock_kumascript_get.return_value = (self.rendered_content, None)
self.d1.render_started_at = datetime.now()
self.d1.save()
with pytest.raises(DocumentRenderingInProgress):
self.d1.render('', 'http://testserver/')
@mock.patch('kuma.wiki.kumascript.get')
@override_config(KUMA_DOCUMENT_RENDER_TIMEOUT=5.0)
def test_render_timeout(self, mock_kumascript_get):
"""
A rendering that has taken too long is no longer considered in progress
"""
mock_kumascript_get.return_value = (self.rendered_content, None)
self.d1.render_started_at = (datetime.now() -
timedelta(seconds=5.0 + 1))
self.d1.save()
# No DocumentRenderingInProgress raised
self.d1.render('', 'http://testserver/')
@mock.patch('kuma.wiki.kumascript.get')
def test_long_render_sets_deferred(self, mock_kumascript_get):
"""A rendering that takes more than a desired response time marks the
document as in need of deferred rendering in the future."""
config.KUMASCRIPT_TIMEOUT = 1.0
rendered_content = self.rendered_content
def my_kumascript_get(self, base_url, cache_control, timeout):
time.sleep(1.0)
return (rendered_content, None)
mock_kumascript_get.side_effect = my_kumascript_get
config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT = 2.0
self.d1.render('', 'http://testserver/')
assert not self.d1.defer_rendering
config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT = 0.5
self.d1.render('', 'http://testserver/')
assert self.d1.defer_rendering
config.KUMASCRIPT_TIMEOUT = 0.0
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch.object(tasks.render_document, 'delay')
def test_schedule_rendering(self, mock_render_document_delay,
mock_kumascript_get):
mock_kumascript_get.return_value = (self.rendered_content, None)
# Scheduling for a non-deferred render should happen on the spot.
self.d1.defer_rendering = False
self.d1.save()
assert not self.d1.render_scheduled_at
assert not self.d1.last_rendered_at
self.d1.schedule_rendering(None, 'http://testserver/')
assert self.d1.render_scheduled_at
assert self.d1.last_rendered_at
assert not mock_render_document_delay.called
assert not self.d1.is_rendering_scheduled
# Reset the significant fields and try a deferred render.
self.d1.last_rendered_at = None
self.d1.render_started_at = None
self.d1.render_scheduled_at = None
self.d1.defer_rendering = True
self.d1.save()
# Scheduling for a deferred render should result in a queued task.
self.d1.schedule_rendering(None, 'http://testserver/')
assert self.d1.render_scheduled_at
assert not self.d1.last_rendered_at
assert mock_render_document_delay.called
# And, since our mock delay() doesn't actually queue a task, this
# document should appear to be scheduled for a pending render not yet
# in progress.
assert self.d1.is_rendering_scheduled
assert not self.d1.is_rendering_in_progress
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch.object(tasks.render_document, 'delay')
def test_immediate_rendering(self, mock_render_document_delay,
mock_kumascript_get):
'''Rendering is immediate when defer_rendering is False'''
mock_kumascript_get.return_value = (self.rendered_content, None)
mock_render_document_delay.side_effect = Exception('Should not be called')
self.d1.rendered_html = ''
self.d1.defer_rendering = False
self.d1.save()
result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/')
assert not mock_render_document_delay.called
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch.object(tasks.render_document, 'delay')
def test_deferred_rendering(self, mock_render_document_delay,
mock_kumascript_get):
'''Rendering is deferred when defer_rendering is True.'''
mock_kumascript_get.side_effect = Exception('Should not be called')
self.d1.rendered_html = ''
self.d1.defer_rendering = True
self.d1.save()
with pytest.raises(DocumentRenderedContentNotAvailable):
self.d1.get_rendered(None, 'http://testserver/')
assert mock_render_document_delay.called
@mock.patch('kuma.wiki.kumascript.get')
def test_errors_stored_correctly(self, mock_kumascript_get):
errors = [
{'level': 'error', 'message': 'This is a fake error',
'args': ['FakeError']},
]
mock_kumascript_get.return_value = (self.rendered_content, errors)
r_rendered, r_errors = self.d1.get_rendered(None, 'http://testserver/')
assert errors, r_errors
class RenderExpiresTests(UserTestCase):
"""Tests for max-age and automatic document rebuild"""
def test_find_stale_documents(self):
now = datetime.now()
# Fresh
d1 = document(title='Aged 1')
d1.render_expires = now + timedelta(seconds=100)
d1.save()
# Stale, exactly now
d2 = document(title='Aged 2')
d2.render_expires = now
d2.save()
# Stale, a little while ago
d3 = document(title='Aged 3')
d3.render_expires = now - timedelta(seconds=100)
d3.save()
stale_docs = Document.objects.get_by_stale_rendering()
assert (sorted([d2.pk, d3.pk]) ==
sorted([x.pk for x in stale_docs]))
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_update_expires_with_max_age(self, mock_kumascript_get):
mock_kumascript_get.return_value = ('MOCK CONTENT', None)
max_age = 1000
now = datetime.now()
d1 = document(title='Aged 1')
d1.render_max_age = max_age
d1.save()
d1.render()
# HACK: Exact time comparisons suck, because execution time.
later = now + timedelta(seconds=max_age)
assert d1.render_expires > later - timedelta(seconds=1)
assert d1.render_expires < later + timedelta(seconds=1)
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_update_expires_without_max_age(self, mock_kumascript_get):
mock_kumascript_get.return_value = ('MOCK CONTENT', None)
now = datetime.now()
d1 = document(title='Aged 1')
d1.render_expires = now - timedelta(seconds=100)
d1.save()
d1.render()
assert not d1.render_expires
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch.object(tasks.render_document, 'delay')
def test_render_stale(self, mock_render_document_delay,
mock_kumascript_get):
mock_kumascript_get.return_value = ('MOCK CONTENT', None)
now = datetime.now()
earlier = now - timedelta(seconds=1000)
d1 = document(title='Aged 3')
d1.last_rendered_at = earlier
d1.render_expires = now - timedelta(seconds=100)
d1.save()
tasks.render_stale_documents()
d1_fresh = Document.objects.get(pk=d1.pk)
assert not mock_render_document_delay.called
assert d1_fresh.last_rendered_at > earlier
class PageMoveTests(UserTestCase):
"""Tests for page-moving and associated functionality."""
@pytest.mark.move
def test_children_simple(self):
"""A basic tree with two direct children and no sub-trees on
either."""
d1 = document(title='Parent', save=True)
d2 = document(title='Child', save=True)
d2.parent_topic = d1
d2.save()
d3 = document(title='Another child', save=True)
d3.parent_topic = d1
d3.save()
assert [d2, d3] == d1.get_descendants()
def test_get_descendants_limited(self):
"""Tests limiting of descendant levels"""
def _make_doc(title, parent=None):
doc = document(title=title, save=True)
if parent:
doc.parent_topic = parent
doc.save()
return doc
parent = _make_doc('Parent')
child1 = _make_doc('Child 1', parent)
child2 = _make_doc('Child 2', parent)
grandchild = _make_doc('GrandChild 1', child1)
_make_doc('Great GrandChild 1', grandchild)
# Test descendant counts
assert 4 == len(parent.get_descendants()) # All
assert 2 == len(parent.get_descendants(1))
assert 3 == len(parent.get_descendants(2))
assert 0 == len(parent.get_descendants(0))
assert 0 == len(child2.get_descendants(10))
assert 1 == len(grandchild.get_descendants(4))
def test_children_complex(self):
"""A slightly more complex tree, with multiple children, some
of which do/don't have their own children."""
top = document(title='Parent', save=True)
c1 = document(title='Child 1', save=True)
c1.parent_topic = top
c1.save()
gc1 = document(title='Child of child 1', save=True)
gc1.parent_topic = c1
gc1.save()
c2 = document(title='Child 2', save=True)
c2.parent_topic = top
c2.save()
gc2 = document(title='Child of child 2', save=True)
gc2.parent_topic = c2
gc2.save()
gc3 = document(title='Another child of child 2', save=True)
gc3.parent_topic = c2
gc3.save()
ggc1 = document(title='Child of the second child of child 2',
save=True)
ggc1.parent_topic = gc3
ggc1.save()
assert [c1, gc1, c2, gc2, gc3, ggc1] == top.get_descendants()
@pytest.mark.move
def test_circular_dependency(self):
"""Make sure we can detect potential circular dependencies in
parent/child relationships."""
# Test detection at one level removed.
parent = document(title='Parent of circular-dependency document',
save=True)
child = document(title='Document with circular dependency')
child.parent_topic = parent
child.save()
assert child.is_child_of(parent)
# And at two levels removed.
grandparent = document(title='Grandparent of '
'circular-dependency document')
parent.parent_topic = grandparent
child.save()
assert child.is_child_of(grandparent)
@pytest.mark.move
def test_move_tree(self):
"""Moving a tree of documents does the correct thing"""
# Simple multi-level tree:
#
# - top
# - child1
# - child2
# - grandchild
top = revision(title='Top-level parent for tree moves',
slug='first-level/parent',
is_approved=True,
save=True)
old_top_id = top.id
top_doc = top.document
child1 = revision(title='First child of tree-move parent',
slug='first-level/second-level/child1',
is_approved=True,
save=True)
old_child1_id = child1.id
child1_doc = child1.document
child1_doc.parent_topic = top_doc
child1_doc.save()
child2 = revision(title='Second child of tree-move parent',
slug='first-level/second-level/child2',
is_approved=True,
save=True)
old_child2_id = child2.id
child2_doc = child2.document
child2_doc.parent_topic = top_doc
child2_doc.save()
grandchild = revision(title='Child of second child of tree-move parent',
slug='first-level/second-level/third-level/grandchild',
is_approved=True,
save=True)
old_grandchild_id = grandchild.id
grandchild_doc = grandchild.document
grandchild_doc.parent_topic = child2_doc
grandchild_doc.save()
revision(title='New Top-level bucket for tree moves',
slug='new-prefix',
is_approved=True,
save=True)
revision(title='New first-level parent for tree moves',
slug='new-prefix/first-level',
is_approved=True,
save=True)
# Now we do a simple move: inserting a prefix that needs to be
# inherited by the whole tree.
top_doc._move_tree('new-prefix/first-level/parent')
# And for each document verify three things:
#
# 1. The new slug is correct.
# 2. A new revision was created when the page moved.
# 3. A redirect was created.
moved_top = Document.objects.get(pk=top_doc.id)
assert ('new-prefix/first-level/parent' ==
moved_top.current_revision.slug)
assert old_top_id != moved_top.current_revision.id
assert (moved_top.current_revision.slug in
Document.objects.get(slug='first-level/parent').get_redirect_url())
moved_child1 = Document.objects.get(pk=child1_doc.id)
assert ('new-prefix/first-level/parent/child1' ==
moved_child1.current_revision.slug)
assert old_child1_id != moved_child1.current_revision.id
assert moved_child1.current_revision.slug in Document.objects.get(
slug='first-level/second-level/child1').get_redirect_url()
moved_child2 = Document.objects.get(pk=child2_doc.id)
assert ('new-prefix/first-level/parent/child2' ==
moved_child2.current_revision.slug)
assert old_child2_id != moved_child2.current_revision.id
assert moved_child2.current_revision.slug in Document.objects.get(
slug='first-level/second-level/child2').get_redirect_url()
moved_grandchild = Document.objects.get(pk=grandchild_doc.id)
assert ('new-prefix/first-level/parent/child2/grandchild' ==
moved_grandchild.current_revision.slug)
assert old_grandchild_id != moved_grandchild.current_revision.id
assert moved_grandchild.current_revision.slug in Document.objects.get(
slug='first-level/second-level/third-level/grandchild').get_redirect_url()
@pytest.mark.move
def test_conflicts(self):
top = revision(title='Test page-move conflict detection',
slug='test-move-conflict-detection',
is_approved=True,
save=True)
top_doc = top.document
child = revision(title='Child of conflict detection test',
slug='move-tests/conflict-child',
is_approved=True,
save=True)
child_doc = child.document
child_doc.parent_topic = top_doc
child_doc.save()
# We should find the conflict if it's at the slug the document
# will move to.
top_conflict = revision(title='Conflicting document for move conflict detection',
slug='moved/test-move-conflict-detection',
is_approved=True,
save=True)
assert ([top_conflict.document] ==
top_doc._tree_conflicts('moved/test-move-conflict-detection'))
# Or if it will involve a child document.
child_conflict = revision(title='Conflicting child for move conflict detection',
slug='moved/test-move-conflict-detection/conflict-child',
is_approved=True,
save=True)
assert ([top_conflict.document, child_conflict.document] ==
top_doc._tree_conflicts('moved/test-move-conflict-detection'))
# But a redirect should not trigger a conflict.
revision(title='Conflicting document for move conflict detection',
slug='moved/test-move-conflict-detection',
content='REDIRECT <a class="redirect" href="/foo">Foo</a>',
document=top_conflict.document,
is_approved=True,
save=True)
assert ([child_conflict.document] ==
top_doc._tree_conflicts('moved/test-move-conflict-detection'))
@pytest.mark.move
def test_additional_conflicts(self):
top = revision(title='WebRTC',
slug='WebRTC',
content='WebRTC',
is_approved=True,
save=True)
top_doc = top.document
child1 = revision(title='WebRTC Introduction',
slug='WebRTC/WebRTC_Introduction',
content='WebRTC Introduction',
is_approved=True,
save=True)
child1_doc = child1.document
child1_doc.parent_topic = top_doc
child1_doc.save()
child2 = revision(title='Taking webcam photos',
slug='WebRTC/Taking_webcam_photos',
is_approved=True,
save=True)
child2_doc = child2.document
child2_doc.parent_topic = top_doc
child2_doc.save()
assert not top_doc._tree_conflicts('NativeRTC')
@pytest.mark.move
def test_preserve_tags(self):
tags = "'moving', 'tests'"
rev = revision(title='Test page-move tag preservation',
slug='page-move-tags',
tags=tags,
is_approved=True,
save=True)
rev.review_tags.set('technical')
rev = Revision.objects.get(pk=rev.id)
revision(title='New Top-level parent for tree moves',
slug='new-top',
is_approved=True,
save=True)
doc = rev.document
doc._move_tree('new-top/page-move-tags')
moved_doc = Document.objects.get(pk=doc.id)
new_rev = moved_doc.current_revision
assert tags == new_rev.tags
assert (['technical'] ==
[str(tag) for tag in new_rev.review_tags.all()])
@pytest.mark.move
def test_move_tree_breadcrumbs(self):
"""Moving a tree of documents under an existing doc updates breadcrumbs"""
grandpa = revision(title='Top-level parent for breadcrumb move',
slug='grandpa', is_approved=True, save=True)
grandpa_doc = grandpa.document
dad = revision(title='Mid-level parent for breadcrumb move',
slug='grandpa/dad', is_approved=True, save=True)
dad_doc = dad.document
dad_doc.parent_topic = grandpa_doc
dad_doc.save()
son = revision(title='Bottom-level child for breadcrumb move',
slug='grandpa/dad/son', is_approved=True, save=True)
son_doc = son.document
son_doc.parent_topic = dad_doc
son_doc.save()
grandma = revision(title='Top-level parent for breadcrumb move',
slug='grandma', is_approved=True, save=True)
grandma_doc = grandma.document
mom = revision(title='Mid-level parent for breadcrumb move',
slug='grandma/mom', is_approved=True, save=True)
mom_doc = mom.document
mom_doc.parent_topic = grandma_doc
mom_doc.save()
daughter = revision(title='Bottom-level child for breadcrumb move',
slug='grandma/mom/daughter',
is_approved=True,
save=True)
daughter_doc = daughter.document
daughter_doc.parent_topic = mom_doc
daughter_doc.save()
# move grandma under grandpa
grandma_doc._move_tree('grandpa/grandma')
# assert the parent_topics are correctly rooted at grandpa
# note we have to refetch these to see any DB changes.
grandma_moved = Document.objects.get(locale=grandma_doc.locale,
slug='grandpa/grandma')
assert grandma_moved.parent_topic == grandpa_doc
mom_moved = Document.objects.get(locale=mom_doc.locale,
slug='grandpa/grandma/mom')
assert mom_moved.parent_topic == grandma_moved
@pytest.mark.move
def test_move_tree_no_new_parent(self):
"""Moving a tree to a slug that doesn't exist throws error."""
rev = revision(title='doc to move',
slug='doc1', is_approved=True, save=True)
doc = rev.document
with pytest.raises(Exception):
doc._move_tree('slug-that-doesnt-exist/doc1')
@pytest.mark.move
def test_move_top_level_docs(self):
"""Moving a top document to a new slug location"""
page_to_move_title = 'Page Move Root'
page_to_move_slug = 'Page_Move_Root'
page_child_slug = 'Page_Move_Root/Page_Move_Child'
page_moved_slug = 'Page_Move_Root_Moved'
page_child_moved_slug = 'Page_Move_Root_Moved/Page_Move_Child'
page_to_move_doc = document(title=page_to_move_title,
slug=page_to_move_slug,
save=True)
rev = revision(document=page_to_move_doc,
title=page_to_move_title,
slug=page_to_move_slug,
save=True)
page_to_move_doc.current_revision = rev
page_to_move_doc.save()
page_child = revision(title='child', slug=page_child_slug,
is_approved=True, save=True)
page_child_doc = page_child.document
page_child_doc.parent_topic = page_to_move_doc
page_child_doc.save()
# move page to new slug
new_title = page_to_move_title + ' Moved'
page_to_move_doc._move_tree(page_moved_slug, user=None,
title=new_title)
page_to_move_doc = Document.objects.get(slug=page_to_move_slug)
page_moved_doc = Document.objects.get(slug=page_moved_slug)
page_child_doc = Document.objects.get(slug=page_child_slug)
page_child_moved_doc = Document.objects.get(slug=page_child_moved_slug)
assert 'REDIRECT' in page_to_move_doc.html
assert page_moved_slug in page_to_move_doc.html
assert new_title in page_to_move_doc.html
assert page_moved_doc
assert 'REDIRECT' in page_child_doc.html
assert page_moved_slug in page_child_doc.html
assert page_child_moved_doc
# TODO: Fix this assertion?
        # assert 'admin' == page_moved_doc.current_revision.creator.username
@pytest.mark.move
def test_mid_move(self):
root_title = 'Root'
root_slug = 'Root'
child_title = 'Child'
child_slug = 'Root/Child'
moved_child_slug = 'DiffChild'
grandchild_title = 'Grandchild'
grandchild_slug = 'Root/Child/Grandchild'
moved_grandchild_slug = 'DiffChild/Grandchild'
root_doc = document(title=root_title,
slug=root_slug,
save=True)
rev = revision(document=root_doc,
title=root_title,
slug=root_slug,
save=True)
root_doc.current_revision = rev
root_doc.save()
child = revision(title=child_title, slug=child_slug,
is_approved=True, save=True)
child_doc = child.document
child_doc.parent_topic = root_doc
child_doc.save()
grandchild = revision(title=grandchild_title,
slug=grandchild_slug,
is_approved=True, save=True)
grandchild_doc = grandchild.document
grandchild_doc.parent_topic = child_doc
grandchild_doc.save()
child_doc._move_tree(moved_child_slug)
redirected_child = Document.objects.get(slug=child_slug)
Document.objects.get(slug=moved_child_slug)
assert 'REDIRECT' in redirected_child.html
assert moved_child_slug in redirected_child.html
redirected_grandchild = Document.objects.get(slug=grandchild_doc.slug)
Document.objects.get(slug=moved_grandchild_slug)
assert 'REDIRECT' in redirected_grandchild.html
assert moved_grandchild_slug in redirected_grandchild.html
@pytest.mark.move
def test_move_special(self):
root_slug = 'User:foo'
child_slug = '%s/child' % root_slug
new_root_slug = 'User:foobar'
special_root = document(title='User:foo',
slug=root_slug,
save=True)
revision(document=special_root,
title=special_root.title,
slug=root_slug,
save=True)
special_child = document(title='User:foo child',
slug=child_slug,
save=True)
revision(document=special_child,
title=special_child.title,
slug=child_slug,
save=True)
special_child.parent_topic = special_root
special_child.save()
original_root_id = special_root.id
original_child_id = special_child.id
# First move, to new slug.
special_root._move_tree(new_root_slug)
# Appropriate redirects were left behind.
root_redirect = Document.objects.get(locale=special_root.locale,
slug=root_slug)
assert root_redirect.is_redirect
root_redirect_id = root_redirect.id
child_redirect = Document.objects.get(locale=special_child.locale,
slug=child_slug)
assert child_redirect.is_redirect
child_redirect_id = child_redirect.id
# Moved documents still have the same IDs.
moved_root = Document.objects.get(locale=special_root.locale,
slug=new_root_slug)
assert original_root_id == moved_root.id
moved_child = Document.objects.get(locale=special_child.locale,
slug='%s/child' % new_root_slug)
assert original_child_id == moved_child.id
# Second move, back to original slug.
moved_root._move_tree(root_slug)
# Once again we left redirects behind.
root_second_redirect = Document.objects.get(locale=special_root.locale,
slug=new_root_slug)
assert root_second_redirect.is_redirect
child_second_redirect = Document.objects.get(locale=special_child.locale,
slug='%s/child' % new_root_slug)
assert child_second_redirect.is_redirect
# The documents at the original URLs aren't redirects anymore.
rerooted_root = Document.objects.get(locale=special_root.locale,
slug=root_slug)
assert not rerooted_root.is_redirect
rerooted_child = Document.objects.get(locale=special_child.locale,
slug=child_slug)
assert not rerooted_child.is_redirect
# The redirects created in the first move no longer exist in the DB.
self.assertRaises(Document.DoesNotExist,
Document.objects.get,
id=root_redirect_id)
self.assertRaises(Document.DoesNotExist,
Document.objects.get,
id=child_redirect_id)
def test_fail_message(self):
"""
        When a page move fails while moving one of the children, it
        generates an informative exception message identifying the
        child document that failed.
"""
top = revision(title='Test page-move error messaging',
slug='test-move-error-messaging',
is_approved=True,
save=True)
top_doc = top.document
child = revision(title='Child to test page-move error messaging',
slug='test-move-error-messaging/child',
is_approved=True,
save=True)
child_doc = child.document
child_doc.parent_topic = top_doc
child_doc.save()
grandchild = revision(title='Grandchild to test page-move error handling',
slug='test-move-error-messaging/child/grandchild',
is_approved=True,
save=True)
grandchild_doc = grandchild.document
grandchild_doc.parent_topic = child_doc
grandchild_doc.save()
revision(title='Conflict page for page-move error handling',
slug='test-move-error-messaging/moved/grandchild',
is_approved=True,
save=True)
mentioned_url = (
f'https://developer.mozilla.org/{grandchild_doc.locale}'
f'/docs/{grandchild_doc.slug}')
with self.assertRaisesRegex(PageMoveError, mentioned_url):
child_doc._move_tree('test-move-error-messaging/moved')
class RevisionIPTests(UserTestCase):
def test_delete_older_than_default_30_days(self):
old_date = date.today() - timedelta(days=31)
r = revision(created=old_date, save=True)
RevisionIP.objects.create(revision=r, ip='127.0.0.1').save()
assert 1 == RevisionIP.objects.all().count()
RevisionIP.objects.delete_old()
assert 0 == RevisionIP.objects.all().count()
def test_delete_older_than_days_argument(self):
rev_date = date.today() - timedelta(days=5)
r = revision(created=rev_date, save=True)
RevisionIP.objects.create(revision=r, ip='127.0.0.1').save()
assert 1 == RevisionIP.objects.all().count()
RevisionIP.objects.delete_old(days=4)
assert 0 == RevisionIP.objects.all().count()
def test_delete_older_than_only_deletes_older_than(self):
oldest_date = date.today() - timedelta(days=31)
r1 = revision(created=oldest_date, save=True)
RevisionIP.objects.create(revision=r1, ip='127.0.0.1').save()
old_date = date.today() - timedelta(days=29)
r1 = revision(created=old_date, save=True)
RevisionIP.objects.create(revision=r1, ip='127.0.0.1').save()
now_date = date.today()
r2 = revision(created=now_date, save=True)
RevisionIP.objects.create(revision=r2, ip='127.0.0.1').save()
assert 3 == RevisionIP.objects.all().count()
RevisionIP.objects.delete_old()
assert 2 == RevisionIP.objects.all().count()
class AttachmentTests(UserTestCase):
def new_attachment(self, mindtouch_attachment_id=666):
attachment = Attachment(
title='test attachment',
mindtouch_attachment_id=mindtouch_attachment_id,
)
attachment.save()
attachment_revision = AttachmentRevision(
attachment=attachment,
file='some/path.ext',
mime_type='application/kuma',
creator=get_user(username='admin'),
title='test attachment',
)
attachment_revision.save()
return attachment, attachment_revision
    def test_populate_deki_file_url(self):
attachment, attachment_revision = self.new_attachment()
html = ("""%s%s/@api/deki/files/%s/=""" %
(settings.PROTOCOL, settings.ATTACHMENT_HOST,
attachment.mindtouch_attachment_id))
doc = document(html=html, save=True)
doc.populate_attachments()
assert doc.attached_files.all().exists()
assert 1 == doc.attached_files.all().count()
assert attachment == doc.attached_files.first().file
    def test_populate_kuma_file_url(self):
attachment, attachment_revision = self.new_attachment()
doc = document(html=attachment.get_file_url(), save=True)
assert not doc.attached_files.all().exists()
populated = doc.populate_attachments()
assert 1 == len(populated)
assert doc.attached_files.all().exists()
assert 1 == doc.attached_files.all().count()
assert attachment == doc.attached_files.first().file
    def test_populate_multiple_attachments(self):
attachment, attachment_revision = self.new_attachment()
attachment2, attachment_revision2 = self.new_attachment()
html = ("%s %s" %
(attachment.get_file_url(), attachment2.get_file_url()))
doc = document(html=html, save=True)
populated = doc.populate_attachments()
attachments = doc.attached_files.all()
assert 2 == len(populated)
assert attachments.exists()
assert 2 == attachments.count()
assert attachment == attachments[0].file
assert attachment2 == attachments[1].file
|
samuelmaudo/yepes
|
refs/heads/master
|
yepes/utils/unidecode/x061.py
|
252
|
data = (
'Qiao ', # 0x00
'Chou ', # 0x01
'Bei ', # 0x02
'Xuan ', # 0x03
'Wei ', # 0x04
'Ge ', # 0x05
'Qian ', # 0x06
'Wei ', # 0x07
'Yu ', # 0x08
'Yu ', # 0x09
'Bi ', # 0x0a
'Xuan ', # 0x0b
'Huan ', # 0x0c
'Min ', # 0x0d
'Bi ', # 0x0e
'Yi ', # 0x0f
'Mian ', # 0x10
'Yong ', # 0x11
'Kai ', # 0x12
'Dang ', # 0x13
'Yin ', # 0x14
'E ', # 0x15
'Chen ', # 0x16
'Mou ', # 0x17
'Ke ', # 0x18
'Ke ', # 0x19
'Yu ', # 0x1a
'Ai ', # 0x1b
'Qie ', # 0x1c
'Yan ', # 0x1d
'Nuo ', # 0x1e
'Gan ', # 0x1f
'Yun ', # 0x20
'Zong ', # 0x21
'Sai ', # 0x22
'Leng ', # 0x23
'Fen ', # 0x24
'[?] ', # 0x25
'Kui ', # 0x26
'Kui ', # 0x27
'Que ', # 0x28
'Gong ', # 0x29
'Yun ', # 0x2a
'Su ', # 0x2b
'Su ', # 0x2c
'Qi ', # 0x2d
'Yao ', # 0x2e
'Song ', # 0x2f
'Huang ', # 0x30
'Ji ', # 0x31
'Gu ', # 0x32
'Ju ', # 0x33
'Chuang ', # 0x34
'Ni ', # 0x35
'Xie ', # 0x36
'Kai ', # 0x37
'Zheng ', # 0x38
'Yong ', # 0x39
'Cao ', # 0x3a
'Sun ', # 0x3b
'Shen ', # 0x3c
'Bo ', # 0x3d
'Kai ', # 0x3e
'Yuan ', # 0x3f
'Xie ', # 0x40
'Hun ', # 0x41
'Yong ', # 0x42
'Yang ', # 0x43
'Li ', # 0x44
'Sao ', # 0x45
'Tao ', # 0x46
'Yin ', # 0x47
'Ci ', # 0x48
'Xu ', # 0x49
'Qian ', # 0x4a
'Tai ', # 0x4b
'Huang ', # 0x4c
'Yun ', # 0x4d
'Shen ', # 0x4e
'Ming ', # 0x4f
'[?] ', # 0x50
'She ', # 0x51
'Cong ', # 0x52
'Piao ', # 0x53
'Mo ', # 0x54
'Mu ', # 0x55
'Guo ', # 0x56
'Chi ', # 0x57
'Can ', # 0x58
'Can ', # 0x59
'Can ', # 0x5a
'Cui ', # 0x5b
'Min ', # 0x5c
'Te ', # 0x5d
'Zhang ', # 0x5e
'Tong ', # 0x5f
'Ao ', # 0x60
'Shuang ', # 0x61
'Man ', # 0x62
'Guan ', # 0x63
'Que ', # 0x64
'Zao ', # 0x65
'Jiu ', # 0x66
'Hui ', # 0x67
'Kai ', # 0x68
'Lian ', # 0x69
'Ou ', # 0x6a
'Song ', # 0x6b
'Jin ', # 0x6c
'Yin ', # 0x6d
'Lu ', # 0x6e
'Shang ', # 0x6f
'Wei ', # 0x70
'Tuan ', # 0x71
'Man ', # 0x72
'Qian ', # 0x73
'She ', # 0x74
'Yong ', # 0x75
'Qing ', # 0x76
'Kang ', # 0x77
'Di ', # 0x78
'Zhi ', # 0x79
'Lou ', # 0x7a
'Juan ', # 0x7b
'Qi ', # 0x7c
'Qi ', # 0x7d
'Yu ', # 0x7e
'Ping ', # 0x7f
'Liao ', # 0x80
'Cong ', # 0x81
'You ', # 0x82
'Chong ', # 0x83
'Zhi ', # 0x84
'Tong ', # 0x85
'Cheng ', # 0x86
'Qi ', # 0x87
'Qu ', # 0x88
'Peng ', # 0x89
'Bei ', # 0x8a
'Bie ', # 0x8b
'Chun ', # 0x8c
'Jiao ', # 0x8d
'Zeng ', # 0x8e
'Chi ', # 0x8f
'Lian ', # 0x90
'Ping ', # 0x91
'Kui ', # 0x92
'Hui ', # 0x93
'Qiao ', # 0x94
'Cheng ', # 0x95
'Yin ', # 0x96
'Yin ', # 0x97
'Xi ', # 0x98
'Xi ', # 0x99
'Dan ', # 0x9a
'Tan ', # 0x9b
'Duo ', # 0x9c
'Dui ', # 0x9d
'Dui ', # 0x9e
'Su ', # 0x9f
'Jue ', # 0xa0
'Ce ', # 0xa1
'Xiao ', # 0xa2
'Fan ', # 0xa3
'Fen ', # 0xa4
'Lao ', # 0xa5
'Lao ', # 0xa6
'Chong ', # 0xa7
'Han ', # 0xa8
'Qi ', # 0xa9
'Xian ', # 0xaa
'Min ', # 0xab
'Jing ', # 0xac
'Liao ', # 0xad
'Wu ', # 0xae
'Can ', # 0xaf
'Jue ', # 0xb0
'Cu ', # 0xb1
'Xian ', # 0xb2
'Tan ', # 0xb3
'Sheng ', # 0xb4
'Pi ', # 0xb5
'Yi ', # 0xb6
'Chu ', # 0xb7
'Xian ', # 0xb8
'Nao ', # 0xb9
'Dan ', # 0xba
'Tan ', # 0xbb
'Jing ', # 0xbc
'Song ', # 0xbd
'Han ', # 0xbe
'Jiao ', # 0xbf
'Wai ', # 0xc0
'Huan ', # 0xc1
'Dong ', # 0xc2
'Qin ', # 0xc3
'Qin ', # 0xc4
'Qu ', # 0xc5
'Cao ', # 0xc6
'Ken ', # 0xc7
'Xie ', # 0xc8
'Ying ', # 0xc9
'Ao ', # 0xca
'Mao ', # 0xcb
'Yi ', # 0xcc
'Lin ', # 0xcd
'Se ', # 0xce
'Jun ', # 0xcf
'Huai ', # 0xd0
'Men ', # 0xd1
'Lan ', # 0xd2
'Ai ', # 0xd3
'Lin ', # 0xd4
'Yan ', # 0xd5
'Gua ', # 0xd6
'Xia ', # 0xd7
'Chi ', # 0xd8
'Yu ', # 0xd9
'Yin ', # 0xda
'Dai ', # 0xdb
'Meng ', # 0xdc
'Ai ', # 0xdd
'Meng ', # 0xde
'Dui ', # 0xdf
'Qi ', # 0xe0
'Mo ', # 0xe1
'Lan ', # 0xe2
'Men ', # 0xe3
'Chou ', # 0xe4
'Zhi ', # 0xe5
'Nuo ', # 0xe6
'Nuo ', # 0xe7
'Yan ', # 0xe8
'Yang ', # 0xe9
'Bo ', # 0xea
'Zhi ', # 0xeb
'Kuang ', # 0xec
'Kuang ', # 0xed
'You ', # 0xee
'Fu ', # 0xef
'Liu ', # 0xf0
'Mie ', # 0xf1
'Cheng ', # 0xf2
'[?] ', # 0xf3
'Chan ', # 0xf4
'Meng ', # 0xf5
'Lan ', # 0xf6
'Huai ', # 0xf7
'Xuan ', # 0xf8
'Rang ', # 0xf9
'Chan ', # 0xfa
'Ji ', # 0xfb
'Ju ', # 0xfc
'Huan ', # 0xfd
'She ', # 0xfe
'Yi ', # 0xff
)
|
ddzialak/boto
|
refs/heads/develop
|
boto/ec2/spotdatafeedsubscription.py
|
152
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Spot Instance Datafeed Subscription
"""
from boto.ec2.ec2object import EC2Object
from boto.ec2.spotinstancerequest import SpotInstanceStateFault
class SpotDatafeedSubscription(EC2Object):
def __init__(self, connection=None, owner_id=None,
bucket=None, prefix=None, state=None, fault=None):
super(SpotDatafeedSubscription, self).__init__(connection)
self.owner_id = owner_id
self.bucket = bucket
self.prefix = prefix
self.state = state
self.fault = fault
def __repr__(self):
return 'SpotDatafeedSubscription:%s' % self.bucket
def startElement(self, name, attrs, connection):
if name == 'fault':
self.fault = SpotInstanceStateFault()
return self.fault
else:
return None
def endElement(self, name, value, connection):
if name == 'ownerId':
self.owner_id = value
elif name == 'bucket':
self.bucket = value
elif name == 'prefix':
self.prefix = value
elif name == 'state':
self.state = value
else:
setattr(self, name, value)
def delete(self, dry_run=False):
return self.connection.delete_spot_datafeed_subscription(
dry_run=dry_run
)
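# Illustrative usage sketch -- an addition for clarity, not part of the
# original module. A SpotDatafeedSubscription is normally obtained from an
# EC2Connection rather than constructed directly (assumes valid AWS
# credentials and an existing subscription):
#
#     import boto.ec2
#     conn = boto.ec2.connect_to_region('us-east-1')
#     sub = conn.get_spot_datafeed_subscription()
#     print('%s (%s)' % (sub.bucket, sub.state))
#     sub.delete()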
|
MaryanMorel/faker
|
refs/heads/master
|
faker/providers/phone_number/sl_SI/__init__.py
|
21
|
from __future__ import unicode_literals
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
formats = (
'040 ### ###',
'041 ### ###',
'031 ### ###',
'030 ### ###',
'070 ### ###',
'01 #### ###',
'02 #### ###',
'04 #### ###',
'05 #### ###',
'06 #### ###',
'08 #### ###',
)
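
# Hypothetical quick check -- an illustrative addition, not original code,
# assuming a faker release where Faker accepts a locale argument. The base
# PhoneNumberProvider replaces each '#' in a chosen format with a random
# digit, yielding numbers such as '041 123 456'.
if __name__ == '__main__':
    from faker import Faker
    print(Faker('sl_SI').phone_number())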
|
valdecdev/odoo
|
refs/heads/master
|
addons/payment_transfer/__init__.py
|
616
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import models
import controllers
|
theonion/django-bulbs
|
refs/heads/master
|
bulbs/utils/fields.py
|
1
|
from rest_framework import serializers
class RichTextField(serializers.CharField):
def __init__(self, *args, **kwargs):
self.field_size = kwargs.pop("field_size", None)
super(RichTextField, self).__init__(*args, **kwargs)
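
# Illustrative usage sketch -- an addition, not part of the original file.
# Because "field_size" is popped before CharField.__init__ runs, DRF never
# sees the unexpected keyword, so a serializer can declare e.g.:
#
#     class StorySerializer(serializers.Serializer):  # hypothetical serializer
#         body = RichTextField(field_size="long")
#
# leaving the hint available later as serializer.fields["body"].field_size.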
|
40223236/2015cd_midterm_1
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/copy.py
|
628
|
"""Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copyreg import dispatch_table
import builtins
class Error(Exception):
pass
error = Error # backward compatibility
# module org.python.core does not exist in Brython, so let's just ignore
# this import request.
#try:
# from org.python.core import PyStringMap
#except ImportError:
# PyStringMap = None
PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
"""Shallow copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
cls = type(x)
copier = _copy_dispatch.get(cls)
if copier:
return copier(x)
copier = getattr(cls, "__copy__", None)
if copier:
return copier(x)
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error("un(shallow)copyable object of type %s" % cls)
return _reconstruct(x, rv, 0)
_copy_dispatch = d = {}
def _copy_immutable(x):
return x
for t in (type(None), int, float, bool, str, tuple,
frozenset, type, range,
types.BuiltinFunctionType, type(Ellipsis),
types.FunctionType, weakref.ref):
d[t] = _copy_immutable
t = getattr(types, "CodeType", None)
if t is not None:
d[t] = _copy_immutable
for name in ("complex", "unicode"):
t = getattr(builtins, name, None)
if t is not None:
d[t] = _copy_immutable
def _copy_with_constructor(x):
return type(x)(x)
for t in (list, dict, set):
d[t] = _copy_with_constructor
def _copy_with_copy_method(x):
return x.copy()
if PyStringMap is not None:
d[PyStringMap] = _copy_with_copy_method
del d
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
y = _reconstruct(x, rv, 1, memo)
    # If x is its own copy, don't memoize.
if y is not x:
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
d[complex] = _deepcopy_atomic
except NameError:
pass
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
d[types.CodeType] = _deepcopy_atomic
except AttributeError:
pass
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
y = []
memo[id(x)] = y
for a in x:
y.append(deepcopy(a, memo))
return y
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
y = []
for a in x:
y.append(deepcopy(a, memo))
# We're not going to put the tuple in the memo, but it's still important we
# check for it, in case the tuple contains recursive mutable structures.
try:
return memo[id(x)]
except KeyError:
pass
for i in range(len(x)):
if x[i] is not y[i]:
y = tuple(y)
break
else:
y = x
return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
y = {}
memo[id(x)] = y
for key, value in x.items():
y[deepcopy(key, memo)] = deepcopy(value, memo)
return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo): # Copy instance methods
return type(x)(x.__func__, deepcopy(x.__self__, memo))
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _reconstruct(x, info, deep, memo=None):
if isinstance(info, str):
return x
assert isinstance(info, tuple)
if memo is None:
memo = {}
n = len(info)
assert n in (2, 3, 4, 5)
callable, args = info[:2]
if n > 2:
state = info[2]
else:
state = {}
if n > 3:
listiter = info[3]
else:
listiter = None
if n > 4:
dictiter = info[4]
else:
dictiter = None
if deep:
args = deepcopy(args, memo)
y = callable(*args)
memo[id(x)] = y
if state:
if deep:
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
else:
slotstate = None
if state is not None:
y.__dict__.update(state)
if slotstate is not None:
for key, value in slotstate.items():
setattr(y, key, value)
if listiter is not None:
for item in listiter:
if deep:
item = deepcopy(item, memo)
y.append(item)
if dictiter is not None:
for key, value in dictiter:
if deep:
key = deepcopy(key, memo)
value = deepcopy(value, memo)
y[key] = value
return y
del d
del types
# Helper for instance creation without calling __init__
class _EmptyClass:
pass
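
# A minimal demonstration of the shallow/deep distinction described in the
# module docstring -- an illustrative addition, guarded so it never runs on
# import.
if __name__ == '__main__':
    nested = [[1, 2], [3, 4]]
    shallow = copy(nested)    # inner lists are shared with the original
    deep = deepcopy(nested)   # inner lists are recursively duplicated
    nested[0].append(99)
    assert shallow[0] == [1, 2, 99]  # shallow copy observes the mutation
    assert deep[0] == [1, 2]         # deep copy is unaffected
    print('copy/deepcopy demo passed')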
|
Changaco/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/doc/historic/2003/pycon/deferex/deferex-listing2.py
|
40
|
def successCallback(result):
myResult = result + 1
print myResult
return myResult
...
adder.callRemote("add", 1, 1).addCallback(successCallback)
|
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/test/pydocfodder.py
|
195
|
"""Something just to look at via pydoc."""
import types
class A_classic:
"A classic class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
class B_classic(A_classic):
"A classic class, derived from A_classic."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_classic(A_classic):
"A classic class, derived from A_classic."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_classic(B_classic, C_classic):
"A classic class, derived from B_classic and C_classic."
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class A_new(object):
"A new-style class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def A_classmethod(cls, x):
"A class method defined in A."
A_classmethod = classmethod(A_classmethod)
def A_staticmethod():
"A static method defined in A."
A_staticmethod = staticmethod(A_staticmethod)
def _getx(self):
"A property getter function."
def _setx(self, value):
"A property setter function."
def _delx(self):
"A property deleter function."
A_property = property(fdel=_delx, fget=_getx, fset=_setx,
doc="A sample property defined in A.")
A_int_alias = int
class B_new(A_new):
"A new-style class, derived from A_new."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_new(A_new):
"A new-style class, derived from A_new."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_new(B_new, C_new):
"""A new-style class, derived from B_new and C_new.
"""
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class FunkyProperties(object):
"""From SF bug 472347, by Roeland Rengelink.
Property getters etc may not be vanilla functions or methods,
and this used to make GUI pydoc blow up.
"""
def __init__(self):
self.desc = {'x':0}
class get_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print 'Get called', self, inst
return inst.desc[self.attr]
class set_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst, val):
print 'Set called', self, inst, val
inst.desc[self.attr] = val
class del_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print 'Del called', self, inst
del inst.desc[self.attr]
x = property(get_desc('x'), set_desc('x'), del_desc('x'), 'prop x')
submodule = types.ModuleType(__name__ + '.submodule',
"""A submodule, which should appear in its parent's summary""")
|
ValFadeev/ansible-modules-core
|
refs/heads/devel
|
cloud/rackspace/rax_queue.py
|
157
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module; it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_queue
short_description: create / delete a queue in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud queue.
version_added: "1.5"
options:
name:
description:
- Name to give the queue
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Queue
gather_facts: False
hosts: local
connection: local
tasks:
- name: Queue create request
local_action:
module: rax_queue
credentials: ~/.raxpub
name: my-queue
region: DFW
state: present
register: my_queue
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_queue(module, state, name):
for arg in (state, name):
if not arg:
module.fail_json(msg='%s is required for rax_queue' % arg)
changed = False
queues = []
instance = {}
cq = pyrax.queues
if not cq:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
for queue in cq.list():
if name != queue.name:
continue
queues.append(queue)
if len(queues) > 1:
module.fail_json(msg='Multiple Queues were matched by name')
if state == 'present':
if not queues:
try:
queue = cq.create(name)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
queue = queues[0]
instance = dict(name=queue.name)
result = dict(changed=changed, queue=instance)
module.exit_json(**result)
elif state == 'absent':
if queues:
queue = queues[0]
try:
queue.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, queue=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
state = module.params.get('state')
setup_rax_module(module, pyrax)
cloud_queue(module, state, name)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
|
bert9bert/statsmodels
|
refs/heads/master
|
statsmodels/sandbox/tsa/examples/try_ld_nitime.py
|
34
|
'''Levinson Durbin recursion adjusted from nitime
'''
from statsmodels.compat.python import range
import numpy as np
from statsmodels.tsa.stattools import acovf
def levinson_durbin_nitime(s, order=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
'''
#from nitime
## if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
## else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order+1] #not tested
phi = np.zeros((order+1, order+1), 'd')
sig = np.zeros(order+1)
# initial points for the recursion
phi[1,1] = sxx_m[1]/sxx_m[0]
sig[1] = sxx_m[0] - phi[1,1]*sxx_m[1]
for k in range(2,order+1):
phi[k,k] = (sxx_m[k]-np.dot(phi[1:k,k-1], sxx_m[1:k][::-1]))/sig[k-1]
for j in range(1,k):
phi[j,k] = phi[j,k-1] - phi[k,k]*phi[k-j,k-1]
sig[k] = sig[k-1]*(1 - phi[k,k]**2)
    sigma_v = sig[-1]
    arcoefs = phi[1:, -1]
    # pacf was referenced in the return statement but never assigned, which
    # raised a NameError; define it as in statsmodels' own levinson_durbin
    # (diagonal of phi, with the lag-0 partial autocorrelation set to 1).
    pacf = np.diag(phi).copy()
    pacf[0] = 1.
    return sigma_v, arcoefs, pacf, phi  # return everything
import nitime.utils as ut
sxx=None
order = 10
npts = 2048*10
sigma = 1
drop_transients = 1024
coefs = np.array([0.9, -0.5])
# Generate AR(2) time series
X, v, _ = ut.ar_generator(npts, sigma, coefs, drop_transients)
s = X
import statsmodels.api as sm
sm.tsa.stattools.pacf(X)
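# Hedged cross-check -- an illustrative addition: statsmodels ships its own
# Levinson-Durbin implementation with a matching signature, so the recursion
# above can be validated against it on the same series:
sigma_v_sm, arcoefs_sm, pacf_sm, sig_sm, phi_sm = \
    sm.tsa.stattools.levinson_durbin(X, nlags=order, isacov=False)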
|
dataewan/deep-learning
|
refs/heads/master
|
tv-script-generation/problem_unittests.py
|
68
|
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
def _print_success_message():
print('Tests Passed')
def test_create_lookup_tables(create_lookup_tables):
with tf.Graph().as_default():
test_text = '''
Moe_Szyslak Moe's Tavern Where the elite meet to drink
Bart_Simpson Eh yeah hello is Mike there Last name Rotch
Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately
Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick
Moe_Szyslak Whats the matter Homer You're not your normal effervescent self
Homer_Simpson I got my problems Moe Give me another one
Moe_Szyslak Homer hey you should not drink to forget your problems
Barney_Gumble Yeah you should only drink to enhance your social skills'''
test_text = test_text.lower()
test_text = test_text.split()
vocab_to_int, int_to_vocab = create_lookup_tables(test_text)
# Check types
assert isinstance(vocab_to_int, dict),\
'vocab_to_int is not a dictionary.'
assert isinstance(int_to_vocab, dict),\
'int_to_vocab is not a dictionary.'
# Compare lengths of dicts
assert len(vocab_to_int) == len(int_to_vocab),\
'Length of vocab_to_int and int_to_vocab don\'t match. ' \
'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))
# Make sure the dicts have the same words
vocab_to_int_word_set = set(vocab_to_int.keys())
int_to_vocab_word_set = set(int_to_vocab.values())
assert not (vocab_to_int_word_set - int_to_vocab_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)
assert not (int_to_vocab_word_set - vocab_to_int_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)
# Make sure the dicts have the same word ids
vocab_to_int_word_id_set = set(vocab_to_int.values())
int_to_vocab_word_id_set = set(int_to_vocab.keys())
assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)
assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)
# Make sure the dicts make the same lookup
        mismatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]
        assert not mismatches,\
            'Found {} mismatch(es). First mismatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(
            len(mismatches),
            *mismatches[0])
assert len(vocab_to_int) > len(set(test_text))/2,\
'The length of vocab seems too small. Found a length of {}'.format(len(vocab_to_int))
_print_success_message()
def test_get_batches(get_batches):
with tf.Graph().as_default():
test_batch_size = 128
test_seq_length = 5
test_int_text = list(range(1000*test_seq_length))
batches = get_batches(test_int_text, test_batch_size, test_seq_length)
# Check type
assert isinstance(batches, np.ndarray),\
'Batches is not a Numpy array'
# Check shape
assert batches.shape == (7, 2, 128, 5),\
'Batches returned wrong shape. Found {}'.format(batches.shape)
for x in range(batches.shape[2]):
assert np.array_equal(batches[0,0,x], np.array(range(x * 35, x * 35 + batches.shape[3]))),\
'Batches returned wrong contents. For example, input sequence {} in the first batch was {}'.format(x, batches[0,0,x])
assert np.array_equal(batches[0,1,x], np.array(range(x * 35 + 1, x * 35 + 1 + batches.shape[3]))),\
'Batches returned wrong contents. For example, target sequence {} in the first batch was {}'.format(x, batches[0,1,x])
last_seq_target = (test_batch_size-1) * 35 + 31
last_seq = np.array(range(last_seq_target, last_seq_target+ batches.shape[3]))
last_seq[-1] = batches[0,0,0,0]
assert np.array_equal(batches[-1,1,-1], last_seq),\
'The last target of the last batch should be the first input of the first batch. Found {} but expected {}'.format(batches[-1,1,-1], last_seq)
_print_success_message()
def test_tokenize(token_lookup):
with tf.Graph().as_default():
symbols = set(['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n'])
token_dict = token_lookup()
# Check type
assert isinstance(token_dict, dict), \
'Returned type is {}.'.format(type(token_dict))
# Check symbols
missing_symbols = symbols - set(token_dict.keys())
unknown_symbols = set(token_dict.keys()) - symbols
assert not missing_symbols, \
'Missing symbols: {}'.format(missing_symbols)
assert not unknown_symbols, \
'Unknown symbols: {}'.format(unknown_symbols)
# Check values type
bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)]
assert not bad_value_type,\
'Found token as {} type.'.format(bad_value_type[0])
# Check for spaces
key_has_spaces = [k for k in token_dict.keys() if ' ' in k]
val_has_spaces = [val for val in token_dict.values() if ' ' in val]
assert not key_has_spaces,\
'The key "{}" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0])
assert not val_has_spaces,\
'The value "{}" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0])
# Check for symbols in values
symbol_val = ()
for symbol in symbols:
for val in token_dict.values():
if symbol in val:
symbol_val = (symbol, val)
assert not symbol_val,\
'Don\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val)
_print_success_message()
def test_get_inputs(get_inputs):
with tf.Graph().as_default():
input_data, targets, lr = get_inputs()
# Check type
assert input_data.op.type == 'Placeholder',\
'Input not a Placeholder.'
assert targets.op.type == 'Placeholder',\
'Targets not a Placeholder.'
assert lr.op.type == 'Placeholder',\
'Learning Rate not a Placeholder.'
# Check name
assert input_data.name == 'input:0',\
'Input has bad name. Found name {}'.format(input_data.name)
# Check rank
input_rank = 0 if input_data.get_shape() == None else len(input_data.get_shape())
targets_rank = 0 if targets.get_shape() == None else len(targets.get_shape())
lr_rank = 0 if lr.get_shape() == None else len(lr.get_shape())
assert input_rank == 2,\
'Input has wrong rank. Rank {} found.'.format(input_rank)
assert targets_rank == 2,\
'Targets has wrong rank. Rank {} found.'.format(targets_rank)
assert lr_rank == 0,\
'Learning Rate has wrong rank. Rank {} found'.format(lr_rank)
_print_success_message()
def test_get_init_cell(get_init_cell):
with tf.Graph().as_default():
test_batch_size_ph = tf.placeholder(tf.int32)
test_rnn_size = 256
cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)
# Check type
assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\
'Cell is wrong type. Found {} type'.format(type(cell))
# Check for name attribute
assert hasattr(init_state, 'name'),\
'Initial state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
# Check name
assert init_state.name == 'initial_state:0',\
'Initial state doesn\'t have the correct name. Found the name {}'.format(init_state.name)
_print_success_message()
def test_get_embed(get_embed):
with tf.Graph().as_default():
embed_shape = [50, 5, 256]
test_input_data = tf.placeholder(tf.int32, embed_shape[:2])
test_vocab_size = 27
test_embed_dim = embed_shape[2]
embed = get_embed(test_input_data, test_vocab_size, test_embed_dim)
# Check shape
assert embed.shape == embed_shape,\
'Wrong shape. Found shape {}'.format(embed.shape)
_print_success_message()
def test_build_rnn(build_rnn):
with tf.Graph().as_default():
test_rnn_size = 256
test_rnn_layer_size = 2
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])
test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])
outputs, final_state = build_rnn(test_cell, test_inputs)
# Check name
assert hasattr(final_state, 'name'),\
'Final state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
assert final_state.name == 'final_state:0',\
'Final state doesn\'t have the correct name. Found the name {}'.format(final_state.name)
# Check shape
assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\
'Outputs has wrong shape. Found shape {}'.format(outputs.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_build_nn(build_nn):
with tf.Graph().as_default():
test_input_data_shape = [128, 5]
test_input_data = tf.placeholder(tf.int32, test_input_data_shape)
test_rnn_size = 256
test_embed_dim = 300
test_rnn_layer_size = 2
test_vocab_size = 27
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])
logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size, test_embed_dim)
# Check name
assert hasattr(final_state, 'name'), \
'Final state doesn\'t have the "name" attribute. Are you using build_rnn?'
assert final_state.name == 'final_state:0', \
'Final state doesn\'t have the correct name. Found the name {}. Are you using build_rnn?'.format(final_state.name)
# Check Shape
assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \
'Outputs has wrong shape. Found shape {}'.format(logits.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_get_tensors(get_tensors):
test_graph = tf.Graph()
with test_graph.as_default():
test_input = tf.placeholder(tf.int32, name='input')
test_initial_state = tf.placeholder(tf.int32, name='initial_state')
test_final_state = tf.placeholder(tf.int32, name='final_state')
test_probs = tf.placeholder(tf.float32, name='probs')
input_text, initial_state, final_state, probs = get_tensors(test_graph)
# Check correct tensor
assert input_text == test_input,\
'Test input is wrong tensor'
assert initial_state == test_initial_state, \
'Initial state is wrong tensor'
assert final_state == test_final_state, \
'Final state is wrong tensor'
assert probs == test_probs, \
'Probabilities is wrong tensor'
_print_success_message()
def test_pick_word(pick_word):
with tf.Graph().as_default():
test_probabilities = np.array([0.1, 0.8, 0.05, 0.05])
test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])}
pred_word = pick_word(test_probabilities, test_int_to_vocab)
# Check type
assert isinstance(pred_word, str),\
'Predicted word is wrong type. Found {} type.'.format(type(pred_word))
# Check word is from vocab
assert pred_word in test_int_to_vocab.values(),\
'Predicted word not found in int_to_vocab.'
_print_success_message()
|
IllusionRom-deprecated/android_platform_tools_idea
|
refs/heads/master
|
python/testData/optimizeImports/oneOfMultiple.after.py
|
84
|
import sys
print sys.argv
|
maxive/erp
|
refs/heads/master
|
addons/purchase/tests/common.py
|
2
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from odoo import fields
from odoo.addons.stock.tests.common2 import TestStockCommon
from odoo import tools
from odoo.modules.module import get_module_resource
class TestPurchase(TestStockCommon):
def _create_make_procurement(self, product, product_qty, date_planned=False):
ProcurementGroup = self.env['procurement.group']
order_values = {
'warehouse_id': self.warehouse_1,
'date_planned': date_planned or fields.Datetime.to_string(fields.datetime.now() + timedelta(days=10)), # 10 days added to current date of procurement to get future schedule date and order date of purchase order.
'group_id': self.env['procurement.group'],
}
return ProcurementGroup.run(product, product_qty, self.uom_unit, self.warehouse_1.lot_stock_id, product.name, '/', order_values)
def _load(self, module, *args):
tools.convert_file(self.cr, 'purchase',
get_module_resource(module, *args),
{}, 'init', False, 'test', self.registry._assertion_report)
@classmethod
def setUpClass(cls):
super(TestPurchase, cls).setUpClass()
cls.route_buy = cls.warehouse_1.buy_pull_id.route_id.id
cls.route_mto = cls.warehouse_1.mto_pull_id.route_id.id
# Update product_1 with type, route and Delivery Lead Time
cls.product_1.write({
'type': 'product',
'route_ids': [(6, 0, [cls.route_buy, cls.route_mto])],
'seller_ids': [(0, 0, {'name': cls.partner_1.id, 'delay': 5})]})
# Update product_2 with type, route and Delivery Lead Time
cls.product_2.write({
'type': 'product',
'route_ids': [(6, 0, [cls.route_buy, cls.route_mto])],
'seller_ids': [(0, 0, {'name': cls.partner_1.id, 'delay': 2})]})
cls.res_users_purchase_user = cls.env['res.users'].create({
'company_id': cls.env.ref('base.main_company').id,
'name': "Purchase User",
'login': "pu",
'email': "purchaseuser@yourcompany.com",
'groups_id': [(6, 0, [cls.env.ref('purchase.group_purchase_user').id])],
})
|
786228836/linux
|
refs/heads/master
|
tools/perf/scripts/python/export-to-postgresql.py
|
293
|
# export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
import datetime
# To use this script you will need to have installed package python-pyside which
# provides LGPL-licensed Python bindings for Qt. You will also need the package
# libqt4-sql-psql for Qt postgresql support.
#
# The script assumes postgresql is running on the local machine and that the
# user has postgresql permissions to create databases. Examples of installing
# postgresql and adding such a user are:
#
# fedora:
#
# $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql
# $ sudo su - postgres -c initdb
# $ sudo service postgresql start
# $ sudo su - postgres
# $ createuser <your user id here>
# Shall the new role be a superuser? (y/n) y
#
# ubuntu:
#
# $ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql
# $ sudo su - postgres
# $ createuser -s <your user id here>
#
# An example of using this script with Intel PT:
#
# $ perf record -e intel_pt//u ls
# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls
# 2015-05-29 12:49:23.464364 Creating database...
# 2015-05-29 12:49:26.281717 Writing to intermediate files...
# 2015-05-29 12:49:27.190383 Copying to database...
# 2015-05-29 12:49:28.140451 Removing intermediate files...
# 2015-05-29 12:49:28.147451 Adding primary keys
# 2015-05-29 12:49:28.655683 Adding foreign keys
# 2015-05-29 12:49:29.365350 Done
#
# To browse the database, psql can be used e.g.
#
# $ psql pt_example
# pt_example=# select * from samples_view where id < 100;
# pt_example=# \d+
# pt_example=# \d+ samples_view
# pt_example=# \q
#
# An example of using the database is provided by the script
# call-graph-from-postgresql.py. Refer to that script for details.
#
# Tables:
#
# The tables largely correspond to perf tools' data structures. They are largely self-explanatory.
#
# samples
#
# 'samples' is the main table. It represents what instruction was executing at a point in time
# when something (a selected event) happened. The memory address is the instruction pointer or 'ip'.
#
# calls
#
# 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'.
# 'calls' is only created when the 'calls' option to this script is specified.
#
# call_paths
#
# 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'.
# 'calls_paths' is only created when the 'calls' option to this script is specified.
#
# branch_types
#
# 'branch_types' provides descriptions for each type of branch.
#
# comm_threads
#
# 'comm_threads' shows how 'comms' relates to 'threads'.
#
# comms
#
# 'comms' contains a record for each 'comm' - the name given to the executable that is running.
#
# dsos
#
# 'dsos' contains a record for each executable file or library.
#
# machines
#
# 'machines' can be used to distinguish virtual machines if virtualization is supported.
#
# selected_events
#
# 'selected_events' contains a record for each kind of event that has been sampled.
#
# symbols
#
# 'symbols' contains a record for each symbol. Only symbols that have samples are present.
#
# threads
#
# 'threads' contains a record for each thread.
#
# Views:
#
# Most of the tables have views for more friendly display. The views are:
#
# calls_view
# call_paths_view
# comm_threads_view
# dsos_view
# machines_view
# samples_view
# symbols_view
# threads_view
#
# More examples of browsing the database with psql:
# Note that some of the examples are not the most optimal SQL query.
# Note that call information is only available if the script's 'calls' option has been used.
#
# Top 10 function calls (not aggregated by symbol):
#
# SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10;
#
# Top 10 function calls (aggregated by symbol):
#
# SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,
# SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count
# FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10;
#
# Note that the branch count gives a rough estimation of cpu usage, so functions
# that took a long time but have a relatively low branch count must have spent time
# waiting.
#
# Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'):
#
# SELECT * FROM symbols_view WHERE name LIKE '%alloc%';
#
# Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187):
#
# SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10;
#
# Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254):
#
# SELECT * FROM calls_view WHERE parent_call_path_id = 254;
#
# Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670)
#
# SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%';
#
# Show transactions:
#
# SELECT * FROM samples_view WHERE event = 'transactions';
#
#	Note transaction start has 'in_tx' true whereas transaction end has 'in_tx' false.
# Transaction aborts have branch_type_name 'transaction abort'
#
# Show transaction aborts:
#
# SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort';
#
# To print a call stack requires walking the call_paths table. For example this python script:
# #!/usr/bin/python2
#
# import sys
# from PySide.QtSql import *
#
# if __name__ == '__main__':
# if (len(sys.argv) < 3):
# print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>"
# raise Exception("Too few arguments")
# dbname = sys.argv[1]
# call_path_id = sys.argv[2]
# db = QSqlDatabase.addDatabase('QPSQL')
# db.setDatabaseName(dbname)
# if not db.open():
# raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
# query = QSqlQuery(db)
# print " id ip symbol_id symbol dso_id dso_short_name"
# while call_path_id != 0 and call_path_id != 1:
# ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id))
# if not ret:
# raise Exception("Query failed: " + query.lastError().text())
# if not query.next():
# raise Exception("Query failed")
# print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
# call_path_id = query.value(6)
from PySide.QtSql import *
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQfinish = libpq.PQfinish
PQstatus = libpq.PQstatus
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQresultStatus = libpq.PQresultStatus
PQputCopyData = libpq.PQputCopyData
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
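# Hedged note -- an illustrative addition: these ctypes wrappers mirror the
# libpq C API. PQconnectdb opens a connection, PQputCopyData streams raw rows
# during a COPY ... FROM STDIN, and PQputCopyEnd terminates the stream, which
# is far faster than issuing row-by-row INSERTs through Qt's SQL layer.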
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
print >> sys.stderr, " callchains 'callchains' => create call_paths table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
for i in range(3,len(sys.argv)):
if (sys.argv[i] == "calls"):
perf_db_export_calls = True
elif (sys.argv[i] == "callchains"):
perf_db_export_callchains = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
print datetime.datetime.today(), "Creating database..."
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16))')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
'id,'
'pid,'
'root_dir,'
'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
' FROM machines')
do_query(query, 'CREATE VIEW dsos_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'short_name,'
'long_name,'
'build_id'
' FROM dsos')
do_query(query, 'CREATE VIEW symbols_view AS '
'SELECT '
'id,'
'name,'
'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
'dso_id,'
'sym_start,'
'sym_end,'
'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
' FROM symbols')
do_query(query, 'CREATE VIEW threads_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'process_id,'
'pid,'
'tid'
' FROM threads')
do_query(query, 'CREATE VIEW comm_threads_view AS '
'SELECT '
'comm_id,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
' FROM comm_threads')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE VIEW call_paths_view AS '
'SELECT '
'c.id,'
'to_hex(c.ip) AS ip,'
'c.symbol_id,'
'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
'(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,'
'c.parent_id,'
'to_hex(p.ip) AS parent_ip,'
'p.symbol_id AS parent_symbol_id,'
'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
'(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
if perf_db_export_calls:
do_query(query, 'CREATE VIEW calls_view AS '
'SELECT '
'calls.id,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'call_path_id,'
'to_hex(ip) AS ip,'
'symbol_id,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'call_time,'
'return_time,'
'return_time - call_time AS elapsed_time,'
'branch_count,'
'call_id,'
'return_id,'
'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
'parent_call_path_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
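# The intermediate files use PostgreSQL's binary COPY format: a 19-byte
# header (the 11-byte signature "PGCOPY\n\377\r\n\0", a 32-bit flags field
# and a 32-bit header extension length), one record per tuple, and a 16-bit
# trailer of -1 (\377\377) marking end-of-data.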
file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = "\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "w+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
conn = PQconnectdb("dbname = " + dbname)
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, sql)
if (PQresultStatus(res) != 4): # 4 == PGRES_COPY_IN in libpq's ExecStatusType
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls or perf_db_export_callchains:
call_path_file = open_output_file("call_path_table.bin")
if perf_db_export_calls:
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
def trace_end():
print datetime.datetime.today(), "Copying to database..."
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls or perf_db_export_callchains:
copy_output_file(call_path_file, "call_paths")
if perf_db_export_calls:
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls or perf_db_export_callchains:
remove_output_file(call_path_file)
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
print datetime.datetime.today(), "Done"
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
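# Each *_table function below writes one tuple in binary COPY format: a
# 16-bit field count, then for each field a 32-bit byte length followed by
# the big-endian value itself. For example (a sketch), evsel_table packs
# "!hiqi" + str(n) + "s": field count 2, then length 8 + the id, then
# length n + the name.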
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
|
tbachman/group-based-policy
|
refs/heads/master
|
setup.py
|
463
|
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
|
Nic30/hwtLib
|
refs/heads/master
|
hwtLib/examples/arithmetic/vhdl_vector_auto_casts.py
|
1
|
from hwt.hdl.types.bits import Bits
from hwt.interfaces.std import Signal
from hwt.synthesizer.unit import Unit
from hwtLib.examples.base_serialization_TC import BaseSerializationTC
class VhdlVectorAutoCastExample(Unit):
def _declr(self):
std_logic = Bits(1)
std_logic_vector_0 = Bits(1, force_vector=True)
self.a = Signal(dtype=std_logic)
self.b = Signal(dtype=std_logic)._m()
self.c = Signal(dtype=std_logic_vector_0)._m()
self.d = Signal(dtype=std_logic_vector_0)
self.e = Signal(dtype=std_logic)._m()
self.f = Signal(dtype=std_logic)
self.g = Signal(dtype=std_logic_vector_0)
self.i = Signal(dtype=std_logic)._m()
self.j = Signal(dtype=std_logic)._m()
def _impl(self):
# no conversion
self.b(self.a)
# std_logic -> std_logic_vector
self.c(self.a)
# std_logic_vector -> std_logic
self.e(self.d)
# unsigned(std_logic) + unsigned(std_logic_vector) -> std_logic_vector -> std_logic
self.i(self.f + self.g)
# unsigned(std_logic_vector) + unsigned(std_logic) -> std_logic_vector -> std_logic
self.j(self.g + self.f)
class VhdlVectorAutoCastExampleTC(BaseSerializationTC):
__FILE__ = __file__
def test_vhdl(self):
u = VhdlVectorAutoCastExample()
self.assert_serializes_as_file(u, "VhdlVectorAutoCastExample.vhd")
if __name__ == '__main__':
from hwt.synthesizer.utils import to_rtl_str
from hwt.serializer.vhdl import Vhdl2008Serializer
u = VhdlVectorAutoCastExample()
print(to_rtl_str(u, Vhdl2008Serializer))
import unittest
suite = unittest.TestSuite()
# suite.addTest(VhdlVectorAutoCastExampleTC('test_vhdl'))
suite.addTest(unittest.makeSuite(VhdlVectorAutoCastExampleTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-3.0/Lib/textwrap.py
|
1
|
"""Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
__revision__ = "$Id: textwrap.py 63335 2008-05-16 00:03:33Z alexandre.vassalotti $"
import string, re
__all__ = ['TextWrapper', 'wrap', 'fill']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespace and right after the hyphens in
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just splits on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
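# For example, this matches the tail of chunks like "ready." or 'done!"',
# but also of abbreviations such as "etc.", which is one reason
# fix_sentence_endings is documented above as imperfect.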
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
pat = self.sentence_end_re
while i < len(chunks)-1:
if chunks[i+1] == " " and pat.search(chunks[i]):
chunks[i+1] = "  "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
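# Example (a quick sketch of the two convenience functions above):
#   wrap("The quick brown fox jumps over the lazy dog", width=15)
#     -> ['The quick brown', 'fox jumps over', 'the lazy dog']
#   fill("The quick brown fox jumps over the lazy dog", width=15)
#     -> 'The quick brown\nfox jumps over\nthe lazy dog'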
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
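# For example, dedent("    hello\n      world\n") returns "hello\n  world\n":
# the four-space margin common to both lines is removed, the extra two
# spaces before "world" are kept.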
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print(dedent("Hello there.\n This is indented."))
|
Epirex/android_kernel_samsung_golden
|
refs/heads/kk4.4-2
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If no such ancestor exists,
then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
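# A trace_pipe line typically looks like (a sketch):
#   gnome-terminal-2985 [000] 281.026533: mutex_unlock <-tty_ldisc_deref
# from which parseLine() extracts
# ('281.026533', 'mutex_unlock', 'tty_ldisc_deref').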
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
SSG-DRD-IOT/commercial-iot-security-system
|
refs/heads/master
|
opencv/tutorials/gui/mouse/mouseEvents_advanced.py
|
1
|
import cv2
import numpy as np
drawing = False # true if mouse is pressed
mode = True # if True, draw rectangle. press 'm' to toggle to curve
ix, iy = -1, -1
# mouse callback function
def draw_shape(event, x, y, flags, param):
global ix, iy, drawing, mode
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
ix, iy = x, y
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
if mode == True:
cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
else:
cv2.circle(img, (x, y), 5, (0,0,255), -1)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
if mode == True:
cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
else:
cv2.circle(img, (x, y), 5, (0,0,255), -1)
# bind this callback to window; in main loop, set keyboard binding 'm'
# to toggle between rect and circle
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_shape)
while(1):
cv2.imshow('image', img)
k = cv2.waitKey(1) & 0xFF # mask to the low byte; waitKey can return extra bits on some platforms
if k == ord('m'):
mode = not mode
elif k == 27:
break
cv2.destroyAllWindows()
|
efiring/scipy
|
refs/heads/master
|
scipy/spatial/__init__.py
|
24
|
"""
=============================================================
Spatial algorithms and data structures (:mod:`scipy.spatial`)
=============================================================
.. currentmodule:: scipy.spatial
Nearest-neighbor Queries
========================
.. autosummary::
:toctree: generated/
KDTree -- class for efficient nearest-neighbor queries
cKDTree -- class for efficient nearest-neighbor queries (faster impl.)
distance -- module containing many different distance measures
Delaunay Triangulation, Convex Hulls and Voronoi Diagrams
=========================================================
.. autosummary::
:toctree: generated/
Delaunay -- compute Delaunay triangulation of input points
ConvexHull -- compute a convex hull for input points
Voronoi -- compute a Voronoi diagram from input points
Plotting Helpers
================
.. autosummary::
:toctree: generated/
delaunay_plot_2d -- plot 2-D triangulation
convex_hull_plot_2d -- plot 2-D convex hull
voronoi_plot_2d -- plot 2-D voronoi diagram
.. seealso:: :ref:`Tutorial <qhulltutorial>`
Simplex representation
======================
The simplices (triangles, tetrahedra, ...) appearing in the Delaunay
tessellation (N-dim simplices), convex hull facets, and Voronoi ridges
(N-1 dim simplices) are represented in the following scheme::
tess = Delaunay(points)
hull = ConvexHull(points)
voro = Voronoi(points)
# coordinates of the j-th vertex of the i-th simplex
tess.points[tess.simplices[i, j], :] # tessellation element
hull.points[hull.simplices[i, j], :] # convex hull facet
voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells
For Delaunay triangulations and convex hulls, the neighborhood
structure of the simplices satisfies the condition:
``tess.neighbors[i,j]`` is the neighboring simplex of the i-th
simplex, opposite to the j-th vertex. It is -1 in case of no
neighbor.
Convex hull facets also define a hyperplane equation:
(hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0
Similar hyperplane equations for the Delaunay triangulation correspond
to the convex hull facets on the corresponding N+1 dimensional
paraboloid.
The Delaunay triangulation objects offer a method for locating the
simplex containing a given point, and barycentric coordinate
computations.
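For example (a sketch), ``tess.find_simplex(p)`` returns the index of the
simplex containing point p, or -1 if p lies outside the triangulation.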
Functions
---------
.. autosummary::
:toctree: generated/
tsearch
distance_matrix
minkowski_distance
minkowski_distance_p
"""
from __future__ import division, print_function, absolute_import
from .kdtree import *
from .ckdtree import *
from .qhull import *
from ._plotutils import *
__all__ = [s for s in dir() if not s.startswith('_')]
__all__ += ['distance']
from . import distance
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
|
axsauze/eventsfinder
|
refs/heads/master
|
django/views/debug.py
|
99
|
from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils.html import escape
from django.utils.importlib import import_module
from django.utils.encoding import force_bytes, smart_text
from django.utils import six
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p+1
p = template_source.find('\n', p+1)
yield len(template_source) + 1
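# For example, linebreak_iter("ab\ncd") yields 0, then 3 (one past the
# newline), then 6 (len + 1): the start offset of each line plus a final
# end-of-source sentinel.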
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = dict((k, cleanse_setting(k, v)) for k,v in value.items())
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
return cleansed
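# For example, cleanse_setting('SECRET_KEY', 'abc123') returns
# CLEANSED_SUBSTITUTE because 'SECRET' matches HIDDEN_SETTINGS, while
# cleanse_setting('DEBUG', True) returns the value unchanged.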
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponseServerError(text, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponseServerError(html, content_type='text/html')
# Cache for the default exception reporter filter instance.
default_exception_reporter_filter = None
def get_exception_reporter_filter(request):
global default_exception_reporter_filter
if default_exception_reporter_filter is None:
# Load the default filter for the first time and cache it.
modpath = settings.DEFAULT_EXCEPTION_REPORTER_FILTER
modname, classname = modpath.rsplit('.', 1)
try:
mod = import_module(modname)
except ImportError as e:
raise ImproperlyConfigured(
'Error importing default exception reporter filter %s: "%s"' % (modpath, e))
try:
default_exception_reporter_filter = getattr(mod, classname)()
except AttributeError:
raise ImproperlyConfigured('Default exception reporter filter module "%s" does not define a "%s" class' % (modname, classname))
if request:
return getattr(request, 'exception_reporter_filter', default_exception_reporter_filter)
else:
return default_exception_reporter_filter
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request, POST_override=self.get_post_parameters(request))
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
elif isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed[name] = value
else:
# Potentially cleanse only the request if it's one of the frame variables.
for name, value in tb_frame.f_locals.items():
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed[name] = value
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = None
self.template_does_not_exist = False
self.loader_debug_info = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_data(self):
"Return a Context instance containing traceback information."
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
from django.template.loader import template_source_loaders
self.template_does_not_exist = True
self.loader_debug_info = []
for loader in template_source_loaders:
try:
source_list_func = loader.get_template_sources
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{'name': t, 'exists': os.path.exists(t)} \
for t in source_list_func(str(self.exc_value))]
except AttributeError:
template_list = []
loader_name = loader.__module__ + '.' + loader.__class__.__name__
self.loader_debug_info.append({
'loader': loader_name,
'templates': template_list,
})
if (settings.TEMPLATE_DEBUG and
hasattr(self.exc_value, 'django_template_source')):
self.get_template_exception_info()
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': datetime.datetime.now(),
'django_version_info': get_version(),
'sys_path' : sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data())
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data(), autoescape=False)
return t.render(c)
def get_template_exception_info(self):
origin, (start, end) = self.exc_value.django_template_source
template_source = origin.reload()
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(template_source)):
if start >= upto and end <= next:
line = num
before = escape(template_source[upto:start])
during = escape(template_source[start:end])
after = escape(template_source[end:next])
source_lines.append( (num, escape(template_source[upto:next])) )
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases, exc_value.args might be empty.
try:
message = self.exc_value.args[0]
except IndexError:
message = '(Could not get exception message)'
self.template_info = {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': origin.name,
}
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.readlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
frames = []
tb = self.tb
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is not None:
frames.append({
'tb': tb,
'type': module_name.startswith('django.') and 'django' or 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [ (f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames ]
list = ['Traceback (most recent call last):\n']
list += traceback.format_list(tb)
list += traceback.format_exception_only(self.exc_type, self.exc_value)
return list
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if not tried:
# tried exists but is an empty list. The URLconf must've been empty.
return empty_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def empty_urlconf(request):
"Create an empty URLconf 404 error response."
t = Template(EMPTY_URLCONF_TEMPLATE, name='Empty URLConf template')
c = Context({
'project_name': settings.SETTINGS_MODULE.split('.')[0]
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>{% for t in loader.templates %}<li><code>{{ t.name }}</code> (File {% if t.exists %}exists{% else %}does not exist{% endif %})</li>{% endfor %}</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard 500 page.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
EMPTY_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
ul { margin-left: 2em; margin-top: 1em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>Of course, you haven't actually done any work yet. Here's what to do next:</p>
<ul>
<li>If you plan to use a database, edit the <code>DATABASES</code> setting in <code>{{ project_name }}/settings.py</code>.</li>
<li>Start your first app by running <code>python manage.py startapp [appname]</code>.</li>
</ul>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
|
redbear/micropython
|
refs/heads/redbear-duo
|
tests/extmod/ure_split_empty.py
|
18
|
# test splitting with pattern matches that can be empty
#
# CPython 3.5 issues a FutureWarning for these tests because their
# behaviour will change in a future version. MicroPython just stops
# splitting as soon as an empty match is found.
import ure as re
r = re.compile(" *")
s = r.split("a b c foobar")
print(s)
r = re.compile("x*")
s = r.split("foo")
print(s)
r = re.compile("x*")
s = r.split("axbc")
print(s)
|
charbeljc/hr
|
refs/heads/8.0
|
__unported__/hr_employee_state/__init__.py
|
23
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import hr
from . import wizard
|
lihui7115/ChromiumGStreamerBackend
|
refs/heads/master
|
chrome/tools/build/appid.py
|
186
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
appid.py -- Chromium appid header file generation utility.
"""
import optparse
import sys
GENERATED_APPID_INCLUDE_FILE_CONTENTS = """
// This file is automatically generated by appid.py.
// It contains the Google Update Appid used for this build. Note that
// the Appid will be empty for non Google Chrome builds.
namespace google_update {
const wchar_t kChromeGuid[] = L"%s";
}
"""
def GenerateAppIdHeader(opts):
contents = GENERATED_APPID_INCLUDE_FILE_CONTENTS % opts.appid
try:
ofp = open(opts.output_file, 'r')
except EnvironmentError:
current_contents = None
else:
current_contents = ofp.read()
if contents != current_contents:
open(opts.output_file, 'w').write(contents)
def main():
parser = optparse.OptionParser()
parser.add_option('-a', '--appid',
help='The Google Update App Id of the Chrome being built.')
parser.add_option('-o', '--output_file',
help='The path to the generated output header file')
(opts, args) = parser.parse_args()
if opts.appid is None or not opts.output_file:
parser.print_help()
return 1
# Log a trace in the build output when we run.
print "Generating appid header... ",
GenerateAppIdHeader(opts)
print "Done."
if __name__ == '__main__':
sys.exit(main())
|
vuntz/glance
|
refs/heads/master
|
glance/tests/unit/v1/test_upload_utils.py
|
3
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import glance_store
import mock
from mock import patch
import webob.exc
from glance.api.v1 import upload_utils
from glance.common import exception
from glance.common import store_utils
from glance.common import utils
import glance.registry.client.v1.api as registry
from glance.tests.unit import base
import glance.tests.unit.utils as unit_test_utils
class TestUploadUtils(base.StoreClearingUnitTest):
def setUp(self):
super(TestUploadUtils, self).setUp()
self.config(verbose=True, debug=True)
def tearDown(self):
super(TestUploadUtils, self).tearDown()
def test_initiate_delete(self):
req = unit_test_utils.get_fake_request()
location = {"url": "file://foo/bar",
"metadata": {},
"status": "active"}
id = unit_test_utils.UUID1
with patch.object(store_utils,
"safe_delete_from_backend") as mock_store_utils:
upload_utils.initiate_deletion(req, location, id)
mock_store_utils.assert_called_once_with(req.context,
id,
location)
def test_initiate_delete_with_delayed_delete(self):
self.config(delayed_delete=True)
req = unit_test_utils.get_fake_request()
location = {"url": "file://foo/bar",
"metadata": {},
"status": "active"}
id = unit_test_utils.UUID1
with patch.object(store_utils, "schedule_delayed_delete_from_backend",
return_value=True) as mock_store_utils:
upload_utils.initiate_deletion(req, location, id)
mock_store_utils.assert_called_once_with(req.context,
id,
location)
def test_safe_kill(self):
req = unit_test_utils.get_fake_request()
id = unit_test_utils.UUID1
with patch.object(registry, "update_image_metadata") as mock_registry:
upload_utils.safe_kill(req, id, 'saving')
mock_registry.assert_called_once_with(req.context, id,
{'status': 'killed'},
from_state='saving')
def test_safe_kill_with_error(self):
req = unit_test_utils.get_fake_request()
id = unit_test_utils.UUID1
with patch.object(registry, "update_image_metadata",
side_effect=Exception()) as mock_registry:
upload_utils.safe_kill(req, id, 'saving')
mock_registry.assert_called_once_with(req.context, id,
{'status': 'killed'},
from_state='saving')
@contextmanager
def _get_store_and_notifier(self, image_size=10, ext_update_data=None,
ret_checksum="checksum", exc_class=None):
location = "file://foo/bar"
checksum = "checksum"
size = 10
update_data = {'checksum': checksum}
if ext_update_data is not None:
update_data.update(ext_update_data)
image_meta = {'id': unit_test_utils.UUID1,
'size': image_size}
image_data = "blah"
store = mock.MagicMock()
notifier = mock.MagicMock()
if exc_class is not None:
store.add.side_effect = exc_class
else:
store.add.return_value = (location, size, ret_checksum, {})
yield (location, checksum, image_meta, image_data, store, notifier,
update_data)
store.add.assert_called_once_with(image_meta['id'], mock.ANY,
image_meta['size'], context=mock.ANY)
def test_upload_data_to_store(self):
# 'user_storage_quota' is not set
def store_add(image_id, data, size, **kwargs):
# Check if 'data' is instance of 'CooperativeReader' when
# 'user_storage_quota' is disabled.
self.assertIsInstance(data, utils.CooperativeReader)
return location, 10, "checksum", {}
req = unit_test_utils.get_fake_request()
with self._get_store_and_notifier(
ext_update_data={'size': 10},
exc_class=store_add) as (location, checksum, image_meta,
image_data, store, notifier,
update_data):
ret = image_meta.update(update_data)
with patch.object(registry, 'update_image_metadata',
return_value=ret) as mock_update_image_metadata:
actual_meta, location_data = upload_utils.upload_data_to_store(
req, image_meta, image_data, store, notifier)
self.assertEqual(location, location_data['url'])
self.assertEqual(image_meta.update(update_data), actual_meta)
mock_update_image_metadata.assert_called_once_with(
req.context, image_meta['id'], update_data,
from_state='saving')
def test_upload_data_to_store_user_storage_quota_enabled(self):
# Enable user_storage_quota
self.config(user_storage_quota='100B')
def store_add(image_id, data, size, **kwargs):
# Check if 'data' is instance of 'LimitingReader' when
# 'user_storage_quota' is enabled.
self.assertIsInstance(data, utils.LimitingReader)
return location, 10, "checksum", {}
req = unit_test_utils.get_fake_request()
with self._get_store_and_notifier(
ext_update_data={'size': 10},
exc_class=store_add) as (location, checksum, image_meta,
image_data, store, notifier,
update_data):
ret = image_meta.update(update_data)
# mock 'check_quota'
mock_check_quota = patch('glance.api.common.check_quota',
return_value=100)
mock_check_quota.start()
self.addCleanup(mock_check_quota.stop)
with patch.object(registry, 'update_image_metadata',
return_value=ret) as mock_update_image_metadata:
actual_meta, location_data = upload_utils.upload_data_to_store(
req, image_meta, image_data, store, notifier)
self.assertEqual(location, location_data['url'])
self.assertEqual(image_meta.update(update_data), actual_meta)
mock_update_image_metadata.assert_called_once_with(
req.context, image_meta['id'], update_data,
from_state='saving')
# 'check_quota' is called two times
check_quota_call_count =\
mock_check_quota.target.check_quota.call_count
self.assertEqual(2, check_quota_call_count)
def test_upload_data_to_store_mismatch_size(self):
req = unit_test_utils.get_fake_request()
with self._get_store_and_notifier(
image_size=11) as (location, checksum, image_meta, image_data,
store, notifier, update_data):
ret = image_meta.update(update_data)
with patch.object(registry, 'update_image_metadata',
return_value=ret) as mock_update_image_metadata:
self.assertRaises(webob.exc.HTTPBadRequest,
upload_utils.upload_data_to_store,
req, image_meta, image_data, store,
notifier)
mock_update_image_metadata.assert_called_with(
req.context, image_meta['id'], {'status': 'killed'},
from_state='saving')
def test_upload_data_to_store_mismatch_checksum(self):
req = unit_test_utils.get_fake_request()
with self._get_store_and_notifier(
ret_checksum='fake') as (location, checksum, image_meta,
image_data, store, notifier, update_data):
ret = image_meta.update(update_data)
with patch.object(registry, "update_image_metadata",
return_value=ret) as mock_update_image_metadata:
self.assertRaises(webob.exc.HTTPBadRequest,
upload_utils.upload_data_to_store,
req, image_meta, image_data, store,
notifier)
mock_update_image_metadata.assert_called_with(
req.context, image_meta['id'], {'status': 'killed'},
from_state='saving')
def _test_upload_data_to_store_exception(self, exc_class, expected_class):
req = unit_test_utils.get_fake_request()
with self._get_store_and_notifier(
exc_class=exc_class) as (location, checksum, image_meta,
image_data, store, notifier, update_data):
with patch.object(upload_utils, 'safe_kill') as mock_safe_kill:
self.assertRaises(expected_class,
upload_utils.upload_data_to_store,
req, image_meta, image_data, store, notifier)
mock_safe_kill.assert_called_once_with(
req, image_meta['id'], 'saving')
def _test_upload_data_to_store_exception_with_notify(self,
exc_class,
expected_class,
image_killed=True):
req = unit_test_utils.get_fake_request()
with self._get_store_and_notifier(
exc_class=exc_class) as (location, checksum, image_meta,
image_data, store, notifier, update_data):
with patch.object(upload_utils, 'safe_kill') as mock_safe_kill:
self.assertRaises(expected_class,
upload_utils.upload_data_to_store,
req, image_meta, image_data, store,
notifier)
if image_killed:
mock_safe_kill.assert_called_with(req, image_meta['id'],
'saving')
def test_upload_data_to_store_raises_store_disabled(self):
"""Test StoreDisabled exception is raised while uploading data"""
self._test_upload_data_to_store_exception_with_notify(
glance_store.StoreAddDisabled,
webob.exc.HTTPGone,
image_killed=True)
def test_upload_data_to_store_duplicate(self):
"""See note in glance.api.v1.upload_utils on why we don't want image to
be deleted in this case.
"""
self._test_upload_data_to_store_exception_with_notify(
exception.Duplicate,
webob.exc.HTTPConflict,
image_killed=False)
def test_upload_data_to_store_forbidden(self):
self._test_upload_data_to_store_exception_with_notify(
exception.Forbidden,
webob.exc.HTTPForbidden)
def test_upload_data_to_store_storage_full(self):
self._test_upload_data_to_store_exception_with_notify(
glance_store.StorageFull,
webob.exc.HTTPRequestEntityTooLarge)
def test_upload_data_to_store_storage_write_denied(self):
self._test_upload_data_to_store_exception_with_notify(
glance_store.StorageWriteDenied,
webob.exc.HTTPServiceUnavailable)
def test_upload_data_to_store_size_limit_exceeded(self):
self._test_upload_data_to_store_exception_with_notify(
exception.ImageSizeLimitExceeded,
webob.exc.HTTPRequestEntityTooLarge)
def test_upload_data_to_store_http_error(self):
self._test_upload_data_to_store_exception_with_notify(
webob.exc.HTTPError,
webob.exc.HTTPError)
def test_upload_data_to_store_client_disconnect(self):
self._test_upload_data_to_store_exception(
ValueError,
webob.exc.HTTPBadRequest)
def test_upload_data_to_store_client_disconnect_ioerror(self):
self._test_upload_data_to_store_exception(
IOError,
webob.exc.HTTPBadRequest)
def test_upload_data_to_store_exception(self):
self._test_upload_data_to_store_exception_with_notify(
Exception,
webob.exc.HTTPInternalServerError)
def test_upload_data_to_store_not_found_after_upload(self):
req = unit_test_utils.get_fake_request()
with self._get_store_and_notifier(
ext_update_data={'size': 10}) as (location, checksum, image_meta,
image_data, store, notifier,
update_data):
exc = exception.ImageNotFound
with patch.object(registry, 'update_image_metadata',
side_effect=exc) as mock_update_image_metadata:
with patch.object(upload_utils,
"initiate_deletion") as mock_initiate_del:
with patch.object(upload_utils,
"safe_kill") as mock_safe_kill:
self.assertRaises(webob.exc.HTTPPreconditionFailed,
upload_utils.upload_data_to_store,
req, image_meta, image_data, store,
notifier)
mock_update_image_metadata.assert_called_once_with(
req.context, image_meta['id'], update_data,
from_state='saving')
mock_initiate_del.assert_called_once_with(
req, {'url': location, 'status': 'active',
'metadata': {}}, image_meta['id'])
mock_safe_kill.assert_called_once_with(
req, image_meta['id'], 'saving')
|
GuidoZhang/PyVC
|
refs/heads/master
|
PyVC/env/Lib/posixpath.py
|
50
|
"""Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
from genericpath import _unicode
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None
devnull = '/dev/null'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
"""Normalize case of pathname. Has no effect under Posix"""
return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
"""Test whether a path is absolute"""
return s.startswith('/')
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
"""Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
will be discarded. An empty last part will result in a path that
ends with a separator."""
path = a
for b in p:
if b.startswith('/'):
path = b
elif path == '' or path.endswith('/'):
path += b
else:
path += '/' + b
return path
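# Illustrative cases, derived from the logic above:
#   join('a', 'b')        ->  'a/b'
#   join('a', '/b', 'c')  ->  '/b/c'   (an absolute part discards what precedes it)
#   join('a/', 'b')       ->  'a/b'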
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
"""Split a pathname. Returns tuple "(head, tail)" where "tail" is
everything after the final slash. Either part may be empty."""
i = p.rfind('/') + 1
head, tail = p[:i], p[i:]
if head and head != '/'*len(head):
head = head.rstrip('/')
return head, tail
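# For instance:
#   split('/usr/lib/python')  ->  ('/usr/lib', 'python')
#   split('/usr/')            ->  ('/usr', '')
#   split('file')             ->  ('', 'file')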
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
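# For instance: splitext('/a/b.tar.gz') -> ('/a/b.tar', '.gz'), and
# splitext('archive') -> ('archive', ''), so root + ext == p always holds.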
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
"""Split a pathname into drive and path. On Posix, drive is always
empty."""
return '', p
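# For instance: splitdrive('/foo/bar') -> ('', '/foo/bar').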
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
"""Returns the final component of a pathname"""
i = p.rfind('/') + 1
return p[i:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
"""Returns the directory component of a pathname"""
i = p.rfind('/') + 1
head = p[:i]
if head and head != '/'*len(head):
head = head.rstrip('/')
return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
"""Test whether a path is a symbolic link"""
try:
st = os.lstat(path)
except (os.error, AttributeError):
return False
return stat.S_ISLNK(st.st_mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
os.lstat(path)
except os.error:
return False
return True
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
"""Test whether two pathnames reference the same actual file"""
s1 = os.stat(f1)
s2 = os.stat(f2)
return samestat(s1, s2)
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
"""Test whether two open file objects reference the same file"""
s1 = os.fstat(fp1)
s2 = os.fstat(fp2)
return samestat(s1, s2)
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
"""Test whether two stat buffers reference the same file"""
return s1.st_ino == s2.st_ino and \
s1.st_dev == s2.st_dev
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
"""Test whether a path is a mount point"""
if islink(path):
# A symlink can never be a mount point
return False
try:
s1 = os.lstat(path)
s2 = os.lstat(join(path, '..'))
except os.error:
return False # It doesn't exist -- so not a mount point :-)
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
return True # path/.. on a different device as path
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
return True # path/.. is the same i-node as path
return False
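# For instance, ismount('/') is normally True, while ismount('/tmp') depends
# on whether /tmp is a separate filesystem on the machine in question.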
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
try:
st = os.lstat(name)
except os.error:
continue
if stat.S_ISDIR(st.st_mode):
walk(name, func, arg)
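# Minimal usage sketch of the callback protocol described above (os.walk is
# the preferred replacement in new code):
#
#   def visit(arg, dirname, names):
#       print dirname, len(names)
#
#   walk('/tmp', visit, None)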
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = os.environ['HOME']
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
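# For instance, with $HOME set to '/home/alice':
#   expanduser('~/notes')  ->  '/home/alice/notes'
#   expanduser('~nosuchuser/x') is returned unchanged (unknown user)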
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
_varprog = None
_uvarprog = None
def expandvars(path):
"""Expand shell variables of form $var and ${var}. Unknown variables
are left unchanged."""
global _varprog, _uvarprog
if '$' not in path:
return path
if isinstance(path, _unicode):
if not _uvarprog:
import re
_uvarprog = re.compile(ur'\$(\w+|\{[^}]*\})', re.UNICODE)
varprog = _uvarprog
encoding = sys.getfilesystemencoding()
else:
if not _varprog:
import re
_varprog = re.compile(r'\$(\w+|\{[^}]*\})')
varprog = _varprog
encoding = None
i = 0
while True:
m = varprog.search(path, i)
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name.startswith('{') and name.endswith('}'):
name = name[1:-1]
if encoding:
name = name.encode(encoding)
if name in os.environ:
tail = path[j:]
value = os.environ[name]
if encoding:
value = value.decode(encoding)
path = path[:i] + value
i = len(path)
path += tail
else:
i = j
return path
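# For instance, assuming os.environ['FOO'] == 'bar':
#   expandvars('$FOO/baz')    ->  'bar/baz'
#   expandvars('${FOO}.txt')  ->  'bar.txt'
#   expandvars('$MISSING')    ->  '$MISSING'  (unknown variables are kept)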
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
slash, dot = (u'/', u'.') if isinstance(path, _unicode) else ('/', '.')
if path == '':
return dot
initial_slashes = path.startswith('/')
# POSIX allows one or two initial slashes, but treats three or more
# as single slash.
if (initial_slashes and
path.startswith('//') and not path.startswith('///')):
initial_slashes = 2
comps = path.split('/')
new_comps = []
for comp in comps:
if comp in ('', '.'):
continue
if (comp != '..' or (not initial_slashes and not new_comps) or
(new_comps and new_comps[-1] == '..')):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
comps = new_comps
path = slash.join(comps)
if initial_slashes:
path = slash*initial_slashes + path
return path or dot
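# For instance:
#   normpath('A//B/./C/..')  ->  'A/B'
#   normpath('//a')          ->  '//a'  (exactly two leading slashes survive)
#   normpath('///a')         ->  '/a'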
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
if isinstance(path, _unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
"""Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path."""
path, ok = _joinrealpath('', filename, {})
return abspath(path)
# Join two paths, normalizing and eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
if isabs(rest):
rest = rest[1:]
path = sep
while rest:
name, _, rest = rest.partition(sep)
if not name or name == curdir:
# current dir
continue
if name == pardir:
# parent dir
if path:
path, name = split(path)
if name == pardir:
path = join(path, pardir, pardir)
else:
path = pardir
continue
newpath = join(path, name)
if not islink(newpath):
path = newpath
continue
# Resolve the symbolic link
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
# Return already resolved part + rest of the path unchanged.
return join(newpath, rest), False
seen[newpath] = None # not resolved symlink
path, ok = _joinrealpath(path, os.readlink(newpath), seen)
if not ok:
return join(path, rest), False
seen[newpath] = path # resolved symlink
return path, True
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = [x for x in abspath(start).split(sep) if x]
path_list = [x for x in abspath(path).split(sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
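# For instance:
#   relpath('/a/b/c', '/a')    ->  'b/c'
#   relpath('/a/b', '/a/c/d')  ->  '../../b'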
|
uyaly/test
|
refs/heads/master
|
pageobject/account/Page_Account_SCEO_ADD.py
|
1
|
# coding:utf-8
from utils.ly_selenium import ly  # import the re-wrapped helper class from 4.11
class Page_Account_SCEO_ADD(ly):
    '''Add a new super director (SCEO) account'''
    # input fields on the add dialog
loginid_loc = ("id", '_easyui_textbox_input1')
password_loc = ("id", '_easyui_textbox_input5')
password1_loc = ("id", '_easyui_textbox_input6')
name_loc = ("id", '_easyui_textbox_input2')
phone_loc = ("id", '_easyui_textbox_input3')
    # buttons on the add dialog
    save_button = ("class name", 'l-btn-text')  # save
    ok_button = ("link text", '确定')  # confirm (the locator keeps the on-screen Chinese label)
# username = Config().get('SCEO_LOGINNAME')
# psw = Config().get('PASSWORD')
# loginid = Config().get('SCEO_NAME')
# phone = Config().get('PHONE')
    def input_loginid(self, loginid):
        '''Enter the login id'''
        self.send_keys(self.loginid_loc, loginid)
    def input_psw(self, psw):
        '''Enter the password'''
        self.send_keys(self.password_loc, psw)
    def input_psw1(self, psw):
        '''Re-enter the password to confirm it'''
        self.send_keys(self.password1_loc, psw)
    def input_name(self, username):
        '''Enter the name'''
        self.send_keys(self.name_loc, username)
    def input_phone(self, phone):
        '''Enter the phone number'''
        self.send_keys(self.phone_loc, phone)
    def click_save(self):
        '''Click the save button'''
        self.click(self.save_button)
    def click_ok(self):
        '''Click the OK button'''
        self.click(self.ok_button)
|
pdee/pdee
|
refs/heads/master
|
python-libs/rope/refactor/functionutils.py
|
79
|
import rope.base.exceptions
import rope.base.pyobjects
from rope.base.builtins import Lambda
from rope.base import worder
class DefinitionInfo(object):
def __init__(self, function_name, is_method, args_with_defaults,
args_arg, keywords_arg):
self.function_name = function_name
self.is_method = is_method
self.args_with_defaults = args_with_defaults
self.args_arg = args_arg
self.keywords_arg = keywords_arg
def to_string(self):
return '%s(%s)' % (self.function_name, self.arguments_to_string())
def arguments_to_string(self, from_index=0):
params = []
for arg, default in self.args_with_defaults:
if default is not None:
params.append('%s=%s' % (arg, default))
else:
params.append(arg)
if self.args_arg is not None:
params.append('*' + self.args_arg)
if self.keywords_arg:
params.append('**' + self.keywords_arg)
return ', '.join(params[from_index:])
@staticmethod
def _read(pyfunction, code):
scope = pyfunction.get_scope()
parent = scope.parent
parameter_names = pyfunction.get_param_names()
kind = pyfunction.get_kind()
is_method = kind == 'method'
is_lambda = kind == 'lambda'
info = _FunctionParser(code, is_method, is_lambda)
args, keywords = info.get_parameters()
args_arg = None
keywords_arg = None
if args and args[-1].startswith('**'):
keywords_arg = args[-1][2:]
del args[-1]
if args and args[-1].startswith('*'):
args_arg = args[-1][1:]
del args[-1]
args_with_defaults = [(name, None) for name in args]
args_with_defaults.extend(keywords)
return DefinitionInfo(info.get_function_name(), is_method,
args_with_defaults, args_arg, keywords_arg)
@staticmethod
def read(pyfunction):
pymodule = pyfunction.get_module()
word_finder = worder.Worder(pymodule.source_code)
lineno = pyfunction.get_ast().lineno
start = pymodule.lines.get_line_start(lineno)
if isinstance(pyfunction, Lambda):
call = word_finder.get_lambda_and_args(start)
else:
call = word_finder.get_function_and_args_in_header(start)
return DefinitionInfo._read(pyfunction, call)
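# A hand-built DefinitionInfo renders back to a header-like string; for
# instance (illustrative sketch, not part of rope itself):
#   info = DefinitionInfo('f', False, [('x', None), ('y', '1')], 'args', 'kw')
#   info.to_string()  ->  'f(x, y=1, *args, **kw)'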
class CallInfo(object):
def __init__(self, function_name, args, keywords, args_arg,
keywords_arg, implicit_arg, constructor):
self.function_name = function_name
self.args = args
self.keywords = keywords
self.args_arg = args_arg
self.keywords_arg = keywords_arg
self.implicit_arg = implicit_arg
self.constructor = constructor
def to_string(self):
function = self.function_name
if self.implicit_arg:
function = self.args[0] + '.' + self.function_name
params = []
start = 0
if self.implicit_arg or self.constructor:
start = 1
if self.args[start:]:
params.extend(self.args[start:])
if self.keywords:
params.extend(['%s=%s' % (name, value) for name, value in self.keywords])
if self.args_arg is not None:
params.append('*' + self.args_arg)
if self.keywords_arg:
params.append('**' + self.keywords_arg)
return '%s(%s)' % (function, ', '.join(params))
@staticmethod
def read(primary, pyname, definition_info, code):
is_method_call = CallInfo._is_method_call(primary, pyname)
is_constructor = CallInfo._is_class(pyname)
is_classmethod = CallInfo._is_classmethod(pyname)
info = _FunctionParser(code, is_method_call or is_classmethod)
args, keywords = info.get_parameters()
args_arg = None
keywords_arg = None
if args and args[-1].startswith('**'):
keywords_arg = args[-1][2:]
del args[-1]
if args and args[-1].startswith('*'):
args_arg = args[-1][1:]
del args[-1]
if is_constructor:
args.insert(0, definition_info.args_with_defaults[0][0])
return CallInfo(info.get_function_name(), args, keywords, args_arg,
keywords_arg, is_method_call or is_classmethod,
is_constructor)
@staticmethod
def _is_method_call(primary, pyname):
return primary is not None and \
isinstance(primary.get_object().get_type(),
rope.base.pyobjects.PyClass) and \
CallInfo._is_method(pyname)
@staticmethod
def _is_class(pyname):
return pyname is not None and \
isinstance(pyname.get_object(),
rope.base.pyobjects.PyClass)
@staticmethod
def _is_method(pyname):
if pyname is not None and \
isinstance(pyname.get_object(), rope.base.pyobjects.PyFunction):
return pyname.get_object().get_kind() == 'method'
return False
@staticmethod
def _is_classmethod(pyname):
if pyname is not None and \
isinstance(pyname.get_object(), rope.base.pyobjects.PyFunction):
return pyname.get_object().get_kind() == 'classmethod'
return False
class ArgumentMapping(object):
def __init__(self, definition_info, call_info):
self.call_info = call_info
self.param_dict = {}
self.keyword_args = []
self.args_arg = []
for index, value in enumerate(call_info.args):
if index < len(definition_info.args_with_defaults):
name = definition_info.args_with_defaults[index][0]
self.param_dict[name] = value
else:
self.args_arg.append(value)
        for name, value in call_info.keywords:
            # for/else: the else branch runs only when no declared parameter
            # matched this keyword, i.e. it is a pass-through keyword argument
            for pair in definition_info.args_with_defaults:
                if pair[0] == name:
                    self.param_dict[name] = value
                    break
            else:
                self.keyword_args.append((name, value))
def to_call_info(self, definition_info):
args = []
keywords = []
for index in range(len(definition_info.args_with_defaults)):
name = definition_info.args_with_defaults[index][0]
if name in self.param_dict:
args.append(self.param_dict[name])
else:
for i in range(index, len(definition_info.args_with_defaults)):
name = definition_info.args_with_defaults[i][0]
if name in self.param_dict:
keywords.append((name, self.param_dict[name]))
break
args.extend(self.args_arg)
keywords.extend(self.keyword_args)
return CallInfo(self.call_info.function_name, args, keywords,
self.call_info.args_arg, self.call_info.keywords_arg,
self.call_info.implicit_arg, self.call_info.constructor)
class _FunctionParser(object):
def __init__(self, call, implicit_arg, is_lambda=False):
self.call = call
self.implicit_arg = implicit_arg
self.word_finder = worder.Worder(self.call)
if is_lambda:
self.last_parens = self.call.rindex(':')
else:
self.last_parens = self.call.rindex(')')
self.first_parens = self.word_finder._find_parens_start(self.last_parens)
def get_parameters(self):
args, keywords = self.word_finder.get_parameters(self.first_parens,
self.last_parens)
if self.is_called_as_a_method():
instance = self.call[:self.call.rindex('.', 0, self.first_parens)]
args.insert(0, instance.strip())
return args, keywords
def get_instance(self):
if self.is_called_as_a_method():
return self.word_finder.get_primary_at(
self.call.rindex('.', 0, self.first_parens) - 1)
def get_function_name(self):
if self.is_called_as_a_method():
return self.word_finder.get_word_at(self.first_parens - 1)
else:
return self.word_finder.get_primary_at(self.first_parens - 1)
def is_called_as_a_method(self):
return self.implicit_arg and '.' in self.call[:self.first_parens]
|
xzoert/dedalus
|
refs/heads/master
|
dedalus/ui/requests/ResourceListRequest.py
|
1
|
from . import Request
class ResourceListRequest(Request):
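    # Fetches the resources matching tagFilter, then hands them to
    # pageFunction in chunks of pageSize via scheduled steps; doneFunction
    # fires once after the final page (or immediately if nothing matched).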
def start(self,client,tagFilter,limit,timeout,pageSize,doneFunction,pageFunction):
self.client=client
self.tagFilter=tagFilter
self.pageSize=pageSize
self.limit=limit
self.timeout=timeout
self.resources=None
self.doneFunction=doneFunction
self.pageFunction=pageFunction
self.idx=0
self.schedule(self.gotResources,task=self.getResources)
def getResources(self,state):
self.resources=self.client.getResources(self.tagFilter,limit=self.limit,timeout=self.timeout)
self.idx=0
def gotResources(self,response):
if len(self.resources)>0:
self.schedule(self.step)
else:
self._done()
if self.doneFunction:
self.doneFunction()
def step(self,state):
count=len(self.resources)
endidx=self.idx+self.pageSize
if endidx>count:
endidx=count
if self.pageFunction:
try:
self.pageFunction(self.resources[self.idx:endidx])
except Exception as err:
self._done()
raise err
self.idx=endidx
if self.idx<count:
self.schedule(self.step)
else:
self._done()
if self.doneFunction:
self.doneFunction()
|
jamesbeebop/CouchPotatoServer
|
refs/heads/master
|
couchpotato/core/plugins/userscript/__init__.py
|
81
|
from .main import Userscript
def autoload():
return Userscript()
|
adw0rd/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/contrib/gis/db/backends/spatialite/models.py
|
403
|
"""
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"""
The 'geometry_columns' table from SpatiaLite.
"""
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
type = models.CharField(max_length=30)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
spatial_index_enabled = models.IntegerField()
class Meta:
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
        Returns the name of the metadata column used to store
        the feature table name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
        Returns the name of the metadata column used to store
        the feature geometry column.
"""
return 'f_geometry_column'
def __unicode__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from SpatiaLite.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
ref_sys_name = models.CharField(max_length=256)
proj4text = models.CharField(max_length=2048)
@property
def wkt(self):
from django.contrib.gis.gdal import SpatialReference
return SpatialReference(self.proj4text).wkt
class Meta:
db_table = 'spatial_ref_sys'
managed = False
|
hellfu/ns3-mestrado
|
refs/heads/master
|
wutils.py
|
47
|
import os
import os.path
import re
import sys
import subprocess
import shlex
# WAF modules
from waflib import Options, Utils, Logs, TaskGen, Build, Context
from waflib.Errors import WafError
# these are set from the main wscript file
APPNAME=None
VERSION=None
bld=None
def get_command_template(env, arguments=()):
cmd = Options.options.command_template or '%s'
for arg in arguments:
cmd = cmd + " " + arg
return cmd
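# For instance, running waf with --command-template='gdb --args %s' and
# arguments=('--verbose',) yields 'gdb --args %s --verbose'; callers are
# expected to substitute the program path for the %s later.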
if hasattr(os.path, "relpath"):
relpath = os.path.relpath # since Python 2.6
else:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
def find_program(program_name, env):
launch_dir = os.path.abspath(Context.launch_dir)
#top_dir = os.path.abspath(Options.cwd_launch)
found_programs = []
for obj in bld.all_task_gen:
if not getattr(obj, 'is_ns3_program', False):
continue
## filter out programs not in the subtree starting at the launch dir
if not (obj.path.abspath().startswith(launch_dir)
or obj.path.get_bld().abspath().startswith(launch_dir)):
continue
name1 = obj.name
name2 = os.path.join(relpath(obj.path.abspath(), launch_dir), obj.name)
names = [name1, name2]
found_programs.extend(names)
if program_name in names:
return obj
raise ValueError("program '%s' not found; available programs are: %r"
% (program_name, found_programs))
def get_proc_env(os_env=None):
env = bld.env
if sys.platform == 'linux2':
pathvar = 'LD_LIBRARY_PATH'
elif sys.platform == 'darwin':
pathvar = 'DYLD_LIBRARY_PATH'
elif sys.platform == 'win32':
pathvar = 'PATH'
elif sys.platform == 'cygwin':
pathvar = 'PATH'
elif sys.platform.startswith('freebsd'):
pathvar = 'LD_LIBRARY_PATH'
else:
Logs.warn(("Don't know how to configure "
"dynamic library path for the platform %r;"
" assuming it's LD_LIBRARY_PATH.") % (sys.platform,))
pathvar = 'LD_LIBRARY_PATH'
proc_env = dict(os.environ)
if os_env is not None:
proc_env.update(os_env)
if pathvar is not None:
if pathvar in proc_env:
proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']) + [proc_env[pathvar]])
else:
proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']))
pymoddir = bld.path.find_dir('bindings/python').get_bld().abspath()
pyvizdir = bld.path.find_dir('src/visualizer').abspath()
if 'PYTHONPATH' in proc_env:
proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir] + [proc_env['PYTHONPATH']])
else:
proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir])
if 'PATH' in proc_env:
proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']) + [proc_env['PATH']])
else:
proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']))
return proc_env
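# Illustrative effect on Linux (a sketch, with made-up paths): if
# env['NS3_MODULE_PATH'] is ['build/lib'] and the caller already has
# LD_LIBRARY_PATH='/opt/lib', the child process sees
# LD_LIBRARY_PATH='build/lib:/opt/lib'; PYTHONPATH and PATH are extended the same way.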
def run_argv(argv, env, os_env=None, cwd=None, force_no_valgrind=False):
proc_env = get_proc_env(os_env)
if Options.options.valgrind and not force_no_valgrind:
if Options.options.command_template:
raise WafError("Options --command-template and --valgrind are conflicting")
if not env['VALGRIND']:
raise WafError("valgrind is not installed")
argv = [env['VALGRIND'], "--leak-check=full", "--show-reachable=yes", "--error-exitcode=1"] + argv
proc = subprocess.Popen(argv, env=proc_env, cwd=cwd, stderr=subprocess.PIPE)
error = False
for line in proc.stderr:
sys.stderr.write(line)
if "== LEAK SUMMARY" in line:
error = True
retval = proc.wait()
if retval == 0 and error:
retval = 1
else:
try:
WindowsError
except NameError:
retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
else:
try:
retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
except WindowsError, ex:
raise WafError("Command %s raised exception %s" % (argv, ex))
if retval:
signame = None
if retval < 0: # signal?
import signal
for name, val in vars(signal).iteritems():
if len(name) > 3 and name[:3] == 'SIG' and name[3] != '_':
if val == -retval:
signame = name
break
if signame:
raise WafError("Command %s terminated with signal %s."
" Run it under a debugger to get more information "
"(./waf --run <program> --command-template=\"gdb --args %%s <args>\")." % (argv, signame))
else:
raise WafError("Command %s exited with code %i" % (argv, retval))
return retval
def get_run_program(program_string, command_template=None):
"""
Return the program name and argv of the process that would be executed by
run_program(program_string, command_template).
"""
#print "get_run_program_argv(program_string=%r, command_template=%r)" % (program_string, command_template)
env = bld.env
if command_template in (None, '%s'):
argv = shlex.split(program_string)
#print "%r ==shlex.split==> %r" % (program_string, argv)
program_name = argv[0]
try:
program_obj = find_program(program_name, env)
except ValueError, ex:
raise WafError(str(ex))
program_node = program_obj.path.find_or_declare(program_obj.target)
#try:
# program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
#except AttributeError:
# raise Utils.WafError("%s does not appear to be a program" % (program_name,))
execvec = [program_node.abspath()] + argv[1:]
else:
program_name = program_string
try:
program_obj = find_program(program_name, env)
except ValueError, ex:
raise WafError(str(ex))
program_node = program_obj.path.find_or_declare(program_obj.target)
#try:
# program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
#except AttributeError:
# raise Utils.WafError("%s does not appear to be a program" % (program_name,))
tmpl = command_template % (program_node.abspath(),)
execvec = shlex.split(tmpl.replace('\\', '\\\\'))
#print "%r ==shlex.split==> %r" % (command_template % (program_node.abspath(env),), execvec)
return program_name, execvec
def run_program(program_string, env, command_template=None, cwd=None, visualize=False):
"""
    If command_template is not None, program_string is the program
    name and argv is given by command_template with %s replaced by the
    full path to the program. Otherwise, program_string is interpreted
    as a shell command whose first token is the program name.
"""
dummy_program_name, execvec = get_run_program(program_string, command_template)
if cwd is None:
if (Options.options.cwd_launch):
cwd = Options.options.cwd_launch
else:
cwd = Options.cwd_launch
if visualize:
execvec.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
return run_argv(execvec, env, cwd=cwd)
def run_python_program(program_string, env, visualize=False):
env = bld.env
execvec = shlex.split(program_string)
if (Options.options.cwd_launch):
cwd = Options.options.cwd_launch
else:
cwd = Options.cwd_launch
if visualize:
execvec.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
return run_argv([env['PYTHON'][0]] + execvec, env, cwd=cwd)
def uniquify_list(seq):
"""Remove duplicates while preserving order
From Dave Kirby http://www.peterbe.com/plog/uniqifiers-benchmark
"""
seen = set()
return [ x for x in seq if x not in seen and not seen.add(x)]
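# Illustrative: uniquify_list([3, 1, 3, 2, 1]) -> [3, 1, 2]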
|
urban48/debpackager
|
refs/heads/master
|
debpackager/packages/general/general.py
|
1
|
from debpackager.packages.general_package import GeneralPackage
class General(GeneralPackage):
def __init__(self, kwargs):
super(General, self).__init__(**kwargs)
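# Illustrative (hypothetical keys; the real ones depend on the caller):
#   General({'project': project, 'extra_args': extra_args}) forwards the dict as
#   keyword arguments to GeneralPackage.__init__.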
|
msebire/intellij-community
|
refs/heads/master
|
python/helpers/pydev/tests_pydevd_python/test_debugger.py
|
6
|
'''
The idea is that we record the commands sent to the debugger and reproduce them from this script
(so, this works as the client, which spawns the debugger as a separate process and communicates
to it as if it were run from the outside).
Note that it's a Python script, but it'll spawn processes to run under Jython, IronPython and CPython.
'''
import os
import platform
import sys
import threading
import time
import unittest
from tests_pydevd_python import debugger_unittest
from tests_pydevd_python.debugger_unittest import get_free_port
CMD_SET_PROPERTY_TRACE, CMD_EVALUATE_CONSOLE_EXPRESSION, CMD_RUN_CUSTOM_OPERATION, CMD_ENABLE_DONT_TRACE = 133, 134, 135, 141
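# Wire-format sketch (illustrative, based on the writes below): each message exchanged
# with the debugger is a tab-separated line '<cmd_id>\t<sequence>\t<payload>'; e.g.
# write_add_breakpoint() sends cmd_id 111, and the '111'/'107'/'108'/'109' strings
# passed to wait_for_breakpoint_hit() are the suspend-reason command ids.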
IS_CPYTHON = platform.python_implementation() == 'CPython'
IS_IRONPYTHON = platform.python_implementation() == 'IronPython'
IS_JYTHON = platform.python_implementation() == 'Jython'
IS_NUMPY = True
try:
import numpy
except ImportError:
IS_NUMPY = False
try:
xrange
except:
xrange = range
TEST_DJANGO = False
if sys.version_info[:2] == (2, 7):
# Only test on python 2.7 for now
try:
import django
TEST_DJANGO = True
except:
pass
IS_PY2 = False
if sys.version_info[0] == 2:
IS_PY2 = True
if IS_PY2:
builtin_qualifier = "__builtin__"
else:
builtin_qualifier = "builtins"
IS_PY36 = False
if sys.version_info[0] == 3 and sys.version_info[1] == 6:
IS_PY36 = True
TEST_CYTHON = os.getenv('PYDEVD_USE_CYTHON', None) == 'YES'
TEST_JYTHON = os.getenv('TEST_JYTHON', None) == 'YES'
#=======================================================================================================================
# WriterThreadCaseSetNextStatement
#======================================================================================================================
class WriterThreadCaseSetNextStatement(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case_set_next_statement.py')
def run(self):
self.start_socket()
breakpoint_id = self.write_add_breakpoint(6, None)
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 6, 'Expected return to be in line 6, was: %s' % line
self.write_evaluate_expression('%s\t%s\t%s' % (thread_id, frame_id, 'LOCAL'), 'a')
self.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 2"'.format(builtin_qualifier))
self.write_set_next_statement(thread_id, 2, 'method')
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 2, 'Expected return to be in line 2, was: %s' % line
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
self.write_evaluate_expression('%s\t%s\t%s' % (thread_id, frame_id, 'LOCAL'), 'a')
self.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 1"'.format(builtin_qualifier))
self.write_remove_breakpoint(breakpoint_id)
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# AbstractWriterThreadCaseDjango
#======================================================================================================================
class AbstractWriterThreadCaseDjango(debugger_unittest.AbstractWriterThread):
FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
def get_command_line_args(self):
free_port = get_free_port()
self.django_port = free_port
return [
debugger_unittest._get_debugger_test_file(os.path.join('my_django_proj_17', 'manage.py')),
'runserver',
'--noreload',
str(free_port),
]
def write_add_breakpoint_django(self, line, func, template):
'''
@param line: starts at 1
'''
breakpoint_id = self.next_breakpoint_id()
template_file = debugger_unittest._get_debugger_test_file(os.path.join('my_django_proj_17', 'my_app', 'templates', 'my_app', template))
self.write("111\t%s\t%s\t%s\t%s\t%s\t%s\tNone\tNone" % (self.next_seq(), breakpoint_id, 'django-line', template_file, line, func))
        self.log.append('write_add_breakpoint_django: %s line: %s func: %s' % (breakpoint_id, line, func))
return breakpoint_id
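    # Illustrative message produced above (tab-separated):
    #   '111\t<seq>\t<breakpoint_id>\tdjango-line\t<template path>\t<line>\t<func>\tNone\tNone'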
def create_request_thread(self, uri):
        outer = self
class T(threading.Thread):
def run(self):
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
for _ in xrange(10):
try:
stream = urlopen('http://127.0.0.1:%s/%s' % (outer.django_port,uri))
self.contents = stream.read()
break
except IOError:
continue
return T()
#=======================================================================================================================
# WriterThreadCaseDjango
#======================================================================================================================
class WriterThreadCaseDjango(AbstractWriterThreadCaseDjango):
def run(self):
self.start_socket()
self.write_add_breakpoint_django(5, None, 'index.html')
self.write_make_initial_run()
t = self.create_request_thread('my_app')
        time.sleep(5) # Give django some time to start up before requesting the page
t.start()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 5, 'Expected return to be in line 5, was: %s' % line
self.write_get_variable(thread_id, frame_id, 'entry')
self.wait_for_vars([
'<var name="key" type="str"',
'v1'
])
self.write_run_thread(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 5, 'Expected return to be in line 5, was: %s' % line
self.write_get_variable(thread_id, frame_id, 'entry')
self.wait_for_vars([
'<var name="key" type="str"',
'v2'
])
self.write_run_thread(thread_id)
for _ in xrange(10):
if hasattr(t, 'contents'):
break
time.sleep(.3)
else:
raise AssertionError('Django did not return contents properly!')
contents = t.contents.replace(' ', '').replace('\r', '').replace('\n', '')
if contents != '<ul><li>v1:v1</li><li>v2:v2</li></ul>':
raise AssertionError('%s != <ul><li>v1:v1</li><li>v2:v2</li></ul>' % (contents,))
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCaseDjango2
#======================================================================================================================
class WriterThreadCaseDjango2(AbstractWriterThreadCaseDjango):
def run(self):
self.start_socket()
self.write_add_breakpoint_django(4, None, 'name.html')
self.write_make_initial_run()
t = self.create_request_thread('my_app/name')
        time.sleep(5) # Give django some time to start up before requesting the page
t.start()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 4, 'Expected return to be in line 4, was: %s' % line
self.write_get_frame(thread_id, frame_id)
self.wait_for_var('<var name="form" type="NameForm" qualifier="my_app.forms" value="NameForm%253A')
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase19 - [Test Case]: Evaluate '__' attributes
#======================================================================================================================
class WriterThreadCase19(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case19.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(8, None)
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 8, 'Expected return to be in line 8, was: %s' % line
self.write_evaluate_expression('%s\t%s\t%s' % (thread_id, frame_id, 'LOCAL'), 'a.__var')
self.wait_for_evaluation('<var name="a.__var" type="int" qualifier="{0}" value="int'.format(builtin_qualifier))
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase20 - [Test Case]: Breakpoint on line with exception
#======================================================================================================================
class WriterThreadCase20(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case20.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(3, 'fn_with_except')
self.write_make_initial_run()
for i in range(2):
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 3, 'Expected return to be in line 3, was: %s' % line
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase18 - [Test Case]: change local variable
#======================================================================================================================
class WriterThreadCase18(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case18.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(5, 'm2')
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
        assert line == 5, 'Expected return to be in line 5, was: %s' % line
self.write_change_variable(thread_id, frame_id, 'a', '40')
self.wait_for_var('<xml><var name="" type="int" qualifier="{0}" value="int%253A 40" />%0A</xml>'.format(builtin_qualifier,))
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase17 - [Test Case]: dont trace
#======================================================================================================================
class WriterThreadCase17(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case17.py')
def run(self):
self.start_socket()
self.write_enable_dont_trace(True)
self.write_add_breakpoint(27, 'main')
self.write_add_breakpoint(29, 'main')
self.write_add_breakpoint(31, 'main')
self.write_add_breakpoint(33, 'main')
self.write_make_initial_run()
for i in range(4):
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
self.write_step_in(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('107', True)
            # Should skip stepping into the property setter
assert line == 2, 'Expected return to be in line 2, was: %s' % line
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase17a - [Test Case]: dont trace return
#======================================================================================================================
class WriterThreadCase17a(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case17a.py')
def run(self):
self.start_socket()
self.write_enable_dont_trace(True)
self.write_add_breakpoint(2, 'm1')
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 2, 'Expected return to be in line 2, was: %s' % line
self.write_step_in(thread_id)
thread_id, frame_id, line, name = self.wait_for_breakpoint_hit('107', get_line=True, get_name=True)
        # Should skip stepping into the property setter
assert name == 'm3'
assert line == 10, 'Expected return to be in line 10, was: %s' % line
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase16 - [Test Case]: numpy.ndarray resolver
#======================================================================================================================
class WriterThreadCase16(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case16.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(9, 'main')
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
# In this test we check that the three arrays of different shapes, sizes and types
# are all resolved properly as ndarrays.
# First pass check is that we have all three expected variables defined
self.write_get_frame(thread_id, frame_id)
self.wait_for_multiple_vars((
'<var name="smallarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0.%252B1.j 1.%252B1.j 2.%252B1.j 3.%252B1.j 4.%252B1.j 5.%252B1.j 6.%252B1.j 7.%252B1.j 8.%252B1.j%250A 9.%252B1.j 10.%252B1.j 11.%252B1.j 12.%252B1.j 13.%252B1.j 14.%252B1.j 15.%252B1.j 16.%252B1.j 17.%252B1.j%250A 18.%252B1.j 19.%252B1.j 20.%252B1.j 21.%252B1.j 22.%252B1.j 23.%252B1.j 24.%252B1.j 25.%252B1.j 26.%252B1.j%250A 27.%252B1.j 28.%252B1.j 29.%252B1.j 30.%252B1.j 31.%252B1.j 32.%252B1.j 33.%252B1.j 34.%252B1.j 35.%252B1.j%250A 36.%252B1.j 37.%252B1.j 38.%252B1.j 39.%252B1.j 40.%252B1.j 41.%252B1.j 42.%252B1.j 43.%252B1.j 44.%252B1.j%250A 45.%252B1.j 46.%252B1.j 47.%252B1.j 48.%252B1.j 49.%252B1.j 50.%252B1.j 51.%252B1.j 52.%252B1.j 53.%252B1.j%250A 54.%252B1.j 55.%252B1.j 56.%252B1.j 57.%252B1.j 58.%252B1.j 59.%252B1.j 60.%252B1.j 61.%252B1.j 62.%252B1.j%250A 63.%252B1.j 64.%252B1.j 65.%252B1.j 66.%252B1.j 67.%252B1.j 68.%252B1.j 69.%252B1.j 70.%252B1.j 71.%252B1.j%250A 72.%252B1.j 73.%252B1.j 74.%252B1.j 75.%252B1.j 76.%252B1.j 77.%252B1.j 78.%252B1.j 79.%252B1.j 80.%252B1.j%250A 81.%252B1.j 82.%252B1.j 83.%252B1.j 84.%252B1.j 85.%252B1.j 86.%252B1.j 87.%252B1.j 88.%252B1.j 89.%252B1.j%250A 90.%252B1.j 91.%252B1.j 92.%252B1.j 93.%252B1.j 94.%252B1.j 95.%252B1.j 96.%252B1.j 97.%252B1.j 98.%252B1.j%250A 99.%252B1.j%255D" isContainer="True" />',
'<var name="bigarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B%255B 0 1 2 ... 9997 9998 9999%255D%250A %255B10000 10001 10002 ... 19997 19998 19999%255D%250A %255B20000 20001 20002 ... 29997 29998 29999%255D%250A ...%250A %255B70000 70001 70002 ... 79997 79998 79999%255D%250A %255B80000 80001 80002 ... 89997 89998 89999%255D%250A %255B90000 90001 90002 ... 99997 99998 99999%255D%255D" isContainer="True" />',
'<var name="hugearray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0 1 2 ... 9999997 9999998 9999999%255D" isContainer="True" />',
))
# For each variable, check each of the resolved (meta data) attributes...
self.write_get_variable(thread_id, frame_id, 'smallarray')
self.wait_for_multiple_vars((
'<var name="min" type="complex128"',
'<var name="max" type="complex128"',
'<var name="shape" type="tuple"',
'<var name="dtype" type="dtype"',
'<var name="size" type="int"',
))
# ...and check that the internals are resolved properly
self.write_get_variable(thread_id, frame_id, 'smallarray\t__internals__')
self.wait_for_var('<var name="%27size%27')
self.write_get_variable(thread_id, frame_id, 'bigarray')
        # isContainer could be true on some numpy versions, so we only check the beginning of the var entry.
self.wait_for_multiple_vars((
[
'<var name="min" type="int64" qualifier="numpy" value="int64%253A 0"',
'<var name="min" type="int64" qualifier="numpy" value="int64%3A 0"',
'<var name="size" type="int" qualifier="{0}" value="int%3A 100000"'.format(builtin_qualifier),
],
[
'<var name="max" type="int64" qualifier="numpy" value="int64%253A 99999"',
'<var name="max" type="int32" qualifier="numpy" value="int32%253A 99999"',
'<var name="max" type="int64" qualifier="numpy" value="int64%3A 99999"',
'<var name="max" type="int32" qualifier="numpy" value="int32%253A 99999"',
],
'<var name="shape" type="tuple"',
'<var name="dtype" type="dtype"',
'<var name="size" type="int"'
))
self.write_get_variable(thread_id, frame_id, 'bigarray\t__internals__')
self.wait_for_var('<var name="%27size%27')
# this one is different because it crosses the magic threshold where we don't calculate
# the min/max
self.write_get_variable(thread_id, frame_id, 'hugearray')
self.wait_for_var((
[
'<var name="min" type="str" qualifier={0} value="str%253A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
'<var name="min" type="str" qualifier={0} value="str%3A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
'<var name="min" type="str" qualifier="{0}" value="str%253A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
'<var name="min" type="str" qualifier="{0}" value="str%3A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
],
[
'<var name="max" type="str" qualifier={0} value="str%253A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
'<var name="max" type="str" qualifier={0} value="str%3A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
'<var name="max" type="str" qualifier="{0}" value="str%253A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
'<var name="max" type="str" qualifier="{0}" value="str%3A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
],
'<var name="shape" type="tuple"',
'<var name="dtype" type="dtype"',
'<var name="size" type="int"',
))
self.write_get_variable(thread_id, frame_id, 'hugearray\t__internals__')
self.wait_for_var('<var name="%27size%27')
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase15 - [Test Case]: Custom Commands
#======================================================================================================================
class WriterThreadCase15(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case15.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(22, 'main')
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
# Access some variable
self.write_custom_operation("%s\t%s\tEXPRESSION\tcarObj.color" % (thread_id, frame_id), "EXEC", "f=lambda x: 'val=%s' % x", "f")
self.wait_for_custom_operation('val=Black')
assert 7 == self._sequence, 'Expected 7. Had: %s' % self._sequence
self.write_custom_operation("%s\t%s\tEXPRESSION\tcarObj.color" % (thread_id, frame_id), "EXECFILE", debugger_unittest._get_debugger_test_file('_debugger_case15_execfile.py'), "f")
self.wait_for_custom_operation('val=Black')
assert 9 == self._sequence, 'Expected 9. Had: %s' % self._sequence
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase14 - [Test Case]: Interactive Debug Console
#======================================================================================================================
class WriterThreadCase14(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case14.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(22, 'main')
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert thread_id, '%s not valid.' % thread_id
assert frame_id, '%s not valid.' % frame_id
# Access some variable
self.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color" % (thread_id, frame_id))
self.wait_for_var(['<more>False</more>', '%27Black%27'])
        assert 7 == self._sequence, 'Expected 7. Had: %s' % self._sequence
# Change some variable
self.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color='Red'" % (thread_id, frame_id))
self.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color" % (thread_id, frame_id))
self.wait_for_var(['<more>False</more>', '%27Red%27'])
        assert 11 == self._sequence, 'Expected 11. Had: %s' % self._sequence
# Iterate some loop
self.write_debug_console_expression("%s\t%s\tEVALUATE\tfor i in range(3):" % (thread_id, frame_id))
self.wait_for_var(['<xml><more>True</more></xml>'])
self.write_debug_console_expression("%s\t%s\tEVALUATE\t print(i)" % (thread_id, frame_id))
self.wait_for_var(['<xml><more>True</more></xml>'])
self.write_debug_console_expression("%s\t%s\tEVALUATE\t" % (thread_id, frame_id))
self.wait_for_var(
[
'<xml><more>False</more><output message="0"></output><output message="1"></output><output message="2"></output></xml>' ]
)
        assert 17 == self._sequence, 'Expected 17. Had: %s' % self._sequence
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase13
#======================================================================================================================
class WriterThreadCase13(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case13.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(35, 'main')
self.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, self.next_seq(), "true;false;false;true"))
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
self.write_get_frame(thread_id, frame_id)
self.write_step_in(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('107', True)
# Should go inside setter method
assert line == 25, 'Expected return to be in line 25, was: %s' % line
self.write_step_in(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('107', True)
self.write_step_in(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('107', True)
# Should go inside getter method
assert line == 21, 'Expected return to be in line 21, was: %s' % line
self.write_step_in(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('107', True)
# Disable property tracing
self.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, self.next_seq(), "true;true;true;true"))
self.write_step_in(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('107', True)
        # Should skip stepping into the property setter
assert line == 39, 'Expected return to be in line 39, was: %s' % line
# Enable property tracing
self.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, self.next_seq(), "true;false;false;true"))
self.write_step_in(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('107', True)
# Should go inside getter method
assert line == 8, 'Expected return to be in line 8, was: %s' % line
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase12
#======================================================================================================================
class WriterThreadCase12(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')
def run(self):
self.start_socket()
        self.write_add_breakpoint(2, '') # Should not be hit: an empty function name (not None) should only match the global scope.
self.write_add_breakpoint(6, 'Method1a')
self.write_add_breakpoint(11, 'Method2')
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 11, 'Expected return to be in line 11, was: %s' % line
self.write_step_return(thread_id)
        thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True) # not a return (it stopped at the other breakpoint)
assert line == 6, 'Expected return to be in line 6, was: %s' % line
self.write_run_thread(thread_id)
assert 13 == self._sequence, 'Expected 13. Had: %s' % self._sequence
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase11
#======================================================================================================================
class WriterThreadCase11(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(2, 'Method1')
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
assert line == 2, 'Expected return to be in line 2, was: %s' % line
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
assert line == 3, 'Expected return to be in line 3, was: %s' % line
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
assert line == 11, 'Expected return to be in line 11, was: %s' % line
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
assert line == 12, 'Expected return to be in line 12, was: %s' % line
self.write_run_thread(thread_id)
assert 13 == self._sequence, 'Expected 13. Had: %s' % self._sequence
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase10
#======================================================================================================================
class WriterThreadCase10(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')
def run(self):
self.start_socket()
        self.write_add_breakpoint(2, 'None') # None or the method name should make it hit.
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit('111')
self.write_step_return(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('109', True)
assert line == 11, 'Expected return to be in line 11, was: %s' % line
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
assert line == 12, 'Expected return to be in line 12, was: %s' % line
self.write_run_thread(thread_id)
assert 11 == self._sequence, 'Expected 11. Had: %s' % self._sequence
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase9
#======================================================================================================================
class WriterThreadCase9(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case89.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(10, 'Method3')
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit('111')
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
assert line == 11, 'Expected return to be in line 11, was: %s' % line
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
assert line == 12, 'Expected return to be in line 12, was: %s' % line
self.write_run_thread(thread_id)
assert 11 == self._sequence, 'Expected 11. Had: %s' % self._sequence
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase8
#======================================================================================================================
class WriterThreadCase8(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case89.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(10, 'Method3')
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit('111')
self.write_step_return(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('109', True)
assert line == 15, 'Expected return to be in line 15, was: %s' % line
self.write_run_thread(thread_id)
assert 9 == self._sequence, 'Expected 9. Had: %s' % self._sequence
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase7
#======================================================================================================================
class WriterThreadCase7(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case7.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(2, 'Call')
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit('111')
self.write_get_frame(thread_id, frame_id)
self.wait_for_vars('<xml></xml>') # no vars at this point
self.write_step_over(thread_id)
self.wait_for_breakpoint_hit('108')
self.write_get_frame(thread_id, frame_id)
self.wait_for_vars('<xml><var name="variable_for_test_1" type="int" qualifier="{0}" value="int%253A 10" />%0A</xml>'.format(builtin_qualifier))
self.write_step_over(thread_id)
self.wait_for_breakpoint_hit('108')
self.write_get_frame(thread_id, frame_id)
self.wait_for_vars('<xml><var name="variable_for_test_1" type="int" qualifier="{0}" value="int%253A 10" />%0A<var name="variable_for_test_2" type="int" qualifier="{0}" value="int%253A 20" />%0A</xml>'.format(builtin_qualifier))
self.write_run_thread(thread_id)
assert 17 == self._sequence, 'Expected 17. Had: %s' % self._sequence
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase6
#=======================================================================================================================
class WriterThreadCase6(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case56.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(2, 'Call2')
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_get_frame(thread_id, frame_id)
self.write_step_return(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('109', True)
assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line
self.write_step_in(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('107', True)
# goes to line 4 in jython (function declaration line)
assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line
self.write_run_thread(thread_id)
        assert 13 == self._sequence, 'Expected 13. Had: %s' % self._sequence
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase5
#=======================================================================================================================
class WriterThreadCase5(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case56.py')
def run(self):
self.start_socket()
breakpoint_id = self.write_add_breakpoint(2, 'Call2')
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_get_frame(thread_id, frame_id)
self.write_remove_breakpoint(breakpoint_id)
self.write_step_return(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('109', True)
assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line
self.write_step_in(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('107', True)
# goes to line 4 in jython (function declaration line)
assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line
self.write_run_thread(thread_id)
assert 15 == self._sequence, 'Expected 15. Had: %s' % self._sequence
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase4
#=======================================================================================================================
class WriterThreadCase4(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case4.py')
def run(self):
self.start_socket()
self.write_make_initial_run()
thread_id = self.wait_for_new_thread()
self.write_suspend_thread(thread_id)
        time.sleep(4) # wait long enough for the test to finish if it wasn't suspended
self.write_run_thread(thread_id)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase3
#=======================================================================================================================
class WriterThreadCase3(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case3.py')
def run(self):
self.start_socket()
self.write_make_initial_run()
time.sleep(.5)
breakpoint_id = self.write_add_breakpoint(4, '')
        self.write_add_breakpoint(5, 'FuncNotAvailable') # Check that it isn't hit at global scope when a function name is given
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_get_frame(thread_id, frame_id)
self.write_run_thread(thread_id)
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_get_frame(thread_id, frame_id)
self.write_remove_breakpoint(breakpoint_id)
self.write_run_thread(thread_id)
assert 17 == self._sequence, 'Expected 17. Had: %s' % self._sequence
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCase2
#=======================================================================================================================
class WriterThreadCase2(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case2.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(3, 'Call4') # seq = 3
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_get_frame(thread_id, frame_id)
self.write_add_breakpoint(14, 'Call2')
self.write_run_thread(thread_id)
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_get_frame(thread_id, frame_id)
self.write_run_thread(thread_id)
self.log.append('Checking sequence. Found: %s' % (self._sequence))
assert 15 == self._sequence, 'Expected 15. Had: %s' % self._sequence
self.log.append('Marking finished ok.')
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCaseQThread1
#=======================================================================================================================
class WriterThreadCaseQThread1(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case_qthread1.py')
def run(self):
self.start_socket()
breakpoint_id = self.write_add_breakpoint(19, 'run')
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_remove_breakpoint(breakpoint_id)
self.write_run_thread(thread_id)
self.log.append('Checking sequence. Found: %s' % (self._sequence))
assert 9 == self._sequence, 'Expected 9. Had: %s' % self._sequence
self.log.append('Marking finished ok.')
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCaseQThread2
#=======================================================================================================================
class WriterThreadCaseQThread2(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case_qthread2.py')
def run(self):
self.start_socket()
breakpoint_id = self.write_add_breakpoint(24, 'long_running')
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_remove_breakpoint(breakpoint_id)
self.write_run_thread(thread_id)
self.log.append('Checking sequence. Found: %s' % (self._sequence))
assert 9 == self._sequence, 'Expected 9. Had: %s' % self._sequence
self.log.append('Marking finished ok.')
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCaseQThread3
#=======================================================================================================================
class WriterThreadCaseQThread3(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case_qthread3.py')
def run(self):
self.start_socket()
breakpoint_id = self.write_add_breakpoint(22, 'run')
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_remove_breakpoint(breakpoint_id)
self.write_run_thread(thread_id)
self.log.append('Checking sequence. Found: %s' % (self._sequence))
assert 9 == self._sequence, 'Expected 9. Had: %s' % self._sequence
self.log.append('Marking finished ok.')
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCaseQThread4
#=======================================================================================================================
class WriterThreadCaseQThread4(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case_qthread4.py')
def run(self):
self.start_socket()
breakpoint_id = self.write_add_breakpoint(28, 'on_start') # breakpoint on print('On start called2').
self.write_make_initial_run()
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_remove_breakpoint(breakpoint_id)
self.write_run_thread(thread_id)
self.log.append('Checking sequence. Found: %s' % (self._sequence))
assert 9 == self._sequence, 'Expected 9. Had: %s' % self._sequence
self.log.append('Marking finished ok.')
self.finished_ok = True
def additional_output_checks(self, stdout, stderr):
if 'On start called' not in stdout:
raise AssertionError('Expected "On start called" to be in stdout:\n%s' % (stdout,))
if 'Done sleeping' not in stdout:
raise AssertionError('Expected "Done sleeping" to be in stdout:\n%s' % (stdout,))
if 'native Qt signal is not callable' in stderr:
raise AssertionError('Did not expect "native Qt signal is not callable" to be in stderr:\n%s' % (stderr,))
#=======================================================================================================================
# WriterThreadCase1
#=======================================================================================================================
class WriterThreadCase1(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case1.py')
def run(self):
self.start_socket()
self.log.append('writing add breakpoint')
self.write_add_breakpoint(6, 'set_up')
self.log.append('making initial run')
self.write_make_initial_run()
self.log.append('waiting for breakpoint hit')
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.log.append('get frame')
self.write_get_frame(thread_id, frame_id)
self.log.append('step over')
self.write_step_over(thread_id)
self.log.append('get frame')
self.write_get_frame(thread_id, frame_id)
self.log.append('run thread')
self.write_run_thread(thread_id)
self.log.append('asserting')
try:
assert 13 == self._sequence, 'Expected 13. Had: %s' % self._sequence
except:
self.log.append('assert failed!')
raise
self.log.append('asserted')
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCaseMSwitch
#=======================================================================================================================
class WriterThreadCaseMSwitch(debugger_unittest.AbstractWriterThread):
TEST_FILE = 'tests_pydevd_python._debugger_case_m_switch'
IS_MODULE = True
def get_environ(self):
env = os.environ.copy()
curr_pythonpath = env.get('PYTHONPATH', '')
root_dirname = os.path.dirname(os.path.dirname(__file__))
curr_pythonpath += root_dirname + os.pathsep
env['PYTHONPATH'] = curr_pythonpath
return env
def get_main_filename(self):
return debugger_unittest._get_debugger_test_file('_debugger_case_m_switch.py')
def run(self):
self.start_socket()
self.log.append('writing add breakpoint')
breakpoint_id = self.write_add_breakpoint(1, None)
self.log.append('making initial run')
self.write_make_initial_run()
self.log.append('waiting for breakpoint hit')
thread_id, frame_id = self.wait_for_breakpoint_hit()
self.write_remove_breakpoint(breakpoint_id)
self.log.append('run thread')
self.write_run_thread(thread_id)
self.log.append('asserting')
try:
assert 9 == self._sequence, 'Expected 9. Had: %s' % self._sequence
except:
self.log.append('assert failed!')
raise
self.log.append('asserted')
self.finished_ok = True
# =======================================================================================================================
# WriterThreadCaseModuleWithEntryPoint
# =======================================================================================================================
class WriterThreadCaseModuleWithEntryPoint(WriterThreadCaseMSwitch):
TEST_FILE = 'tests_pydevd_python._debugger_case_module_entry_point:main'
IS_MODULE = True
def get_main_filename(self):
return debugger_unittest._get_debugger_test_file('_debugger_case_module_entry_point.py')
#=======================================================================================================================
# WriterThreadCaseRemoteDebugger
#=======================================================================================================================
class WriterThreadCaseRemoteDebugger(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case_remote.py')
def run(self):
self.start_socket(8787)
self.log.append('making initial run')
self.write_make_initial_run()
self.log.append('waiting for breakpoint hit')
thread_id, frame_id = self.wait_for_breakpoint_hit('105')
self.log.append('run thread')
self.write_run_thread(thread_id)
self.log.append('asserting')
try:
assert 5 == self._sequence, 'Expected 5. Had: %s' % self._sequence
except:
self.log.append('assert failed!')
raise
self.log.append('asserted')
self.finished_ok = True
#=======================================================================================================================
# _SecondaryMultiProcProcessWriterThread
#=======================================================================================================================
class _SecondaryMultiProcProcessWriterThread(debugger_unittest.AbstractWriterThread):
FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
def __init__(self, server_socket):
debugger_unittest.AbstractWriterThread.__init__(self)
self.server_socket = server_socket
def run(self):
print('waiting for second process')
self.sock, addr = self.server_socket.accept()
print('accepted second process')
from tests_pydevd_python.debugger_unittest import ReaderThread
self.reader_thread = ReaderThread(self.sock)
self.reader_thread.start()
self._sequence = -1
# initial command is always the version
self.write_version()
self.log.append('start_socket')
self.write_make_initial_run()
time.sleep(.5)
self.finished_ok = True
#=======================================================================================================================
# WriterThreadCaseRemoteDebuggerMultiProc
#=======================================================================================================================
class WriterThreadCaseRemoteDebuggerMultiProc(debugger_unittest.AbstractWriterThread):
    # This sometimes becomes flaky on CI because the process outlives the writer thread...
    # As we're only interested in knowing whether a second connection was received, just kill
    # the related process.
FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case_remote_1.py')
def run(self):
self.start_socket(8787)
self.log.append('making initial run')
self.write_make_initial_run()
self.log.append('waiting for breakpoint hit')
thread_id, frame_id = self.wait_for_breakpoint_hit('105')
self.secondary_multi_proc_process_writer_thread = secondary_multi_proc_process_writer_thread = \
_SecondaryMultiProcProcessWriterThread(self.server_socket)
secondary_multi_proc_process_writer_thread.start()
self.log.append('run thread')
self.write_run_thread(thread_id)
for _i in xrange(400):
if secondary_multi_proc_process_writer_thread.finished_ok:
break
time.sleep(.1)
else:
self.log.append('Secondary process not finished ok!')
raise AssertionError('Secondary process not finished ok!')
self.log.append('Secondary process finished!')
try:
assert 5 == self._sequence, 'Expected 5. Had: %s' % self._sequence
except:
self.log.append('assert failed!')
raise
self.log.append('asserted')
self.finished_ok = True
def do_kill(self):
debugger_unittest.AbstractWriterThread.do_kill(self)
if hasattr(self, 'secondary_multi_proc_process_writer_thread'):
self.secondary_multi_proc_process_writer_thread.do_kill()
#=======================================================================================================================
# WriterThreadCaseTypeExt - [Test Case]: Custom type presentation extensions
#======================================================================================================================
class WriterThreadCaseTypeExt(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case_type_ext.py')
def run(self):
self.start_socket()
self.write_add_breakpoint(7, None)
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
self.write_get_frame(thread_id, frame_id)
self.wait_for_var(r'<var name="my_rect" type="Rect" qualifier="__main__" value="Rectangle%255BLength%253A 5%252C Width%253A 10 %252C Area%253A 50%255D" isContainer="True" />') is True
self.write_get_variable(thread_id, frame_id, 'my_rect')
self.wait_for_var(r'<var name="area" type="int" qualifier="{0}" value="int%253A 50" />'.format(builtin_qualifier)) is True
self.write_run_thread(thread_id)
self.finished_ok = True
def get_environ(self):
env = os.environ.copy()
python_path = env.get("PYTHONPATH","")
ext_base = debugger_unittest._get_debugger_test_file('my_extensions')
        env['PYTHONPATH'] = ext_base + os.pathsep + python_path if python_path else ext_base
return env
#=======================================================================================================================
# WriterThreadCaseEventExt - [Test Case]: Test initialize event for extensions
#======================================================================================================================
class WriterThreadCaseEventExt(debugger_unittest.AbstractWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case_event_ext.py')
def run(self):
self.start_socket()
self.write_make_initial_run()
self.finished_ok = True
def additional_output_checks(self, stdout, stderr):
if 'INITIALIZE EVENT RECEIVED' not in stdout:
raise AssertionError('No initialize event received')
def get_environ(self):
env = os.environ.copy()
python_path = env.get("PYTHONPATH","")
ext_base = debugger_unittest._get_debugger_test_file('my_extensions')
        env['PYTHONPATH'] = ext_base + os.pathsep + python_path if python_path else ext_base
env["VERIFY_EVENT_TEST"] = "1"
return env
#=======================================================================================================================
# Test
#=======================================================================================================================
class Test(unittest.TestCase, debugger_unittest.DebuggerRunner):
def get_command_line(self):
if IS_JYTHON:
if sys.executable is not None:
# i.e.: we're running with the provided jython.exe
return [sys.executable]
else:
return [
get_java_location(),
'-classpath',
get_jython_jar(),
'org.python.util.jython'
]
if IS_CPYTHON:
return [sys.executable, '-u']
if IS_IRONPYTHON:
return [
sys.executable,
'-X:Frames'
]
raise RuntimeError('Unable to provide command line')
@unittest.skipIf(IS_IRONPYTHON, reason='Test needs gc.get_referrers to really check anything.')
def test_case_1(self):
self.check_case(WriterThreadCase1)
def test_case_2(self):
self.check_case(WriterThreadCase2)
@unittest.skipIf(IS_IRONPYTHON, reason='This test fails once in a while due to timing issues on IronPython, so, skipping it.')
def test_case_3(self):
self.check_case(WriterThreadCase3)
@unittest.skipIf(IS_JYTHON, reason='This test is flaky on Jython, so, skipping it.')
def test_case_4(self):
self.check_case(WriterThreadCase4)
def test_case_5(self):
self.check_case(WriterThreadCase5)
def test_case_6(self):
self.check_case(WriterThreadCase6)
@unittest.skipIf(IS_IRONPYTHON, "Different behavior on IronPython")
def test_case_7(self):
        # This test checks that we start without variables and that at each step a new var is created, but on IronPython
        # the variables exist all at once (with None values), so we can't test it properly.
self.check_case(WriterThreadCase7)
def test_case_8(self):
self.check_case(WriterThreadCase8)
def test_case_9(self):
self.check_case(WriterThreadCase9)
def test_case_10(self):
self.check_case(WriterThreadCase10)
def test_case_11(self):
self.check_case(WriterThreadCase11)
def test_case_12(self):
self.check_case(WriterThreadCase12)
@unittest.skipIf(IS_IRONPYTHON, reason='Failing on IronPython (needs to be investigated).')
def test_case_13(self):
self.check_case(WriterThreadCase13)
def test_case_14(self):
self.check_case(WriterThreadCase14)
def test_case_15(self):
self.check_case(WriterThreadCase15)
@unittest.skipIf(not IS_NUMPY, "numpy not available")
def test_case_16(self):
self.check_case(WriterThreadCase16)
def test_case_17(self):
self.check_case(WriterThreadCase17)
def test_case_17a(self):
self.check_case(WriterThreadCase17a)
@unittest.skipIf(IS_IRONPYTHON or IS_JYTHON, 'Unsupported assign to local')
def test_case_18(self):
self.check_case(WriterThreadCase18)
def test_case_19(self):
self.check_case(WriterThreadCase19)
# PY-29051
def test_case_20(self):
self.check_case(WriterThreadCase20)
if TEST_DJANGO:
def test_case_django(self):
self.check_case(WriterThreadCaseDjango)
def test_case_django2(self):
self.check_case(WriterThreadCaseDjango2)
if TEST_CYTHON:
def test_cython(self):
from _pydevd_bundle import pydevd_cython
assert pydevd_cython.trace_dispatch is not None
def _has_qt(self):
try:
from PySide import QtCore # @UnresolvedImport
return True
except:
try:
from PyQt4 import QtCore
return True
except:
try:
from PyQt5 import QtCore
return True
except:
pass
return False
def test_case_qthread1(self):
if self._has_qt():
self.check_case(WriterThreadCaseQThread1)
def test_case_qthread2(self):
if self._has_qt():
self.check_case(WriterThreadCaseQThread2)
def test_case_qthread3(self):
if self._has_qt():
self.check_case(WriterThreadCaseQThread3)
def test_case_qthread4(self):
if self._has_qt():
self.check_case(WriterThreadCaseQThread4)
def test_m_switch(self):
self.check_case(WriterThreadCaseMSwitch)
def test_module_entry_point(self):
self.check_case(WriterThreadCaseModuleWithEntryPoint)
    @unittest.skip('New behaviour differs from PyDev (needs to be investigated).')
def test_case_set_next_statement(self):
self.check_case(WriterThreadCaseSetNextStatement)
@unittest.skipIf(IS_IRONPYTHON, reason='Failing on IronPython (needs to be investigated).')
def test_case_type_ext(self):
self.check_case(WriterThreadCaseTypeExt)
@unittest.skipIf(IS_IRONPYTHON, reason='Failing on IronPython (needs to be investigated).')
def test_case_event_ext(self):
self.check_case(WriterThreadCaseEventExt)
@unittest.skipIf(not IS_CPYTHON, reason='CPython only test.')
class TestPythonRemoteDebugger(unittest.TestCase, debugger_unittest.DebuggerRunner):
def get_command_line(self):
return [sys.executable, '-u']
def add_command_line_args(self, args):
return args + [self.writer_thread.TEST_FILE]
def test_remote_debugger(self):
self.check_case(WriterThreadCaseRemoteDebugger)
@unittest.skipIf(IS_PY2, "Skip test for Python 2, because child process sometimes keeps alive")
def test_remote_debugger2(self):
self.check_case(WriterThreadCaseRemoteDebuggerMultiProc)
def get_java_location():
from java.lang import System # @UnresolvedImport
jre_dir = System.getProperty("java.home")
for f in [os.path.join(jre_dir, 'bin', 'java.exe'), os.path.join(jre_dir, 'bin', 'java')]:
if os.path.exists(f):
return f
raise RuntimeError('Unable to find java executable')
def get_jython_jar():
from java.lang import ClassLoader # @UnresolvedImport
cl = ClassLoader.getSystemClassLoader()
paths = map(lambda url: url.getFile(), cl.getURLs())
for p in paths:
if 'jython.jar' in p:
return p
raise RuntimeError('Unable to find jython.jar')
def get_location_from_line(line):
loc = line.split('=')[1].strip()
if loc.endswith(';'):
loc = loc[:-1]
if loc.endswith('"'):
loc = loc[:-1]
if loc.startswith('"'):
loc = loc[1:]
return loc
def split_line(line):
if '=' not in line:
return None, None
var = line.split('=')[0].strip()
return var, get_location_from_line(line)
# c:\bin\jython2.7.0\bin\jython.exe -m py.test tests_python
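# A quick sketch of the two parsing helpers above, on a hypothetical
# settings-style line (the trailing ';' and the surrounding quotes are
# exactly what get_location_from_line strips):
#
#     var, loc = split_line('JYTHON_JAR = "c:/bin/jython.jar";')
#     assert var == 'JYTHON_JAR'
#     assert loc == 'c:/bin/jython.jar'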
|
tartavull/google-cloud-python
|
refs/heads/master
|
speech/google/__init__.py
|
194
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
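# For context: this try/except is the standard namespace-package shim.
# Shipping the same __init__.py in several independently installed
# distributions lets each contribute subpackages to the one top-level
# 'google' package, e.g. (package names illustrative):
#
#     import google.cloud.speech   # provided by one distribution
#     import google.protobuf       # provided by another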
|
GREO/gnuradio-git
|
refs/heads/master
|
gnuradio-core/src/python/gnuradio/gruimpl/crc.py
|
12
|
#
# Copyright 2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from hexint import *
import struct
def gen_and_append_crc32(s):
crc = gr.crc32(s)
return s + struct.pack(">I", hexint(crc) & 0xFFFFFFFF)
def check_crc32(s):
if len(s) < 4:
return (False, '')
msg = s[:-4]
#print "msg = '%s'" % (msg,)
actual = gr.crc32(msg)
(expected,) = struct.unpack(">I", s[-4:])
# print "actual =", hex(actual), "expected =", hex(expected)
return (actual == expected, msg)
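# A round-trip sanity check for the helpers above (sketch only; the
# function name is illustrative, gr.crc32 needs a GNU Radio runtime, and
# payloads are Python 2 byte strings):
def _selftest_crc32():
    payload = 'hello world'
    framed = gen_and_append_crc32(payload)
    ok, msg = check_crc32(framed)
    assert ok and msg == payload
    # Corrupt the last byte: the check must now fail.
    bad = framed[:-1] + chr((ord(framed[-1]) + 1) % 256)
    ok, _ = check_crc32(bad)
    assert not ok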
|
nurav/balrog
|
refs/heads/master
|
auslib/util/timestamp.py
|
5
|
import time
def getMillisecondTimestamp():
t = int(time.time() * 1000)
return t
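# Usage sketch: the value is in milliseconds, so converting back to a
# datetime requires dividing by 1000 (standard library only):
#
#     from datetime import datetime
#     ts = getMillisecondTimestamp()              # e.g. 1438387200123
#     dt = datetime.utcfromtimestamp(ts / 1000.0)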
|
nlloyd/SubliminalCollaborator
|
refs/heads/master
|
libs/twisted/lore/docbook.py
|
60
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DocBook output support for Lore.
"""
import os, cgi
from xml.dom import minidom as dom
from twisted.lore import latex
class DocbookSpitter(latex.BaseLatexSpitter):
currentLevel = 1
def writeNodeData(self, node):
self.writer(node.data)
def visitNode_body(self, node):
self.visitNodeDefault(node)
self.writer('</section>'*self.currentLevel)
def visitNodeHeader(self, node):
level = int(node.tagName[1])
difference, self.currentLevel = level-self.currentLevel, level
self.writer('<section>'*difference+'</section>'*-difference)
if difference<=0:
self.writer('</section>\n<section>')
self.writer('<title>')
self.visitNodeDefault(node)
def visitNode_a_listing(self, node):
fileName = os.path.join(self.currDir, node.getAttribute('href'))
self.writer('<programlisting>\n')
self.writer(cgi.escape(open(fileName).read()))
self.writer('</programlisting>\n')
def visitNode_a_href(self, node):
self.visitNodeDefault(node)
def visitNode_a_name(self, node):
self.visitNodeDefault(node)
def visitNode_li(self, node):
for child in node.childNodes:
if getattr(child, 'tagName', None) != 'p':
new = dom.Element('p')
new.childNodes = [child]
node.replaceChild(new, child)
self.visitNodeDefault(node)
visitNode_h2 = visitNode_h3 = visitNode_h4 = visitNodeHeader
end_h2 = end_h3 = end_h4 = '</title><para />'
start_title, end_title = '<section><title>', '</title><para />'
start_p, end_p = '<para>', '</para>'
start_strong, end_strong = start_em, end_em = '<emphasis>', '</emphasis>'
start_span_footnote, end_span_footnote = '<footnote><para>', '</para></footnote>'
start_q = end_q = '"'
start_pre, end_pre = '<programlisting>', '</programlisting>'
start_div_note, end_div_note = '<note>', '</note>'
start_li, end_li = '<listitem>', '</listitem>'
start_ul, end_ul = '<itemizedlist>', '</itemizedlist>'
start_ol, end_ol = '<orderedlist>', '</orderedlist>'
start_dl, end_dl = '<variablelist>', '</variablelist>'
start_dt, end_dt = '<varlistentry><term>', '</term>'
start_dd, end_dd = '<listitem><para>', '</para></listitem></varlistentry>'
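# Worked sketch of the visitNodeHeader bookkeeping above, assuming the
# spitter starts at currentLevel = 1 (a negative repeat count on a string
# yields the empty string):
#
#     h2 after level 1: difference = 1  -> writes '<section>'
#     h4 after level 2: difference = 2  -> writes '<section><section>'
#     h2 after level 4: difference = -2 -> writes '</section></section>',
#         then, since difference <= 0, '</section>\n<section>'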
|
LLNL/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/interproscan/package.py
|
3
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Interproscan(Package):
"""InterProScan is the software package that allows sequences
(protein and nucleic) to be scanned against InterPro's signatures.
Signatures are predictive models, provided by several different
databases, that make up the InterPro consortium."""
homepage = "https://www.ebi.ac.uk/interpro/interproscan.html"
url = "https://github.com/ebi-pf-team/interproscan/archive/5.36-75.0.tar.gz"
version('5.38-76.0', sha256='cb191ff8eee275689b789167a57b368ea5c06bbcd36b4de23e8bbbbdc0fc7434')
version('5.36-75.0', sha256='383d7431e47c985056c856ceb6d4dcf7ed2559a4a3d5c210c01ce3975875addb')
version('4.8',
sha256='f1cb0ae1218eb05ed59ad7f94883f474eb9a6185a56ad3a93a364acb73506a3f',
url='ftp://ftp.ebi.ac.uk/pub/software/unix/iprscan/4/RELEASE/4.8/iprscan_v4.8.tar.gz')
resource(
when='@:4.8',
name='binaries',
url="http://ftp.ebi.ac.uk/pub/databases/interpro/iprscan/BIN/4.x/iprscan_bin4.x_Linux64.tar.gz",
sha256='551610a4682b112522f3ded5268f76ba9a47399a72e726fafb17cc938a50e7ee',
)
depends_on('java@8.0:8.9', type=('build', 'run'), when='@5:5.36-99.0')
depends_on('java@11.0:', type=('build', 'run'), when='@5.37-76.0:')
depends_on('maven', type='build', when='@5:')
depends_on('perl@5:', type=('build', 'run'))
depends_on('python@3:', when='@5:', type=('build', 'run'))
depends_on('perl-cgi', when='@:4.8', type=('build', 'run'))
depends_on('perl-mailtools', when='@:4.8', type=('build', 'run'))
depends_on('perl-xml-quote', when='@:4.8', type=('build', 'run'))
depends_on('perl-xml-parser', when='@:4.8', type=('build', 'run'))
depends_on('perl-io-string', when='@:4.8', type=('build', 'run'))
depends_on('perl-io-stringy', when='@:4.8', type=('build', 'run'))
patch('large-gid.patch', when='@5:')
patch('non-interactive.patch', when='@:4.8')
patch('ps_scan.patch', when='@:4.8')
def install(self, spec, prefix):
with working_dir('core'):
which('mvn')('clean', 'install')
install_tree('.', prefix)
# link the main shell script into the PATH
ips_bin_suffix = 'core/jms-implementation/target/interproscan-5-dist'
symlink(join_path(prefix, ips_bin_suffix), prefix.bin)
@when('@:4.8')
def install(self, spec, prefix):
perl = which('perl')
src = join_path(self.stage.source_path, 'iprscan', 'bin', 'Linux')
dst = join_path(self.stage.source_path, 'bin', 'binaries')
force_symlink(src, dst)
install_tree('.', prefix)
with working_dir(prefix):
perl('Config.pl')
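    # Dispatch sketch (illustrative, not Spack internals): @when('@:4.8')
    # registers the second install() only for specs that satisfy
    # 'interproscan@:4.8'; any newer spec runs the first, maven-based one.
    #
    #     spack install interproscan@5.38-76.0   # -> maven install()
    #     spack install interproscan@4.8         # -> @when('@:4.8') install()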
|
backupchecker/backupchecker
|
refs/heads/master
|
backupchecker/generatelist/generatelistfortar.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright © 2015-2017 Carl Chenet <chaica@backupcheckerproject.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Generate a list of files from a tar archive
'''Generate a list of files from a tar archive'''
import fnmatch
import logging
import os
import os.path
import sys
import tarfile
from backupchecker.generatelist.generatelist import GenerateList
from backupchecker.checkhashes import get_hash
class GenerateListForTar(GenerateList):
'''Generate a list of files from a tar archive'''
def __init__(self, __genparams):
'''The constructor for the GenerateListForTar class'''
self.__arcpath = __genparams['arcpath']
self.__delimiter = __genparams['delimiter']
self._genlist = __genparams['genlist']
self._genfull = __genparams['genfull']
self.__listoutput = __genparams['listoutput']
self.__confoutput = __genparams['confoutput']
self.__fulloutput = __genparams['fulloutput']
self.__getallhashes = __genparams['getallhashes']
self.__hashtype = __genparams['hashtype']
self.__parsingexceptions = __genparams['parsingexceptions']
self.__isastream = __genparams['isastream']
self.__confname = __genparams['confname']
try:
if self.__isastream:
self.__tarstreamname = 'tarstream'
self.__streampath = os.path.join(self.__arcpath, self.__tarstreamname)
__tar = tarfile.open(mode='r|*', fileobj=sys.stdin.buffer)
else:
__tar = tarfile.open(self.__arcpath, 'r')
self.__main(__tar)
except (tarfile.TarError, EOFError) as _msg:
            __warn = '. You should investigate for data corruption.'
logging.warning('{}: {}{}'.format(self.__arcpath, str(_msg), __warn))
def __main(self, __tar):
'''Main for the GenerateListForTar class'''
# extract mtime of the archive
if not self.__isastream:
__arcstat = os.stat(self.__arcpath)
__listoffiles = ['[archive]\nmtime{} {}\n\n[files]\n'.format(self.__delimiter,__arcstat.st_mtime)]
else:
__listoffiles = ['[files]\n']
__oneline = '{value}{delimiter} ={value} uid{delimiter}{value} gid{delimiter}{value} owner{delimiter}{value} group{delimiter}{value} mode{delimiter}{value} type{delimiter}{value} mtime{delimiter}{value}\n'.format(value='{}', delimiter=self.__delimiter)
if self.__getallhashes:
# we get all the hash sums of files inside the backup
if not self.__hashtype:
__onelinewithhash = '{value}{delimiter} ={value} uid{delimiter}{value} gid{delimiter}{value} owner{delimiter}{value} group{delimiter}{value} mode{delimiter}{value} type{delimiter}{value} mtime{delimiter}{value} md5{delimiter}{value}\n'.format(value='{}', delimiter=self.__delimiter)
else:
# we switch the default hash sum
__onelinewithhash = '{value}{delimiter} ={value} uid{delimiter}{value} gid{delimiter}{value} owner{delimiter}{value} group{delimiter}{value} mode{delimiter}{value} type{delimiter}{value} mtime{delimiter}{value} {hashtype}{delimiter}{value}\n'.format(value='{}', hashtype=self.__hashtype, delimiter=self.__delimiter)
else:
__onelinewithouthash = '{value}{delimiter} ={value} uid{delimiter}{value} gid{delimiter}{value} owner{delimiter}{value} group{delimiter}{value} mode{delimiter}{value} type{delimiter}{value} mtime{delimiter}{value}\n'.format(value='{}', delimiter=self.__delimiter)
__onelinewithtarget = '{value}{delimiter} ={value} uid{delimiter}{value} gid{delimiter}{value} owner{delimiter}{value} group{delimiter}{value} mode{delimiter}{value} type{delimiter}{value} mtime{delimiter}{value} target{delimiter}{value}\n'.format(value='{}', delimiter=self.__delimiter)
for __tarinfo in __tar:
# Pick up tar information
__tarinfo.name = self._normalize_path(__tarinfo.name)
__type = self.__translate_type(__tarinfo.type)
__mode = oct(__tarinfo.mode).split('o')[-1]
            # if the file has no permission bits set, the mode string needs fixing - solving #15
if __mode == '0':
__mode = '000'
if __type == 'f':
if self.__getallhashes:
# extract all hash sums from the archive
if not self.__hashtype:
# extract hash sum of the file inside the archive
__hash = get_hash(__tar.extractfile(__tarinfo.name), 'md5')
else:
# switch the default hash sum type
__hash = get_hash(__tar.extractfile(__tarinfo.name), self.__hashtype)
# format the retrieved information
__listoffiles.append(__onelinewithhash.format(__tarinfo.name,
str(__tarinfo.size),
str(__tarinfo.uid),
str(__tarinfo.gid),
str(__tarinfo.uname),
str(__tarinfo.gname),
__mode,
__type,
float(__tarinfo.mtime),
__hash,
__tarinfo.linkname))
else:
# check if there are exceptions while parsing
if self.__parsingexceptions:
for __file in self.__parsingexceptions:
if fnmatch.fnmatch(__tarinfo.name, __file):
__hash = get_hash(__tar.extractfile(__tarinfo.name), self.__parsingexceptions[__file])
__onelinewithhash = '{value}{delimiter} ={value} uid{delimiter}{value} gid{delimiter}{value} owner{delimiter}{value} group{delimiter}{value} mode{delimiter}{value} type{delimiter}{value} mtime{delimiter}{value} {hashtype}{delimiter}{value}\n'.format(value='{}', hashtype=self.__parsingexceptions[__file], delimiter=self.__delimiter)
__listoffiles.append(__onelinewithhash.format(__tarinfo.name,
str(__tarinfo.size),
str(__tarinfo.uid),
str(__tarinfo.gid),
str(__tarinfo.uname),
str(__tarinfo.gname),
__mode,
__type,
float(__tarinfo.mtime),
__hash,
__tarinfo.linkname))
else:
                                # the --exceptions-file option is used but this file does not match any exception
__listoffiles.append(__onelinewithouthash.format(__tarinfo.name,
str(__tarinfo.size),
str(__tarinfo.uid),
str(__tarinfo.gid),
str(__tarinfo.uname),
str(__tarinfo.gname),
__mode,
__type,
float(__tarinfo.mtime),
__tarinfo.linkname))
else:
# we don't use the --exceptions-file option
__listoffiles.append(__onelinewithouthash.format(__tarinfo.name,
str(__tarinfo.size),
str(__tarinfo.uid),
str(__tarinfo.gid),
str(__tarinfo.uname),
str(__tarinfo.gname),
__mode,
__type,
float(__tarinfo.mtime),
__tarinfo.linkname))
elif __type == 'l' or __type == 's':
# format the retrieved information
__listoffiles.append(__onelinewithtarget.format(__tarinfo.name,
str(__tarinfo.size),
str(__tarinfo.uid),
str(__tarinfo.gid),
str(__tarinfo.uname),
str(__tarinfo.gname),
__mode,
__type,
float(__tarinfo.mtime),
__tarinfo.linkname))
else:
                # if the file is not a regular file, skip its hash sum
__listoffiles.append(__oneline.format(__tarinfo.name,
str(__tarinfo.size),
str(__tarinfo.uid),
str(__tarinfo.gid),
str(__tarinfo.uname),
str(__tarinfo.gname),
__mode,
__type,
float(__tarinfo.mtime)))
# Compose the name of the generated list
### for tar archive
if self.__arcpath.lower().endswith('.tar'):
self.__make_conf_and_list_paths('.tar')
### for tar.gz archive
elif self.__arcpath.lower().endswith('.tar.gz'):
self.__make_conf_and_list_paths('.tar.gz')
### for tar.bz2 archive
elif self.__arcpath.lower().endswith('.tar.bz2'):
self.__make_conf_and_list_paths('.tar.bz2')
### for tar.xz archive
elif self.__arcpath.lower().endswith('.tar.xz'):
self.__make_conf_and_list_paths('.tar.xz')
### for tgz archive
elif self.__arcpath.lower().endswith('.tgz'):
self.__make_conf_and_list_paths('.tgz')
### for tbz archive
elif self.__arcpath.lower().endswith('.tbz'):
self.__make_conf_and_list_paths('.tbz')
### for tbz2 archive
elif self.__arcpath.lower().endswith('.tbz2'):
self.__make_conf_and_list_paths('.tbz2')
### for tar stream
elif self.__isastream:
#if self._genfull:
# self.__arcname = self.__tarstreamname
#if self.__confname:
# self.__arcname = self.__confname
# self.__arcconfpath = ''.join([self.__confname, '.conf'])
# self.__arclistpath = ''.join([self.__confname, '.list'])
#else:
# self.__arcconfpath = ''.join([self.__streampath, '.conf'])
# self.__arclistpath = ''.join([self.__streampath, '.list'])
self.__make_conf_and_list_paths('')
# call the method to write information in a file
__listconfinfo = {'arclistpath': self.__arclistpath,
'listoffiles':__listoffiles}
self._generate_list(__listconfinfo)
# call the method to write the configuration file if --gen-full was required
if self._genfull:
# generate the hash sum of the list of files
__listhashsum = self._get_list_hash(__listconfinfo['arclistpath'])
if self.__isastream:
__confinfo = {'arcname':self.__arcname,
'arcconfpath': self.__arcconfpath,
'arclistpath': self.__arclistpath,
'arctype': 'archive',
'sha512': __listhashsum}
else:
__confinfo = {'arcname':self.__arcname,
'arcpath':self.__arcpath,
'arcconfpath': self.__arcconfpath,
'arclistpath': self.__arclistpath,
'arctype': 'archive',
'sha512': __listhashsum}
self._generate_conf(__confinfo,self.__isastream)
def __translate_type(self, __arctype):
        '''Translate the type of a file inside the tar archive to a
        generic single-letter name
        '''
__types = {tarfile.REGTYPE: 'f',
tarfile.AREGTYPE: 'a',
tarfile.CHRTYPE: 'c',
tarfile.DIRTYPE: 'd',
tarfile.LNKTYPE: 'l',
tarfile.SYMTYPE: 's',
tarfile.CONTTYPE: 'n',
tarfile.BLKTYPE: 'b',
tarfile.GNUTYPE_SPARSE: 'g',
tarfile.FIFOTYPE: 'o'}
return __types[__arctype]
def __make_conf_and_list_paths(self, __tartype):
'''Make conf file path and list file paths'''
if not self.__isastream:
__arcwithext = os.path.split(self.__arcpath[:-(len(__tartype)-1)])[1]
# behaviour for --gen-list option
# define custom path for the filelist or use the default one
if self.__listoutput:
# --gen-list and --output-list-dir and --configuration-name
if self.__confname:
self.__arclistpath = os.path.join(self.__listoutput, ''.join([self.__confname, '.', 'list']))
# --gen-list and --output-list-dir
else:
if self.__isastream:
self.__arclistpath = os.path.join(self.__listoutput, '.'.join([self.__tarstreamname, 'list']))
else:
self.__arclistpath = os.path.join(self.__listoutput, ''.join([__arcwithext, 'list']))
# define custom path for both filelist and conflist
elif self.__fulloutput:
# --gen-list and --output-list-and-conf-dir and --configuration-name
if self.__confname:
self.__arclistpath = os.path.join(self.__fulloutput, ''.join([self.__confname, '.', 'list']))
else:
                # --gen-list and --output-list-and-conf-dir
if self.__isastream:
self.__arclistpath = os.path.join(self.__fulloutput, '.'.join([self.__tarstreamname, 'list']))
else:
self.__arclistpath = os.path.join(self.__fulloutput, ''.join([__arcwithext, 'list']))
else:
# only --configuration-name
if self.__confname:
__arcpath = os.path.dirname(self.__arcpath)
__arcpath = os.path.join(__arcpath, self.__confname)
self.__arclistpath = ''.join([__arcpath, '.', 'list'])
# default behaviour
else:
if self.__isastream:
__arcdir = os.path.dirname(self.__arcpath)
self.__arclistpath = os.path.join(__arcdir, '.'.join([self.__tarstreamname, 'list']))
else:
self.__arclistpath = ''.join([self.__arcpath[:-(len(__tartype)-1)], 'list'])
# behaviour for --gen-full option
if self._genfull:
# define custom path for the conf file
if self.__confoutput:
if self.__confname:
# --gen-full and --output-conf-dir and --configuration-name
self.__arcconfpath = os.path.join(self.__confoutput, ''.join([self.__confname, '.', 'conf']))
else:
# --gen-full and --output-conf-dir
if self.__isastream:
self.__arcconfpath = os.path.join(self.__confoutput, '.'.join([self.__tarstreamname, 'conf']))
else:
self.__arcconfpath = os.path.join(self.__confoutput, ''.join([__arcwithext, 'conf']))
elif self.__fulloutput:
# --gen-full and --output-list-and-conf-dir and --configuration-name
if self.__confname:
self.__arcconfpath = os.path.join(self.__fulloutput, ''.join([self.__confname, '.', 'conf']))
else:
# --gen-full and --output-list-and-conf-dir
if self.__isastream:
self.__arcconfpath = os.path.join(self.__fulloutput, '.'.join([self.__tarstreamname, 'conf']))
else:
self.__arcconfpath = os.path.join(self.__fulloutput, ''.join([__arcwithext, 'conf']))
else:
# --gen-full and --configuration-name
if self.__confname:
__arcpath = os.path.dirname(self.__arcpath)
__arcpath = os.path.join(__arcpath, self.__confname)
self.__arcconfpath = ''.join([__arcpath, '.', 'conf'])
else:
# only --gen-full
if self.__isastream:
__arcdir = os.path.dirname(self.__arcpath)
self.__arcconfpath = os.path.join(__arcdir, '.'.join([self.__tarstreamname, 'conf']))
else:
self.__arcconfpath = ''.join([self.__arcpath[:-(len(__tartype)-1)], 'conf'])
# user-defined name of the archive/stream
if self.__confname:
self.__arcname = self.__confname
else:
if self.__isastream:
self.__arcname = self.__tarstreamname
else:
self.__arcname = os.path.basename(self.__arcpath[:-len(__tartype)])
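# Minimal invocation sketch (keys taken from the constructor above; all
# values are illustrative):
#
#     params = {'arcpath': '/backups/etc.tar.gz', 'delimiter': '|',
#               'genlist': True, 'genfull': False, 'listoutput': None,
#               'confoutput': None, 'fulloutput': None,
#               'getallhashes': True, 'hashtype': None,
#               'parsingexceptions': {}, 'isastream': False,
#               'confname': None}
#     GenerateListForTar(params)   # writes /backups/etc.list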
|
minrk/sympy
|
refs/heads/master
|
sympy/simplify/epathtools.py
|
6
|
"""Tools for manipulation of expressions using paths. """
from sympy.core import Basic
class EPath(object):
"""
Manipulate expressions using paths.
EPath grammar in EBNF notation::
literal ::= /[A-Za-z_][A-Za-z_0-9]*/
number ::= /-?\d+/
type ::= literal
attribute ::= literal "?"
all ::= "*"
slice ::= "[" number? (":" number? (":" number?)?)? "]"
range ::= all | slice
query ::= (type | attribute) ("|" (type | attribute))*
selector ::= range | query range?
path ::= "/" selector ("/" selector)*
See the docstring of the epath() function.
"""
__slots__ = ["_path", "_epath"]
def __new__(cls, path):
"""Construct new EPath. """
if isinstance(path, EPath):
return path
if not path:
raise ValueError("empty EPath")
_path = path
if path[0] == '/':
path = path[1:]
else:
raise NotImplementedError("non-root EPath")
epath = []
for selector in path.split('/'):
selector = selector.strip()
if not selector:
raise ValueError("empty selector")
index = 0
for c in selector:
if c.isalnum() or c == '_' or c == '|' or c == '?':
index += 1
else:
break
attrs = []
types = []
if index:
elements = selector[:index]
selector = selector[index:]
for element in elements.split('|'):
element = element.strip()
if not element:
raise ValueError("empty element")
if element.endswith('?'):
attrs.append(element[:-1])
else:
types.append(element)
span = None
if selector == '*':
pass
else:
if selector.startswith('['):
try:
i = selector.index(']')
except ValueError:
raise ValueError("expected ']', got EOL")
_span, span = selector[1:i], []
if ':' not in _span:
span = int(_span)
else:
for elt in _span.split(':', 3):
if not elt:
span.append(None)
else:
span.append(int(elt))
span = slice(*span)
selector = selector[i+1:]
if selector:
raise ValueError("trailing characters in selector")
epath.append((attrs, types, span))
obj = object.__new__(cls)
obj._path = _path
obj._epath = epath
return obj
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._path)
def _get_ordered_args(self, expr):
"""Sort ``expr.args`` using printing order. """
if expr.is_Add:
return expr.as_ordered_terms()
elif expr.is_Mul:
return expr.as_ordered_factors()
else:
return expr.args
def _hasattrs(self, expr, attrs):
"""Check if ``expr`` has any of ``attrs``. """
for attr in attrs:
if not hasattr(expr, attr):
return False
return True
def _hastypes(self, expr, types):
"""Check if ``expr`` is any of ``types``. """
_types = [ cls.__name__ for cls in expr.__class__.mro() ]
return bool(set(_types).intersection(types))
def _has(self, expr, attrs, types):
"""Apply ``_hasattrs`` and ``_hastypes`` to ``expr``. """
if not (attrs or types):
return True
if attrs and self._hasattrs(expr, attrs):
return True
if types and self._hastypes(expr, types):
return True
return False
def apply(self, expr, func, args=None, kwargs=None):
"""
Modify parts of an expression selected by a path.
**Examples**
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.apply(expr, lambda expr: expr**2)
[((x**2, 1), 2), ((3, y**2), z)]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.apply(expr, lambda expr: 2*expr)
t + sin(2*x + 1) + cos(2*x + 2*y + E)
"""
def _apply(path, expr, func):
if not path:
return func(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
if not expr.is_Atom:
args, basic = self._get_ordered_args(expr), True
else:
return expr
elif hasattr(expr, '__iter__'):
args, basic = expr, False
else:
return expr
args = list(args)
if span is not None:
if type(span) == slice:
indices = range(*span.indices(len(args)))
else:
indices = [span]
else:
indices = xrange(len(args))
for i in indices:
try:
arg = args[i]
except IndexError:
continue
if self._has(arg, attrs, types):
args[i] = _apply(path, arg, func)
if basic:
return expr.func(*args)
else:
return expr.__class__(args)
_args, _kwargs = args or (), kwargs or {}
_func = lambda expr: func(expr, *_args, **_kwargs)
return _apply(self._epath, expr, _func)
def select(self, expr):
"""
Retrieve parts of an expression selected by a path.
**Examples**
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.select(expr)
[x, y]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.select(expr)
[x, x, y]
"""
result = []
def _select(path, expr):
if not path:
result.append(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
args = self._get_ordered_args(expr)
elif hasattr(expr, '__iter__'):
args = expr
else:
return
if span is not None:
if type(span) == slice:
args = args[span]
else:
try:
args = [args[span]]
except IndexError:
return
for arg in args:
if self._has(arg, attrs, types):
_select(path, arg)
_select(self._epath, expr)
return result
def epath(path, expr=None, func=None, args=None, kwargs=None):
"""
Manipulate parts of an expression selected by a path.
    This function allows one to manipulate large nested expressions in a
    single line of code, utilizing techniques similar to those applied in
    XML processing standards (e.g. XPath).
If ``func`` is ``None``, :func:`epath` retrieves elements selected by
the ``path``. Otherwise it applies ``func`` to each matching element.
Note that it is more efficient to create an EPath object and use the select
and apply methods of that object, since this will compile the path string
only once. This function should only be used as a convenient shortcut for
interactive use.
**Syntax**
select all : "/*"
Equivalent of ``for arg in args:``.
select slice : "/[0]" | "/[1:5]" | "/[1:5:2]"
Supports standard Python's slice syntax.
select by type : "/list" | "/list|tuple"
Emulates :func:`isinstance`.
select by attribute : "/__iter__?"
Emulates :func:`hasattr`.
**Parameters**
path : str | EPath
A path as a string or a compiled EPath.
expr : Basic | iterable
An expression or a container of expressions.
func : callable (optional)
A callable that will be applied to matching parts.
args : tuple (optional)
Additional positional arguments to ``func``.
kwargs : dict (optional)
Additional keyword arguments to ``func``.
**Examples**
>>> from sympy.simplify.epathtools import epath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = "/*/[0]/Symbol"
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> epath(path, expr)
[x, y]
>>> epath(path, expr, lambda expr: expr**2)
[((x**2, 1), 2), ((3, y**2), z)]
>>> path = "/*/*/Symbol"
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> epath(path, expr)
[x, x, y]
>>> epath(path, expr, lambda expr: 2*expr)
t + sin(2*x + 1) + cos(2*x + 2*y + E)
"""
_epath = EPath(path)
if expr is None:
return _epath
if func is None:
return _epath.select(expr)
else:
return _epath.apply(expr, func, args, kwargs)
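# Per the note above, compiling the path once and reusing the EPath object
# avoids re-parsing the path string (results mirror the docstring examples):
#
#     from sympy.simplify.epathtools import EPath
#     from sympy.abc import x, y, z
#     path = EPath("/*/[0]/Symbol")            # compiled once
#     expr = [((x, 1), 2), ((3, y), z)]
#     path.select(expr)                        # [x, y]
#     path.apply(expr, lambda e: e**2)         # [((x**2, 1), 2), ((3, y**2), z)]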
|
jendap/tensorflow
|
refs/heads/master
|
tensorflow/contrib/slim/python/slim/nets/vgg_test.py
|
25
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.vgg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim.nets import vgg
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class VGGATest(test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_a/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for is_training in [True, False]:
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_a(inputs, num_classes, is_training=is_training)
expected_names = [
'vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1',
'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2',
'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2',
'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2',
'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
vgg.vgg_a(inputs, num_classes)
expected_names = [
'vgg_a/conv1/conv1_1/weights',
'vgg_a/conv1/conv1_1/biases',
'vgg_a/conv2/conv2_1/weights',
'vgg_a/conv2/conv2_1/biases',
'vgg_a/conv3/conv3_1/weights',
'vgg_a/conv3/conv3_1/biases',
'vgg_a/conv3/conv3_2/weights',
'vgg_a/conv3/conv3_2/biases',
'vgg_a/conv4/conv4_1/weights',
'vgg_a/conv4/conv4_1/biases',
'vgg_a/conv4/conv4_2/weights',
'vgg_a/conv4/conv4_2/biases',
'vgg_a/conv5/conv5_1/weights',
'vgg_a/conv5/conv5_1/biases',
'vgg_a/conv5/conv5_2/weights',
'vgg_a/conv5/conv5_2/biases',
'vgg_a/fc6/weights',
'vgg_a/fc6/biases',
'vgg_a/fc7/weights',
'vgg_a/fc7/biases',
'vgg_a/fc8/weights',
'vgg_a/fc8/biases',
]
model_variables = [v.op.name for v in variables_lib.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.cached_session():
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.cached_session():
train_inputs = random_ops.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_a(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
variable_scope.get_variable_scope().reuse_variables()
eval_inputs = random_ops.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_a(
eval_inputs, is_training=False, spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = math_ops.reduce_mean(logits, [1, 2])
predictions = math_ops.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.cached_session() as sess:
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs)
sess.run(variables.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
class VGG16Test(test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_16/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_16/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for is_training in [True, False]:
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_16(inputs, num_classes, is_training=is_training)
expected_names = [
'vgg_16/conv1/conv1_1', 'vgg_16/conv1/conv1_2', 'vgg_16/pool1',
'vgg_16/conv2/conv2_1', 'vgg_16/conv2/conv2_2', 'vgg_16/pool2',
'vgg_16/conv3/conv3_1', 'vgg_16/conv3/conv3_2',
'vgg_16/conv3/conv3_3', 'vgg_16/pool3', 'vgg_16/conv4/conv4_1',
'vgg_16/conv4/conv4_2', 'vgg_16/conv4/conv4_3', 'vgg_16/pool4',
'vgg_16/conv5/conv5_1', 'vgg_16/conv5/conv5_2',
'vgg_16/conv5/conv5_3', 'vgg_16/pool5', 'vgg_16/fc6', 'vgg_16/fc7',
'vgg_16/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
vgg.vgg_16(inputs, num_classes)
expected_names = [
'vgg_16/conv1/conv1_1/weights',
'vgg_16/conv1/conv1_1/biases',
'vgg_16/conv1/conv1_2/weights',
'vgg_16/conv1/conv1_2/biases',
'vgg_16/conv2/conv2_1/weights',
'vgg_16/conv2/conv2_1/biases',
'vgg_16/conv2/conv2_2/weights',
'vgg_16/conv2/conv2_2/biases',
'vgg_16/conv3/conv3_1/weights',
'vgg_16/conv3/conv3_1/biases',
'vgg_16/conv3/conv3_2/weights',
'vgg_16/conv3/conv3_2/biases',
'vgg_16/conv3/conv3_3/weights',
'vgg_16/conv3/conv3_3/biases',
'vgg_16/conv4/conv4_1/weights',
'vgg_16/conv4/conv4_1/biases',
'vgg_16/conv4/conv4_2/weights',
'vgg_16/conv4/conv4_2/biases',
'vgg_16/conv4/conv4_3/weights',
'vgg_16/conv4/conv4_3/biases',
'vgg_16/conv5/conv5_1/weights',
'vgg_16/conv5/conv5_1/biases',
'vgg_16/conv5/conv5_2/weights',
'vgg_16/conv5/conv5_2/biases',
'vgg_16/conv5/conv5_3/weights',
'vgg_16/conv5/conv5_3/biases',
'vgg_16/fc6/weights',
'vgg_16/fc6/biases',
'vgg_16/fc7/weights',
'vgg_16/fc7/biases',
'vgg_16/fc8/weights',
'vgg_16/fc8/biases',
]
model_variables = [v.op.name for v in variables_lib.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.cached_session():
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.cached_session():
train_inputs = random_ops.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_16(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
variable_scope.get_variable_scope().reuse_variables()
eval_inputs = random_ops.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_16(
eval_inputs, is_training=False, spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = math_ops.reduce_mean(logits, [1, 2])
predictions = math_ops.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.cached_session() as sess:
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs)
sess.run(variables.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
class VGG19Test(test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_19/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_19/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for is_training in [True, False]:
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_19(inputs, num_classes, is_training=is_training)
expected_names = [
'vgg_19/conv1/conv1_1', 'vgg_19/conv1/conv1_2', 'vgg_19/pool1',
'vgg_19/conv2/conv2_1', 'vgg_19/conv2/conv2_2', 'vgg_19/pool2',
'vgg_19/conv3/conv3_1', 'vgg_19/conv3/conv3_2',
'vgg_19/conv3/conv3_3', 'vgg_19/conv3/conv3_4', 'vgg_19/pool3',
'vgg_19/conv4/conv4_1', 'vgg_19/conv4/conv4_2',
'vgg_19/conv4/conv4_3', 'vgg_19/conv4/conv4_4', 'vgg_19/pool4',
'vgg_19/conv5/conv5_1', 'vgg_19/conv5/conv5_2',
'vgg_19/conv5/conv5_3', 'vgg_19/conv5/conv5_4', 'vgg_19/pool5',
'vgg_19/fc6', 'vgg_19/fc7', 'vgg_19/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
vgg.vgg_19(inputs, num_classes)
expected_names = [
'vgg_19/conv1/conv1_1/weights',
'vgg_19/conv1/conv1_1/biases',
'vgg_19/conv1/conv1_2/weights',
'vgg_19/conv1/conv1_2/biases',
'vgg_19/conv2/conv2_1/weights',
'vgg_19/conv2/conv2_1/biases',
'vgg_19/conv2/conv2_2/weights',
'vgg_19/conv2/conv2_2/biases',
'vgg_19/conv3/conv3_1/weights',
'vgg_19/conv3/conv3_1/biases',
'vgg_19/conv3/conv3_2/weights',
'vgg_19/conv3/conv3_2/biases',
'vgg_19/conv3/conv3_3/weights',
'vgg_19/conv3/conv3_3/biases',
'vgg_19/conv3/conv3_4/weights',
'vgg_19/conv3/conv3_4/biases',
'vgg_19/conv4/conv4_1/weights',
'vgg_19/conv4/conv4_1/biases',
'vgg_19/conv4/conv4_2/weights',
'vgg_19/conv4/conv4_2/biases',
'vgg_19/conv4/conv4_3/weights',
'vgg_19/conv4/conv4_3/biases',
'vgg_19/conv4/conv4_4/weights',
'vgg_19/conv4/conv4_4/biases',
'vgg_19/conv5/conv5_1/weights',
'vgg_19/conv5/conv5_1/biases',
'vgg_19/conv5/conv5_2/weights',
'vgg_19/conv5/conv5_2/biases',
'vgg_19/conv5/conv5_3/weights',
'vgg_19/conv5/conv5_3/biases',
'vgg_19/conv5/conv5_4/weights',
'vgg_19/conv5/conv5_4/biases',
'vgg_19/fc6/weights',
'vgg_19/fc6/biases',
'vgg_19/fc7/weights',
'vgg_19/fc7/biases',
'vgg_19/fc8/weights',
'vgg_19/fc8/biases',
]
model_variables = [v.op.name for v in variables_lib.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.cached_session():
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.cached_session():
train_inputs = random_ops.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_19(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
variable_scope.get_variable_scope().reuse_variables()
eval_inputs = random_ops.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_19(
eval_inputs, is_training=False, spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = math_ops.reduce_mean(logits, [1, 2])
predictions = math_ops.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.cached_session() as sess:
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs)
sess.run(variables.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
test.main()
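# Why the fully-convolutional tests expect [batch, 2, 2, num_classes]
# (sketch of the slim VGG geometry: five stride-2 max-pools, then fc6 as
# a 7x7 VALID convolution):
#
#     def vgg_logits_spatial_size(input_size, num_pools=5, fc6_kernel=7):
#         size = input_size
#         for _ in range(num_pools):
#             size //= 2
#         return size - fc6_kernel + 1
#
#     vgg_logits_spatial_size(224)   # -> 1, squeezed away by spatial_squeeze
#     vgg_logits_spatial_size(256)   # -> 2, matching the tests above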
|
Seinlin/nv7fire-external-chromium
|
refs/heads/master
|
testing/gtest/scripts/upload_gtest.py
|
1963
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
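# Expected argv rewriting (sketch; paths and addresses illustrative):
#
#     upload_gtest.py --cc=a@x.com -m "msg"
#       -> upload.py --cc=a@x.com,googletestframework@googlegroups.com -m "msg"
#     upload_gtest.py -m "msg"
#       -> upload.py -m "msg" --cc=googletestframework@googlegroups.com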
|
paweljasinski/ironpython3
|
refs/heads/master
|
Src/StdLib/Lib/concurrent/futures/__init__.py
|
247
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
from concurrent.futures._base import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed)
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
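# Canonical usage sketch of the exports above: submit work to a pool and
# consume results as they complete.
#
#     from concurrent.futures import ThreadPoolExecutor, as_completed
#
#     def square(n):
#         return n * n
#
#     with ThreadPoolExecutor(max_workers=4) as pool:
#         futures = [pool.submit(square, n) for n in range(10)]
#         results = sorted(f.result() for f in as_completed(futures))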
|