| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null) |
|---|---|---|---|---|
jtpereyda/sulley
|
refs/heads/develop
|
sulley/ifuzz_logger.py
|
1
|
import abc
class IFuzzLogger(object):
"""
Abstract class for logging fuzz data.
Usage while testing:
1. Open test case.
2. Open test step.
3. Use other log methods.
IFuzzLogger provides the logging interface for the Sulley framework and
test writers.
The methods provided are meant to mirror functional test actions. Instead of
generic debug/info/warning methods, IFuzzLogger provides a means for logging
test cases, passes, failures, test steps, etc.
This hypothetical sample output gives an idea of how the logger should be
used:
Test Case: UDP.Header.Address 3300
Test Step: Fuzzing
Send: 45 00 13 ab 00 01 40 00 40 11 c9 ...
Test Step: Process monitor check
Check OK
Test Step: DNP Check
Send: ff ff ff ff ff ff 00 0c 29 d1 10 ...
Recv: 00 0c 29 d1 10 81 00 30 a7 05 6e ...
Check: Reply is as expected.
Check OK
Test Case: UDP.Header.Address 3301
Test Step: Fuzzing
Send: 45 00 13 ab 00 01 40 00 40 11 c9 ...
Test Step: Process monitor check
Check Failed: "Process returned exit code 1"
Test Step: DNP Check
Send: ff ff ff ff ff ff 00 0c 29 d1 10 ...
Recv: None
Check: Reply is as expected.
Check Failed
A test case is opened for each fuzzing case. A test step is opened for each
high-level test step. Test steps can include, for example::
* Fuzzing
* Set up (pre-fuzzing)
* Post-test cleanup
* Instrumentation checks
* Reset due to failure
Within a test step, a test may log data sent, data received, checks, check
results, and other information.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def open_test_case(self, test_case_id):
"""
Open a test case - i.e., a fuzzing mutation.
:param test_case_id: Test case name/number. Should be unique.
:return: None
"""
raise NotImplementedError
@abc.abstractmethod
def open_test_step(self, description):
"""
Open a test step - e.g., "Fuzzing", "Pre-fuzz", "Response Check."
:param description: Description of fuzzing step.
:return: None
"""
raise NotImplementedError
@abc.abstractmethod
def log_send(self, data):
"""
        Records data that is about to be sent to the target.
:param data: Transmitted data
:type data: bytes
:return: None
:rtype: None
"""
raise NotImplementedError
@abc.abstractmethod
def log_recv(self, data):
"""
Records data as having been received from the target.
:param data: Received data.
:type data: bytes
:return: None
:rtype: None
"""
raise NotImplementedError
@abc.abstractmethod
def log_check(self, description):
"""
Records a check on the system under test. AKA "instrumentation check."
        :param description: Description of the check.
:type description: str
:return: None
:rtype: None
"""
raise NotImplementedError
@abc.abstractmethod
def log_pass(self, description=""):
"""
Records a check that passed.
        :param description: Optional supplementary data.
:type description: str
:return: None
:rtype: None
"""
raise NotImplementedError
@abc.abstractmethod
def log_fail(self, description=""):
"""
Records a check that failed. This will flag a fuzzing case as a
potential bug or anomaly.
:param description: Optional supplementary data.
:type description: str
:return: None
:rtype: None
"""
raise NotImplementedError
@abc.abstractmethod
def log_info(self, description):
"""
        Catch-all method for logging test information.
:param description: Information.
:type description: str
:return: None
:rtype: None
"""
raise NotImplementedError
@abc.abstractmethod
def log_error(self, description):
"""
        Records an internal error. This informs the operator that the test was
        not completed successfully.
        :param description: Description of the error.
:type description: str
:return: None
:rtype: None
"""
raise NotImplementedError
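# ---------------------------------------------------------------------------
# Illustrative sketch only; not part of the original module. A minimal
# concrete subclass (hypothetical name: ConsoleFuzzLogger) showing how the
# open-case / open-step / log flow from the docstring above maps onto the
# abstract methods.
class ConsoleFuzzLogger(IFuzzLogger):
    def open_test_case(self, test_case_id):
        print("Test Case: %s" % test_case_id)
    def open_test_step(self, description):
        print(" Test Step: %s" % description)
    def log_send(self, data):
        print("  Send: %r" % data)
    def log_recv(self, data):
        print("  Recv: %r" % data)
    def log_check(self, description):
        print("  Check: %s" % description)
    def log_pass(self, description=""):
        print("  Check OK %s" % description)
    def log_fail(self, description=""):
        print("  Check Failed %s" % description)
    def log_info(self, description):
        print("  Info: %s" % description)
    def log_error(self, description):
        print("  Error: %s" % description)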
|
bigswitch/tempest
|
refs/heads/master
|
tempest/cmd/init.py
|
1
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import subprocess
import sys
from cliff import command
from oslo_log import log as logging
from six import moves
LOG = logging.getLogger(__name__)
TESTR_CONF = """[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \\
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \\
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-500} \\
${PYTHON:-python} -m subunit.run discover -t %s %s $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
group_regex=([^\.]*\.)*
"""
def get_tempest_default_config_dir():
"""Get default config directory of tempest
    Returns the correct default config dir, whether or not tempest is
    installed in a virtualenv.
Cases considered:
- no virtual env, python2: real_prefix and base_prefix not set
- no virtual env, python3: real_prefix not set, base_prefix set and
identical to prefix
- virtualenv, python2: real_prefix and prefix are set and different
- virtualenv, python3: real_prefix not set, base_prefix and prefix are
set and identical
- pyvenv, any python version: real_prefix not set, base_prefix and prefix
are set and different
:return: default config dir
"""
real_prefix = getattr(sys, 'real_prefix', None)
base_prefix = getattr(sys, 'base_prefix', None)
prefix = sys.prefix
global_conf_dir = '/etc/tempest'
if (real_prefix is None and
(base_prefix is None or base_prefix == prefix) and
os.path.isdir(global_conf_dir)):
# Probably not running in a virtual environment.
# NOTE(andreaf) we cannot distinguish this case from the case of
# a virtual environment created with virtualenv, and running python3.
# Also if it appears we are not in virtual env and fail to find
# global config: '/etc/tempest', fall back to
# '[sys.prefix]/etc/tempest'
return global_conf_dir
else:
conf_dir = os.path.join(prefix, 'etc/tempest')
if os.path.isdir(conf_dir):
return conf_dir
else:
            # NOTE: The prefix is derived from the path under which
            # pyconfig.h is installed. Some envs have it under /usr/include
            # rather than /usr/local/include, so prefix becomes /usr on such
            # envs. However, etc/tempest is installed under /usr/local, and
            # the above path logic misses it. This is a workaround for such
            # envs.
return os.path.join(prefix, 'local/etc/tempest')
class TempestInit(command.Command):
"""Setup a local working environment for running tempest"""
def get_parser(self, prog_name):
parser = super(TempestInit, self).get_parser(prog_name)
parser.add_argument('dir', nargs='?', default=os.getcwd())
parser.add_argument('--config-dir', '-c', default=None)
parser.add_argument('--show-global-config-dir', '-s',
action='store_true', dest='show_global_dir',
help="Print the global config dir location, "
"then exit")
return parser
def generate_testr_conf(self, local_path):
testr_conf_path = os.path.join(local_path, '.testr.conf')
top_level_path = os.path.dirname(os.path.dirname(__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
testr_conf = TESTR_CONF % (top_level_path, discover_path)
with open(testr_conf_path, 'w+') as testr_conf_file:
testr_conf_file.write(testr_conf)
def update_local_conf(self, conf_path, lock_dir, log_dir):
config_parse = moves.configparser.SafeConfigParser()
config_parse.optionxform = str
with open(conf_path, 'a+') as conf_file:
# Set local lock_dir in tempest conf
if not config_parse.has_section('oslo_concurrency'):
config_parse.add_section('oslo_concurrency')
config_parse.set('oslo_concurrency', 'lock_path', lock_dir)
# Set local log_dir in tempest conf
config_parse.set('DEFAULT', 'log_dir', log_dir)
# Set default log filename to tempest.log
config_parse.set('DEFAULT', 'log_file', 'tempest.log')
config_parse.write(conf_file)
def copy_config(self, etc_dir, config_dir):
shutil.copytree(config_dir, etc_dir)
def generate_sample_config(self, local_dir, config_dir):
conf_generator = os.path.join(config_dir,
'config-generator.tempest.conf')
subprocess.call(['oslo-config-generator', '--config-file',
conf_generator],
cwd=local_dir)
def create_working_dir(self, local_dir, config_dir):
# Create local dir if missing
if not os.path.isdir(local_dir):
LOG.debug('Creating local working dir: %s' % local_dir)
os.mkdir(local_dir)
        elif os.listdir(local_dir):
raise OSError("Directory you are trying to initialize already "
"exists and is not empty: %s" % local_dir)
lock_dir = os.path.join(local_dir, 'tempest_lock')
etc_dir = os.path.join(local_dir, 'etc')
config_path = os.path.join(etc_dir, 'tempest.conf')
log_dir = os.path.join(local_dir, 'logs')
testr_dir = os.path.join(local_dir, '.testrepository')
# Create lock dir
if not os.path.isdir(lock_dir):
LOG.debug('Creating lock dir: %s' % lock_dir)
os.mkdir(lock_dir)
# Create log dir
if not os.path.isdir(log_dir):
LOG.debug('Creating log dir: %s' % log_dir)
os.mkdir(log_dir)
# Create and copy local etc dir
self.copy_config(etc_dir, config_dir)
# Generate the sample config file
self.generate_sample_config(local_dir, config_dir)
# Update local confs to reflect local paths
self.update_local_conf(config_path, lock_dir, log_dir)
# Generate a testr conf file
self.generate_testr_conf(local_dir)
# setup local testr working dir
if not os.path.isdir(testr_dir):
subprocess.call(['testr', 'init'], cwd=local_dir)
def take_action(self, parsed_args):
config_dir = parsed_args.config_dir or get_tempest_default_config_dir()
if parsed_args.show_global_dir:
print("Global config dir is located at: %s" % config_dir)
sys.exit(0)
self.create_working_dir(parsed_args.dir, config_dir)
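# Illustrative usage sketch; not part of the original module. Registered as a
# cliff command, TempestInit is what backs "tempest init" on the command line,
# e.g. (workspace name hypothetical):
#
#   $ tempest init my-workspace          # creates etc/, logs/, tempest_lock/,
#                                        # .testr.conf and a local tempest.conf
#   $ tempest init --show-global-config-dir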
|
bcorbet/SickRage
|
refs/heads/master
|
lib/hachoir_parser/common/__init__.py
|
12133432
| |
rvs/gpdb
|
refs/heads/master
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/crashrecovery/commit_create_tests/trigger_sql/__init__.py
|
12133432
| |
erikr/django
|
refs/heads/master
|
tests/bash_completion/management/commands/__init__.py
|
12133432
| |
i02sopop/Kirinki
|
refs/heads/master
|
__init__.py
|
12133432
| |
anant-dev/django
|
refs/heads/master
|
django/contrib/gis/geos/io.py
|
588
|
"""
Module that holds classes for performing I/O operations on GEOS geometry
objects. Specifically, this has Python implementations of WKB/WKT
reader and writer classes.
"""
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.prototypes.io import (
WKBWriter, WKTWriter, _WKBReader, _WKTReader,
)
__all__ = ['WKBWriter', 'WKTWriter', 'WKBReader', 'WKTReader']
# Public classes for (WKB|WKT)Reader, which return GEOSGeometry
class WKBReader(_WKBReader):
def read(self, wkb):
"Returns a GEOSGeometry for the given WKB buffer."
return GEOSGeometry(super(WKBReader, self).read(wkb))
class WKTReader(_WKTReader):
def read(self, wkt):
"Returns a GEOSGeometry for the given WKT string."
return GEOSGeometry(super(WKTReader, self).read(wkt))
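# Illustrative usage sketch; not part of the original module. Guarded so the
# demo only runs when the file is executed directly (requires the GEOS
# library to be installed).
if __name__ == '__main__':
    geom = WKTReader().read('POINT (1 2)')   # returns a GEOSGeometry
    print(WKTWriter().write(geom))           # serializes it back to WKT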
|
DigitalCampus/django-oppia
|
refs/heads/master
|
settings/migrations/0016_setting_descriptions.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.utils.translation import ugettext_lazy as _
from settings import constants
def add_setting_descriptions(apps, schema_editor):
props = apps.get_model("settings", "SettingProperties")
setting_oppia_data_retention_years_desc(props)
setting_max_upload_size_desc(props)
setting_oppia_allow_self_registration_desc(props)
setting_oppia_android_on_google_play_desc(props)
setting_oppia_android_packageid_desc(props)
setting_oppia_badges_enabled_desc(props)
setting_oppia_points_enabled_desc(props)
setting_oppia_map_visualisation_enabled_desc(props)
setting_oppia_cartodb_account_desc(props)
setting_oppia_cartodb_key_desc(props)
setting_oppia_google_analytics_code_desc(props)
setting_oppia_google_analytics_domain_desc(props)
setting_oppia_google_analytics_enabled_desc(props)
setting_oppia_hostname_desc(props)
setting_oppia_ipstack_apikey_desc(props)
setting_oppia_show_gravatars_desc(props)
def setting_oppia_data_retention_years_desc(props):
prop = props.objects.get(key=constants.OPPIA_DATA_RETENTION_YEARS)
prop.description = _(u"The number of years for users data to be kept. \
Any users who have not logged in and not had any tracker activity \
in this number of years will be removed from Oppia, along with \
their activity data")
prop.save()
def setting_max_upload_size_desc(props):
prop = props.objects.get(key=constants.MAX_UPLOAD_SIZE)
prop.description = _(u"The maximum upload size, in bytes, of course \
files that will be allowed")
prop.save()
def setting_oppia_allow_self_registration_desc(props):
prop = props.objects.get(key=constants.OPPIA_ALLOW_SELF_REGISTRATION)
prop.description = _(u"Whether or not this Oppia server allows users \
to self register")
prop.save()
def setting_oppia_android_on_google_play_desc(props):
prop = props.objects.get(key=constants.OPPIA_ANDROID_ON_GOOGLE_PLAY)
prop.description = _(u"Whether or not this Oppia server has a specific \
app available on the Google Play Store")
prop.save()
def setting_oppia_android_packageid_desc(props):
prop = props.objects.get(key=constants.OPPIA_ANDROID_PACKAGEID)
prop.description = _(u"The java package id of the specific app on the \
Google Play Store")
prop.save()
def setting_oppia_badges_enabled_desc(props):
prop = props.objects.get(key=constants.OPPIA_BADGES_ENABLED)
prop.description = _(u"Whether or not badges are enabled for this \
Oppia implementation")
prop.save()
def setting_oppia_points_enabled_desc(props):
prop = props.objects.get(key=constants.OPPIA_POINTS_ENABLED)
prop.description = _(u"Whether or not points are enabled for this \
Oppia implementation")
prop.save()
def setting_oppia_map_visualisation_enabled_desc(props):
prop = props.objects.get(key=constants.OPPIA_MAP_VISUALISATION_ENABLED)
prop.description = _(u"Whether or not the map visualization is enabled \
for this Oppia implementation")
prop.save()
def setting_oppia_cartodb_account_desc(props):
prop = props.objects.get(key=constants.OPPIA_CARTODB_ACCOUNT)
prop.description = _(u"Username for the CartoDB account")
prop.save()
def setting_oppia_cartodb_key_desc(props):
prop = props.objects.get(key=constants.OPPIA_CARTODB_KEY)
prop.description = _(u"CartoDB account API key")
prop.save()
def setting_oppia_google_analytics_code_desc(props):
prop = props.objects.get(key=constants.OPPIA_GOOGLE_ANALYTICS_CODE)
prop.description = _(u"Google Analytics code, if enabled")
prop.save()
def setting_oppia_google_analytics_domain_desc(props):
prop = props.objects.get(key=constants.OPPIA_GOOGLE_ANALYTICS_DOMAIN)
prop.description = _(u"Google Analytics domain name, if enabled")
prop.save()
def setting_oppia_google_analytics_enabled_desc(props):
prop = props.objects.get(key=constants.OPPIA_GOOGLE_ANALYTICS_ENABLED)
prop.description = _(u"Whether or not Google Analytics is enabled")
prop.save()
def setting_oppia_hostname_desc(props):
prop = props.objects.get(key=constants.OPPIA_HOSTNAME)
prop.description = _(u"Domain/hostname for this Oppia server")
prop.save()
def setting_oppia_ipstack_apikey_desc(props):
prop = props.objects.get(key=constants.OPPIA_IPSTACK_APIKEY)
prop.description = _(u"IPStack API key")
prop.save()
def setting_oppia_show_gravatars_desc(props):
prop = props.objects.get(key=constants.OPPIA_SHOW_GRAVATARS)
prop.description = _(u"Whether or not to use Gravatars for users' \
profile pictures")
prop.save()
class Migration(migrations.Migration):
dependencies = [
('settings', '0015_data_retention_setting'),
]
operations = [
migrations.RunPython(add_setting_descriptions),
]
|
aubreystarktoller/lite-boolean-formulae
|
refs/heads/master
|
tests/test_equivalence.py
|
1
|
from lite_boolean_formulae import L
def test_reflexive():
assert L("x") == L("x")
def test_conjunction_with_negation():
assert ((L("x") & (~L("x"))) is False)
def test_disjunction_with_negation():
assert ((L("x") | (~L("x"))) is True)
def test_and_commutative():
assert (L("x") & L("y") & L("z")) == (L("z") & L("y") & L("x"))
def test_or_commutative():
assert (L("x") | L("y") | L("z")) == (L("z") | L("y") | L("x"))
def test_disjunction_over_conjunction():
assert (L("x") | (L("y") & L("z"))) == ((L("x") | L("y")) & (L("x") | L("z")))
def test_conjunction_over_disjunction():
assert (L("x") & (L("y") | L("z"))) == ((L("x") & L("y")) | (L("x") & L("z")))
def test_de_morgan_negation_of_conjunction():
assert (~(L("x") & L("y"))) == ((~L("x")) | (~L("y")))
def test_de_morgan_negation_of_disjunction():
assert (~(L("x") | L("y"))) == ((~L("x")) & (~L("y")))
def test_double_negation():
assert (~(~L("x"))) == L("x")
def test_true_and_literal():
assert (True & L("x")) == L("x") and (L("x") & True) == L("x")
def test_true_or_literal():
assert ((True | L("x")) is True) and ((L("x") | True ) is True)
def test_false_and_literal():
assert ((False & L("x")) is False) and ((L("x") & False) is False)
def test_false_or_literal():
assert (False | L("x")) == L("x") and (L("x") | False) == L("x")
def test_true_and_formula():
f = L("x") & L("y")
assert (True & f) == f and (f & True) == f
def test_true_or_formula():
f = L("x") | L("y")
assert ((True | f) is True) and ((f | True) is True)
def test_false_and_formula():
f = L("x") & L("y")
assert ((False & f) is False) and ((f & False) is False)
def test_false_or_formula():
f = L("x") | L("y")
assert (False | f) == f and (f | False) == f
def test_substitute_true_into_disjuction():
assert ((L("x") | L("y")).substitute("x", True)) is True
def test_substitute_false_into_conjunction():
assert ((L("x") & L("y")).substitute("x", False) is False)
def test_literal_xored_with_self():
assert ((L("x") ^ L("x")) is False)
def test_literal_xored_with_negated_self():
assert ((L("x") ^ ~L("x")) is True)
def test_xor_is_commutative():
assert (L("x") ^ L("y")) == (L("y") ^ L("x"))
def test_literal_xored_with_false():
assert ((L("x") ^ False) == L("x")) and ((False ^ L("x")) == L("x"))
def test_literal_xored_with_true():
assert ((L("x") ^ True) == ~L("x")) and ((True ^ L("x")) == ~L("x"))
|
seanxwzhang/LeetCode
|
refs/heads/master
|
300 Longest Increasing Subsequence/solution.py
|
1
|
# Given an unsorted array of integers, find the length of longest increasing subsequence.
# For example,
# Given [10, 9, 2, 5, 3, 7, 101, 18],
# The longest increasing subsequence is [2, 3, 7, 101], therefore the length is 4. Note that there may be more than one LIS combination, it is only necessary for you to return the length.
# Your algorithm should run in O(n^2) complexity.
# Follow up: Could you improve it to O(n log n) time complexity?
class Solution(object):
def subOptimalSolution(self, nums):
        l = [None] * len(nums)  # l[i] is the length of the LIS ending at nums[i]
res = 0
for i, val in enumerate(nums):
largest = 0
for j in xrange(i):
if l[j] > largest and nums[i] > nums[j]:
largest = l[j]
l[i] = largest + 1
if l[i] > res:
res = l[i]
return res
    # Find the index of the smallest element in arr that is >= target
    # Prerequisite: such an element exists and arr[0] < target
def biSearch(self, arr, target):
print('searching %d' % target)
l, r = 0, len(arr) - 1
while (r - l) > 1:
m = l + (r - l) / 2
print('arr[%d] is %d, arr[%d] is %d, arr[%d] is %d' % (l, arr[l], r, arr[r], m, arr[m]))
if arr[m] < target:
l = m
else:
r = m
return r
    def optimalSolution(self, nums):
        aux = []  # aux[i] is the smallest tail value among increasing subsequences of length i + 1
for num in nums:
print(aux)
if not aux:
aux = [num]
            # if num is smaller than or equal to all current tails, replace
            # the first tail (it starts a new list of length 1)
elif num <= aux[0]:
aux[0] = num
            # if num is greater than all current tails, append it to extend
            # the longest list
elif num > aux[-1]:
aux.append(num)
            # otherwise, find the smallest tail that is >= num and replace it
            # with num
else:
index = self.biSearch(aux, num)
print('index is %d' % index)
aux[index] = num
return len(aux)
def lengthOfLIS(self, nums):
        return self.optimalSolution(nums)
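# Illustrative alternative; not part of the original solution. The same
# patience-style O(n log n) idea using the standard bisect module instead of
# the hand-rolled biSearch above.
def length_of_lis_bisect(nums):
    import bisect
    tails = []  # tails[i] = smallest tail of an increasing subsequence of length i + 1
    for num in nums:
        i = bisect.bisect_left(tails, num)
        if i == len(tails):
            tails.append(num)  # num extends the longest subsequence found so far
        else:
            tails[i] = num     # num becomes a smaller tail for length i + 1
    return len(tails)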
if __name__ == "__main__":
s = Solution()
r = s.lengthOfLIS([4,10,4,3,8,9])
print(r)
|
sssllliang/edx-analytics-pipeline
|
refs/heads/master
|
edx/analytics/tasks/reports/finance_reports.py
|
1
|
"""Provide entry-point for generating finance reports."""
import luigi
from edx.analytics.tasks.reports.reconcile import (
TransactionReportTask, LoadInternalReportingOrderTransactionsToWarehouse
)
from edx.analytics.tasks.reports.ed_services_financial_report import (
LoadInternalReportingEdServicesReportToWarehouse
)
from edx.analytics.tasks.mapreduce import MapReduceJobTaskMixin
from edx.analytics.tasks.vertica_load import VerticaCopyTaskMixin
class BuildFinancialReportsTask(MapReduceJobTaskMixin, VerticaCopyTaskMixin, luigi.WrapperTask):
"""Provide entry-point for generating finance reports."""
# Instead of importing all of DatabaseImportMixin at this level, we just define
# what we need and are willing to pass through. That way the use of "credentials"
# for the output of the report data is not conflicting.
import_date = luigi.DateParameter()
# Redefine the overwrite parameter to change its default to True.
# This will cause the reports to reload when loading into internal reporting.
overwrite = luigi.BooleanParameter(default=True)
def requires(self):
yield (
TransactionReportTask(
import_date=self.import_date,
n_reduce_tasks=self.n_reduce_tasks,
),
LoadInternalReportingOrderTransactionsToWarehouse(
import_date=self.import_date,
n_reduce_tasks=self.n_reduce_tasks,
schema=self.schema,
credentials=self.credentials,
overwrite=self.overwrite,
),
LoadInternalReportingEdServicesReportToWarehouse(
import_date=self.import_date,
n_reduce_tasks=self.n_reduce_tasks,
schema=self.schema,
credentials=self.credentials,
overwrite=self.overwrite,
),
)
|
yavalvas/yav_com
|
refs/heads/master
|
build/matplotlib/lib/matplotlib/tests/test_simplification.py
|
1
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif, cleanup
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
from matplotlib import patches, path, transforms
from nose.tools import raises
import io
nan = np.nan
Path = path.Path
# NOTE: All of these tests assume that path.simplify is set to True
# (the default)
@image_comparison(baseline_images=['clipping'], remove_text=True)
def test_clipping():
t = np.arange(0.0, 2.0, 0.01)
s = np.sin(2*pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, linewidth=1.0)
ax.set_ylim((-0.20, -0.28))
@image_comparison(baseline_images=['overflow'], remove_text=True)
def test_overflow():
x = np.array([1.0,2.0,3.0,2.0e5])
y = np.arange(len(x))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x,y)
ax.set_xlim(xmin=2,xmax=6)
@image_comparison(baseline_images=['clipping_diamond'], remove_text=True)
def test_diamond():
x = np.array([0.0, 1.0, 0.0, -1.0, 0.0])
y = np.array([1.0, 0.0, -1.0, 0.0, 1.0])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_xlim(xmin=-0.6, xmax=0.6)
ax.set_ylim(ymin=-0.6, ymax=0.6)
@cleanup
def test_noise():
np.random.seed(0)
x = np.random.uniform(size=(5000,)) * 50
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 3884
@cleanup
def test_sine_plus_noise():
np.random.seed(0)
x = np.sin(np.linspace(0, np.pi * 2.0, 1000)) + np.random.uniform(size=(1000,)) * 0.01
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 876
@image_comparison(baseline_images=['simplify_curve'], remove_text=True)
def test_simplify_curve():
pp1 = patches.PathPatch(
Path([(0, 0), (1, 0), (1, 1), (nan, 1), (0, 0), (2, 0), (2, 2), (0, 0)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CLOSEPOLY]),
fc="none")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(pp1)
ax.set_xlim((0, 2))
ax.set_ylim((0, 2))
@image_comparison(baseline_images=['hatch_simplify'], remove_text=True)
def test_hatch():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(Rectangle((0, 0), 1, 1, fill=False, hatch="/"))
ax.set_xlim((0.45, 0.55))
ax.set_ylim((0.45, 0.55))
@image_comparison(baseline_images=['fft_peaks'], remove_text=True)
def test_fft_peaks():
fig = plt.figure()
t = arange(65536)
ax = fig.add_subplot(111)
p1 = ax.plot(abs(fft(sin(2*pi*.01*t)*blackman(len(t)))))
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 20
@cleanup
def test_start_with_moveto():
# Should be entirely clipped away to a single MOVETO
data = b"""
ZwAAAAku+v9UAQAA+Tj6/z8CAADpQ/r/KAMAANlO+v8QBAAAyVn6//UEAAC6ZPr/2gUAAKpv+v+8
BgAAm3r6/50HAACLhfr/ewgAAHyQ+v9ZCQAAbZv6/zQKAABepvr/DgsAAE+x+v/lCwAAQLz6/7wM
AAAxx/r/kA0AACPS+v9jDgAAFN36/zQPAAAF6Pr/AxAAAPfy+v/QEAAA6f36/5wRAADbCPv/ZhIA
AMwT+/8uEwAAvh77//UTAACwKfv/uRQAAKM0+/98FQAAlT/7/z0WAACHSvv//RYAAHlV+/+7FwAA
bGD7/3cYAABea/v/MRkAAFF2+//pGQAARIH7/6AaAAA3jPv/VRsAACmX+/8JHAAAHKL7/7ocAAAP
rfv/ah0AAAO4+/8YHgAA9sL7/8QeAADpzfv/bx8AANzY+/8YIAAA0OP7/78gAADD7vv/ZCEAALf5
+/8IIgAAqwT8/6kiAACeD/z/SiMAAJIa/P/oIwAAhiX8/4QkAAB6MPz/HyUAAG47/P+4JQAAYkb8
/1AmAABWUfz/5SYAAEpc/P95JwAAPmf8/wsoAAAzcvz/nCgAACd9/P8qKQAAHIj8/7cpAAAQk/z/
QyoAAAWe/P/MKgAA+aj8/1QrAADus/z/2isAAOO+/P9eLAAA2Mn8/+AsAADM1Pz/YS0AAMHf/P/g
LQAAtur8/10uAACr9fz/2C4AAKEA/f9SLwAAlgv9/8ovAACLFv3/QDAAAIAh/f+1MAAAdSz9/ycx
AABrN/3/mDEAAGBC/f8IMgAAVk39/3UyAABLWP3/4TIAAEFj/f9LMwAANm79/7MzAAAsef3/GjQA
ACKE/f9+NAAAF4/9/+E0AAANmv3/QzUAAAOl/f+iNQAA+a/9/wA2AADvuv3/XDYAAOXF/f+2NgAA
29D9/w83AADR2/3/ZjcAAMfm/f+7NwAAvfH9/w44AACz/P3/XzgAAKkH/v+vOAAAnxL+//04AACW
Hf7/SjkAAIwo/v+UOQAAgjP+/905AAB5Pv7/JDoAAG9J/v9pOgAAZVT+/606AABcX/7/7zoAAFJq
/v8vOwAASXX+/207AAA/gP7/qjsAADaL/v/lOwAALZb+/x48AAAjof7/VTwAABqs/v+LPAAAELf+
/788AAAHwv7/8TwAAP7M/v8hPQAA9df+/1A9AADr4v7/fT0AAOLt/v+oPQAA2fj+/9E9AADQA///
+T0AAMYO//8fPgAAvRn//0M+AAC0JP//ZT4AAKsv//+GPgAAojr//6U+AACZRf//wj4AAJBQ///d
PgAAh1v///c+AAB+Zv//Dz8AAHRx//8lPwAAa3z//zk/AABih///TD8AAFmS//9dPwAAUJ3//2w/
AABHqP//ej8AAD6z//+FPwAANb7//48/AAAsyf//lz8AACPU//+ePwAAGt///6M/AAAR6v//pj8A
AAj1//+nPwAA/////w=="""
import base64
if hasattr(base64, 'encodebytes'):
# Python 3 case
decodebytes = base64.decodebytes
else:
# Python 2 case
decodebytes = base64.decodestring
verts = np.fromstring(decodebytes(data), dtype='<i4')
verts = verts.reshape((len(verts) / 2, 2))
path = Path(verts)
segs = path.iter_segments(transforms.IdentityTransform(), clip=(0.0, 0.0, 100.0, 100.0))
segs = list(segs)
assert len(segs) == 1
assert segs[0][1] == Path.MOVETO
@cleanup
@raises(OverflowError)
def test_throw_rendering_complexity_exceeded():
rcParams['path.simplify'] = False
xx = np.arange(200000)
yy = np.random.rand(200000)
yy[1000] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xx, yy)
try:
fig.savefig(io.BytesIO())
finally:
rcParams['path.simplify'] = True
@image_comparison(baseline_images=['clipper_edge'], remove_text=True)
def test_clipper():
dat = (0, 1, 0, 2, 0, 3, 0, 4, 0, 5)
fig = plt.figure(figsize=(2, 1))
fig.subplots_adjust(left = 0, bottom = 0, wspace = 0, hspace = 0)
ax = fig.add_axes((0, 0, 1.0, 1.0), ylim = (0, 5), autoscale_on = False)
ax.plot(dat)
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlim(5, 9)
@image_comparison(baseline_images=['para_equal_perp'], remove_text=True)
def test_para_equal_perp():
x = np.array([0, 1, 2, 1, 0, -1, 0, 1] + [1] * 128)
y = np.array([1, 1, 2, 1, 0, -1, 0, 0] + [0] * 128)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x + 1, y + 1)
ax.plot(x + 1, y + 1, 'ro')
@image_comparison(baseline_images=['clipping_with_nans'])
def test_clipping_with_nans():
x = np.linspace(0, 3.14 * 2, 3000)
y = np.sin(x)
x[::100] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_ylim(-0.25, 0.25)
if __name__=='__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False)
|
huang4fstudio/django
|
refs/heads/master
|
django/core/cache/backends/locmem.py
|
586
|
"Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
@contextmanager
def dummy():
"""A context manager that does nothing special."""
yield
class LocMemCache(BaseCache):
def __init__(self, name, params):
BaseCache.__init__(self, params)
self._cache = _caches.setdefault(name, {})
self._expire_info = _expire_info.setdefault(name, {})
self._lock = _locks.setdefault(name, RWLock())
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
if self._has_expired(key):
self._set(key, pickled, timeout)
return True
return False
def get(self, key, default=None, version=None, acquire_lock=True):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = None
with (self._lock.reader() if acquire_lock else dummy()):
if not self._has_expired(key):
pickled = self._cache[key]
if pickled is not None:
try:
return pickle.loads(pickled)
except pickle.PickleError:
return default
with (self._lock.writer() if acquire_lock else dummy()):
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
if len(self._cache) >= self._max_entries:
self._cull()
self._cache[key] = value
self._expire_info[key] = self.get_backend_timeout(timeout)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
self._set(key, pickled, timeout)
def incr(self, key, delta=1, version=None):
with self._lock.writer():
value = self.get(key, version=version, acquire_lock=False)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
key = self.make_key(key, version=version)
pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
self._cache[key] = pickled
return new_value
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.reader():
if not self._has_expired(key):
return True
with self._lock.writer():
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return False
def _has_expired(self, key):
exp = self._expire_info.get(key, -1)
if exp is None or exp > time.time():
return False
return True
def _cull(self):
if self._cull_frequency == 0:
self.clear()
else:
doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
for k in doomed:
self._delete(k)
def _delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
del self._expire_info[key]
except KeyError:
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
self._delete(key)
def clear(self):
self._cache.clear()
self._expire_info.clear()
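# Illustrative configuration sketch; not part of the original module. The
# backend is selected through Django's standard CACHES setting, and LOCATION
# is the "name" that keys the global _caches/_expire_info/_locks dicts above:
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#           'LOCATION': 'unique-snowflake',
#       }
#   }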
|
campenberger/albackup
|
refs/heads/master
|
albackup/compare.py
|
1
|
from __future__ import print_function
import logging
import json
import os
from jinja2 import Environment,FileSystemLoader
from tempfile import NamedTemporaryFile
import sh
from albackup import loggerFactory
_getLogger=loggerFactory('compare')
_jina_env=Environment(loader=FileSystemLoader('templates'))
class DbCompare(object):
''' Test class to execute the WBSchemaDiff from the sqlworkbench
utilities to compare two schemas and report the differences.
'''
TEMPLATE="compare.sql"
def __init__(self,ref_cfg,target_cfg,sqlwb_dir):
''' Constructor:
        * ref_cfg - Configuration dictionary for the reference schema
* target_cfg - Configuration dictionary for the target schema
* sqlwb_dir - Install location of SQLWorkbench
'''
self.ref_cfg=ref_cfg
self.target_cfg=target_cfg
self.sqlwb_dir=os.path.abspath(sqlwb_dir)
self.logger=_getLogger('DbCompare')
def _make_template(self):
        ''' Method to generate an SQL instruction file
for the compare from the template
'''
template=_jina_env.get_template(self.TEMPLATE)
self._sql_cmdfile=NamedTemporaryFile(mode="w+")
cwd_dir=os.path.abspath(os.path.join(os.getcwd(),'diffs'))
if not os.path.exists(cwd_dir):
os.mkdir(cwd_dir)
context={
'ref': self.ref_cfg,
'target': self.target_cfg,
'sqlwb_dir': self.sqlwb_dir,
'cwd': cwd_dir
}
print(template.render(context),file=self._sql_cmdfile)
self.logger.info('Compare command file rendered to %s',self._sql_cmdfile.name)
self._sql_cmdfile.seek(0)
self.logger.debug('Compare script:\n%s',self._sql_cmdfile.read())
return self._sql_cmdfile
def _compare(self):
        ''' Method to launch the SQLWorkbench console application with
the generated sql file
'''
self.logger.info('Comparing database schemas... (takes a while)')
sqlwbconsole=sh.Command(os.path.join(self.sqlwb_dir,'sqlwbconsole.sh'))
output=str(sqlwbconsole("-script={}".format(self._sql_cmdfile.name),_in=[]))
        self.logger.debug('database compare script returned:\n%s',output)
self.logger.info('Results are in diff-%s.xml and diff-%s.html',self.ref_cfg['name'],self.ref_cfg['name'])
def run(self): # pragma: nocover
        ''' Runs the SQLWorkbench WBSchemaDiff based on the given configurations
and writes the result in diff-<name>.xml and diff-<name>.html files
'''
self._make_template()
self._compare()
self._sql_cmdfile.close()
if __name__ == '__main__': # pragma: nocover
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s:%(name)-40s:%(levelname)-7s:%(message)s"
)
logging.getLogger('sqlalchemy.engine').setLevel(logging.ERROR)
logging.getLogger('sh').setLevel(logging.ERROR)
logger=_getLogger()
cfg=None
with open('test.json') as fh:
cfg=json.load(fh)
logger.info('Read configuration from test.json')
ref=filter(lambda db: db['name']=='astar',cfg['databases'])[0]
logger.debug('Found ref cfg for astar: {}'.format(ref))
comp=DbCompare(ref,cfg['restore'],'../sqlworkbench')
comp.run()
|
chuckchen/spark
|
refs/heads/master
|
python/pyspark/tests/test_serializers.py
|
8
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import sys
import unittest
from pyspark import serializers
from pyspark.serializers import CloudPickleSerializer, CompressedSerializer, \
AutoBatchedSerializer, BatchedSerializer, AutoSerializer, NoOpSerializer, PairDeserializer, \
FlattenedValuesSerializer, CartesianDeserializer, PickleSerializer, UTF8Deserializer, \
MarshalSerializer
from pyspark.testing.utils import PySparkTestCase, read_int, write_int, ByteArrayOutput, \
have_numpy, have_scipy
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
from pyspark.cloudpickle import dumps
P2 = loads(dumps(P))
p3 = P2(1, 3)
self.assertEqual(p1, p3)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
# to be corrected with SPARK-11160
try:
import xmlrunner # type: ignore[import] # noqa: F401
except ImportError:
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
@unittest.skipIf(not have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
import numpy as np
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
stats_dict = s.asDict()
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())
stats_sample_dict = s.asDict(sample=True)
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
self.assertSequenceEqual(
[0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
self.assertSequenceEqual(
[0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
class SerializersTest(unittest.TestCase):
def test_chunked_stream(self):
original_bytes = bytearray(range(100))
for data_length in [1, 10, 100]:
for buffer_length in [1, 2, 3, 5, 20, 99, 100, 101, 500]:
dest = ByteArrayOutput()
stream_out = serializers.ChunkedStream(dest, buffer_length)
stream_out.write(original_bytes[:data_length])
stream_out.close()
num_chunks = int(math.ceil(float(data_length) / buffer_length))
# length for each chunk, and a final -1 at the very end
exp_size = (num_chunks + 1) * 4 + data_length
self.assertEqual(len(dest.buffer), exp_size)
dest_pos = 0
data_pos = 0
for chunk_idx in range(num_chunks):
chunk_length = read_int(dest.buffer[dest_pos:(dest_pos + 4)])
if chunk_idx == num_chunks - 1:
exp_length = data_length % buffer_length
if exp_length == 0:
exp_length = buffer_length
else:
exp_length = buffer_length
self.assertEqual(chunk_length, exp_length)
dest_pos += 4
dest_chunk = dest.buffer[dest_pos:dest_pos + chunk_length]
orig_chunk = original_bytes[data_pos:data_pos + chunk_length]
self.assertEqual(dest_chunk, orig_chunk)
dest_pos += chunk_length
data_pos += chunk_length
# ends with a -1
self.assertEqual(dest.buffer[-4:], write_int(-1))
if __name__ == "__main__":
from pyspark.tests.test_serializers import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
GauriGNaik/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/wptserve/wptserve/__init__.py
|
329
|
from server import WebTestHttpd, WebTestServer, Router
from request import Request
from response import Response
|
pombredanne/django-url-filter
|
refs/heads/master
|
test_project/many_to_many/alchemy.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import backref, relationship
from sqlalchemy.schema import ForeignKey, Table
from ..alchemy import Base
class Publication(Base):
__tablename__ = 'many_to_many_publication'
id = Column(Integer, primary_key=True)
title = Column(String(30))
@property
def pk(self):
return self.id
publication_article_association_table = Table(
'many_to_many_article_publications',
Base.metadata,
Column('id', Integer),
Column('publication_id', Integer, ForeignKey('many_to_many_publication.id')),
Column('article_id', Integer, ForeignKey('many_to_many_article.id')),
)
class Article(Base):
__tablename__ = 'many_to_many_article'
id = Column(Integer, primary_key=True)
headline = Column(String(100))
publications = relationship(
Publication,
secondary=publication_article_association_table,
backref=backref('articles', uselist=True),
uselist=True,
)
@property
def pk(self):
return self.id
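# Illustrative usage sketch; not part of the original module. Given a session
# bound to an engine with these tables created, the many-to-many link is
# populated through the plain list attributes on either side (object names
# below are hypothetical):
#
#   article = Article(headline='Hello')
#   article.publications.append(Publication(title='Daily'))
#   session.add(article)
#   session.commit()
#   article.publications[0].articles   # back-populated via the backref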
|
GageGaskins/osf.io
|
refs/heads/develop
|
scripts/analytics/comments.py
|
35
|
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from utils import plot_dates, mkdirp
comment_collection = database['comment']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def main():
dates = [
record['date_created']
for record in comment_collection.find({}, {'date_created': True})
]
plot_dates(dates)
plt.title('comments ({0} total)'.format(len(dates)))
plt.savefig(os.path.join(FIG_PATH, 'comment-actions.png'))
plt.close()
if __name__ == '__main__':
main()
|
sassoftware/rbuild
|
refs/heads/master
|
scripts/news.py
|
1
|
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import codecs
import os
import re
import subprocess
import sys
import textwrap
PRODUCT_NAME = "rBuild"
HEADINGS = [
('feature', 'New Features'),
('api', 'API Changes'),
('bugfix', 'Bug Fixes'),
('internal', 'Internal Changes'),
]
KINDS = set(x[0] for x in HEADINGS)
NEWSDIR = 'NEWS.src'
RE_ISSUE = re.compile('^[A-Z0-9]+-\d+')
def git(args):
args = ['git'] + list(args)
proc = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
if proc.returncode:
sys.exit("git exited with status code %s" % proc.returncode)
return stdout
def ls_files():
return set(
git(['ls-tree', '--name-only', 'HEAD', NEWSDIR + '/']
).splitlines())
def ls_changed():
return set(x[3:] for x in
git(['status', '--porcelain', NEWSDIR + '/']
).splitlines())
def main():
rootdir = os.path.realpath(__file__ + '/../..')
os.chdir(rootdir)
if not os.path.isdir(NEWSDIR):
sys.exit("Can't find news directory")
args = sys.argv[1:]
if args:
command = args.pop(0)
else:
command = 'preview'
if command == 'generate':
generate()
elif command == 'preview':
out, htmlOut, _ = preview()
print 'Text Version:\n'
for line in out:
print line
print 'Html Version:\n'
for line in htmlOut:
print line
else:
sys.exit("Usage: %s <preview|generate>" % sys.argv[0])
def preview(modifiedOK=True):
existing = ls_files()
changed = ls_changed()
ok = existing - changed
kind_map = {}
files = set()
for filename in sorted(os.listdir(NEWSDIR)):
path = '/'.join((NEWSDIR, filename))
if filename[0] == '.' or '.' not in filename:
continue
issue, kind = filename.rsplit('.', 1)
if kind not in KINDS:
print >> sys.stderr, "Ignoring '%s' due to unknown type '%s'" % (
filename, kind)
continue
if path in changed:
if modifiedOK:
print >> sys.stderr, "warning: '%s' is modified." % (path,)
else:
sys.exit("File '%s' is modified and must be committed first." %
(path,))
elif path not in ok:
if modifiedOK:
print >> sys.stderr, "warning: '%s' is not checked in." % (
path,)
else:
sys.exit("File '%s' is not checked in and must be "
"committed first." % (path,))
else:
files.add(path)
entries = [x.replace('\n', ' ') for x in
codecs.open(path, 'r', 'utf8').read().split('\n\n')]
for n, line in enumerate(entries):
entry = line.strip()
if entry:
kind_map.setdefault(kind, []).append((issue, n, entry))
out = ['Changes in %s:' % _getVersion()]
htmlOut = ['<p>%s %s is a maintenance release</p>' % (PRODUCT_NAME,
_getVersion())]
for kind, heading in HEADINGS:
entries = kind_map.get(kind, ())
if not entries:
continue
out.append(' o %s:' % heading)
htmlOut.append('<strong>%s:</strong>' % heading)
htmlOut.append("<ul>")
for issue, _, entry in sorted(entries):
htmlEntry = ' <li>' + entry
if RE_ISSUE.match(issue):
entry += ' (%s)' % issue
htmlEntry += ' (<a href="https://opensource.sas.com/its/browse/%s">%s</a>)' % (issue,issue)
lines = textwrap.wrap(entry, 66)
out.append(' * %s' % (lines.pop(0),))
for line in lines:
out.append(' %s' % (line,))
htmlEntry += '</li>'
htmlOut.append(htmlEntry)
out.append('')
htmlOut.append('</ul>')
return out, htmlOut, files
def generate():
version = _getVersion()
old = codecs.open('NEWS', 'r', 'utf8').read()
if '@NEW@' in old:
sys.exit("error: NEWS contains a @NEW@ section")
elif ('Changes in %s:' % version) in old:
sys.exit("error: NEWS already contains a %s section" % version)
lines, htmlLines, files = preview(modifiedOK=False)
new = '\n'.join(lines) + '\n'
newHtml = '\n'.join(htmlLines) + '\n'
doc = new + old
codecs.open('NEWS', 'w', 'utf8').write(doc)
codecs.open('NEWS.html', 'w', 'utf8').write(newHtml)
sys.stdout.write(new)
print >> sys.stderr, "Updated NEWS"
print >> sys.stderr, "Wrote NEWS.html"
git(['rm'] + sorted(files))
git(['add', 'NEWS'])
print >> sys.stderr, "Deleted %s news fragments" % len(files)
os.system("git status")
def _getVersion():
f = os.popen("make show-version")
return f.read().strip()
main()
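# Illustrative usage sketch; not part of the original script. News fragments
# live in NEWS.src as files named <ISSUE>.<kind>, with <kind> one of
# feature/api/bugfix/internal (e.g. NEWS.src/FOO-123.bugfix, name
# hypothetical), and the script is driven as:
#
#   $ scripts/news.py preview    # render text and HTML versions to stdout
#   $ scripts/news.py generate   # prepend to NEWS, write NEWS.html, git add/rm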
|
schets/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_color_quantization.py
|
297
|
# -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
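# Illustrative aside; not part of the original example. Because `labels`
# indexes rows of the codebook, the per-pixel loop in recreate_image can be
# replaced by a single fancy-indexing expression producing the same array:
def recreate_image_vectorized(codebook, labels, w, h):
    """Vectorized equivalent of recreate_image (sketch)."""
    return codebook[labels].reshape(w, h, -1)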
|
arannasousa/pagseguro_xml
|
refs/heads/master
|
pagseguro_xml/assinatura/__init__.py
|
2
|
# coding=utf-8
# ---------------------------------------------------------------
# Developer: Arannã Sousa Santos
# Month: 12
# Year: 2015
# Project: pagseguro_xml
# e-mail: asousas@live.com
# ---------------------------------------------------------------
from .v2 import ApiPagSeguroAssinatura as ApiPagSeguroAssinatura_v2, CONST as CONST_v2
|
cbanta/pjproject
|
refs/heads/master
|
pjsip-apps/src/confbot/confbot.py
|
33
|
# $Id$
#
# SIP Conference Bot
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import pjsua as pj
import string
import sys
CFG_FILE = "config"
INFO = 1
TRACE = 2
# Call callback. This would just forward the event to the Member class
class CallCb(pj.CallCallback):
def __init__(self, member, call=None):
pj.CallCallback.__init__(self, call)
self.member = member
def on_state(self):
self.member.on_call_state(self.call)
def on_media_state(self):
self.member.on_call_media_state(self.call)
def on_dtmf_digit(self, digits):
self.member.on_call_dtmf_digit(self.call, digits)
def on_transfer_request(self, dst, code):
return self.member.on_call_transfer_request(self.call, dst, code)
def on_transfer_status(self, code, reason, final, cont):
return self.member.on_call_transfer_status(self.call, code, reason, final, cont)
def on_replace_request(self, code, reason):
return self.member.on_call_replace_request(self.call, code, reason)
def on_replaced(self, new_call):
self.member.on_call_replaced(self.call, new_call)
def on_typing(self, is_typing):
self.member.on_typing(is_typing, call=self.call)
def on_pager(self, mime_type, body):
self.member.on_pager(mime_type, body, call=self.call)
def on_pager_status(self, body, im_id, code, reason):
self.member.on_pager_status(body, im_id, code, reason, call=self.call)
# Buddy callback. This would just forward the event to Member class
class BuddyCb(pj.BuddyCallback):
def __init__(self, member, buddy=None):
pj.BuddyCallback.__init__(self, buddy)
self.member = member
def on_pager(self, mime_type, body):
self.member.on_pager(mime_type, body, buddy=self.buddy)
def on_pager_status(self, body, im_id, code, reason):
self.member.on_pager_status(body, im_id, code, reason, buddy=self.buddy)
def on_state(self):
self.member.on_pres_state(self.buddy)
def on_typing(self, is_typing):
self.member.on_typing(is_typing, buddy=self.buddy)
##############################################################################
#
#
# This class represents an individual room member (in chat, voice conference, or both)
#
#
class Member:
def __init__(self, bot, uri):
self.uri = uri
self.bot = bot
self.call = None
self.buddy = None
self.bi = pj.BuddyInfo()
self.in_chat = False
self.in_voice = False
self.im_error = False
self.html = False
def __str__(self):
str = string.ljust(self.uri, 30) + " -- "
if self.buddy:
bi = self.buddy.info()
str = str + bi.online_text
else:
str = str + "Offline"
str = str + " ["
if (self.in_voice):
str = str + " voice"
if (self.in_chat):
str = str + " chat"
if (self.html):
str = str + " html"
else:
str = str + " plain"
if (self.im_error):
str = str + " im_error"
str = str + "]"
return str
def join_call(self, call):
if self.call:
self.call.hangup(603, "You have been disconnected for making another call")
self.call = call
call.set_callback(CallCb(self, call))
msg = "%(uri)s is attempting to join the voice conference" % \
{'uri': self.uri}
self.bot.DEBUG(msg + "\n", INFO)
self.bot.broadcast_pager(None, msg)
def join_chat(self):
if not self.buddy:
self.bot.DEBUG(self.uri + " joining chatroom...\n", INFO)
self.buddy = self.bot.acc.add_buddy(self.uri)
self.buddy.set_callback(BuddyCb(self, self.buddy))
self.buddy.subscribe()
else:
self.bot.DEBUG(self.uri + " already in chatroom, resubscribing..\n", INFO)
self.buddy.subscribe()
def send_pager(self, body, mime="text/plain"):
self.bot.DEBUG("send_pager() to " + self.uri)
if self.in_chat and not self.im_error and self.buddy:
if self.html:
#This will make us receive html!
#mime = "text/html"
                body = body.replace("<", "&lt;")
                body = body.replace(">", "&gt;")
                body = body.replace('"', "&quot;")
body = body.replace("\n", "<BR>\n")
self.buddy.send_pager(body, content_type=mime)
self.bot.DEBUG("..sent\n")
else:
self.bot.DEBUG("..not sent!\n")
def on_call_state(self, call):
ci = call.info()
if ci.state==pj.CallState.DISCONNECTED:
if self.in_voice:
msg = "%(uri)s has left the voice conference (%(1)d/%(2)s)" % \
{'uri': self.uri, '1': ci.last_code, '2': ci.last_reason}
self.bot.DEBUG(msg + "\n", INFO)
self.bot.broadcast_pager(None, msg)
self.in_voice = False
self.call = None
self.bot.on_member_left(self)
elif ci.state==pj.CallState.CONFIRMED:
msg = "%(uri)s has joined the voice conference" % \
{'uri': self.uri}
self.bot.DEBUG(msg + "\n", INFO)
self.bot.broadcast_pager(None, msg)
def on_call_media_state(self, call):
self.bot.DEBUG("Member.on_call_media_state\n")
ci = call.info()
if ci.conf_slot!=-1:
if not self.in_voice:
msg = self.uri + " call media is active"
self.bot.broadcast_pager(None, msg)
self.in_voice = True
self.bot.add_to_voice_conf(self)
else:
if self.in_voice:
msg = self.uri + " call media is inactive"
self.bot.broadcast_pager(None, msg)
self.in_voice = False
def on_call_dtmf_digit(self, call, digits):
msg = "%(uri)s sent DTMF digits %(dig)s" % \
{'uri': self.uri, 'dig': digits}
self.bot.broadcast_pager(None, msg)
def on_call_transfer_request(self, call, dst, code):
msg = "%(uri)s is transferring the call to %(dst)s" % \
{'uri': self.uri, 'dst': dst}
self.bot.broadcast_pager(None, msg)
return 202
def on_call_transfer_status(self, call, code, reason, final, cont):
msg = "%(uri)s call transfer status is %(code)d/%(res)s" % \
{'uri': self.uri, 'code': code, 'res': reason}
self.bot.broadcast_pager(None, msg)
return True
def on_call_replace_request(self, call, code, reason):
msg = "%(uri)s is requesting call replace" % \
{'uri': self.uri}
self.bot.broadcast_pager(None, msg)
return (code, reason)
def on_call_replaced(self, call, new_call):
msg = "%(uri)s call is replaced" % \
{'uri': self.uri}
self.bot.broadcast_pager(None, msg)
def on_pres_state(self, buddy):
old_bi = self.bi
self.bi = buddy.info()
msg = "%(uri)s status is %(st)s" % \
{'uri': self.uri, 'st': self.bi.online_text}
self.bot.DEBUG(msg + "\n", INFO)
self.bot.broadcast_pager(self, msg)
if self.bi.sub_state==pj.SubscriptionState.ACTIVE:
if not self.in_chat:
self.in_chat = True
buddy.send_pager("Welcome to chatroom")
self.bot.broadcast_pager(self, self.uri + " has joined the chat room")
else:
self.in_chat = True
elif self.bi.sub_state==pj.SubscriptionState.NULL or \
self.bi.sub_state==pj.SubscriptionState.TERMINATED or \
self.bi.sub_state==pj.SubscriptionState.UNKNOWN:
self.buddy.delete()
self.buddy = None
if self.in_chat:
self.in_chat = False
self.bot.broadcast_pager(self, self.uri + " has left the chat room")
else:
self.in_chat = False
self.bot.on_member_left(self)
def on_typing(self, is_typing, call=None, buddy=None):
if is_typing:
msg = self.uri + " is typing..."
else:
msg = self.uri + " has stopped typing"
self.bot.broadcast_pager(self, msg)
def on_pager(self, mime_type, body, call=None, buddy=None):
if not self.bot.handle_cmd(self, None, body):
msg = self.uri + ": " + body
self.bot.broadcast_pager(self, msg, mime_type)
def on_pager_status(self, body, im_id, code, reason, call=None, buddy=None):
self.im_error = (code/100 != 2)
##############################################################################
#
#
# The Bot instance (singleton)
#
#
class Bot(pj.AccountCallback):
def __init__(self):
pj.AccountCallback.__init__(self, None)
self.lib = pj.Lib()
self.acc = None
self.calls = []
self.members = {}
self.cfg = None
def DEBUG(self, msg, level=TRACE):
print msg,
def helpstring(self):
return """
--h[elp] Display this help screen
--j[oin] Join the chat room
--html on|off Set to receive HTML or plain text
Participant commands:
--s[how] Show confbot settings
--leave Leave the chatroom
--l[ist] List all members
Admin commands:
--a[dmin] <CMD> Where <CMD> are:
list List the admins
add <URI> Add URI as admin
del <URI> Remove URI as admin
rr Reregister account to server
call <URI> Make call to the URI and add to voice conf
dc <URI> Disconnect call to URI
hold <URI> Hold call with that URI
update <URI> Send UPDATE to call with that URI
reinvite <URI> Send re-INVITE to call with that URI
"""
def listmembers(self):
msg = ""
for uri, m in self.members.iteritems():
msg = msg + str(m) + "\n"
return msg
def showsettings(self):
ai = self.acc.info()
msg = """
ConfBot status and settings:
URI: %(uri)s
Status: %(pres)s
Reg Status: %(reg_st)d
Reg Reason: %(reg_res)s
""" % {'uri': ai.uri, 'pres': ai.online_text, \
'reg_st': ai.reg_status, 'reg_res': ai.reg_reason}
return msg
def main(self, cfg_file):
try:
cfg = self.cfg = __import__(cfg_file)
self.lib.init(ua_cfg=cfg.ua_cfg, log_cfg=cfg.log_cfg, media_cfg=cfg.media_cfg)
self.lib.set_null_snd_dev()
transport = None
if cfg.udp_cfg:
transport = self.lib.create_transport(pj.TransportType.UDP, cfg.udp_cfg)
if cfg.tcp_cfg:
t = self.lib.create_transport(pj.TransportType.TCP, cfg.tcp_cfg)
if not transport:
transport = t
self.lib.start()
if cfg.acc_cfg:
self.DEBUG("Creating account %(uri)s..\n" % {'uri': cfg.acc_cfg.id}, INFO)
self.acc = self.lib.create_account(cfg.acc_cfg, cb=self)
else:
self.DEBUG("Creating account for %(t)s..\n" % \
{'t': transport.info().description}, INFO)
self.acc = self.lib.create_account_for_transport(transport, cb=self)
self.acc.set_basic_status(True)
# Wait for ENTER before quitting
print "Press q to quit or --help/--h for help"
            while True:
                cmd = sys.stdin.readline().strip(" \t\r\n")
                if not self.handle_cmd(None, None, cmd):
                    if cmd == "q":
                        break
self.lib.destroy()
self.lib = None
except pj.Error, e:
print "Exception: " + str(e)
if self.lib:
self.lib.destroy()
self.lib = None
def broadcast_pager(self, exclude_member, body, mime_type="text/plain"):
self.DEBUG("Broadcast: " + body + "\n")
for uri, m in self.members.iteritems():
if m != exclude_member:
m.send_pager(body, mime_type)
def add_to_voice_conf(self, member):
if not member.call:
return
src_ci = member.call.info()
self.DEBUG("bot.add_to_voice_conf\n")
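        # Bridge the new member's conference slot with every other member
        # whose call media is already active, in both directions (full-mesh audio).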
for uri, m in self.members.iteritems():
if m==member:
continue
if not m.call:
continue
dst_ci = m.call.info()
if dst_ci.media_state==pj.MediaState.ACTIVE and dst_ci.conf_slot!=-1:
self.lib.conf_connect(src_ci.conf_slot, dst_ci.conf_slot)
self.lib.conf_connect(dst_ci.conf_slot, src_ci.conf_slot)
def on_member_left(self, member):
if not member.call and not member.buddy:
del self.members[member.uri]
del member
def handle_admin_cmd(self, member, body):
if member and self.cfg.admins and not member.uri in self.cfg.admins:
member.send_pager("You are not admin")
return
args = body.split()
msg = ""
if len(args)==1:
args.append(" ")
if args[1]=="list":
if not self.cfg.admins:
msg = "Everyone is admin!"
else:
msg = str(self.cfg.admins)
elif args[1]=="add":
if len(args)!=3:
msg = "Usage: add <URI>"
else:
self.cfg.admins.append(args[2])
msg = args[2] + " added as admin"
elif args[1]=="del":
if len(args)!=3:
msg = "Usage: del <URI>"
elif args[2] not in self.cfg.admins:
msg = args[2] + " is not admin"
else:
self.cfg.admins.remove(args[2])
msg = args[2] + " has been removed from admins"
elif args[1]=="rr":
msg = "Reregistering.."
self.acc.set_registration(True)
elif args[1]=="call":
if len(args)!=3:
msg = "Usage: call <URI>"
else:
uri = args[2]
try:
call = self.acc.make_call(uri)
except pj.Error, e:
msg = "Error: " + str(e)
call = None
if call:
if not uri in self.members:
m = Member(self, uri)
self.members[m.uri] = m
else:
m = self.members[uri]
msg = "Adding " + m.uri + " to voice conference.."
m.join_call(call)
elif args[1]=="dc" or args[1]=="hold" or args[1]=="update" or args[1]=="reinvite":
if len(args)!=3:
msg = "Usage: " + args[1] + " <URI>"
else:
uri = args[2]
if not uri in self.members:
msg = "Member not found/URI doesn't match (note: case matters!)"
else:
m = self.members[uri]
if m.call:
if args[1]=="dc":
msg = "Disconnecting.."
m.call.hangup(603, "You're disconnected by admin")
elif args[1]=="hold":
msg = "Holding the call"
m.call.hold()
elif args[1]=="update":
msg = "Sending UPDATE"
m.call.update()
elif args[1]=="reinvite":
msg = "Sending re-INVITE"
m.call.reinvite()
else:
                        msg = "That member is not in a call"
else:
msg = "Unknown admin command " + body
        if member:
            member.send_pager(msg)
        else:
            print msg
def handle_cmd(self, member, from_uri, body):
body = body.strip(" \t\r\n")
msg = ""
handled = True
if body=="--l" or body=="--list":
msg = self.listmembers()
if msg=="":
msg = "Nobody is here"
elif body[0:3]=="--s":
msg = self.showsettings()
elif body[0:6]=="--html" and member:
            if body[6:].strip() == "off":
member.html = False
else:
member.html = True
elif body=="--h" or body=="--help":
msg = self.helpstring()
elif body=="--leave":
if not member or not member.buddy:
msg = "You are not in chatroom"
else:
member.buddy.unsubscribe()
elif body[0:3]=="--j":
            if from_uri not in self.members:
                m = Member(self, from_uri)
                self.members[m.uri] = m
            else:
                m = self.members[from_uri]
            self.DEBUG("Adding " + m.uri + " to chatroom\n")
            m.join_chat()
elif body[0:3]=="--a":
self.handle_admin_cmd(member, body)
handled = True
else:
handled = False
if msg:
if member:
member.send_pager(msg)
elif from_uri:
                self.acc.send_pager(from_uri, msg)
else:
print msg
return handled
def on_incoming_call(self, call):
self.DEBUG("on_incoming_call from %(uri)s\n" % {'uri': call.info().remote_uri}, INFO)
ci = call.info()
        if ci.remote_uri not in self.members:
            m = Member(self, ci.remote_uri)
            self.members[m.uri] = m
        else:
            m = self.members[ci.remote_uri]
        m.join_call(call)
        call.answer(200)
def on_incoming_subscribe(self, buddy, from_uri, contact_uri, pres_obj):
        self.DEBUG("on_incoming_subscribe from %(uri)s\n" % {'uri': from_uri}, INFO)
return (200, 'OK')
def on_reg_state(self):
ai = self.acc.info()
self.DEBUG("Registration state: %(code)d/%(reason)s\n" % \
{'code': ai.reg_status, 'reason': ai.reg_reason}, INFO)
if ai.reg_status/100==2 and ai.reg_expires > 0:
self.acc.set_basic_status(True)
def on_pager(self, from_uri, contact, mime_type, body):
body = body.strip(" \t\r\n")
if not self.handle_cmd(None, from_uri, body):
            self.acc.send_pager(from_uri, "You have not joined the chat room. Type '--join' to join or '--help' for help")
def on_pager_status(self, to_uri, body, im_id, code, reason):
pass
def on_typing(self, from_uri, contact, is_typing):
pass
##############################################################################
#
#
# main()
#
#
if __name__ == "__main__":
bot = Bot()
bot.main(CFG_FILE)
|
Andrew-Katcha/storyteller
|
refs/heads/master
|
env/lib/python3.4/site-packages/pip/_vendor/progress/counter.py
|
510
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
message = ''
hide_cursor = True
def update(self):
self.write(str(self.index))
class Countdown(WriteMixin, Progress):
hide_cursor = True
def update(self):
self.write(str(self.remaining))
class Stack(WriteMixin, Progress):
phases = (u' ', u'▁', u'▂', u'▃', u'▄', u'▅', u'▆', u'▇', u'█')
hide_cursor = True
def update(self):
nphases = len(self.phases)
i = min(nphases - 1, int(self.progress * nphases))
self.write(self.phases[i])
class Pie(Stack):
phases = (u'○', u'◔', u'◑', u'◕', u'●')
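
# Usage sketch (illustrative, not part of the original module): drive a
# Counter by calling next() once per processed item, then finish().
#
#     counter = Counter(message='Processed ')
#     for _ in range(100):
#         counter.next()
#     counter.finish()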
|
acrsilva/animated-zZz-machine
|
refs/heads/master
|
lib/tablaDistancias.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
sys.path.insert(0, '../lib')
from pyqtgraph.Qt import QtGui
from PyQt4.QtGui import *
import colores
class TablaDistancias(QTableWidget):
def __init__(self, data, *args):
QTableWidget.__init__(self, *args)
self.data = data
self.rellenar()
self.resizeColumnsToContents()
self.resizeRowsToContents()
    # Fill the lower triangle of the table and highlight the smallest
    # distance in each row
    def rellenar(self):
        i = self.data.shape[0] - 2
        while i >= 0:
            j = i
            if j == 0:
                min_col = j
            else:
                min_col = j - 1
            while j >= 0:
                if self.data[i+1][j] < self.data[i+1][min_col]:
                    min_col = j
                self.setItem(i, j, QTableWidgetItem(format(self.data[i+1][j], '.1f')))
                j -= 1
            self.item(i, min_col).setBackground(QtGui.QColor(colores.marcatabla))
            i -= 1
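# Usage sketch (assumptions: `distancias` is an n x n NumPy distance matrix,
# the arguments after it are the QTableWidget row/column counts, and a
# QApplication already exists):
#
#     app = QApplication([])
#     tabla = TablaDistancias(distancias, 4, 4)  # for a 5x5 matrix
#     tabla.show()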
|
CivilHub/CivilHub
|
refs/heads/master
|
staticpages/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
dr-nate/msmbuilder
|
refs/heads/master
|
msmbuilder/io/gather_metadata.py
|
9
|
# Author: Matthew Harrigan <matthew.harrigan@outlook.com>
# Contributors:
# Copyright (c) 2016, Stanford University
# All rights reserved.
import glob
import os
import re
import warnings
import mdtraj as md
import pandas as pd
class ParseWarning(UserWarning):
pass
class _Parser(object):
def parse_fn(self, fn):
raise NotImplementedError
@property
def index(self):
raise NotImplementedError
class GenericParser(_Parser):
"""Parse trajectories in a fully configurable manner
Parameters
----------
fn_re : str
Regular expression with capture groups to transform trajectory
filenames into keys
group_names : list of str
Capture group names (to serve as MultiIndex names)
group_transforms : list of functions
Apply these functions to capture groups
top_fn : str
Topology filename
step_ps : int
Timestep of frames in picoseconds
"""
def __init__(self,
fn_re,
group_names,
group_transforms,
top_fn,
step_ps,
):
self.fn_re = re.compile(fn_re)
self.group_names = group_names
self.group_transforms = group_transforms
self.top_fn = top_fn
self.step_ps = step_ps
        if not os.path.exists(top_fn):
            warnings.warn("Topology file doesn't actually exist! "
                          "You may (will) run into issues later when you "
                          "try to load it.", ParseWarning)
assert len(group_names) == len(group_transforms)
assert len(group_names) == self.fn_re.groups
@property
def index(self):
return list(self.group_names)
def parse_fn(self, fn):
meta = {
'traj_fn': fn,
'top_fn': self.top_fn,
'top_abs_fn': os.path.abspath(self.top_fn),
}
try:
with md.open(fn) as f:
meta['nframes'] = len(f)
except Exception as e:
warnings.warn("Could not determine the number of frames for {}: {}"
.format(fn, e), ParseWarning)
if self.step_ps is not None:
meta['step_ps'] = self.step_ps
# Get indices
ma = self.fn_re.search(fn)
if ma is None:
            raise ValueError("Filename {} did not match the "
                             "regular expression {}".format(fn, self.fn_re))
meta.update({gn: transform(ma.group(gi))
for gn, transform, gi
in zip(self.group_names, self.group_transforms,
range(1, len(self.group_names) + 1))
})
return meta
class NumberedRunsParser(GenericParser):
"""Parse trajectories that are numbered with integers.
Parameters
----------
traj_fmt : str
A format string with {run} in it that gives the filename to look
for. {run} will be captured and turned into an integer.
top_fn : str
Topology filename
step_ps : int
Trajectory frame step in picoseconds
"""
def __init__(self, traj_fmt="trajectory-{run}.xtc", top_fn="",
step_ps=None):
# Test the input
try:
traj_fmt.format(run=0)
except:
raise ValueError("Invalid format string {}".format(traj_fmt))
# Build a regex from format string
s1, s2 = re.split(r'\{run\}', traj_fmt)
capture_group = r'(\d+)'
fn_re = re.escape(s1) + capture_group + re.escape(s2)
# Call generic
super(NumberedRunsParser, self).__init__(
fn_re=fn_re,
group_names=['run'],
group_transforms=[int],
top_fn=top_fn,
step_ps=step_ps
)
class HierarchyParser(GenericParser):
"""Parse a hierarchical index from files nested in directories
A trajectory with path:
PROJ9704.new/RUN4/CLONE10.xtc
will be given an index of
('PROJ9704.new', 'RUN4', 'CLONE10.xtc')
If you set the flag ignore_fext=True, it will be given an index of
('PROJ9704', 'RUN4', 'CLONE10')
Parameters
----------
levels : list of str
Level names
n_levels : int
Number of levels. Either this or levels must be provided (but
not both). The levels will be named i0, i1, ... i(n-1)
top_fn : str
Topology filename
step_ps : int
Number of picoseconds per frame
ignore_fext : bool
Ignore file extensions. If set to true, this will fail if there is
more than one "." per file/directory name. Anything after a "." is
considered a file extension and will be ignored; including in
directory names
"""
def __init__(self, levels=None, n_levels=None, top_fn="", step_ps=None,
ignore_fext=False):
if (levels is None) == (n_levels is None):
raise ValueError("Please specify levels or n_levels, but not both")
if levels is None:
levels = ["i{i}".format(i=i) for i in range(n_levels)]
if ignore_fext:
# regex notes:
# 1. (?:...) means non-capturing group
subre = r'([a-zA-Z0-9_\-]+)(?:\.[a-zA-Z0-9]+)?'
else:
subre = r'([a-zA-Z0-9_\.\-]+)'
fn_re = r'\/'.join(subre for _ in levels)
super(HierarchyParser, self).__init__(
fn_re=fn_re,
group_names=levels,
group_transforms=[str for _ in levels],
top_fn=top_fn,
step_ps=step_ps
)
def gather_metadata(fn_glob, parser):
"""Given a glob and a parser object, create a metadata dataframe.
Parameters
----------
fn_glob : str
Glob string to find trajectory files.
parser : descendant of _Parser
Object that handles conversion of filenames to metadata rows.
"""
meta = pd.DataFrame(parser.parse_fn(fn) for fn in glob.iglob(fn_glob))
return meta.set_index(parser.index).sort_index()
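# A minimal usage sketch (the file names and topology below are hypothetical):
# gather metadata for runs named trajectory-0.xtc, trajectory-1.xtc, ...
# into a DataFrame indexed by run number.
if __name__ == "__main__":
    parser = NumberedRunsParser(traj_fmt="trajectory-{run}.xtc",
                                top_fn="top.pdb", step_ps=200)
    meta = gather_metadata("trajectory-*.xtc", parser)
    print(meta)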
|
open-mmlab/mmdetection
|
refs/heads/master
|
configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py
|
1
|
_base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
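# Note: relative to the 1x base schedule (12 epochs, LR steps at [8, 11]),
# this 2x config doubles training to 24 epochs and drops the LR at 16 and 22.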
|
eenchev/idea-note-taking-app
|
refs/heads/master
|
env/lib/python2.7/site-packages/jinja2/meta.py
|
222
|
# -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
This module implements various functions that exposes information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
from jinja2._compat import string_types, iteritems
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
CodeGenerator.__init__(self, environment, '<introspection>',
'<introspection>')
self.undeclared_identifiers = set()
def write(self, x):
"""Don't write."""
def enter_frame(self, frame):
"""Remember all undeclared identifiers."""
CodeGenerator.enter_frame(self, frame)
for _, (action, param) in iteritems(frame.symbols.loads):
if action == 'resolve':
self.undeclared_identifiers.add(param)
def find_undeclared_variables(ast):
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast) == set(['bar'])
True
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment)
codegen.visit(ast)
return codegen.undeclared_identifiers
def find_referenced_templates(ast):
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
yielded.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
>>> list(meta.find_referenced_templates(ast))
['layout.html', None]
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
nodes.Include)):
if not isinstance(node.template, nodes.Const):
# a tuple with some non consts in there
if isinstance(node.template, (nodes.Tuple, nodes.List)):
for template_name in node.template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
if isinstance(template_name.value, string_types):
yield template_name.value
# something dynamic in there
else:
yield None
# something dynamic we don't know about here
else:
yield None
continue
# constant is a basestring, direct template name
if isinstance(node.template.value, string_types):
yield node.template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and \
isinstance(node.template.value, (tuple, list)):
for template_name in node.template.value:
if isinstance(template_name, string_types):
yield template_name
# something else we don't care about, we could warn here
else:
yield None
|
windedge/odoo
|
refs/heads/8.0
|
addons/l10n_in_hr_payroll/report/report_hr_yearly_salary_detail.py
|
374
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.report import report_sxw
from openerp.osv import osv
class employees_yearly_salary_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(employees_yearly_salary_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'get_employee': self.get_employee,
'get_employee_detail': self.get_employee_detail,
'cal_monthly_amt': self.cal_monthly_amt,
'get_periods': self.get_periods,
'get_total': self.get_total,
'get_allow': self.get_allow,
'get_deduct': self.get_deduct,
})
self.context = context
def get_periods(self, form):
self.mnths = []
# Get start year-month-date and end year-month-date
first_year = int(form['date_from'][0:4])
last_year = int(form['date_to'][0:4])
first_month = int(form['date_from'][5:7])
last_month = int(form['date_to'][5:7])
no_months = (last_year-first_year) * 12 + last_month - first_month + 1
current_month = first_month
current_year = first_year
# Get name of the months from integer
mnth_name = []
for count in range(0, no_months):
m = datetime.date(current_year, current_month, 1).strftime('%b')
mnth_name.append(m)
self.mnths.append(str(current_month) + '-' + str(current_year))
if current_month == 12:
current_month = 0
                current_year += 1
current_month = current_month + 1
for c in range(0, (12-no_months)):
mnth_name.append('')
self.mnths.append('')
return [mnth_name]
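    # Example (sketch): for date_from='2011-06-01' and date_to='2011-08-31'
    # this sets self.mnths to ['6-2011', '7-2011', '8-2011'] padded with ''
    # out to 12 entries, and returns [['Jun', 'Jul', 'Aug', '', ..., '']].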
def get_employee(self, form):
return self.pool.get('hr.employee').browse(self.cr,self.uid, form.get('employee_ids', []), context=self.context)
def get_employee_detail(self, form, obj):
self.allow_list = []
self.deduct_list = []
self.total = 0.00
gross = False
net = False
payslip_lines = self.cal_monthly_amt(form, obj.id)
        for cat_lines in payslip_lines:
            for line in cat_lines:
                if line[0] == "Gross":
                    gross = line
                elif line[0] == "Net":
                    net = line
                elif line[13] > 0.0 and line[0] != "Net":
                    self.total += line[len(line) - 1]
                    self.allow_list.append(line)
                elif line[13] < 0.0:
                    self.total += line[len(line) - 1]
                    self.deduct_list.append(line)
if gross:
self.allow_list.append(gross)
if net:
self.deduct_list.append(net)
return None
def cal_monthly_amt(self, form, emp_id):
category_obj = self.pool.get('hr.salary.rule.category')
result = []
res = []
salaries = {}
self.cr.execute('''SELECT rc.code, pl.name, sum(pl.total), \
to_char(date_to,'mm-yyyy') as to_date FROM hr_payslip_line as pl \
LEFT JOIN hr_salary_rule_category AS rc on (pl.category_id = rc.id) \
LEFT JOIN hr_payslip as p on pl.slip_id = p.id \
LEFT JOIN hr_employee as emp on emp.id = p.employee_id \
WHERE p.employee_id = %s \
GROUP BY rc.parent_id, pl.sequence, pl.id, pl.category_id,pl.name,p.date_to,rc.code \
ORDER BY pl.sequence, rc.parent_id''',(emp_id,))
salary = self.cr.fetchall()
for category in salary:
if category[0] not in salaries:
salaries.setdefault(category[0], {})
salaries[category[0]].update({category[1]: {category[3]: category[2]}})
elif category[1] not in salaries[category[0]]:
salaries[category[0]].setdefault(category[1], {})
salaries[category[0]][category[1]].update({category[3]: category[2]})
else:
salaries[category[0]][category[1]].update({category[3]: category[2]})
category_ids = category_obj.search(self.cr,self.uid, [], context=self.context)
categories = category_obj.read(self.cr, self.uid, category_ids, ['code'], context=self.context)
for code in map(lambda x: x['code'], categories):
if code in salaries:
res = self.salary_list(salaries[code])
result.append(res)
return result
def salary_list(self, salaries):
cat_salary_all = []
for category_name,amount in salaries.items():
cat_salary = []
total = 0.0
cat_salary.append(category_name)
for mnth in self.mnths:
                if mnth != 'None':
if len(mnth) != 7:
mnth = '0' + str(mnth)
if mnth in amount and amount[mnth]:
cat_salary.append(amount[mnth])
total += amount[mnth]
else:
cat_salary.append(0.00)
else:
cat_salary.append('')
cat_salary.append(total)
cat_salary_all.append(cat_salary)
return cat_salary_all
def get_allow(self):
return self.allow_list
def get_deduct(self):
return self.deduct_list
def get_total(self):
return self.total
class wrapped_report_payslip(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_hryearlysalary'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_hryearlysalary'
_wrapped_report_class = employees_yearly_salary_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cs-hse-projects/DataSpider_Dubov
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
setup(
name = "TennisSpider",
version = "0.31",
packages = ['TennisSpider'],
author = "Dmitry Dubov",
author_email = "dmitry.s.dubov@gmail.com",
description = "Package for parsing info about tennis results and stats.",
license = "PSF",
keywords = "Tennis, Results",
url = "https://github.com/cs-hse-projects/DataSpider_Dubov",
entry_points={
'console_scripts':
['get_tennis=TennisSpider.main:main']
}
)
|
hefen1/chromium
|
refs/heads/master
|
tools/telemetry/telemetry/core/platform/ios_device.py
|
3
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import subprocess
from telemetry.core import platform
from telemetry.core.platform import device
class IOSDevice(device.Device):
def __init__(self):
super(IOSDevice, self).__init__(name='ios', guid='ios')
@classmethod
def GetAllConnectedDevices(cls):
return []
def _IsIosDeviceAttached():
devices = subprocess.check_output('system_profiler SPUSBDataType', shell=True)
for line in devices.split('\n'):
if line and re.match(r'\s*(iPod|iPhone|iPad):', line):
return True
return False
def FindAllAvailableDevices(_):
"""Returns a list of available devices.
"""
# TODO(baxley): Add support for all platforms possible. Probably Linux,
# probably not Windows.
if platform.GetHostPlatform().GetOSName() != 'mac':
return []
if not _IsIosDeviceAttached():
return []
return [IOSDevice()]
|
hchen1202/django-react
|
refs/heads/master
|
virtualenv/lib/python3.6/site-packages/django/db/backends/mysql/compiler.py
|
691
|
from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_subquery_condition(self, alias, columns, compiler):
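        # Renders "(alias.col1, alias.col2, ...) IN (<subquery SQL>)" so this
        # compiler's query can be used as a WHERE condition in an outer query.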
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
sql, params = self.as_sql()
return '(%s) IN (%s)' % (', '.join('%s.%s' % (qn(alias), qn2(column)) for column in columns), sql), params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
|
skybet/jenkins-job-wrecker
|
refs/heads/master
|
jenkins_job_wrecker/modules/handlers.py
|
1
|
import jenkins_job_wrecker.modules.base
from jenkins_job_wrecker.registry import Registry
class Handlers(jenkins_job_wrecker.modules.base.Base):
component = 'handlers'
def gen_yml(self, yml_parent, data):
for child in data:
handler_name = child.tag.lower()
settings = []
try:
self.registry.dispatch(self.component, handler_name, child, settings)
if not settings:
continue
for setting in settings:
key, value = setting
if key in yml_parent:
yml_parent[key].append(value[0])
else:
yml_parent[key] = value
except Exception:
print 'last called %s' % handler_name
raise
# Handle "<actions/>"
def actions(top, parent):
# Nothing to do if it's empty.
# Otherwise...
    if list(top):
raise NotImplementedError("Don't know how to handle a "
"non-empty <actions> element.")
# Handle "<authToken>tokenvalue</authToken>"
def authtoken(top, parent):
parent.append(['auth-token', top.text])
# Handle "<description>my cool job</description>"
def description(top, parent):
parent.append(['description', top.text])
# Handle "<keepDependencies>false</keepDependencies>"
def keepdependencies(top, parent):
# JJB cannot handle any other value than false, here.
# There is no corresponding YAML option.
pass
# Handle "<canRoam>true</canRoam>"
def canroam(top, parent):
# JJB doesn't have an explicit YAML setting for this; instead, it
# infers it from the "node" parameter. So there's no need to handle the
# XML here.
pass
# Handle "<disabled>false</disabled>"
def disabled(top, parent):
parent.append(['disabled', top.text == 'true'])
# Handle "<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>" NOQA
def blockbuildwhendownstreambuilding(top, parent):
parent.append(['block-downstream', top.text == 'true'])
# Handle "<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>" NOQA
def blockbuildwhenupstreambuilding(top, parent):
parent.append(['block-upstream', top.text == 'true'])
def concurrentbuild(top, parent):
parent.append(['concurrent', top.text == 'true'])
def axes(top, parent):
axes = []
mapper = {
'hudson.matrix.LabelExpAxis': 'label-expression',
'hudson.matrix.LabelAxis': 'slave',
'hudson.matrix.TextAxis': 'user-defined',
'jenkins.plugins.shiningpanda.matrix.PythonAxis': 'python',
}
for child in top:
try:
axis = {'type': mapper[child.tag]}
except KeyError:
raise NotImplementedError("cannot handle XML %s" % child.tag)
for axis_element in child:
if axis_element.tag == 'name':
axis['name'] = axis_element.text
if axis_element.tag == 'values':
values = []
for value_element in axis_element:
values.append(value_element.text)
axis['values'] = values
axes.append({'axis': axis})
parent.append(['axes', axes])
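# Example (sketch): a matrix axis such as
#   <hudson.matrix.TextAxis><name>PYTHON</name>
#     <values><string>2.7</string></values></hudson.matrix.TextAxis>
# is emitted as:
#   axes: [{'axis': {'type': 'user-defined', 'name': 'PYTHON', 'values': ['2.7']}}]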
def executionstrategy(top, parent):
strategy = {}
for child in top:
if child.tag == 'runSequentially':
strategy['sequential'] = (child.text == 'true')
elif child.tag == 'sorter':
# Is there anything but NOOP?
pass
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append(['execution-strategy', strategy])
# Handle "<logrotator>...</logrotator>"'
def logrotator(top, parent):
logrotate = {}
for child in top:
if child.tag == 'daysToKeep':
logrotate['daysToKeep'] = child.text
elif child.tag == 'numToKeep':
logrotate['numToKeep'] = child.text
elif child.tag == 'artifactDaysToKeep':
logrotate['artifactDaysToKeep'] = child.text
elif child.tag == 'artifactNumToKeep':
logrotate['artifactNumToKeep'] = child.text
elif child.tag == 'discardOnlyOnSuccess':
logrotate['discardOnlyOnSuccess'] = child.text
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append(['logrotate', logrotate])
# Handle "<combinationFilter>a != "b"</combinationFilter>"
def combinationfilter(top, parent):
parent.append(['combination-filter', top.text])
# Handle "<assignedNode>server.example.com</assignedNode>"
def assignednode(top, parent):
parent.append(['node', top.text])
# Handle "<displayName>my cool job</displayName>"
def displayname(top, parent):
parent.append(['display-name', top.text])
# Handle "<quietPeriod>5</quietPeriod>"
def quietperiod(top, parent):
parent.append(['quiet-period', top.text])
# Handle "<scmCheckoutRetryCount>8</scmCheckoutRetryCount>"
def scmcheckoutretrycount(top, parent):
parent.append(['retry-count', top.text])
def customworkspace(top, parent):
parent.append(['workspace', top.text])
def jdk(top, parent):
parent.append(['jdk', top.text])
def definition(top, parent):
    # sub-level "definition" data
    definition = {}
    parent.append(['definition', definition])
    reg = Registry()
    handlers = Handlers(reg)
    handlers.gen_yml(definition, top)
|
RoelAdriaans-B-informed/website
|
refs/heads/10.0
|
website_sale_order_company/models/__init__.py
|
33
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import website
from . import sale_order
|
hortonworks/hortonworks-sandbox
|
refs/heads/master
|
desktop/core/ext-py/Django-1.2.3/django/contrib/localflavor/uk/forms.py
|
313
|
"""
UK-specific Form helpers
"""
import re
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class UKPostcodeField(CharField):
"""
A form field that validates its input is a UK postcode.
The regular expression used is sourced from the schema for British Standard
BS7666 address types: http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd
The value is uppercased and a space added in the correct place, if required.
"""
default_error_messages = {
'invalid': _(u'Enter a valid postcode.'),
}
outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
space_regex = re.compile(r' *(%s)$' % incode_pattern)
def clean(self, value):
value = super(UKPostcodeField, self).clean(value)
if value == u'':
return value
postcode = value.upper().strip()
# Put a single space before the incode (second part).
postcode = self.space_regex.sub(r' \1', postcode)
if not self.postcode_regex.search(postcode):
raise ValidationError(self.error_messages['invalid'])
return postcode
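    # Example (sketch): UKPostcodeField().clean(u'sw1a1aa') returns
    # u'SW1A 1AA' -- the value is uppercased and a space is inserted
    # before the three-character incode.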
class UKCountySelect(Select):
"""
A Select widget that uses a list of UK Counties/Regions as its choices.
"""
def __init__(self, attrs=None):
from uk_regions import UK_REGION_CHOICES
super(UKCountySelect, self).__init__(attrs, choices=UK_REGION_CHOICES)
class UKNationSelect(Select):
"""
A Select widget that uses a list of UK Nations as its choices.
"""
def __init__(self, attrs=None):
from uk_regions import UK_NATIONS_CHOICES
super(UKNationSelect, self).__init__(attrs, choices=UK_NATIONS_CHOICES)
|
SurajShah525/pythonprogramming
|
refs/heads/master
|
problems/recursion/RecursiveFibonnaci.py
|
1
|
import sys
sys.setrecursionlimit(1500)
def fib(n):
if n < 2:
return n
return fib(n-2) + fib(n-1)
print map(fib, range(0,10))
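# A memoized variant (sketch): caching results in a dict means each value is
# computed once, turning the exponential recursion into a linear-time one.
_memo = {0: 0, 1: 1}
def fib_memo(n):
    if n not in _memo:
        _memo[n] = fib_memo(n - 2) + fib_memo(n - 1)
    return _memo[n]
print map(fib_memo, range(0, 10))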
|
mavit/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vmware/vmware_guest.py
|
5
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_guest
short_description: Manages virtual machines in vCenter
description: >
This module can be used to create new virtual machines from templates or other virtual machines,
manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc.,
modify various virtual machine components like network, disk, customization etc.,
rename a virtual machine and remove a virtual machine with associated components.
version_added: '2.2'
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
requirements:
- python >= 2.6
- PyVmomi
notes:
- Please make sure that the user used for vmware_guest has the correct level of privileges.
- For example, following is the list of minimum privileges required by users to create virtual machines.
- " DataStore > Allocate Space"
- " Virtual Machine > Configuration > Add New Disk"
- " Virtual Machine > Configuration > Add or Remove Device"
- " Virtual Machine > Inventory > Create New"
- " Network > Assign Network"
- " Resource > Assign Virtual Machine to Resource Pool"
- "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
- Tested on vSphere 5.5, 6.0, 6.5 and 6.7
    - Use SCSI disks instead of IDE when you want to resize online disks by specifying a SCSI controller
- "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
options:
state:
description:
- Specify the state the virtual machine should be in.
- 'If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine
configurations conforms to task arguments.'
- 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine
is removed with its associated components.'
- 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
      and virtual machine does not exist, then the virtual machine is deployed with given parameters.'
- 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on,
then the specified virtual machine is powered on.'
- 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off,
then the specified virtual machine is powered off.'
- 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.'
- 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.'
- 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.'
- 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.'
default: present
choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ]
name:
description:
- Name of the virtual machine to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
- 'If multiple virtual machines with same name exists, then C(folder) is required parameter to
identify uniqueness of the virtual machine.'
- This parameter is required, if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
      and the virtual machine does not exist.
- This parameter is case sensitive.
required: yes
name_match:
description:
- If multiple virtual machines matching the name, use the first or last found.
default: 'first'
choices: [ first, last ]
uuid:
description:
- UUID of the virtual machine to manage if known, this is VMware's unique identifier.
- This is required if C(name) is not supplied.
    - If the virtual machine does not exist, then this parameter is ignored.
- Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
template:
description:
- Template or existing virtual machine used to create new virtual machine.
- If this value is not set, virtual machine is created without using a template.
- If the virtual machine already exists, this parameter will be ignored.
- This parameter is case sensitive.
- You can also specify template or VM UUID for identifying source. version_added 2.8. Use C(hw_product_uuid) from M(vmware_guest_facts) as UUID value.
aliases: [ 'template_src' ]
is_template:
description:
- Flag the instance as a template.
- This will mark the given virtual machine as template.
default: 'no'
type: bool
version_added: '2.3'
folder:
description:
- Destination folder, absolute path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- This parameter is case sensitive.
    - This parameter is required while deploying a new virtual machine. version_added 2.5.
- 'If multiple machines are found with same name, this parameter is used to identify
uniqueness of the virtual machine. version_added 2.5'
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
hardware:
description:
- Manage virtual machine's hardware attributes.
- All parameters case sensitive.
- 'Valid attributes are:'
- ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.'
- ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running.
version_added: 2.5'
- ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.'
- ' - C(memory_mb) (integer): Amount of memory in MB.'
- ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5'
- ' - C(num_cpus) (integer): Number of CPUs.'
- ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. Value should be multiple of C(num_cpus).'
- ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).'
- ' - C(memory_reservation) (integer): Amount of memory in MB to set resource limits for memory. version_added: 2.5'
- " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine
will always be equal to the virtual machine's memory size. version_added: 2.5"
- ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machines.
version_added: 2.5.'
- ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB.
version_added: 2.5'
- ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual
machine. Unit is MB. version_added: 2.5'
- ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz.
version_added: 2.5'
- ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine.
Unit is MHz. version_added: 2.5'
- ' - C(version) (integer): The Virtual machine hardware versions. Default is 10 (ESXi 5.5 and onwards).
Please check VMware documentation for correct virtual machine hardware version.
Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given
version then no action is taken. version_added: 2.6'
- ' - C(boot_firmware) (string): Choose which firmware should be used to boot the virtual machine.
Allowed values are "bios" and "efi". version_added: 2.7'
guest_id:
description:
- Set the guest ID.
- This parameter is case sensitive.
- 'Examples:'
- " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'"
    - " virtual machine with CentOS 64 bit, will be 'centos64Guest'"
- " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'"
- This field is required when creating a virtual machine.
- >
Valid values are referenced here:
U(http://pubs.vmware.com/vsphere-6-5/topic/com.vmware.wssdk.apiref.doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
version_added: '2.3'
disk:
description:
- A list of disks to add.
- This parameter is case sensitive.
- Resizing disks is not supported.
- Removing existing disks of the virtual machine is not supported.
- 'Valid attributes are:'
- ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.'
- ' - C(type) (string): Valid values are:'
- ' - C(thin) thin disk'
- ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5'
- ' Default: C(None) thick disk, no eagerzero.'
- ' - C(datastore) (string): Datastore to use for the disk. If C(autoselect_datastore) is enabled, filter datastore selection.'
    - ' - C(autoselect_datastore) (bool): select the least used datastore. Specify only if C(datastore) is not specified.'
- ' - C(disk_mode) (string): Type of disk mode. Added in version 2.6'
- ' - Available options are :'
- ' - C(persistent): Changes are immediately and permanently written to the virtual disk. This is default.'
- ' - C(independent_persistent): Same as persistent, but not affected by snapshots.'
- ' - C(independent_nonpersistent): Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
cdrom:
description:
- A CD-ROM configuration for the virtual machine.
- 'Valid attributes are:'
- ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM will be disconnected but present.'
- ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso). Required if type is set C(iso).'
version_added: '2.5'
resource_pool:
description:
- Use the given resource pool for virtual machine operation.
- This parameter is case sensitive.
- Resource pool should be child of the selected host parent.
version_added: '2.3'
wait_for_ip_address:
description:
- Wait until vCenter detects an IP address for the virtual machine.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
- "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
default: 'no'
type: bool
state_change_timeout:
description:
- If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
- If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
- The value sets a timeout in seconds for the module to wait for the state change.
default: 0
version_added: '2.6'
snapshot_src:
description:
- Name of the existing snapshot to use to create a clone of a virtual machine.
- This parameter is case sensitive.
- While creating linked clone using C(linked_clone) parameter, this parameter is required.
version_added: '2.4'
linked_clone:
description:
- Whether to create a linked clone from the snapshot specified.
- If specified, then C(snapshot_src) is required parameter.
default: 'no'
type: bool
version_added: '2.4'
force:
description:
- Ignore warnings and complete the actions.
    - This parameter is useful when removing a virtual machine which is in powered-on state.
    - 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
      be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
      This is specifically the case for removing a powered-on virtual machine when C(state) is set to C(absent).'
default: 'no'
type: bool
datacenter:
description:
- Destination datacenter for the deploy operation.
- This parameter is case sensitive.
default: ha-datacenter
cluster:
description:
- The cluster name where the virtual machine will run.
- This is a required parameter, if C(esxi_hostname) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
version_added: '2.3'
esxi_hostname:
description:
- The ESXi hostname where the virtual machine will run.
- This is a required parameter, if C(cluster) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
annotation:
description:
- A note or annotation to include in the virtual machine.
version_added: '2.3'
customvalues:
description:
- Define a list of custom values to set on virtual machine.
- A custom value object takes two fields C(key) and C(value).
- Incorrect key and values will be ignored.
version_added: '2.3'
networks:
description:
- A list of networks (in the order of the NICs).
- Removing NICs is not allowed, while reconfiguring the virtual machine.
    - All parameters and VMware object names are case sensitive.
- 'One of the below parameters is required per entry:'
- ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.'
- ' - C(vlan) (integer): VLAN number for this interface.'
- 'Optional parameters per entry (used for virtual hardware):'
- ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).'
- ' - C(mac) (string): Customize MAC address.'
- ' - C(dvswitch_name) (string): Name of the distributed vSwitch.
This value is required if multiple distributed portgroups exists with the same name. version_added 2.7'
- ' - C(start_connected) (bool): Indicates that virtual network adapter starts with associated virtual machine powers on. version_added: 2.5'
- 'Optional parameters per entry (used for OS customization):'
- ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.'
- ' - C(ip) (string): Static IP address (implies C(type: static)).'
- ' - C(netmask) (string): Static netmask required for C(ip).'
- ' - C(gateway) (string): Static gateway.'
- ' - C(dns_servers) (string): DNS servers for this network interface (Windows).'
- ' - C(domain) (string): Domain name for this network interface (Windows).'
- ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. version_added: 2.5'
- ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5'
version_added: '2.3'
customization:
description:
- Parameters for OS customization when cloning from the template or the virtual machine.
- Not all operating systems are supported for customization with respective vCenter version,
please check VMware documentation for respective OS customization.
- For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf))
- All parameters and VMware object names are case sensitive.
- Linux based OSes requires Perl package to be installed for OS customizations.
- 'Common parameters (Linux/Windows):'
- ' - C(dns_servers) (list): List of DNS servers to configure.'
- ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).'
- ' - C(domain) (string): DNS domain name to use.'
- ' - C(hostname) (string): Computer hostname (default: shorted C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase)
and minus, rest of the characters are dropped as per RFC 952.'
- 'Parameters related to Windows customization:'
- ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).'
- ' - C(autologoncount) (int): Number of autologon after reboot (default: 1).'
- ' - C(domainadmin) (string): User used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(domainadminpassword) (string): Password used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(fullname) (string): Server owner name (default: Administrator).'
- ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).'
- ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).'
- ' - C(orgname) (string): Organisation name (default: ACME).'
- ' - C(password) (string): Local administrator password.'
- ' - C(productid) (string): Product ID.'
- ' - C(runonce) (list): List of commands to run at first user logon.'
- ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).'
version_added: '2.3'
vapp_properties:
description:
- A list of vApp properties
- 'For full list of attributes and types refer to: U(https://github.com/vmware/pyvmomi/blob/master/docs/vim/vApp/PropertyInfo.rst)'
- 'Basic attributes are:'
- ' - C(id) (string): Property id - required.'
- ' - C(value) (string): Property value.'
- ' - C(type) (string): Value type, string type by default.'
- ' - C(operation): C(remove): This attribute is required only when removing properties.'
version_added: '2.6'
customization_spec:
description:
- Unique name identifying the requested customization specification.
- This parameter is case sensitive.
- If set, then overrides C(customization) parameter values.
version_added: '2.6'
datastore:
description:
- Specify datastore or datastore cluster to provision virtual machine.
    - 'This will take precedence over "disk.datastore" parameter.'
- This parameter is useful to override datastore or datastore cluster setting.
- For example, when user has different datastore or datastore cluster for templates and virtual machines.
- Please see example for more usage.
version_added: '2.7'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create a virtual machine on given ESXi hostname
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /DC1/vm/
name: test_vm_0001
state: poweredon
guest_id: centos64Guest
# This is hostname of particular ESXi server on which user wants VM to be deployed
esxi_hostname: "{{ esxi_hostname }}"
disk:
- size_gb: 10
type: thin
datastore: datastore1
hardware:
memory_mb: 512
num_cpus: 4
scsi: paravirtual
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
ip: 10.10.10.100
netmask: 255.255.255.0
device_type: vmxnet3
wait_for_ip_address: yes
delegate_to: localhost
register: deploy_vm
- name: Create a virtual machine from a template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /testvms
name: testvm_2
state: poweredon
template: template_el7
disk:
- size_gb: 10
type: thin
datastore: g73_datastore
hardware:
memory_mb: 512
num_cpus: 6
num_cpu_cores_per_socket: 3
scsi: paravirtual
memory_reservation: 512
memory_reservation_lock: True
mem_limit: 8096
mem_reservation: 4096
cpu_limit: 8096
cpu_reservation: 4096
max_connections: 5
hotadd_cpu: True
hotremove_cpu: True
hotadd_memory: False
version: 12 # Hardware version of virtual machine
boot_firmware: "efi"
cdrom:
type: iso
iso_path: "[datastore1] livecd.iso"
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
- name: Clone a virtual machine from Windows template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: datacenter1
cluster: cluster
name: testvm-2
template: template_windows
networks:
- name: VM Network
ip: 192.168.1.100
netmask: 255.255.255.0
gateway: 192.168.1.1
mac: aa:bb:dd:aa:00:14
domain: my_domain
dns_servers:
- 192.168.1.1
- 192.168.1.2
- vlan: 1234
type: dhcp
customization:
autologon: yes
dns_servers:
- 192.168.1.1
- 192.168.1.2
domain: my_domain
password: new_vm_password
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
delegate_to: localhost
- name: Clone a virtual machine from Linux template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter }}"
state: present
folder: /DC1/vm
template: "{{ template }}"
name: "{{ vm_name }}"
cluster: DC1_C1
networks:
- name: VM Network
ip: 192.168.10.11
netmask: 255.255.255.0
wait_for_ip_address: True
customization:
domain: "{{ guest_domain }}"
dns_servers:
- 8.9.9.9
- 7.8.8.9
dns_suffix:
- example.com
- example2.com
delegate_to: localhost
- name: Rename a virtual machine (requires the virtual machine's uuid)
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
name: new_name
state: present
delegate_to: localhost
- name: Remove a virtual machine by uuid
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: absent
delegate_to: localhost
- name: Manipulate vApp properties
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: vm_name
state: present
vapp_properties:
- id: remoteIP
category: Backup
label: Backup server IP
type: string
value: 10.10.10.1
- id: old_property
operation: remove
delegate_to: localhost
- name: Set powerstate of a virtual machine to poweroff by using UUID
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: poweredoff
delegate_to: localhost
- name: Deploy a virtual machine in a datastore different from the datastore of the template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: "{{ vm_name }}"
state: present
template: "{{ template_name }}"
    # Here the datastore can be different from the one which holds the template
datastore: "{{ virtual_machine_datastore }}"
hardware:
memory_mb: 512
num_cpus: 2
scsi: paravirtual
delegate_to: localhost
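
# A minimal illustrative example of using a pre-created customization specification.
# The specification name "linux-spec" is hypothetical; it must already exist in
# vCenter and is case sensitive. If set, it overrides the customization parameter values.
- name: Clone a virtual machine using an existing customization specification
  vmware_guest:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: no
    datacenter: "{{ datacenter }}"
    folder: /DC1/vm
    template: "{{ template }}"
    name: "{{ vm_name }}"
    customization_spec: linux-spec
  delegate_to: localhost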
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import re
import time
HAS_PYVMOMI = False
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
pass
from random import randint
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs,
compile_folder_path_for_object, serialize_spec,
vmware_argument_spec, set_vm_power_state, PyVmomi,
find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip)
class PyVmomiDeviceHelper(object):
""" This class is a helper to create easily VMWare Objects for PyVmomiHelper """
def __init__(self, module):
self.module = module
self.next_disk_unit_number = 0
self.scsi_device_type = {
'lsilogic': vim.vm.device.VirtualLsiLogicController,
'paravirtual': vim.vm.device.ParaVirtualSCSIController,
'buslogic': vim.vm.device.VirtualBusLogicController,
'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController,
}
def create_scsi_controller(self, scsi_type):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController)
scsi_ctl.device = scsi_device()
scsi_ctl.device.busNumber = 0
# While creating a new SCSI controller, temporary key value
# should be unique negative integers
scsi_ctl.device.key = -randint(1000, 9999)
scsi_ctl.device.hotAddRemove = True
scsi_ctl.device.sharedBus = 'noSharing'
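        # The controller itself occupies SCSI unit number 7 on its own bus,
        # which is why unit number 7 is skipped when attaching disks below.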
scsi_ctl.device.scsiCtlrUnitNumber = 7
return scsi_ctl
def is_scsi_controller(self, device):
return isinstance(device, tuple(self.scsi_device_type.values()))
@staticmethod
def create_ide_controller():
ide_ctl = vim.vm.device.VirtualDeviceSpec()
ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_ctl.device = vim.vm.device.VirtualIDEController()
ide_ctl.device.deviceInfo = vim.Description()
# While creating a new IDE controller, temporary key value
# should be unique negative integers
ide_ctl.device.key = -randint(200, 299)
ide_ctl.device.busNumber = 0
return ide_ctl
@staticmethod
def create_cdrom(ide_ctl, cdrom_type, iso_path=None):
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
cdrom_spec.device = vim.vm.device.VirtualCdrom()
cdrom_spec.device.controllerKey = ide_ctl.device.key
cdrom_spec.device.key = -1
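        # -1 is a temporary placeholder key; vSphere assigns the real device key during reconfigure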
cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_spec.device.connectable.allowGuestControl = True
cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
if cdrom_type in ["none", "client"]:
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_type == "iso":
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
return cdrom_spec
@staticmethod
def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
if cdrom_type == "none":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
not cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected))
elif cdrom_type == "client":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
elif cdrom_type == "iso":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and
cdrom_device.backing.fileName == iso_path and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
def create_scsi_disk(self, scsi_ctl, disk_index=None):
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
diskspec.device = vim.vm.device.VirtualDisk()
diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
diskspec.device.controllerKey = scsi_ctl.device.key
if self.next_disk_unit_number == 7:
raise AssertionError()
if disk_index == 7:
raise AssertionError()
"""
Configure disk unit number.
"""
if disk_index is not None:
diskspec.device.unitNumber = disk_index
self.next_disk_unit_number = disk_index + 1
else:
diskspec.device.unitNumber = self.next_disk_unit_number
self.next_disk_unit_number += 1
# unit number 7 is reserved to SCSI controller, increase next index
if self.next_disk_unit_number == 7:
self.next_disk_unit_number += 1
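        # Disk unit numbers therefore run 0-6 and then 8-15; unit 7 belongs to the controller itself.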
return diskspec
def get_device(self, device_type, name):
nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(),
vmxnet2=vim.vm.device.VirtualVmxnet2(),
vmxnet3=vim.vm.device.VirtualVmxnet3(),
e1000=vim.vm.device.VirtualE1000(),
e1000e=vim.vm.device.VirtualE1000e(),
sriov=vim.vm.device.VirtualSriovEthernetCard(),
)
if device_type in nic_dict:
return nic_dict[device_type]
else:
self.module.fail_json(msg='Invalid device_type "%s"'
' for network "%s"' % (device_type, name))
def create_nic(self, device_type, device_label, device_infos):
nic = vim.vm.device.VirtualDeviceSpec()
nic.device = self.get_device(device_type, device_infos['name'])
nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
nic.device.deviceInfo = vim.Description()
nic.device.deviceInfo.label = device_label
nic.device.deviceInfo.summary = device_infos['name']
nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
nic.device.connectable.connected = True
if 'mac' in device_infos and self.is_valid_mac_addr(device_infos['mac']):
nic.device.addressType = 'manual'
nic.device.macAddress = device_infos['mac']
else:
nic.device.addressType = 'generated'
return nic
@staticmethod
def is_valid_mac_addr(mac_addr):
"""
Function to validate MAC address for given string
Args:
mac_addr: string to validate as MAC address
Returns: (Boolean) True if string is valid MAC address, otherwise False
"""
mac_addr_regex = re.compile('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
return bool(mac_addr_regex.match(mac_addr))
class PyVmomiCache(object):
""" This class caches references to objects which are requested multiples times but not modified """
def __init__(self, content, dc_name=None):
self.content = content
self.dc_name = dc_name
self.networks = {}
self.clusters = {}
self.esx_hosts = {}
self.parent_datacenters = {}
def find_obj(self, content, types, name, confine_to_datacenter=True):
""" Wrapper around find_obj to set datacenter context """
result = find_obj(content, types, name)
if result and confine_to_datacenter:
if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name):
result = None
objects = self.get_all_objs(content, types, confine_to_datacenter=True)
for obj in objects:
if name is None or to_text(obj.name) == to_text(name):
return obj
return result
def get_all_objs(self, content, types, confine_to_datacenter=True):
""" Wrapper around get_all_objs to set datacenter context """
objects = get_all_objs(content, types)
if confine_to_datacenter:
if hasattr(objects, 'items'):
# resource pools come back as a dictionary
# make a copy
tmpobjs = objects.copy()
for k, v in objects.items():
parent_dc = self.get_parent_datacenter(k)
if parent_dc.name != self.dc_name:
tmpobjs.pop(k, None)
objects = tmpobjs
else:
# everything else should be a list
objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]
return objects
def get_network(self, network):
if network not in self.networks:
self.networks[network] = self.find_obj(self.content, [vim.Network], network)
return self.networks[network]
def get_cluster(self, cluster):
if cluster not in self.clusters:
self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
return self.clusters[cluster]
def get_esx_host(self, host):
if host not in self.esx_hosts:
self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)
return self.esx_hosts[host]
    def get_parent_datacenter(self, obj):
        """ Walk the parent tree to find the object's datacenter """
        if isinstance(obj, vim.Datacenter):
            return obj
        if obj in self.parent_datacenters:
            return self.parent_datacenters[obj]
        datacenter = None
        current = obj
        while True:
            if not hasattr(current, 'parent'):
                break
            current = current.parent
            if isinstance(current, vim.Datacenter):
                datacenter = current
                break
        # Cache against the object originally asked about so later lookups actually hit the cache
        self.parent_datacenters[obj] = datacenter
        return datacenter
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.device_helper = PyVmomiDeviceHelper(self.module)
self.configspec = None
self.change_detected = False
self.customspec = None
self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
def remove_vm(self, vm):
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
if vm.summary.runtime.powerState.lower() == 'poweredon':
self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
"please use 'force' parameter to remove or poweroff VM "
"and try removing VM again." % vm.name)
task = vm.Destroy()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': False, 'failed': True, 'msg': task.info.error.msg}
else:
return {'changed': True, 'failed': False}
def configure_guestid(self, vm_obj, vm_creation=False):
# guest_id is not required when using templates
if self.params['template'] and not self.params['guest_id']:
return
# guest_id is only mandatory on VM creation
if vm_creation and self.params['guest_id'] is None:
self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
if self.params['guest_id'] and \
(vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
self.change_detected = True
self.configspec.guestId = self.params['guest_id']
def configure_resource_alloc_info(self, vm_obj):
"""
Function to configure resource allocation information about virtual machine
:param vm_obj: VM object in case of reconfigure, None in case of deploy
:return: None
"""
rai_change_detected = False
memory_allocation = vim.ResourceAllocationInfo()
cpu_allocation = vim.ResourceAllocationInfo()
if 'hardware' in self.params:
if 'mem_limit' in self.params['hardware']:
mem_limit = None
try:
mem_limit = int(self.params['hardware'].get('mem_limit'))
except ValueError as e:
self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.")
memory_allocation.limit = mem_limit
if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
rai_change_detected = True
if 'mem_reservation' in self.params['hardware']:
mem_reservation = None
try:
mem_reservation = int(self.params['hardware'].get('mem_reservation'))
except ValueError as e:
self.module.fail_json(msg="hardware.mem_reservation should be an integer value.")
memory_allocation.reservation = mem_reservation
if vm_obj is None or \
memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
rai_change_detected = True
if 'cpu_limit' in self.params['hardware']:
cpu_limit = None
try:
cpu_limit = int(self.params['hardware'].get('cpu_limit'))
except ValueError as e:
self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.")
cpu_allocation.limit = cpu_limit
if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
rai_change_detected = True
if 'cpu_reservation' in self.params['hardware']:
cpu_reservation = None
try:
cpu_reservation = int(self.params['hardware'].get('cpu_reservation'))
except ValueError as e:
self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.")
cpu_allocation.reservation = cpu_reservation
if vm_obj is None or \
cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
rai_change_detected = True
if rai_change_detected:
self.configspec.memoryAllocation = memory_allocation
self.configspec.cpuAllocation = cpu_allocation
self.change_detected = True
def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
# set cpu/memory/etc
if 'hardware' in self.params:
if 'num_cpus' in self.params['hardware']:
try:
num_cpus = int(self.params['hardware']['num_cpus'])
except ValueError as e:
self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.")
if 'num_cpu_cores_per_socket' in self.params['hardware']:
try:
num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
except ValueError as e:
self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute "
"should be an integer value.")
if num_cpus % num_cpu_cores_per_socket != 0:
self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
"of hardware.num_cpu_cores_per_socket")
self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket:
self.change_detected = True
self.configspec.numCPUs = num_cpus
if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
self.change_detected = True
            # num_cpus is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
if 'memory_mb' in self.params['hardware']:
try:
self.configspec.memoryMB = int(self.params['hardware']['memory_mb'])
except ValueError:
self.module.fail_json(msg="Failed to parse hardware.memory_mb value."
" Please refer the documentation and provide"
" correct value.")
if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
self.change_detected = True
# memory_mb is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
if 'hotadd_memory' in self.params['hardware']:
self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled:
self.change_detected = True
if 'hotadd_cpu' in self.params['hardware']:
self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled:
self.change_detected = True
if 'hotremove_cpu' in self.params['hardware']:
self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled:
self.change_detected = True
if 'memory_reservation' in self.params['hardware']:
memory_reservation_mb = 0
try:
memory_reservation_mb = int(self.params['hardware']['memory_reservation'])
                except ValueError as e:
                    self.module.fail_json(msg="Failed to set memory_reservation value."
                                              " Valid value for memory_reservation is an integer value in MB: %s" % e)
mem_alloc = vim.ResourceAllocationInfo()
mem_alloc.reservation = memory_reservation_mb
self.configspec.memoryAllocation = mem_alloc
if vm_obj is None or self.configspec.memoryAllocation.reservation != vm_obj.config.memoryAllocation.reservation:
self.change_detected = True
if 'memory_reservation_lock' in self.params['hardware']:
self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax:
self.change_detected = True
if 'boot_firmware' in self.params['hardware']:
boot_firmware = self.params['hardware']['boot_firmware'].lower()
if boot_firmware not in ('bios', 'efi'):
self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]."
" Need one of ['bios', 'efi']." % boot_firmware)
self.configspec.firmware = boot_firmware
if vm_obj is None or self.configspec.firmware != vm_obj.config.firmware:
self.change_detected = True
def configure_cdrom(self, vm_obj):
# Configure the VM CD-ROM
if "cdrom" in self.params and self.params["cdrom"]:
if "type" not in self.params["cdrom"] or self.params["cdrom"]["type"] not in ["none", "client", "iso"]:
self.module.fail_json(msg="cdrom.type is mandatory")
if self.params["cdrom"]["type"] == "iso" and ("iso_path" not in self.params["cdrom"] or not self.params["cdrom"]["iso_path"]):
self.module.fail_json(msg="cdrom.iso_path is mandatory in case cdrom.type is iso")
if vm_obj and vm_obj.config.template:
# Changing CD-ROM settings on a template is not supported
return
cdrom_spec = None
cdrom_device = self.get_vm_cdrom_device(vm=vm_obj)
iso_path = self.params["cdrom"]["iso_path"] if "iso_path" in self.params["cdrom"] else None
if cdrom_device is None:
# Creating new CD-ROM
ide_device = self.get_vm_ide_device(vm=vm_obj)
if ide_device is None:
# Creating new IDE device
ide_device = self.device_helper.create_ide_controller()
self.change_detected = True
self.configspec.deviceChange.append(ide_device)
elif len(ide_device.device) > 3:
self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4 IDE devices of which none are a cdrom")
cdrom_spec = self.device_helper.create_cdrom(ide_ctl=ide_device, cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path)
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")
elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device, cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
# Updating an existing CD-ROM
if self.params["cdrom"]["type"] in ["client", "none"]:
cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif self.params["cdrom"]["type"] == "iso":
cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_device.connectable.allowGuestControl = True
cdrom_device.connectable.startConnected = (self.params["cdrom"]["type"] != "none")
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_device.connectable.connected = (self.params["cdrom"]["type"] != "none")
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_device
if cdrom_spec:
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
def configure_hardware_params(self, vm_obj):
"""
Function to configure hardware related configuration of virtual machine
Args:
vm_obj: virtual machine object
"""
if 'hardware' in self.params:
if 'max_connections' in self.params['hardware']:
# maxMksConnections == max_connections
self.configspec.maxMksConnections = int(self.params['hardware']['max_connections'])
if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.hardware.maxMksConnections:
self.change_detected = True
if 'nested_virt' in self.params['hardware']:
self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt'])
if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled):
self.change_detected = True
if 'version' in self.params['hardware']:
hw_version_check_failed = False
temp_version = self.params['hardware'].get('version', 10)
try:
temp_version = int(temp_version)
except ValueError:
hw_version_check_failed = True
if temp_version not in range(3, 15):
hw_version_check_failed = True
if hw_version_check_failed:
self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
" values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version)
# Hardware version is denoted as "vmx-10"
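                # e.g. 9 becomes "vmx-09" and 12 becomes "vmx-12" (zero-padded to two digits)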
version = "vmx-%02d" % temp_version
self.configspec.version = version
if vm_obj is None or self.configspec.version != vm_obj.config.version:
self.change_detected = True
if vm_obj is not None:
# VM exists and we need to update the hardware version
current_version = vm_obj.config.version
# current_version = "vmx-10"
version_digit = int(current_version.split("-", 1)[-1])
if temp_version < version_digit:
self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
" version '%d'. Downgrading hardware version is"
" not supported. Please specify version greater"
" than the current version." % (version_digit,
temp_version))
new_version = "vmx-%02d" % temp_version
try:
task = vm_obj.UpgradeVM_Task(new_version)
self.wait_for_task(task)
if task.info.state != 'error':
self.change_detected = True
except vim.fault.AlreadyUpgraded:
# Don't fail if VM is already upgraded.
pass
def get_device_by_type(self, vm=None, type=None):
if vm is None or type is None:
return None
for device in vm.config.hardware.device:
if isinstance(device, type):
return device
return None
def get_vm_cdrom_device(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom)
def get_vm_ide_device(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController)
def get_vm_network_interfaces(self, vm=None):
device_list = []
if vm is None:
return device_list
nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2,
vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000,
vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard)
for device in vm.config.hardware.device:
if isinstance(device, nw_device_types):
device_list.append(device)
return device_list
def sanitize_network_params(self):
"""
        Sanitize user-provided network params
        Returns: A sanitized list of network params, else fails
"""
network_devices = list()
# Clean up user data here
for network in self.params['networks']:
if 'name' not in network and 'vlan' not in network:
self.module.fail_json(msg="Please specify at least a network name or"
" a VLAN name under VM network list.")
if 'name' in network and self.cache.get_network(network['name']) is None:
self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
elif 'vlan' in network:
dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
for dvp in dvps:
if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
network['name'] = dvp.config.name
break
if 'dvswitch_name' in network and \
dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
if dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
else:
self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)
if 'type' in network:
if network['type'] not in ['dhcp', 'static']:
self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
" Valid parameters are ['dhcp', 'static']." % network)
if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
' but "type" is set to "%(type)s".' % network)
else:
                # Type is an optional parameter; if the user provided an IP or netmask,
                # assume the network type is 'static'
if 'ip' in network or 'netmask' in network:
network['type'] = 'static'
else:
# User wants network type as 'dhcp'
network['type'] = 'dhcp'
if network.get('type') == 'static':
if 'ip' in network and 'netmask' not in network:
self.module.fail_json(msg="'netmask' is required if 'ip' is"
" specified under VM network list.")
if 'ip' not in network and 'netmask' in network:
self.module.fail_json(msg="'ip' is required if 'netmask' is"
" specified under VM network list.")
validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov']
if 'device_type' in network and network['device_type'] not in validate_device_types:
self.module.fail_json(msg="Device type specified '%s' is not valid."
" Please specify correct device"
" type from ['%s']." % (network['device_type'],
"', '".join(validate_device_types)))
if 'mac' in network and not PyVmomiDeviceHelper.is_valid_mac_addr(network['mac']):
self.module.fail_json(msg="Device MAC address '%s' is invalid."
" Please provide correct MAC address." % network['mac'])
network_devices.append(network)
return network_devices
def configure_network(self, vm_obj):
        # Ignore empty networks; this permits keeping existing networks when deploying a template or cloning a VM
if len(self.params['networks']) == 0:
return
network_devices = self.sanitize_network_params()
        # List the current devices for clone or idempotency
current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
if len(network_devices) < len(current_net_devices):
self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
"Removing interfaces is not allowed"
% (len(network_devices), len(current_net_devices)))
for key in range(0, len(network_devices)):
nic_change_detected = False
network_name = network_devices[key]['name']
if key < len(current_net_devices) and (vm_obj or self.params['template']):
                # We are editing existing network devices; this is the case when
                # cloning from a VM or template
nic = vim.vm.device.VirtualDeviceSpec()
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
nic.device = current_net_devices[key]
if ('wake_on_lan' in network_devices[key] and
nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')):
nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan')
nic_change_detected = True
if ('start_connected' in network_devices[key] and
nic.device.connectable.startConnected != network_devices[key].get('start_connected')):
nic.device.connectable.startConnected = network_devices[key].get('start_connected')
nic_change_detected = True
if ('allow_guest_control' in network_devices[key] and
nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')):
nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control')
nic_change_detected = True
if nic.device.deviceInfo.summary != network_name:
nic.device.deviceInfo.summary = network_name
nic_change_detected = True
if 'device_type' in network_devices[key]:
device = self.device_helper.get_device(network_devices[key]['device_type'], network_name)
device_class = type(device)
if not isinstance(nic.device, device_class):
self.module.fail_json(msg="Changing the device type is not possible when interface is already present. "
"The failing device type is %s" % network_devices[key]['device_type'])
# Changing mac address has no effect when editing interface
if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress:
self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. "
"The failing new MAC address is %s" % nic.device.macAddress)
else:
                # The default device type is vmxnet3, per VMware best practice
device_type = network_devices[key].get('device_type', 'vmxnet3')
nic = self.device_helper.create_nic(device_type,
'Network Adapter %s' % (key + 1),
network_devices[key])
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_change_detected = True
if hasattr(self.cache.get_network(network_name), 'portKeys'):
# VDS switch
pg_obj = None
if 'dvswitch_name' in network_devices[key]:
dvs_name = network_devices[key]['dvswitch_name']
dvs_obj = find_dvs_by_name(self.content, dvs_name)
if dvs_obj is None:
self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name)
pg_obj = find_dvspg_by_name(dvs_obj, network_name)
if pg_obj is None:
self.module.fail_json(msg="Unable to find distributed port group %s" % network_name)
else:
pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)
if (nic.device.backing and
(not hasattr(nic.device.backing, 'port') or
(nic.device.backing.port.portgroupKey != pg_obj.key or
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = pg_obj.key
                    # If the user specifies a distributed port group that is not associated with the host
                    # system on which the virtual machine is going to be deployed, we get an error. We can
                    # infer that there is no association between the given distributed port group and the host system.
host_system = self.params.get('esxi_hostname')
if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
" virtual portgroup '%s'. Please make sure host system is associated"
" with given distributed virtual portgroup" % (host_system, pg_obj.name))
# TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
# For now, check if we are able to find distributed virtual switch
if not pg_obj.config.distributedVirtualSwitch:
self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with"
" distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
" the given distributed virtual portgroup." % pg_obj.name)
dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nic.device.backing.port = dvs_port_connection
elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
# NSX-T Logical Switch
nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
network_id = self.cache.get_network(network_name).summary.opaqueNetworkId
nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
nic.device.backing.opaqueNetworkId = network_id
nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
else:
# vSwitch
if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_change_detected = True
net_obj = self.cache.get_network(network_name)
if nic.device.backing.network != net_obj:
nic.device.backing.network = net_obj
nic_change_detected = True
if nic.device.backing.deviceName != network_name:
nic.device.backing.deviceName = network_name
nic_change_detected = True
if nic_change_detected:
self.configspec.deviceChange.append(nic)
self.change_detected = True
def configure_vapp_properties(self, vm_obj):
if len(self.params['vapp_properties']) == 0:
return
for x in self.params['vapp_properties']:
if not x.get('id'):
self.module.fail_json(msg="id is required to set vApp property")
new_vmconfig_spec = vim.vApp.VmConfigSpec()
# This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments
orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec
vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
# each property must have a unique key
# init key counter with max value + 1
all_keys = [x.key for x in orig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
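        # e.g. existing property keys [0, 1, 5] -> newly added properties get keys 6, 7, ...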
for property_id, property_spec in vapp_properties_to_change.items():
is_property_changed = False
new_vapp_property_spec = vim.vApp.PropertySpec()
if property_id in vapp_properties_current:
if property_spec.get('operation') == 'remove':
new_vapp_property_spec.operation = 'remove'
new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
is_property_changed = True
else:
# this is 'edit' branch
new_vapp_property_spec.operation = 'edit'
new_vapp_property_spec.info = vapp_properties_current[property_id]
try:
for property_name, property_value in property_spec.items():
if property_name == 'operation':
# operation is not an info object property
# if set to anything other than 'remove' we don't fail
continue
# Updating attributes only if needed
if getattr(new_vapp_property_spec.info, property_name) != property_value:
setattr(new_vapp_property_spec.info, property_name, property_value)
is_property_changed = True
except Exception as e:
self.module.fail_json(msg="Failed to set vApp property field='%s' and value='%s'. Error: %s"
% (property_name, property_value, to_text(e)))
else:
if property_spec.get('operation') == 'remove':
                    # attempt to delete a non-existent property
continue
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
if new_vmconfig_spec.property:
self.configspec.vAppConfig = new_vmconfig_spec
self.change_detected = True
def customize_customvalues(self, vm_obj, config_spec):
if len(self.params['customvalues']) == 0:
return
vm_custom_spec = config_spec
vm_custom_spec.extraConfig = []
changed = False
facts = self.gather_facts(vm_obj)
for kv in self.params['customvalues']:
            if 'key' not in kv or 'value' not in kv:
                self.module.fail_json(msg="customvalues items require both 'key' and 'value' fields.")
# If kv is not kv fetched from facts, change it
if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
option = vim.option.OptionValue()
option.key = kv['key']
option.value = kv['value']
vm_custom_spec.extraConfig.append(option)
changed = True
if changed:
self.change_detected = True
def customize_vm(self, vm_obj):
# User specified customization specification
custom_spec_name = self.params.get('customization_spec')
if custom_spec_name:
cc_mgr = self.content.customizationSpecManager
if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
self.customspec = temp_spec.spec
return
else:
self.module.fail_json(msg="Unable to find customization specification"
" '%s' in given configuration." % custom_spec_name)
# Network settings
adaptermaps = []
for network in self.params['networks']:
guest_map = vim.vm.customization.AdapterMapping()
guest_map.adapter = vim.vm.customization.IPSettings()
if 'ip' in network and 'netmask' in network:
guest_map.adapter.ip = vim.vm.customization.FixedIp()
guest_map.adapter.ip.ipAddress = str(network['ip'])
guest_map.adapter.subnetMask = str(network['netmask'])
elif 'type' in network and network['type'] == 'dhcp':
guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
if 'gateway' in network:
guest_map.adapter.gateway = network['gateway']
# On Windows, DNS domain and DNS servers can be set by network interface
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
if 'domain' in network:
guest_map.adapter.dnsDomain = network['domain']
elif 'domain' in self.params['customization']:
guest_map.adapter.dnsDomain = self.params['customization']['domain']
if 'dns_servers' in network:
guest_map.adapter.dnsServerList = network['dns_servers']
elif 'dns_servers' in self.params['customization']:
guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
adaptermaps.append(guest_map)
# Global DNS settings
globalip = vim.vm.customization.GlobalIPSettings()
if 'dns_servers' in self.params['customization']:
globalip.dnsServerList = self.params['customization']['dns_servers']
# TODO: Maybe list the different domains from the interfaces here by default ?
if 'dns_suffix' in self.params['customization']:
dns_suffix = self.params['customization']['dns_suffix']
if isinstance(dns_suffix, list):
globalip.dnsSuffixList = " ".join(dns_suffix)
else:
globalip.dnsSuffixList = dns_suffix
elif 'domain' in self.params['customization']:
globalip.dnsSuffixList = self.params['customization']['domain']
if self.params['guest_id']:
guest_id = self.params['guest_id']
else:
guest_id = vm_obj.summary.config.guestId
# For windows guest OS, use SysPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
if 'win' in guest_id:
ident = vim.vm.customization.Sysprep()
ident.userData = vim.vm.customization.UserData()
# Setting hostName, orgName and fullName is mandatory, so we set some default when missing
ident.userData.computerName = vim.vm.customization.FixedName()
ident.userData.computerName.name = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
if 'productid' in self.params['customization']:
ident.userData.productId = str(self.params['customization']['productid'])
ident.guiUnattended = vim.vm.customization.GuiUnattended()
if 'autologon' in self.params['customization']:
ident.guiUnattended.autoLogon = self.params['customization']['autologon']
ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
if 'timezone' in self.params['customization']:
ident.guiUnattended.timeZone = self.params['customization']['timezone']
ident.identification = vim.vm.customization.Identification()
if self.params['customization'].get('password', '') != '':
ident.guiUnattended.password = vim.vm.customization.Password()
ident.guiUnattended.password.value = str(self.params['customization']['password'])
ident.guiUnattended.password.plainText = True
if 'joindomain' in self.params['customization']:
if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
"joindomain feature")
ident.identification.domainAdmin = str(self.params['customization']['domainadmin'])
ident.identification.joinDomain = str(self.params['customization']['joindomain'])
ident.identification.domainAdminPassword = vim.vm.customization.Password()
ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword'])
ident.identification.domainAdminPassword.plainText = True
elif 'joinworkgroup' in self.params['customization']:
ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup'])
if 'runonce' in self.params['customization']:
ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
ident.guiRunOnce.commandList = self.params['customization']['runonce']
else:
# FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!
# For Linux guest OS, use LinuxPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
ident = vim.vm.customization.LinuxPrep()
# TODO: Maybe add domain from interface if missing ?
if 'domain' in self.params['customization']:
ident.domain = str(self.params['customization']['domain'])
ident.hostName = vim.vm.customization.FixedName()
hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
# Remove all characters except alphanumeric and minus which is allowed by RFC 952
valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
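            # e.g. a name of 'web_server.example.com' defaults the hostname to 'web_server',
            # which is then sanitized to 'webserver'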
ident.hostName.name = valid_hostname
self.customspec = vim.vm.customization.Specification()
self.customspec.nicSettingMap = adaptermaps
self.customspec.globalIPSettings = globalip
self.customspec.identity = ident
def get_vm_scsi_controller(self, vm_obj):
# If vm_obj doesn't exist there is no SCSI controller to find
if vm_obj is None:
return None
for device in vm_obj.config.hardware.device:
if self.device_helper.is_scsi_controller(device):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.device = device
return scsi_ctl
return None
def get_configured_disk_size(self, expected_disk_spec):
# what size is it?
if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
# size, size_tb, size_gb, size_mb, size_kb
if 'size' in expected_disk_spec:
size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
disk_size_m = size_regex.match(expected_disk_spec['size'])
try:
if disk_size_m:
expected = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
raise ValueError
if re.match(r'\d+\.\d+', expected):
# We found float value in string, let's typecast it
expected = float(expected)
else:
# We found int value in string, let's typecast it
expected = int(expected)
if not expected or not unit:
raise ValueError
except (TypeError, ValueError, NameError):
# Common failure
self.module.fail_json(msg="Failed to parse disk size please review value"
" provided using documentation.")
else:
param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
unit = param.split('_')[-1].lower()
expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
expected = int(expected)
disk_units = dict(tb=3, gb=2, mb=1, kb=0)
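            # Worked example: size_gb: 10 -> 10 * 1024**2 KB = 10485760 KB (vSphere capacityInKB)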
if unit in disk_units:
unit = unit.lower()
return expected * (1024 ** disk_units[unit])
else:
self.module.fail_json(msg="%s is not a supported unit for disk size."
" Supported units are ['%s']." % (unit,
"', '".join(disk_units.keys())))
# No size found but disk, fail
        self.module.fail_json(
            msg="No size, size_kb, size_mb, size_gb or size_tb attribute found in the disk configuration")
def configure_disks(self, vm_obj):
        # Ignore an empty disk list; this permits keeping existing disks when deploying a template or cloning a VM
if len(self.params['disk']) == 0:
return
scsi_ctl = self.get_vm_scsi_controller(vm_obj)
        # Create a SCSI controller only if the VM is new or has no existing SCSI controller
if vm_obj is None or scsi_ctl is None:
scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
self.change_detected = True
self.configspec.deviceChange.append(scsi_ctl)
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
if vm_obj is not None else None
if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
self.module.fail_json(msg="Provided disks configuration has less disks than "
"the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))
disk_index = 0
for expected_disk_spec in self.params.get('disk'):
disk_modified = False
            # If we are manipulating an existing object which has disks and disk_index is in disks
if vm_obj is not None and disks is not None and disk_index < len(disks):
diskspec = vim.vm.device.VirtualDeviceSpec()
# set the operation to edit so that it knows to keep other settings
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
diskspec.device = disks[disk_index]
else:
diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
disk_modified = True
if 'disk_mode' in expected_disk_spec:
disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower()
valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent']
if disk_mode not in valid_disk_mode:
self.module.fail_json(msg="disk_mode specified is not valid."
" Should be one of ['%s']" % "', '".join(valid_disk_mode))
if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None):
diskspec.device.backing.diskMode = disk_mode
disk_modified = True
else:
diskspec.device.backing.diskMode = "persistent"
# is it thin?
if 'type' in expected_disk_spec:
disk_type = expected_disk_spec.get('type', '').lower()
if disk_type == 'thin':
diskspec.device.backing.thinProvisioned = True
elif disk_type == 'eagerzeroedthick':
diskspec.device.backing.eagerlyScrub = True
# which datastore?
if expected_disk_spec.get('datastore'):
# TODO: This is already handled by the relocation spec,
# but it needs to eventually be handled for all the
# other disks defined
pass
# increment index for next disk search
disk_index += 1
# index 7 is reserved to SCSI controller
if disk_index == 7:
disk_index += 1
kb = self.get_configured_disk_size(expected_disk_spec)
            # VMware doesn't allow reducing disk sizes
if kb < diskspec.device.capacityInKB:
self.module.fail_json(
msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." %
(kb, diskspec.device.capacityInKB))
if kb != diskspec.device.capacityInKB or disk_modified:
diskspec.device.capacityInKB = kb
self.configspec.deviceChange.append(diskspec)
self.change_detected = True
def select_host(self):
hostsystem = self.cache.get_esx_host(self.params['esxi_hostname'])
if not hostsystem:
self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params)
if hostsystem.runtime.connectionState != 'connected' or hostsystem.runtime.inMaintenanceMode:
self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in invalid state or in maintenance mode.' % self.params)
return hostsystem
def autoselect_datastore(self):
datastore = None
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if ds.summary.freeSpace > datastore_freespace:
datastore = ds
datastore_freespace = ds.summary.freeSpace
return datastore
def get_recommended_datastore(self, datastore_cluster_obj=None):
"""
Function to return Storage DRS recommended datastore from datastore cluster
Args:
datastore_cluster_obj: datastore cluster managed object
Returns: Name of recommended datastore from the given datastore cluster
"""
if datastore_cluster_obj is None:
return None
# Check if Datastore Cluster provided by user is SDRS ready
sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
if sdrs_status:
            # We can get a storage recommendation only if SDRS is enabled on the given datastore cluster
pod_sel_spec = vim.storageDrs.PodSelectionSpec()
pod_sel_spec.storagePod = datastore_cluster_obj
storage_spec = vim.storageDrs.StoragePlacementSpec()
storage_spec.podSelectionSpec = pod_sel_spec
storage_spec.type = 'create'
try:
rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
rec_action = rec.recommendations[0].action[0]
return rec_action.destination.name
except Exception as e:
                # On any error, fall back to the general workflow
pass
datastore = None
datastore_freespace = 0
for ds in datastore_cluster_obj.childEntity:
if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
datastore = ds
datastore_freespace = ds.summary.freeSpace
if datastore:
return datastore.name
return None
def select_datastore(self, vm_obj=None):
datastore = None
datastore_name = None
if len(self.params['disk']) != 0:
# TODO: really use the datastore for newly created disks
if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore):
# If datastore field is provided, filter destination datastores
if 'datastore' in self.params['disk'][0] and \
isinstance(self.params['disk'][0]['datastore'], str) and \
ds.name.find(self.params['disk'][0]['datastore']) < 0:
continue
datastore = ds
datastore_name = datastore.name
datastore_freespace = ds.summary.freeSpace
elif 'datastore' in self.params['disk'][0]:
datastore_name = self.params['disk'][0]['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
                    # User specified a datastore cluster, so get the recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")
if not datastore and self.params['template']:
# use the template's existing DS
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
if disks:
datastore = disks[0].backing.datastore
datastore_name = datastore.name
# validation
if datastore:
dc = self.cache.get_parent_datacenter(datastore)
if dc.name != self.params['datacenter']:
datastore = self.autoselect_datastore()
datastore_name = datastore.name
if not datastore:
if len(self.params['disk']) != 0 or self.params['template'] is None:
self.module.fail_json(msg="Unable to find the datastore with given parameters."
" This could mean, %s is a non-existent virtual machine and module tried to"
" deploy it as new virtual machine with no disk. Please specify disks parameter"
" or specify template to clone from." % self.params['name'])
self.module.fail_json(msg="Failed to find a matching datastore")
return datastore, datastore_name
def obj_has_parent(self, obj, parent):
if obj is None and parent is None:
raise AssertionError()
current_parent = obj
while True:
if current_parent.name == parent.name:
return True
# Check if we have reached till root folder
moid = current_parent._moId
if moid in ['group-d1', 'ha-folder-root']:
return False
current_parent = current_parent.parent
if current_parent is None:
return False
def get_scsi_type(self):
disk_controller_type = "paravirtual"
# set cpu/memory/etc
if 'hardware' in self.params:
if 'scsi' in self.params['hardware']:
if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']:
disk_controller_type = self.params['hardware']['scsi']
else:
self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
return disk_controller_type
def find_folder(self, searchpath):
""" Walk inventory objects one position of the searchpath at a time """
# split the searchpath so we can iterate through it
paths = [x.replace('/', '') for x in searchpath.split('/')]
paths_total = len(paths) - 1
position = 0
# recursive walk while looking for next element in searchpath
root = self.content.rootFolder
while root and position <= paths_total:
change = False
if hasattr(root, 'childEntity'):
for child in root.childEntity:
if child.name == paths[position]:
root = child
position += 1
change = True
break
elif isinstance(root, vim.Datacenter):
if hasattr(root, 'vmFolder'):
if root.vmFolder.name == paths[position]:
root = root.vmFolder
position += 1
change = True
else:
root = None
if not change:
root = None
return root
def get_resource_pool(self, cluster=None, host=None, resource_pool=None):
""" Get a resource pool, filter on cluster, esxi_hostname or resource_pool if given """
cluster_name = cluster or self.params.get('cluster', None)
host_name = host or self.params.get('esxi_hostname', None)
resource_pool_name = resource_pool or self.params.get('resource_pool', None)
# get the datacenter object
datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if not datacenter:
self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter'])
# if cluster is given, get the cluster object
if cluster_name:
cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter)
if not cluster:
self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name)
# if host is given, get the cluster object using the host
elif host_name:
host = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter)
if not host:
self.module.fail_json(msg='Unable to find host "%s"' % host_name)
cluster = host.parent
else:
cluster = None
# get resource pools limiting search to cluster or datacenter
resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster or datacenter)
if not resource_pool:
if resource_pool_name:
self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name)
else:
self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster')
return resource_pool
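# Hedged examples of the three lookup modes (the names are hypothetical and
# not part of the original module):
# >>> helper.get_resource_pool(cluster='Cluster01')        # pool under that cluster
# >>> helper.get_resource_pool(host='esxi01.example.com')  # pool of the host's parent
# >>> helper.get_resource_pool(resource_pool='DevPool')    # named pool in the datacenter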
def deploy_vm(self):
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# FIXME:
# - static IPs
self.folder = self.params.get('folder', None)
if self.folder is None:
self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine")
# Prepend / if it was missing from the folder path, also strip trailing slashes
if not self.folder.startswith('/'):
self.folder = '/%(folder)s' % self.params
self.folder = self.folder.rstrip('/')
datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if datacenter is None:
self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)
dcpath = compile_folder_path_for_object(datacenter)
# Nested folder does not have trailing /
if not dcpath.endswith('/'):
dcpath += '/'
# Check for full path first in case it was already supplied
if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or
self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')):
fullpath = self.folder
elif self.folder.startswith('/vm/') or self.folder == '/vm':
fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder)
elif self.folder.startswith('/'):
fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder)
else:
fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder)
f_obj = self.content.searchIndex.FindByInventoryPath(fullpath)
# abort if no strategy was successful
if f_obj is None:
# Add some debugging values on failure.
details = {
'datacenter': datacenter.name,
'datacenter_path': dcpath,
'folder': self.folder,
'full_search_path': fullpath,
}
self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath),
details=details)
destfolder = f_obj
if self.params['template']:
vm_obj = self.get_vm_or_template(template_name=self.params['template'])
if vm_obj is None:
self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
else:
vm_obj = None
# always get a resource_pool
resource_pool = self.get_resource_pool()
# set the destination datastore for VM & disks
if self.params['datastore']:
# Give precedence to the datastore value provided by the user
# User may want to deploy VM to specific datastore.
datastore_name = self.params['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
# If the user specified a datastore cluster, get its recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check whether the recommended or user-specified datastore actually exists
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
(datastore, datastore_name) = self.select_datastore(vm_obj)
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
self.configure_hardware_params(vm_obj=vm_obj)
self.configure_resource_alloc_info(vm_obj=vm_obj)
self.configure_disks(vm_obj=vm_obj)
self.configure_network(vm_obj=vm_obj)
self.configure_cdrom(vm_obj=vm_obj)
# Find out if we need network customizations (find keys in the dictionary that require customizations)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'):
network_changes = True
break
if len(self.params['customization']) > 0 or network_changes or self.params.get('customization_spec'):
self.customize_vm(vm_obj=vm_obj)
clonespec = None
clone_method = None
try:
if self.params['template']:
# create the relocation spec
relospec = vim.vm.RelocateSpec()
# Only select specific host when ESXi hostname is provided
if self.params['esxi_hostname']:
relospec.host = self.select_host()
relospec.datastore = datastore
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# > pool: For a clone operation from a template to a virtual machine, this argument is required.
relospec.pool = resource_pool
linked_clone = self.params.get('linked_clone')
snapshot_src = self.params.get('snapshot_src', None)
if linked_clone:
if snapshot_src is not None:
relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
else:
self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are"
" required together for linked clone operation.")
clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=relospec)
if self.customspec:
clonespec.customization = self.customspec
if snapshot_src is not None:
snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList,
snapname=snapshot_src)
if len(snapshot) != 1:
self.module.fail_json(msg='virtual machine "%(template)s" does not contain'
' snapshot named "%(snapshot_src)s"' % self.params)
clonespec.snapshot = snapshot[0].snapshot
clonespec.config = self.configspec
clone_method = 'Clone'
try:
task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
except vim.fault.NoPermission as e:
self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s "
"due to permission issue: %s" % (self.params['name'],
destfolder,
to_native(e.msg)))
self.change_detected = True
else:
# ConfigSpec requires a name for VM creation
self.configspec.name = self.params['name']
self.configspec.files = vim.vm.FileInfo(logDirectory=None,
snapshotDirectory=None,
suspendDirectory=None,
vmPathName="[" + datastore_name + "]")
clone_method = 'CreateVM_Task'
try:
task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
except vmodl.fault.InvalidRequest as e:
self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration "
"parameter %s" % to_native(e.msg))
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to create virtual machine due to "
"product versioning restrictions: %s" % to_native(e.msg))
self.change_detected = True
self.wait_for_task(task)
except TypeError as e:
self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e))
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
# provide these to the user for debugging
clonespec_json = serialize_spec(clonespec)
configspec_json = serialize_spec(self.configspec)
kwargs = {
'changed': self.change_detected,
'failed': True,
'msg': task.info.error.msg,
'clonespec': clonespec_json,
'configspec': configspec_json,
'clone_method': clone_method
}
return kwargs
else:
# set annotation
vm = task.info.result
if self.params['annotation']:
annotation_spec = vim.vm.ConfigSpec()
annotation_spec.annotation = str(self.params['annotation'])
task = vm.ReconfigVM_Task(annotation_spec)
self.wait_for_task(task)
if self.params['customvalues']:
vm_custom_spec = vim.vm.ConfigSpec()
self.customize_customvalues(vm_obj=vm, config_spec=vm_custom_spec)
task = vm.ReconfigVM_Task(vm_custom_spec)
self.wait_for_task(task)
if self.params['wait_for_ip_address'] or self.params['state'] in ['poweredon', 'restarted']:
set_vm_power_state(self.content, vm, 'poweredon', force=False)
if self.params['wait_for_ip_address']:
self.wait_for_vm_ip(vm)
vm_facts = self.gather_facts(vm)
return {'changed': self.change_detected, 'failed': False, 'instance': vm_facts}
def get_snapshots_by_name_recursively(self, snapshots, snapname):
snap_obj = []
for snapshot in snapshots:
if snapshot.name == snapname:
snap_obj.append(snapshot)
else:
snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
return snap_obj
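# Worked example of the recursive search (hypothetical snapshot tree, not part
# of the original module): for a tree base -> [daily -> [pre-patch]], searching
# for 'pre-patch' descends through each childSnapshotList and returns a
# one-element list, which deploy_vm() then validates with len(snapshot) != 1.
# >>> self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList, 'pre-patch')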
def reconfigure_vm(self):
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=self.current_vm_obj)
self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
self.configure_hardware_params(vm_obj=self.current_vm_obj)
self.configure_disks(vm_obj=self.current_vm_obj)
self.configure_network(vm_obj=self.current_vm_obj)
self.configure_cdrom(vm_obj=self.current_vm_obj)
self.customize_customvalues(vm_obj=self.current_vm_obj, config_spec=self.configspec)
self.configure_resource_alloc_info(vm_obj=self.current_vm_obj)
self.configure_vapp_properties(vm_obj=self.current_vm_obj)
if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
self.configspec.annotation = str(self.params['annotation'])
self.change_detected = True
change_applied = False
relospec = vim.vm.RelocateSpec()
if self.params['resource_pool']:
relospec.pool = self.get_resource_pool()
if relospec.pool != self.current_vm_obj.resourcePool:
task = self.current_vm_obj.RelocateVM_Task(spec=relospec)
self.wait_for_task(task)
change_applied = True
# Only send a VMware task if we see a modification
if self.change_detected:
task = None
try:
task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
" product versioning restrictions: %s" % to_native(e.msg))
self.wait_for_task(task)
change_applied = True
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
return {'changed': change_applied, 'failed': True, 'msg': task.info.error.msg}
# Rename VM
if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
task = self.current_vm_obj.Rename_Task(self.params['name'])
self.wait_for_task(task)
change_applied = True
if task.info.state == 'error':
return {'changed': change_applied, 'failed': True, 'msg': task.info.error.msg}
# Mark VM as Template
if self.params['is_template'] and not self.current_vm_obj.config.template:
try:
self.current_vm_obj.MarkAsTemplate()
except vmodl.fault.NotSupported as e:
self.module.fail_json(msg="Failed to mark virtual machine [%s] "
"as template: %s" % (self.params['name'], e.msg))
change_applied = True
# Mark Template as VM
elif not self.params['is_template'] and self.current_vm_obj.config.template:
resource_pool = self.get_resource_pool()
kwargs = dict(pool=resource_pool)
if self.params.get('esxi_hostname', None):
host_system_obj = self.select_host()
kwargs.update(host=host_system_obj)
try:
self.current_vm_obj.MarkAsVirtualMachine(**kwargs)
except vim.fault.InvalidState as invalid_state:
self.module.fail_json(msg="Virtual machine is not marked"
" as template : %s" % to_native(invalid_state.msg))
except vim.fault.InvalidDatastore as invalid_ds:
self.module.fail_json(msg="Converting template to virtual machine"
" operation cannot be performed on the"
" target datastores: %s" % to_native(invalid_ds.msg))
except vim.fault.CannotAccessVmComponent as cannot_access:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" as operation unable access virtual machine"
" component: %s" % to_native(cannot_access.msg))
except vmodl.fault.InvalidArgument as invalid_argument:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to : %s" % to_native(invalid_argument.msg))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to generic error : %s" % to_native(generic_exc))
# Automatically update VMware UUID when converting template to VM.
# This avoids an interactive prompt during VM startup.
uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"]
if not uuid_action:
uuid_action_opt = vim.option.OptionValue()
uuid_action_opt.key = "uuid.action"
uuid_action_opt.value = "create"
self.configspec.extraConfig.append(uuid_action_opt)
self.change_detected = True
change_applied = True
vm_facts = self.gather_facts(self.current_vm_obj)
return {'changed': change_applied, 'failed': False, 'instance': vm_facts}
@staticmethod
def wait_for_task(task):
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
# https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
while task.info.state not in ['error', 'success']:
time.sleep(1)
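# Note: this is a plain one-second poll on task.info.state; callers such as
# deploy_vm() inspect task.info.error afterwards. A hedged standalone sketch
# (hypothetical `vm` object, not part of the original module):
# >>> task = vm.PowerOffVM_Task()
# >>> PyVmomiHelper.wait_for_task(task)
# >>> task.info.state  # 'success' or 'error'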
def wait_for_vm_ip(self, vm, poll=100, sleep=5):
ips = None
facts = {}
thispoll = 0
while not ips and thispoll <= poll:
newvm = self.get_vm()
facts = self.gather_facts(newvm)
if facts['ipv4'] or facts['ipv6']:
ips = True
else:
time.sleep(sleep)
thispoll += 1
return facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
state=dict(type='str', default='present',
choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']),
template=dict(type='str', aliases=['template_src']),
is_template=dict(type='bool', default=False),
annotation=dict(type='str', aliases=['notes']),
customvalues=dict(type='list', default=[]),
name=dict(type='str'),
name_match=dict(type='str', choices=['first', 'last'], default='first'),
uuid=dict(type='str'),
folder=dict(type='str'),
guest_id=dict(type='str'),
disk=dict(type='list', default=[]),
cdrom=dict(type='dict', default={}),
hardware=dict(type='dict', default={}),
force=dict(type='bool', default=False),
datacenter=dict(type='str', default='ha-datacenter'),
esxi_hostname=dict(type='str'),
cluster=dict(type='str'),
wait_for_ip_address=dict(type='bool', default=False),
state_change_timeout=dict(type='int', default=0),
snapshot_src=dict(type='str'),
linked_clone=dict(type='bool', default=False),
networks=dict(type='list', default=[]),
resource_pool=dict(type='str'),
customization=dict(type='dict', default={}, no_log=True),
customization_spec=dict(type='str', default=None),
vapp_properties=dict(type='list', default=[]),
datastore=dict(type='str'),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['cluster', 'esxi_hostname'],
],
required_one_of=[
['name', 'uuid'],
],
)
result = {'failed': False, 'changed': False}
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
# VM already exists
if vm:
if module.params['state'] == 'absent':
# destroy it
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='remove_vm',
)
module.exit_json(**result)
if module.params['force']:
# has to be poweredoff first
set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force'])
result = pyv.remove_vm(vm)
elif module.params['state'] == 'present':
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
desired_operation='reconfigure_vm',
)
module.exit_json(**result)
result = pyv.reconfigure_vm()
elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']:
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='set_vm_power_state',
)
module.exit_json(**result)
# set powerstate
tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
if tmp_result['changed']:
result["changed"] = True
if module.params['state'] in ['poweredon', 'restarted', 'rebootguest'] and module.params['wait_for_ip_address']:
wait_result = wait_for_vm_ip(pyv.content, vm)
if not wait_result:
module.fail_json(msg='Waiting for IP address timed out')
tmp_result['instance'] = wait_result
if not tmp_result["failed"]:
result["failed"] = False
result['instance'] = tmp_result['instance']
else:
# This should not happen
raise AssertionError()
# VM doesn't exist
else:
if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']:
if module.check_mode:
result.update(
changed=True,
desired_operation='deploy_vm',
)
module.exit_json(**result)
result = pyv.deploy_vm()
if result['failed']:
module.fail_json(msg='Failed to create a virtual machine : %s' % result['msg'])
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
|
uxebu/tddbin-backend
|
refs/heads/master
|
src/core/admin.py
|
1
|
from django.contrib import admin
from models import Session
from models import Spec
class SessionAdmin(admin.ModelAdmin):
list_display = ['name', 'user', 'started_at']
class SpecAdmin(admin.ModelAdmin):
list_display = ['code', 'session', 'author', 'saved_at']
admin.site.register(Session, SessionAdmin)
admin.site.register(Spec, SpecAdmin)
|
Softmotions/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/rendering/core.py
|
208
|
"""
This module is intended to provide a pluggable way to add assertions about
the rendered content of XBlocks.
For each view on the XBlock, this module defines a @singledispatch function
that can be used to test the contents of the rendered html.
The functions are of the form:
@singledispatch
def assert_student_view_valid_html(block, html):
'''
block: The block that rendered the HTML
html: An lxml.html parse of the HTML for this block
'''
...
assert foo
...
for child in children:
assert_xblock_html(child, child_html)
@singledispatch
def assert_student_view_invalid_html(block, html):
'''
block: The block that rendered the HTML
html: A string of unparsable html
'''
...
assert foo
...
for child in children:
assert_xblock_html(child, child_html)
...
A further extension would be to provide a companion set of functions that
test the resources that are provided to the Fragment.
"""
import lxml.html
import lxml.etree
from singledispatch import singledispatch
@singledispatch
def assert_student_view_valid_html(block, html):
"""
Asserts that the html generated by the `student_view` view is correct for
the supplied block
:param block: The :class:`XBlock` that generated the html
:param html: The generated html as parsed by lxml.html
"""
pass
@singledispatch
def assert_studio_view_valid_html(block, html):
"""
Asserts that the html generated by the `studio_view` view is correct for
the supplied block
:param block: The :class:`XBlock` that generated the html
:param html: The generated html as parsed by lxml.html
"""
pass
@singledispatch
def assert_student_view_invalid_html(block, html):
"""
Asserts that the html generated by the `student_view` view is correct for
the supplied block, given that html wasn't parsable
:param block: The :class:`XBlock` that generated the html
:param html: A string, not parseable as html
"""
assert False, "student_view should produce valid html"
@singledispatch
def assert_studio_view_invalid_html(block, html):
"""
Asserts that the html generated by the `studio_view` view is correct for
the supplied block
:param block: The :class:`XBlock` that generated the html
:param html: A string, not parseable as html
"""
assert False, "studio_view should produce valid html"
def assert_student_view(block, fragment):
"""
Helper function to assert that the `fragment` is valid output for
the specified `block`'s `student_view`
"""
try:
html = lxml.html.fragment_fromstring(fragment.content)
except lxml.etree.ParserError:
assert_student_view_invalid_html(block, fragment.content)
else:
assert_student_view_valid_html(block, html)
def assert_studio_view(block, fragment):
"""
Helper function to assert that the `fragment` is valid output for
the specified `block`'s `studio_view`
"""
try:
html = lxml.html.fragment_fromstring(fragment.content)
except lxml.etree.ParserError:
assert_studio_view_invalid_html(block, fragment.content)
else:
assert_studio_view_valid_html(block, html)
|
alexmerser/ops
|
refs/heads/master
|
buedafab/deploy/utils.py
|
4
|
"""General deployment utilities (not Fabric commands)."""
from fabric.api import cd, require, local, env
from buedafab import deploy
def make_archive():
"""Create a compressed archive of the project's repository, complete with
submodules.
TODO We used to use git-archive-all to archive the submodules as well,
since 'git archive' doesn't touch them. We reverted back at some point and
stopped using archives in our deployment strategy, so this may not work with
submodules.
"""
require('release')
require('scratch_path')
with cd(env.scratch_path):
deploy.release.make_pretty_release()
local('git checkout %(release)s' % env, capture=True)
local('git submodule update --init', capture=True)
local('git archive --prefix=%(unit)s/ --format tar '
'%(release)s | gzip > %(scratch_path)s/%(archive)s' % env,
capture=True)
def run_extra_deploy_tasks(deployed=False):
"""Run arbitrary functions listed in env.package_installation_scripts.
Each function must accept a single parameter (or just kwargs) that
indicates whether the app was deployed or already existed.
"""
require('release_path')
if not env.extra_deploy_tasks:
return
with cd(env.release_path):
for task in env.extra_deploy_tasks:
task(deployed=deployed)
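# A hedged example of an extra deploy task (illustrative only, not part of
# buedafab): each callable in env.extra_deploy_tasks is invoked with
# deployed=... from inside the cd(env.release_path) block above.
#
# def restart_workers(deployed=False, **kwargs):
#     """Restart background workers only when the app was freshly deployed."""
#     if deployed:
#         local('supervisorctl restart workers')
#
# env.extra_deploy_tasks = [restart_workers]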
|
guijomatos/SickRage
|
refs/heads/master
|
lib/sqlalchemy/testing/util.py
|
79
|
# testing/util.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ..util import jython, pypy, defaultdict, decorator, py2k
import decimal
import gc
import time
import random
import sys
import types
if jython:
def jython_gc_collect(*args):
"""aggressive gc.collect for tests."""
gc.collect()
time.sleep(0.1)
gc.collect()
gc.collect()
return 0
# "lazy" gc, for VM's that don't GC on refcount == 0
gc_collect = lazy_gc = jython_gc_collect
elif pypy:
def pypy_gc_collect(*args):
gc.collect()
gc.collect()
gc_collect = lazy_gc = pypy_gc_collect
else:
# assume CPython - straight gc.collect, lazy_gc() is a pass
gc_collect = gc.collect
def lazy_gc():
pass
def picklers():
picklers = set()
if py2k:
try:
import cPickle
picklers.add(cPickle)
except ImportError:
pass
import pickle
picklers.add(pickle)
# yes, this thing needs this much testing
for pickle_ in picklers:
for protocol in -1, 0, 1, 2:
yield pickle_.loads, lambda d: pickle_.dumps(d, protocol)
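# Hedged usage sketch (not part of the original module): each yielded pair is
# a (loads, dumps) couple bound to one pickle module and protocol, so a
# round-trip check looks like:
# >>> for loads, dumps in picklers():
# ...     assert loads(dumps({'a': 1})) == {'a': 1}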
def round_decimal(value, prec):
if isinstance(value, float):
return round(value, prec)
# can also use shift() here but that is 2.6 only
return (value * decimal.Decimal("1" + "0" * prec)
).to_integral(decimal.ROUND_FLOOR) / \
pow(10, prec)
class RandomSet(set):
def __iter__(self):
l = list(set.__iter__(self))
random.shuffle(l)
return iter(l)
def pop(self):
index = random.randint(0, len(self) - 1)
item = list(set.__iter__(self))[index]
self.remove(item)
return item
def union(self, other):
return RandomSet(set.union(self, other))
def difference(self, other):
return RandomSet(set.difference(self, other))
def intersection(self, other):
return RandomSet(set.intersection(self, other))
def copy(self):
return RandomSet(self)
def conforms_partial_ordering(tuples, sorted_elements):
"""True if the given sorting conforms to the given partial ordering."""
deps = defaultdict(set)
for parent, child in tuples:
deps[parent].add(child)
for i, node in enumerate(sorted_elements):
for n in sorted_elements[i:]:
if node in deps[n]:
return False
else:
return True
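# Worked example (illustrative, not from the original module): tuples are
# (parent, child) pairs, so a child may never precede one of its parents.
# >>> conforms_partial_ordering([('a', 'b'), ('b', 'c')], ['a', 'b', 'c'])
# True
# >>> conforms_partial_ordering([('a', 'b'), ('b', 'c')], ['c', 'a', 'b'])
# False   # 'c' appears before its parent 'b'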
def all_partial_orderings(tuples, elements):
edges = defaultdict(set)
for parent, child in tuples:
edges[child].add(parent)
def _all_orderings(elements):
if len(elements) == 1:
yield list(elements)
else:
for elem in elements:
subset = set(elements).difference([elem])
if not subset.intersection(edges[elem]):
for sub_ordering in _all_orderings(subset):
yield [elem] + sub_ordering
return iter(_all_orderings(elements))
def function_named(fn, name):
"""Return a function with a given __name__.
Will assign to __name__ and return the original function if possible on
the Python implementation, otherwise a new function will be constructed.
This function should be phased out as much as possible
in favor of @decorator. Tests that "generate" many named tests
should be modernized.
"""
try:
fn.__name__ = name
except TypeError:
fn = types.FunctionType(fn.__code__, fn.__globals__, name,
fn.__defaults__, fn.__closure__)
return fn
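# Hedged example (not part of the original module): plain functions get
# __name__ assigned in place; objects that reject the assignment are rebuilt
# from their code object via types.FunctionType.
# >>> def probe(): pass
# >>> function_named(probe, 'test_probe_variant').__name__
# 'test_probe_variant'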
def run_as_contextmanager(ctx, fn, *arg, **kw):
"""Run the given function under the given contextmanager,
simulating the behavior of 'with' to support older
Python versions.
"""
obj = ctx.__enter__()
try:
result = fn(obj, *arg, **kw)
ctx.__exit__(None, None, None)
return result
except:
exc_info = sys.exc_info()
raise_ = ctx.__exit__(*exc_info)
if raise_ is None:
raise
else:
return raise_
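# Hedged usage sketch (not part of the original module), roughly equivalent
# to `with open('setup.py') as f: first = f.readline()`:
# >>> first = run_as_contextmanager(open('setup.py'), lambda f: f.readline())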
def rowset(results):
"""Converts the results of sql execution into a plain set of column tuples.
Useful for asserting the results of an unordered query.
"""
return set([tuple(row) for row in results])
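# Hedged example (not from the original module): order-insensitive comparison
# of result rows.
# >>> rowset([(1, 'a'), (2, 'b')]) == rowset([(2, 'b'), (1, 'a')])
# True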
def fail(msg):
assert False, msg
@decorator
def provide_metadata(fn, *args, **kw):
"""Provide bound MetaData for a single test, dropping afterwards."""
from . import config
from sqlalchemy import schema
metadata = schema.MetaData(config.db)
self = args[0]
prev_meta = getattr(self, 'metadata', None)
self.metadata = metadata
try:
return fn(*args, **kw)
finally:
metadata.drop_all()
self.metadata = prev_meta
class adict(dict):
"""Dict keys available as attributes. Shadows."""
def __getattribute__(self, key):
try:
return self[key]
except KeyError:
return dict.__getattribute__(self, key)
def get_all(self, *keys):
return tuple([self[key] for key in keys])
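# Hedged usage sketch (not part of the original module). "Shadows" means that
# a key takes precedence over the real dict attribute of the same name:
# >>> d = adict(keys='value')
# >>> d.keys
# 'value'
# >>> d.get_all('keys')
# ('value',)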
|
akozumpl/anaconda
|
refs/heads/master
|
pyanaconda/ui/tui/tools/run-text-spoke.py
|
9
|
#!/usr/bin/python
import sys, os
import os.path
# Check command line arguments
if len(sys.argv) < 2:
print "Usage: %s <spoke module name> [<spoke widget class>]" % sys.argv[0]
sys.exit(1)
# Logging always needs to be set up first thing, or there'll be tracebacks.
from pyanaconda import anaconda_log
anaconda_log.init()
from pyanaconda.installclass import DefaultInstall
from blivet import Blivet
from pyanaconda.threads import initThreading
from pyanaconda.packaging.yumpayload import YumPayload
from pykickstart.version import makeVersion
from pyanaconda.ui.tui.simpleline import App
from pyanaconda.ui.tui import YesNoDialog
# Don't worry about fcoe, iscsi, dasd, any of that crud.
from pyanaconda.flags import flags
flags.imageInstall = True
flags.testing = True
initThreading()
# Figure out the part we are about to show: hub/spoke?
# And get the name of the module which represents it
if os.path.basename(sys.argv[0]) == "run-text-spoke.py":
spokeModuleName = "pyanaconda.ui.tui.spokes.%s" % sys.argv[1]
from pyanaconda.ui.common import Spoke
spokeBaseClass = Spoke
spokeText = "spoke"
SpokeText = "Spoke"
elif os.path.basename(sys.argv[0]) == "run-text-hub.py":
spokeModuleName = "pyanaconda.ui.tui.hubs.%s" % sys.argv[1]
from pyanaconda.ui.common import Hub
spokeBaseClass = Hub
spokeText = "hub"
SpokeText = "Hub"
else:
print "You have to run this command as run-spoke.py or run-hub.py."
sys.exit(1)
# Set default spoke class
spokeClass = None
spokeClassName = None
# Load spoke specified on the command line
# If the spoke module was specified, but the spoke class was not,
# try to find it using class hierarchy
try:
spokeClassName = sys.argv[2]
__import__(spokeModuleName, fromlist = [spokeClassName])
spokeModule = sys.modules[spokeModuleName]
except IndexError:
__import__(spokeModuleName)
spokeModule = sys.modules[spokeModuleName]
for k,v in vars(spokeModule).iteritems():
try:
print k,v
if issubclass(v, spokeBaseClass) and v != spokeBaseClass:
spokeClassName = k
spokeClass = v
except TypeError:
pass
if not spokeClass:
try:
spokeClass = getattr(spokeModule, spokeClassName)
# getattr raises AttributeError for a missing class (or TypeError when no
# class name was found at all), not KeyError
except (AttributeError, TypeError):
print "%s %s could not be found in %s" % (SpokeText, spokeClassName, spokeModuleName)
sys.exit(1)
print "Running %s %s from %s" % (spokeText, spokeClass, spokeModule)
ksdata = makeVersion()
storage = Blivet(ksdata=ksdata)
storage.reset()
instclass = DefaultInstall()
app = App("TEST HARNESS", yes_or_no_question = YesNoDialog)
payload = YumPayload(ksdata)
payload.setup(storage)
payload.install_log = sys.stdout
spoke = spokeClass(app, ksdata, storage, payload, instclass)
if not spoke.showable:
print "This %s is not showable, but I'll continue anyway." % spokeText
app.schedule_screen(spoke)
app.run()
if hasattr(spoke, "status"):
print "%s status:\n%s\n" % (SpokeText, spoke.status)
if hasattr(spoke, "completed"):
print "%s completed:\n%s\n" % (SpokeText, spoke.completed)
print "%s kickstart fragment:\n%s" % (SpokeText, ksdata)
|
TomAugspurger/pandas
|
refs/heads/master
|
pandas/tests/io/parser/test_common.py
|
1
|
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), chunksize=1)
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for 'iteration'"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, **kwargs)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
dict(index_col=0, names=["index", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
tm.assert_frame_equal(result, expected)
@tm.network
def test_url(all_parsers, csv_dir_path):
# TODO: FTP testing
parser = all_parsers
kwargs = dict(sep="\t")
url = (
"https://raw.github.com/pandas-dev/pandas/master/"
"pandas/tests/io/parser/data/salaries.csv"
)
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = dict(sep="\t")
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
tm.assert_frame_equal(url_result, local_result)
except URLError:
# Fails on some systems.
pytest.skip("Failing on: " + " ".join(platform.uname()))
def test_path_path_lib(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_local_path(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0)
)
tm.assert_frame_equal(df, result)
def test_nonexistent_path(all_parsers):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
path = f"{tm.rands(10)}.csv"
msg = r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
parser.read_csv(path)
assert path == e.value.filename
@td.skip_if_windows # os.chmod does not work in windows
def test_no_permission(all_parsers):
# GH 23784
parser = all_parsers
msg = r"\[Errno 13\]"
with tm.ensure_clean() as path:
os.chmod(path, 0) # make file unreadable
# verify that this process cannot open the file (not running as sudo)
try:
with open(path):
pass
pytest.skip("Running as sudo.")
except PermissionError:
pass
with pytest.raises(PermissionError, match=msg) as e:
parser.read_csv(path)
assert path == e.value.filename
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
def test_int64_min_issues(all_parsers):
# see gh-2599
parser = all_parsers
data = "A,B\n0,0\n0,"
result = parser.read_csv(StringIO(data))
expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(all_parsers):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"Numbers": [
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194,
]
}
)
tm.assert_frame_equal(result, expected)
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
with tm.assert_produces_warning(None):
result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if parser.engine == "c" and parser.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = parser.read_csv(StringIO(data))
assert df.a.dtype == np.object
@pytest.mark.parametrize("sep", [" ", r"\s+"])
def test_integer_overflow_bug(all_parsers, sep):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, sep=sep)
expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
tm.assert_frame_equal(result, expected)
def test_catch_too_many_names(all_parsers):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
parser = all_parsers
msg = (
"Too many columns specified: expected 4 and found 3"
if parser.engine == "c"
else "Number of passed names did not match "
"number of header fields in the file"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(all_parsers):
# see gh-10022
parser = all_parsers
data = "\n hello\nworld\n"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([" hello", "world"])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(all_parsers):
# see gh-10184
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(columns=["y"], index=Index([], name="x"))
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index(all_parsers):
# see gh-10467
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=["x", "y"])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
)
tm.assert_frame_equal(result, expected)
def test_empty_with_reversed_multi_index(all_parsers):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
)
tm.assert_frame_equal(result, expected)
def test_float_parser(all_parsers):
# see gh-9565
parser = all_parsers
data = "45e-1,4.5,45.,inf,-inf"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(",")]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(all_parsers):
# see gh-12215
df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
data = df.to_csv(index=False)
parser = all_parsers
for precision in parser.float_precision_choices:
df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
tm.assert_frame_equal(df_roundtrip, df)
@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
def test_int64_overflow(all_parsers, conv):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
parser = all_parsers
if conv is None:
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
"00013007854817840016671868",
"00013007854817840016749251",
"00013007854817840016754630",
"00013007854817840016781876",
"00013007854817840017028824",
"00013007854817840017963235",
"00013007854817840018860166",
],
columns=["ID"],
)
tm.assert_frame_equal(result, expected)
else:
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
msg = (
"(Python int too large to convert to C long)|"
"(long too big to convert)|"
"(int too big to convert)"
)
with pytest.raises(OverflowError, match=msg):
parser.read_csv(StringIO(data), converters={"ID": conv})
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
)
def test_int64_uint64_range(all_parsers, val):
    # These numbers fall right inside the int64-uint64
    # range, so they should be parsed as their integer values.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
)
def test_outside_int64_uint64_range(all_parsers, val):
# These numbers fall just outside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([str(val)])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]])
def test_numeric_range_too_wide(all_parsers, exp_data):
# No numerical dtype can hold both negative and uint64
# values, so they should be cast as string.
parser = all_parsers
data = "\n".join(exp_data)
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("iterator", [True, False])
def test_empty_with_nrows_chunksize(all_parsers, iterator):
# see gh-9535
parser = all_parsers
expected = DataFrame(columns=["foo", "bar"])
nrows = 10
data = StringIO("foo,bar\n")
if iterator:
result = next(iter(parser.read_csv(data, chunksize=nrows)))
else:
result = parser.read_csv(data, nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected,msg",
[
# gh-10728: WHITESPACE_LINE
(
"a,b,c\n4,5,6\n ",
dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# gh-10548: EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL_NOP
(
"a,b,c\n4,5,6\n\r",
dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_COMMENT
(
"a,b,c\n4,5,6#comment",
dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# SKIP_LINE
(
"a,b,c\n4,5,6\nskipme",
dict(skiprows=[2]),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
dict(comment="#", skip_blank_lines=False),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# IN_FIELD
(
"a,b,c\n4,5,6\n ",
dict(skip_blank_lines=False),
DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL
(
"a,b,c\n4,5,6\n\r",
dict(skip_blank_lines=False),
DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
None,
),
# ESCAPED_CHAR
(
"a,b,c\n4,5,6\n\\",
dict(escapechar="\\"),
None,
"(EOF following escape character)|(unexpected end of data)",
),
# ESCAPE_IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"\\',
dict(escapechar="\\"),
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
# IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"',
dict(escapechar="\\"),
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
],
ids=[
"whitespace-line",
"eat-line-comment",
"eat-crnl-nop",
"eat-comment",
"skip-line",
"eat-line-comment",
"in-field",
"eat-crnl",
"escaped-char",
"escape-in-quoted-field",
"in-quoted-field",
],
)
def test_eof_states(all_parsers, data, kwargs, expected, msg):
# see gh-10728, gh-10548
parser = all_parsers
if expected is None:
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
        # First, check that the parser raises the correct error when it is
        # given no columns to parse, with or without usecols.
("", dict(), None),
("", dict(usecols=["X"]), None),
(
",,",
dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
dict(
header=None,
delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True,
),
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
dict(
delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True
),
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_verbose_read(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
parser.read_csv(StringIO(data), verbose=True)
captured = capsys.readouterr()
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 3 NA values in column a\n"
def test_verbose_read2(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
parser.read_csv(StringIO(data), verbose=True, index_col=0)
captured = capsys.readouterr()
# Engines are verbose in different ways.
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 1 NA values in column a\n"
def test_iteration_open_handle(all_parsers):
parser = all_parsers
kwargs = dict(squeeze=True, header=None)
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
with open(path, "r") as f:
for line in f:
if "CCC" in line:
break
result = parser.read_csv(f, **kwargs)
expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,thousands,decimal",
[
(
"""A|B|C
1|2,334.01|5
10|13|10.
""",
",",
".",
),
(
"""A|B|C
1|2.334,01|5
10|13|10,
""",
".",
",",
),
],
)
def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
parser = all_parsers
expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
result = parser.read_csv(
StringIO(data), sep="|", thousands=thousands, decimal=decimal
)
tm.assert_frame_equal(result, expected)
def test_euro_decimal_format(all_parsers):
parser = all_parsers
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
result = parser.read_csv(StringIO(data), sep=";", decimal=",")
expected = DataFrame(
[
[1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
[2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
[3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
],
columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_inf_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
expected = DataFrame(
{"A": [float("inf"), float("-inf")] * 5},
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_infinity_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,Infinity
b,-Infinity
c,+Infinity
"""
expected = DataFrame(
{"A": [float("infinity"), float("-infinity"), float("+infinity")]},
index=["a", "b", "c"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
def test_raise_on_no_columns(all_parsers, nrows):
parser = all_parsers
data = "\n" * nrows
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data))
def test_memory_map(all_parsers, csv_dir_path):
mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
parser = all_parsers
expected = DataFrame(
{"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
)
result = parser.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(result, expected)
def test_null_byte_char(all_parsers):
# see gh-2741
data = "\x00,foo"
names = ["a", "b"]
parser = all_parsers
if parser.engine == "c":
expected = DataFrame([[np.nan, "foo"]], columns=names)
out = parser.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), names=names)
def test_temporary_file(all_parsers):
# see gh-13398
parser = all_parsers
data = "0 0"
with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:
new_file.write(data)
new_file.flush()
new_file.seek(0)
result = parser.read_csv(new_file, sep=r"\s+", header=None)
expected = DataFrame([[0, 0]])
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte(all_parsers):
# see gh-5500
parser = all_parsers
data = "a,b\n1\x1a,2"
expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte_to_file(all_parsers):
# see gh-16559
parser = all_parsers
data = b'c1,c2\r\n"test \x1a test", test\r\n'
expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
path = f"__{tm.rands(10)}__.csv"
with tm.ensure_clean(path) as path:
with open(path, "wb") as f:
f.write(data)
result = parser.read_csv(path)
tm.assert_frame_equal(result, expected)
def test_sub_character(all_parsers, csv_dir_path):
# see gh-16893
filename = os.path.join(csv_dir_path, "sub_char.csv")
expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
parser = all_parsers
result = parser.read_csv(filename)
tm.assert_frame_equal(result, expected)
def test_file_handle_string_io(all_parsers):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
data = "a,b\n1,2"
fh = StringIO(data)
parser.read_csv(fh)
assert not fh.closed
def test_file_handles_with_open(all_parsers, csv1):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
for mode in ["r", "rb"]:
with open(csv1, mode) as f:
parser.read_csv(f)
assert not f.closed
def test_invalid_file_buffer_class(all_parsers):
# see gh-15337
class InvalidBuffer:
pass
parser = all_parsers
msg = "Invalid file path or buffer object type"
with pytest.raises(ValueError, match=msg):
parser.read_csv(InvalidBuffer())
def test_invalid_file_buffer_mock(all_parsers):
# see gh-15337
parser = all_parsers
msg = "Invalid file path or buffer object type"
class Foo:
pass
with pytest.raises(ValueError, match=msg):
parser.read_csv(Foo())
def test_valid_file_buffer_seems_invalid(all_parsers):
# gh-16135: we want to ensure that "tell" and "seek"
# aren't actually being used when we call `read_csv`
#
# Thus, while the object may look "invalid" (these
# methods are attributes of the `StringIO` class),
# it is still a valid file-object for our purposes.
class NoSeekTellBuffer(StringIO):
def tell(self):
raise AttributeError("No tell method")
def seek(self, pos, whence=0):
raise AttributeError("No seek method")
data = "a\n1"
parser = all_parsers
expected = DataFrame({"a": [1]})
result = parser.read_csv(NoSeekTellBuffer(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "kwargs",
    [
        dict(),  # Default is True.
        dict(error_bad_lines=True),  # Explicitly pass in.
    ],
)
@pytest.mark.parametrize(
"warn_kwargs", [dict(), dict(warn_bad_lines=True), dict(warn_bad_lines=False)]
)
def test_error_bad_lines(all_parsers, kwargs, warn_kwargs):
# see gh-15925
parser = all_parsers
kwargs.update(**warn_kwargs)
data = "a\n1\n1,2,3\n4\n5,6,7"
msg = "Expected 1 fields in line 3, saw 3"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
def test_warn_bad_lines(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert "Skipping line 3" in captured.err
assert "Skipping line 5" in captured.err
def test_suppress_error_output(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(
StringIO(data), error_bad_lines=False, warn_bad_lines=False
)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert captured.err == ""
@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
def test_filename_with_special_chars(all_parsers, filename):
# see gh-15086.
parser = all_parsers
df = DataFrame({"a": [1, 2, 3]})
with tm.ensure_clean(filename) as path:
df.to_csv(path, index=False)
result = parser.read_csv(path)
tm.assert_frame_equal(result, df)
def test_read_csv_memory_growth_chunksize(all_parsers):
# see gh-24805
#
# Let's just make sure that we don't crash
# as we iteratively process all chunks.
parser = all_parsers
with tm.ensure_clean() as path:
with open(path, "w") as f:
for i in range(1000):
f.write(str(i) + "\n")
result = parser.read_csv(path, chunksize=20)
for _ in result:
pass
def test_read_csv_raises_on_header_prefix(all_parsers):
# gh-27394
parser = all_parsers
msg = "Argument prefix must be None if argument header is not None"
s = StringIO("0,1\n2,3")
with pytest.raises(ValueError, match=msg):
parser.read_csv(s, header=0, prefix="_X")
def test_read_table_equivalency_to_read_csv(all_parsers):
# see gh-21948
# As of 0.25.0, read_table is undeprecated
parser = all_parsers
data = "a\tb\n1\t2\n3\t4"
expected = parser.read_csv(StringIO(data), sep="\t")
result = parser.read_table(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_first_row_bom(all_parsers):
# see gh-26545
parser = all_parsers
    data = '''\ufeff"Head1"\t"Head2"\t"Head3"'''
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
def test_integer_precision(all_parsers):
    # see gh-7072
s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765
5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389"""
parser = all_parsers
result = parser.read_csv(StringIO(s), header=None)[4]
expected = Series([4321583677327450765, 4321113141090630389], name=4)
tm.assert_series_equal(result, expected)
def test_file_descriptor_leak(all_parsers):
# GH 31488
parser = all_parsers
with tm.ensure_clean() as path:
def test():
with pytest.raises(EmptyDataError, match="No columns to parse from file"):
parser.read_csv(path)
td.check_file_leaks(test)()
@pytest.mark.parametrize("nrows", range(1, 6))
def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
# GH 28071
ref = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
columns=list("ab"),
)
csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
parser = all_parsers
df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
tm.assert_frame_equal(df, ref[:nrows])
def test_no_header_two_extra_columns(all_parsers):
# GH 26218
column_names = ["one", "two", "three"]
ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
stream = StringIO("foo,bar,baz,bam,blah")
parser = all_parsers
df = parser.read_csv(stream, header=None, names=column_names, index_col=False)
tm.assert_frame_equal(df, ref)
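if __name__ == "__main__":
    # Editor's sketch, not part of the pandas test suite: the int64/uint64
    # boundary behaviour exercised above, run directly against read_csv.
    import pandas as pd
    from io import StringIO as _StringIO

    big = str(2 ** 64)  # one past the uint64 maximum
    out = pd.read_csv(_StringIO(big), header=None)
    assert out.iloc[0, 0] == big  # too wide for any integer dtype: kept as str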
|
mjfarmer/scada_py
|
refs/heads/master
|
env/lib/python2.7/site-packages/pycparser/plyparser.py
|
79
|
#-----------------------------------------------------------------
# plyparser.py
#
# PLYParser class and other utilities for simplifying programming
# parsers with PLY
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#-----------------------------------------------------------------
class Coord(object):
""" Coordinates of a syntactic element. Consists of:
- File name
- Line number
- (optional) column number, for the Lexer
"""
__slots__ = ('file', 'line', 'column', '__weakref__')
def __init__(self, file, line, column=None):
self.file = file
self.line = line
self.column = column
    def __str__(self):
        result = "%s:%s" % (self.file, self.line)
        if self.column: result += ":%s" % self.column
        return result
class ParseError(Exception): pass
class PLYParser(object):
def _create_opt_rule(self, rulename):
""" Given a rule name, creates an optional ply.yacc rule
for it. The name of the optional rule is
<rulename>_opt
"""
optname = rulename + '_opt'
def optrule(self, p):
p[0] = p[1]
optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
optrule.__name__ = 'p_%s' % optname
setattr(self.__class__, optrule.__name__, optrule)
def _coord(self, lineno, column=None):
return Coord(
file=self.clex.filename,
line=lineno,
column=column)
def _parse_error(self, msg, coord):
raise ParseError("%s: %s" % (coord, msg))
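if __name__ == "__main__":
    # Editor's sketch, not part of pycparser: _create_opt_rule synthesizes a
    # p_<rulename>_opt method whose docstring is the ply.yacc grammar
    # fragment that makes the rule optional.
    class _DemoParser(PLYParser):
        pass

    _DemoParser()._create_opt_rule('declaration')
    assert _DemoParser.p_declaration_opt.__doc__ == (
        'declaration_opt : empty\n| declaration')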
|
nikolay-fedotov/tempest
|
refs/heads/master
|
tempest/openstack/common/local.py
|
378
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
def __getattribute__(self, attr):
rval = super(WeakLocal, self).__getattribute__(attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
# the weak reference and return the inner value here.
rval = rval()
return rval
def __setattr__(self, attr, value):
value = weakref.ref(value)
return super(WeakLocal, self).__setattr__(attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
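if __name__ == "__main__":
    # Editor's sketch, not part of the original module: a value placed in the
    # weak store is dropped once the caller's own reference dies, while the
    # strong store keeps its value alive.
    import gc

    class _Payload(object):
        pass

    _w, _s = _Payload(), _Payload()
    weak_store.attr = _w
    strong_store.attr = _s
    assert weak_store.attr is _w
    del _w, _s
    gc.collect()
    assert weak_store.attr is None  # the weak reference has died
    assert isinstance(strong_store.attr, _Payload)  # the strong one has not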
|
hflynn/openmicroscopy
|
refs/heads/develop
|
components/tools/OmeroPy/src/omero/util/concurrency.py
|
4
|
#!/usr/bin/env python
#
# OMERO Concurrency Utilities
#
# Copyright 2009 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
import os
import sys
import time
import atexit
import logging
import threading
import omero.util
import exceptions
import logging.handlers
def get_event(name = "Unknown"):
"""
Returns a threading.Event instance which is registered to be
"set" (Event.set()) on system exit.
"""
event = AtExitEvent(name=name)
atexit.register(event.setAtExit)
return event
class AtExitEvent(threading._Event):
"""
threading.Event extension which provides an additional method
setAtExit() which sets "atexit" to true.
This class was introduced in 4.2.1 to work around issue #3260
in which logging from background threads produced error
messages.
"""
def __init__(self, verbose = None, name = "Unknown"):
super(AtExitEvent, self).__init__(verbose)
self.__name = name
self.__atexit = False
name = property(lambda self: self.__name)
atexit = property(lambda self: self.__atexit)
def setAtExit(self):
self.__atexit = True
super(AtExitEvent, self).set()
def __repr__(self):
return "%s (%s)" % (super(AtExitEvent, self).__repr__(), self.__name)
class Timer(threading._Timer):
"""Based on threading._Thread but allows for resetting the Timer.
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
# or
t.reset()
    After execution, the status of the run can be checked via the
    "completed" and the "exception" Event instances.
"""
def __init__(self, interval, function, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
threading._Timer.__init__(self, interval, function, args, kwargs)
self.log = logging.getLogger(omero.util.make_logname(self))
self.completed = threading.Event()
self.exception = threading.Event()
self._reset = threading.Event()
def reset(self):
self.log.debug("Reset called")
self._reset.set() # Set first, so that the loop will continue
self.finished.set() # Forces waiting thread to fall through
def run(self):
while True:
self.finished.wait(self.interval)
if self._reset.isSet():
self.finished.clear()
self._reset.clear()
self.log.debug("Resetting")
continue
if not self.finished.isSet():
try:
self.log.debug("Executing")
self.function(*self.args, **self.kwargs)
self.completed.set()
self.finished.set()
except:
self.exception.set()
self.finished.set()
raise
break
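if __name__ == "__main__":
    # Editor's sketch, not part of OMERO: setAtExit() both flags the event
    # and wakes any thread blocked in wait(), which is how background
    # loggers learn that the interpreter is shutting down.
    demo = get_event(name="demo")
    assert not demo.atexit
    demo.setAtExit()
    assert demo.atexit and demo.isSet()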
|
anupsabraham/store_tv_player
|
refs/heads/master
|
django_project/store_cms/settings/__init__.py
|
12133432
| |
sfu-fas/coursys
|
refs/heads/master
|
outreach/__init__.py
|
12133432
| |
muraliselva10/cloudkitty
|
refs/heads/master
|
cloudkitty/cli/__init__.py
|
12133432
| |
victims/victims-db-builder
|
refs/heads/master
|
victims_db_builder/library.py
|
1
|
import itertools
import logging
import re
import string
import configparser as ConfigParser
import urllib.request as urllib2
from decimal import getcontext
from distutils.version import LooseVersion

from bs4 import BeautifulSoup

from version import Version
class BaseLibrary(object):
def __init__(self, versionRanges):
# For soup/direct maven index:
self.versions = []
if not isinstance(versionRanges, str):
for vr in versionRanges:
self.versions.append(Version(vr))
else:
self.versions.append(Version(versionRanges))
self.versionRanges = versionRanges
class JavaLibrary(BaseLibrary):
def __init__(self, versionRange, groupId, artifactId):
getcontext().prec = 2
self.logger = logging.getLogger(__name__)
super(JavaLibrary, self).__init__(versionRange)
self.groupId = groupId
self.artifactId = artifactId
self.mavenVersions = set()
self.affectedMvnSeries = set()
self.configure()
self.findAllInSeries()
def configure(self):
config = ConfigParser.ConfigParser()
config.read('victims-db-builder.cfg')
repos = config.items('java_repos')
print("repos: %s" % repos)
for repo, url in repos:
try:
self.logger.debug('repo: %s' % repo)
self.indexBaseUrl = url
self.confirmVersions()
except Exception as err:
self.logger.warn(err)
                self.logger.warn('Processing of repo %s failed; skipping.' % repo)
continue
def confirmVersions(self):
coords = self.indexBaseUrl + self.groupId.replace('.', '/') + "/" + str(self.artifactId)
self.logger.debug("coords %s", coords)
        try:
            response = urllib2.urlopen(coords)
        except urllib2.URLError as e:
            # `response` is never bound when urlopen fails, so bail out on a
            # 404 (nothing published at these coordinates) and re-raise
            # anything else instead of falling through with an unbound name.
            if getattr(e, 'code', None) == 404:
                return
            raise
        self.findInMaven(response, coords)
def findInMaven(self, response, coords):
        # TODO cache page locally for redundancy
mavenPage = response.read()
soup = BeautifulSoup(mavenPage, 'html.parser')
links = soup.find_all('a')
for link in links:
txt = link.get_text().rstrip('/')
url = coords + "/" + str(txt) + "/" + str(self.artifactId) + "-" + str(txt) + ".jar"
self.mavenVersions.add((txt, url))
def findAllInSeries(self):
verList = []
regex = ['(,)(\\d+)(\\.)(\\d+)', '(,)(\\d+)']
for val in self.versionRanges:
# removing the boundary version if exists
normalized = None
boundary = None
for ind, value in enumerate(regex):
res = re.compile(value)
matched = res.search(val)
if matched is not None and ind == 0:
normalized = val.replace(
str(matched.group(1) + matched.group(2) + matched.group(3) + matched.group(4)), '')
tmp = str(matched.group(1) + matched.group(2) + matched.group(3) + matched.group(4))
boundary = tmp.replace(',', '')
break
if matched is not None and ind == 1:
normalized = val.replace(str(matched.group(1) + matched.group(2)), '')
tmp = str(matched.group(1) + matched.group(2))
boundary = tmp.replace(',', '')
break
else:
normalized = val
if '>=' in normalized:
verList.append(StructureHelper('>=', normalized.replace('>=', ''), boundary))
if '<=' in normalized:
verList.append(StructureHelper('<=', normalized.replace('<=', ''), boundary))
if '<' in normalized and '=' not in normalized:
verList.append(StructureHelper('<', normalized.replace('<', ''), boundary))
if '>' in normalized and '=' not in normalized:
verList.append(StructureHelper('>', normalized.replace('>', ''), boundary))
if '==' in normalized:
verList.append(StructureHelper('==', normalized.replace('==', ''), boundary))
equalsFound = set()
links = []
self.findEqualVersions(verList, 0, equalsFound, links)
finalVersionRanges = []
if len(links) != 0:
for each in links:
versionRange = []
for ea in each.links:
originalVerListValue = verList[ea]
versionRange.append(originalVerListValue.symbol + originalVerListValue.version)
versionRange.append(originalVerListValue.boundary)
versionRange.append(each.symbol + each.version)
finalVersionRanges.append(EqualBaseVersion(versionRange))
else:
for each in verList:
versionRange = []
versionRange.append(each.symbol + each.version)
versionRange.append(each.boundary)
finalVersionRanges.append(EqualBaseVersion(versionRange))
self.findAllArtifacts(finalVersionRanges)
    # Build the relationship between affected versions for the case where a
    # version lies between two others in the declared ranges.
def findEqualVersions(self, ver, inx, equalsList, links):
indx = inx
highIndex = len(ver) - 1
equalVer = ver[indx]
try:
if indx >= highIndex:
return equalsList
for index, var in enumerate(ver):
                if index <= highIndex and index == indx:
continue
if isinstance(var, StructureHelper) and isinstance(equalVer, StructureHelper):
# Striping the third precision to compare the base versions
if self.normalizeText(equalVer.version) == self.normalizeText(var.version):
if len(links) != 0:
for ix, value in enumerate(links):
if self.normalizeText(equalVer.version) == self.normalizeText(value.version):
if not any(eq == indx for eq in value.links):
structureObject = links[ix]
if isinstance(structureObject, StructureHelper):
structureObject.addToLinks(index)
elif ix == len(links) - 1:
self.addStructureToLinks(equalVer, index, links)
else:
continue
else:
self.addStructureToLinks(equalVer, index, links)
self.findEqualVersions(ver, indx + 1, equalsList, links)
except Exception as e:
self.logger.error("Error occurred while building affected versions relationship", str(e))
def addStructureToLinks(self, equalVer, index, links):
if equalVer.symbol == '>=':
c = StructureHelper('>=', equalVer.version, equalVer.boundary)
c.addToLinks(index)
links.append(c)
if equalVer.symbol == '<=':
c = StructureHelper('<=', equalVer.version, equalVer.boundary)
c.addToLinks(index)
links.append(c)
if equalVer.symbol == '==':
c = StructureHelper('==', equalVer.version, equalVer.boundary)
c.addToLinks(index)
links.append(c)
if equalVer.symbol == '>':
c = StructureHelper('>', equalVer.version, equalVer.boundary)
c.addToLinks(index)
links.append(c)
if equalVer.symbol == '<':
c = StructureHelper('<', equalVer.version, equalVer.boundary)
c.addToLinks(index)
links.append(c)
def normalizeText(self, text):
if text is not None:
            regex = r'[0-9]+\.[0-9]+'
res = re.compile(regex)
matched = res.search(text)
return matched.group(0)
def findAllArtifacts(self, translatedVersions):
regex = '[0-9](\\.)'
if len(self.mavenVersions) == 0:
self.logger.warn('acquired maven artifacts is empty')
if len(translatedVersions) != 0:
for version in translatedVersions:
for mvn, url in self.mavenVersions:
res = re.compile(regex)
matched = res.search(mvn)
if matched is None:
continue
mavenSuffix = []
found = False
comparableVersion = ''
for char in mvn:
if found is not True:
if char == '.':
comparableVersion += char
continue
try:
integerChar = int(char)
comparableVersion += str(integerChar)
except ValueError:
mavenSuffix.append(char)
found = True
else:
mavenSuffix.append(char)
attachedSuffix = ''
for su in mavenSuffix:
attachedSuffix += str(su)
                    if version.boundary is not None and comparableVersion != '':
# Case where boundary version is specified as one digit i.e 9
if '.' not in version.boundary and version.boundary == self.getBoundary(comparableVersion):
self.compareVersions(attachedSuffix, comparableVersion, version, url)
# Case where boundary version is specified with decimal point i.e 9.2
if '.' in version.boundary and version.boundary == self.normalizeText(
comparableVersion):
# Case where affected versions are between to versions
if version.greaterThanOrEqualTo is not None and version.lessThanOrEqualTo is not None:
if (LooseVersion(comparableVersion) == LooseVersion(
version.greaterThanOrEqualTo.replace('<=', '')) or
(LooseVersion(comparableVersion) < LooseVersion(
version.greaterThanOrEqualTo.replace('<=', ''))
and LooseVersion(comparableVersion) > LooseVersion(
version.lessThanOrEqualTo.replace('>=', '')))) and \
(LooseVersion(comparableVersion) == LooseVersion(
version.lessThanOrEqualTo.replace('>=', '')) or
(LooseVersion(comparableVersion) > LooseVersion(
version.lessThanOrEqualTo.replace('>=', '')) and
LooseVersion(comparableVersion) < LooseVersion(
version.greaterThanOrEqualTo.replace('<=', '')))):
self.populatedAffectedLibraries(attachedSuffix, comparableVersion, url)
self.compareVersions(attachedSuffix, comparableVersion, version, url)
                    elif comparableVersion != '':
self.compareVersions(attachedSuffix, comparableVersion, version, url)
else:
            self.logger.warn('affected version range is unavailable')
def getBoundary(self, normalizedText):
regex = '[0-9]+'
res = re.compile(regex)
matched = res.search(normalizedText)
return matched.group(0)
def populatedAffectedLibraries(self, attachedSuffix, comparableVersion, url):
self.affectedMvnSeries.add(
AffectedJavaLibrary(self.groupId, self.artifactId, str(comparableVersion + attachedSuffix), url))
def compareVersions(self, attachedSuffix, comparableVersion, version, url):
if version.equal is not None:
if LooseVersion(version.equal.replace('==', '')) == LooseVersion(comparableVersion):
self.populatedAffectedLibraries(attachedSuffix, comparableVersion, url)
if version.greaterThanOrEqualTo is not None and version.lessThanOrEqualTo is None:
if LooseVersion(comparableVersion) == LooseVersion(version.greaterThanOrEqualTo.replace('<=', '')) or \
LooseVersion(comparableVersion) < LooseVersion(
version.greaterThanOrEqualTo.replace('<=', '')):
self.populatedAffectedLibraries(attachedSuffix, comparableVersion, url)
if version.lessThanOrEqualTo is not None and version.greaterThanOrEqualTo is None:
if LooseVersion(comparableVersion) == LooseVersion(version.lessThanOrEqualTo.replace('>=', '')) or \
LooseVersion(comparableVersion) > LooseVersion(version.lessThanOrEqualTo.replace('>=', '')):
self.populatedAffectedLibraries(attachedSuffix, comparableVersion, url)
if version.greaterThan is not None:
if LooseVersion(comparableVersion) < LooseVersion(version.greaterThan.replace('<', '')):
self.populatedAffectedLibraries(attachedSuffix, comparableVersion, url)
if version.lessThan is not None:
if LooseVersion(comparableVersion) > LooseVersion(version.lessThan.replace('>', '')):
self.populatedAffectedLibraries(attachedSuffix, comparableVersion, url)
# Case where an affected version is between two other versions
if version.lessThan is not None and version.greaterThan is not None:
if LooseVersion(comparableVersion) < LooseVersion(version.greaterThan.replace('<', '')) and \
LooseVersion(comparableVersion) > LooseVersion(version.lessThan.replace('>', '')):
self.populatedAffectedLibraries(attachedSuffix, comparableVersion, url)
class AffectedJavaLibrary:
def __init__(self, groupId, artifactId, version, url=None):
self.groupId = groupId
self.artifactId = artifactId
self.version = version
self.url = url
class EqualBaseVersion:
def __init__(self, *args):
self.equal = None
self.lessThanOrEqualTo = None
self.greaterThanOrEqualTo = None
self.lessThan = None
self.greaterThan = None
self.boundary = None
for arg in args:
for each in arg:
if each is not None:
if '==' in each:
self.equal = each
elif '>=' in each:
self.lessThanOrEqualTo = each
elif '<=' in each:
self.greaterThanOrEqualTo = each
elif '>' in each and '=' not in each:
self.lessThan = each
elif '<' in each and '=' not in each:
self.greaterThan = each
else:
self.boundary = each
class StructureHelper:
def __init__(self, symbol, version, boundary):
self.symbol = symbol
self.version = version
self.boundary = boundary
self.links = set()
def addToLinks(self, link):
self.links.add(link)
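if __name__ == "__main__":
    # Editor's sketch, not part of victims-db-builder: BaseLibrary accepts
    # either one range string or an iterable of them; each entry is wrapped
    # in a Version instance (behaviour of Version itself is assumed here).
    single = BaseLibrary('<=1.2.3')
    several = BaseLibrary(['<=1.2.3', '>=2.0'])
    assert len(single.versions) == 1
    assert len(several.versions) == 2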
|
skilstak/dk-help-your-kids-with-computer-coding
|
refs/heads/master
|
bubble-blaster/04.py
|
1
|
# despite what the book says, it's actually bad practice to use either:
#from tkinter import *
#from tkinter import Tk
import tkinter
HEIGHT = 500
WIDTH = 800
window = tkinter.Tk()
window.title('Bubble Blaster')
c = tkinter.Canvas(window, width=WIDTH, height=HEIGHT, bg='darkblue')
c.pack()
ship_id = c.create_polygon(5,5,5,25,30,15,fill='red')
ship_id2 = c.create_oval(0,0,30,30,outline='red')
SHIP_RADIUS = 15
MID_X = WIDTH / 2
MID_Y = HEIGHT / 2
c.move(ship_id,MID_X,MID_Y)
c.move(ship_id2,MID_X,MID_Y)
# don't abbreviate, it's bad style and not 'pythonic'
SHIP_SPEED = 10
def move_ship(event):
    if event.keysym == 'Up':
        c.move(ship_id,0,-SHIP_SPEED)
        c.move(ship_id2,0,-SHIP_SPEED)
    elif event.keysym == 'Down':
        c.move(ship_id,0,SHIP_SPEED)
        c.move(ship_id2,0,SHIP_SPEED)
    elif event.keysym == 'Left':
        c.move(ship_id,-SHIP_SPEED,0)
        c.move(ship_id2,-SHIP_SPEED,0)
    elif event.keysym == 'Right':
        c.move(ship_id,SHIP_SPEED,0)
        c.move(ship_id2,SHIP_SPEED,0)
c.bind_all('<Key>',move_ship)
import random
bubble_id = list()
bubble_radius = list()
bubble_speed = list()
MIN_BUBBLE_RADIUS = 10
MAX_BUBBLE_RADIUS = 30
MAX_BUBBLE_SPEED = 10
GAP = 100
def create_bubble():
x = WIDTH + GAP
y = random.randint(0, HEIGHT)
r = random.randint(MIN_BUBBLE_RADIUS, MAX_BUBBLE_RADIUS)
    id1 = c.create_oval(x-r, y-r, x+r, y+r, outline='white')
bubble_id.append(id1)
bubble_radius.append(r)
bubble_speed.append(random.randint(1,MAX_BUBBLE_SPEED))
# easy way to keep the window open
window.mainloop()
|
prds21/barrial-movie
|
refs/heads/master
|
lib/mechanize/_markupbase.py
|
134
|
# Taken from Python 2.6.4 for use by _sgmllib.py
"""Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the HTMLParser and sgmllib
modules (indirectly, for htmllib as well). It has no documented
public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in ("attlist", "linktype", "link", "element"):
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
def parse_marked_section(self, i, report=1):
rawdata= self.rawdata
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
sectName, j = self._scan_name( i+3, i )
if j < 0:
return j
if sectName in ("temp", "cdata", "ignore", "include", "rcdata"):
# look for standard ]]> ending
match= _markedsectionclose.search(rawdata, i+3)
elif sectName in ("if", "else", "endif"):
# look for MS Office ]> ending
match= _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in ("attlist", "element", "entity", "notation"):
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
    # Internal -- scan a name token; return the new position and the token,
    # or (None, -1) if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass
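if __name__ == "__main__":
    # Editor's sketch, not part of the original module: ParserBase only
    # supplies helpers, so a subclass must provide rawdata and error()
    # before the declaration scanner is usable.
    class _DemoParser(ParserBase):
        def __init__(self):
            ParserBase.__init__(self)
            self.reset()
            self.rawdata = "<!DOCTYPE html>"
        def error(self, message):
            raise AssertionError(message)
        def handle_decl(self, data):
            print("declaration: %r" % data)

    demo = _DemoParser()
    assert demo.parse_declaration(0) == len(demo.rawdata)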
|
nsrchemie/code_guild
|
refs/heads/master
|
wk2/extras/linked_lists/linked_list/test_linked_list.py
|
6
|
from nose.tools import assert_equal

# LinkedList and Node live in the implementation module alongside this test;
# the module name below is an assumption based on the directory layout.
from linked_list import LinkedList, Node
class TestLinkedList(object):
def test_insert_to_front(self):
print('Test: insert_to_front on an empty list')
linked_list = LinkedList(None)
linked_list.insert_to_front(10)
assert_equal(linked_list.get_all_data(), [10])
print('Test: insert_to_front on a None')
linked_list.insert_to_front(None)
assert_equal(linked_list.get_all_data(), [10])
print('Test: insert_to_front general case')
linked_list.insert_to_front('a')
linked_list.insert_to_front('bc')
assert_equal(linked_list.get_all_data(), ['bc', 'a', 10])
print('Success: test_insert_to_front\n')
def test_append(self):
print('Test: append on an empty list')
linked_list = LinkedList(None)
linked_list.append(10)
assert_equal(linked_list.get_all_data(), [10])
print('Test: append a None')
linked_list.append(None)
assert_equal(linked_list.get_all_data(), [10])
print('Test: append general case')
linked_list.append('a')
linked_list.append('bc')
assert_equal(linked_list.get_all_data(), [10, 'a', 'bc'])
print('Success: test_append\n')
def test_find(self):
print('Test: find on an empty list')
linked_list = LinkedList(None)
node = linked_list.find('a')
assert_equal(node, None)
print('Test: find a None')
head = Node(10)
linked_list = LinkedList(head)
node = linked_list.find(None)
assert_equal(node, None)
print('Test: find general case with matches')
head = Node(10)
linked_list = LinkedList(head)
linked_list.insert_to_front('a')
linked_list.insert_to_front('bc')
node = linked_list.find('a')
assert_equal(str(node), 'a')
print('Test: find general case with no matches')
node = linked_list.find('aaa')
assert_equal(node, None)
print('Success: test_find\n')
def test_delete(self):
print('Test: delete on an empty list')
linked_list = LinkedList(None)
linked_list.delete('a')
assert_equal(linked_list.get_all_data(), [])
print('Test: delete a None')
head = Node(10)
linked_list = LinkedList(head)
linked_list.delete(None)
assert_equal(linked_list.get_all_data(), [10])
print('Test: delete general case with matches')
head = Node(10)
linked_list = LinkedList(head)
linked_list.insert_to_front('a')
linked_list.insert_to_front('bc')
linked_list.delete('a')
assert_equal(linked_list.get_all_data(), ['bc', 10])
print('Test: delete general case with no matches')
linked_list.delete('aa')
assert_equal(linked_list.get_all_data(), ['bc', 10])
print('Success: test_delete\n')
def test_len(self):
print('Test: len on an empty list')
linked_list = LinkedList(None)
assert_equal(len(linked_list), 0)
print('Test: len general case')
head = Node(10)
linked_list = LinkedList(head)
linked_list.insert_to_front('a')
linked_list.insert_to_front('bc')
assert_equal(len(linked_list), 3)
print('Success: test_len\n')
def main():
test = TestLinkedList()
test.test_insert_to_front()
test.test_append()
test.test_find()
test.test_delete()
test.test_len()
if __name__ == '__main__':
main()
|
mhnatiuk/phd_sociology_of_religion
|
refs/heads/master
|
scrapper/lib/python2.7/site-packages/twisted/test/test_process.py
|
26
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test running processes.
"""
import gzip
import os
import sys
import signal
import StringIO
import errno
import gc
import stat
import operator
try:
import fcntl
except ImportError:
fcntl = process = None
else:
from twisted.internet import process
from zope.interface.verify import verifyObject
from twisted.python.log import msg
from twisted.internet import reactor, protocol, error, interfaces, defer
from twisted.trial import unittest
from twisted.python import util, runtime, procutils
class StubProcessProtocol(protocol.ProcessProtocol):
"""
ProcessProtocol counter-implementation: all methods on this class raise an
exception, so instances of this may be used to verify that only certain
methods are called.
"""
def outReceived(self, data):
raise NotImplementedError()
def errReceived(self, data):
raise NotImplementedError()
def inConnectionLost(self):
raise NotImplementedError()
def outConnectionLost(self):
raise NotImplementedError()
def errConnectionLost(self):
raise NotImplementedError()
class ProcessProtocolTests(unittest.TestCase):
"""
Tests for behavior provided by the process protocol base class,
L{protocol.ProcessProtocol}.
"""
def test_interface(self):
"""
L{ProcessProtocol} implements L{IProcessProtocol}.
"""
verifyObject(interfaces.IProcessProtocol, protocol.ProcessProtocol())
def test_outReceived(self):
"""
Verify that when stdout is delivered to
L{ProcessProtocol.childDataReceived}, it is forwarded to
L{ProcessProtocol.outReceived}.
"""
received = []
class OutProtocol(StubProcessProtocol):
def outReceived(self, data):
received.append(data)
bytes = "bytes"
p = OutProtocol()
p.childDataReceived(1, bytes)
self.assertEqual(received, [bytes])
def test_errReceived(self):
"""
Similar to L{test_outReceived}, but for stderr.
"""
received = []
class ErrProtocol(StubProcessProtocol):
def errReceived(self, data):
received.append(data)
bytes = "bytes"
p = ErrProtocol()
p.childDataReceived(2, bytes)
self.assertEqual(received, [bytes])
def test_inConnectionLost(self):
"""
Verify that when stdin close notification is delivered to
L{ProcessProtocol.childConnectionLost}, it is forwarded to
L{ProcessProtocol.inConnectionLost}.
"""
lost = []
class InLostProtocol(StubProcessProtocol):
def inConnectionLost(self):
lost.append(None)
p = InLostProtocol()
p.childConnectionLost(0)
self.assertEqual(lost, [None])
def test_outConnectionLost(self):
"""
Similar to L{test_inConnectionLost}, but for stdout.
"""
lost = []
class OutLostProtocol(StubProcessProtocol):
def outConnectionLost(self):
lost.append(None)
p = OutLostProtocol()
p.childConnectionLost(1)
self.assertEqual(lost, [None])
def test_errConnectionLost(self):
"""
Similar to L{test_inConnectionLost}, but for stderr.
"""
lost = []
class ErrLostProtocol(StubProcessProtocol):
def errConnectionLost(self):
lost.append(None)
p = ErrLostProtocol()
p.childConnectionLost(2)
self.assertEqual(lost, [None])
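# A minimal sketch (not part of the original suite) of the dispatch the tests
# above exercise: the base L{protocol.ProcessProtocol} routes
# childDataReceived(1, ...) to outReceived, childDataReceived(2, ...) to
# errReceived, and childConnectionLost(0/1/2) to in/out/errConnectionLost,
# so a subclass usually only overrides the convenience callbacks.
class _DispatchSketchProtocol(protocol.ProcessProtocol):
    """
    Illustrative only: relies on the default dispatch rather than overriding
    L{childDataReceived} directly.
    """
    def outReceived(self, data):
        msg("stdout: %r" % (data,))
    def errReceived(self, data):
        msg("stderr: %r" % (data,))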
class TrivialProcessProtocol(protocol.ProcessProtocol):
"""
Simple process protocol for test purposes.
@ivar outData: data received from stdout
@ivar errData: data received from stderr
"""
def __init__(self, d):
"""
Create the deferred that will be fired at the end, and initialize
data structures.
"""
self.deferred = d
self.outData = []
self.errData = []
def processEnded(self, reason):
self.reason = reason
self.deferred.callback(None)
def outReceived(self, data):
self.outData.append(data)
def errReceived(self, data):
self.errData.append(data)
class TestProcessProtocol(protocol.ProcessProtocol):
def connectionMade(self):
self.stages = [1]
self.data = ''
self.err = ''
self.transport.write("abcd")
def childDataReceived(self, childFD, data):
"""
Override and disable the dispatch provided by the base class to ensure
that it is really this method which is being called, and the transport
is not going directly to L{outReceived} or L{errReceived}.
"""
if childFD == 1:
self.data += data
elif childFD == 2:
self.err += data
def childConnectionLost(self, childFD):
"""
Similarly to L{childDataReceived}, disable the automatic dispatch
provided by the base implementation to verify that the transport is
calling this method directly.
"""
if childFD == 1:
self.stages.append(2)
if self.data != "abcd":
raise RuntimeError(
"Data was %r instead of 'abcd'" % (self.data,))
self.transport.write("1234")
elif childFD == 2:
self.stages.append(3)
if self.err != "1234":
raise RuntimeError(
"Err was %r instead of '1234'" % (self.err,))
self.transport.write("abcd")
self.stages.append(4)
elif childFD == 0:
self.stages.append(5)
def processEnded(self, reason):
self.reason = reason
self.deferred.callback(None)
class EchoProtocol(protocol.ProcessProtocol):
s = "1234567" * 1001
n = 10
finished = 0
failure = None
def __init__(self, onEnded):
self.onEnded = onEnded
self.count = 0
def connectionMade(self):
assert self.n > 2
for i in range(self.n - 2):
self.transport.write(self.s)
# test writeSequence
self.transport.writeSequence([self.s, self.s])
self.buffer = self.s * self.n
def outReceived(self, data):
if buffer(self.buffer, self.count, len(data)) != buffer(data):
self.failure = ("wrong bytes received", data, self.count)
self.transport.closeStdin()
else:
self.count += len(data)
if self.count == len(self.buffer):
self.transport.closeStdin()
def processEnded(self, reason):
self.finished = 1
if not reason.check(error.ProcessDone):
self.failure = "process didn't terminate normally: " + str(reason)
self.onEnded.callback(self)
class SignalProtocol(protocol.ProcessProtocol):
"""
A process protocol that sends a signal when data is first received.
@ivar deferred: deferred firing on C{processEnded}.
@type deferred: L{defer.Deferred}
@ivar signal: the signal to send to the process.
@type signal: C{str}
@ivar signaled: A flag tracking whether the signal has been sent to the
child or not yet. C{False} until it is sent, then C{True}.
@type signaled: C{bool}
"""
def __init__(self, deferred, sig):
self.deferred = deferred
self.signal = sig
self.signaled = False
def outReceived(self, data):
"""
Handle the first output from the child process (which indicates it
is set up and ready to receive the signal) by sending the signal to
it. Also log all output to help with debugging.
"""
msg("Received %r from child stdout" % (data,))
if not self.signaled:
self.signaled = True
self.transport.signalProcess(self.signal)
def errReceived(self, data):
"""
Log all data received from the child's stderr to help with
debugging.
"""
msg("Received %r from child stderr" % (data,))
def processEnded(self, reason):
"""
Callback C{self.deferred} with C{None} if C{reason} is a
L{error.ProcessTerminated} failure with C{exitCode} set to C{None},
C{signal} set to C{self.signal}, and C{status} holding the status code
of the exited process. Otherwise, errback with a C{ValueError}
describing the problem.
"""
msg("Child exited: %r" % (reason.getTraceback(),))
if not reason.check(error.ProcessTerminated):
return self.deferred.errback(
ValueError("wrong termination: %s" % (reason,)))
v = reason.value
if isinstance(self.signal, str):
signalValue = getattr(signal, 'SIG' + self.signal)
else:
signalValue = self.signal
if v.exitCode is not None:
return self.deferred.errback(
ValueError("SIG%s: exitCode is %s, not None" %
(self.signal, v.exitCode)))
if v.signal != signalValue:
return self.deferred.errback(
ValueError("SIG%s: .signal was %s, wanted %s" %
(self.signal, v.signal, signalValue)))
if os.WTERMSIG(v.status) != signalValue:
return self.deferred.errback(
ValueError('SIG%s: %s' % (self.signal, os.WTERMSIG(v.status))))
self.deferred.callback(None)
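# Worked example (illustrative, not used by the tests): decode a raw wait
# status the way processEnded above checks it with C{os.WTERMSIG}.
def _exampleDecodeWaitStatus(status):
    """
    Return ('signal', signum) for a signal death or ('exit', code) for a
    normal exit, mirroring the checks in L{SignalProtocol.processEnded}.
    """
    if os.WIFSIGNALED(status):
        return ('signal', os.WTERMSIG(status))
    return ('exit', os.WEXITSTATUS(status))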
class TestManyProcessProtocol(TestProcessProtocol):
def __init__(self):
self.deferred = defer.Deferred()
def processEnded(self, reason):
self.reason = reason
if reason.check(error.ProcessDone):
self.deferred.callback(None)
else:
self.deferred.errback(reason)
class UtilityProcessProtocol(protocol.ProcessProtocol):
"""
Helper class for launching a Python process and getting a result from it.
@ivar program: A string giving a Python program for the child process to
run.
"""
program = None
def run(cls, reactor, argv, env):
"""
Run a Python process connected to a new instance of this protocol
class. Return the protocol instance.
The Python process is given C{self.program} on the command line to
execute, in addition to anything specified by C{argv}. C{env} is
the complete environment.
"""
exe = sys.executable
self = cls()
reactor.spawnProcess(
self, exe, [exe, "-c", self.program] + argv, env=env)
return self
run = classmethod(run)
def __init__(self):
self.bytes = []
self.requests = []
def parseChunks(self, bytes):
"""
Called with all bytes received on stdout when the process exits.
"""
raise NotImplementedError()
def getResult(self):
"""
Return a Deferred which will fire with the result of L{parseChunks}
when the child process exits.
"""
d = defer.Deferred()
self.requests.append(d)
return d
def _fireResultDeferreds(self, result):
"""
Callback all Deferreds returned up until now by L{getResult}
with the given result object.
"""
requests = self.requests
self.requests = None
for d in requests:
d.callback(result)
def outReceived(self, bytes):
"""
Accumulate output from the child process in a list.
"""
self.bytes.append(bytes)
def processEnded(self, reason):
"""
Handle process termination by parsing all received output and firing
any waiting Deferreds.
"""
self._fireResultDeferreds(self.parseChunks(self.bytes))
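# Typical use of the helper above (a sketch mirroring the tests further
# down): run a subclass against the reactor and wait for its parsed output.
#
#     proto = GetArgumentVector.run(reactor, ['extra-arg'], {})
#     d = proto.getResult()  # fires with the return value of parseChunks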
class GetArgumentVector(UtilityProcessProtocol):
"""
Protocol which will read a serialized argv from a process and
expose it to interested parties.
"""
program = (
"from sys import stdout, argv\n"
"stdout.write(chr(0).join(argv))\n"
"stdout.flush()\n")
def parseChunks(self, chunks):
"""
Parse the output from the process to which this protocol was
connected, which is a single unterminated line of \\0-separated
strings giving the argv of that process. Return this as a list of
str objects.
"""
return ''.join(chunks).split('\0')
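# For example (illustration only): spawning with argv=['foo'] gives the
# child sys.argv == ['-c', 'foo'], which arrives here as "-c\x00foo" and
# splits back into ['-c', 'foo'].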
class GetEnvironmentDictionary(UtilityProcessProtocol):
"""
Protocol which will read a serialized environment dict from a process
and expose it to interested parties.
"""
program = (
"from sys import stdout\n"
"from os import environ\n"
"items = environ.iteritems()\n"
"stdout.write(chr(0).join([k + chr(0) + v for k, v in items]))\n"
"stdout.flush()\n")
def parseChunks(self, chunks):
"""
Parse the output from the process to which this protocol was
connected, which is a single unterminated line of \\0-separated
strings giving key value pairs of the environment from that process.
Return this as a dictionary.
"""
environString = ''.join(chunks)
if not environString:
return {}
environ = iter(environString.split('\0'))
d = {}
while 1:
try:
k = environ.next()
except StopIteration:
break
else:
v = environ.next()
d[k] = v
return d
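# Worked example of the framing parsed above (illustration only): an
# environment {'A': '1', 'B': '2'} arrives as "A\x001\x00B\x002"; splitting
# on '\0' yields ['A', '1', 'B', '2'], and the pairing loop rebuilds
# {'A': '1', 'B': '2'}.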
class ProcessTestCase(unittest.TestCase):
"""Test running a process."""
usePTY = False
def testStdio(self):
"""twisted.internet.stdio test."""
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_twisted.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
env = {"PYTHONPATH": os.pathsep.join(sys.path)}
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=env,
path=None, usePTY=self.usePTY)
p.transport.write("hello, world")
p.transport.write("abc")
p.transport.write("123")
p.transport.closeStdin()
def processEnded(ign):
self.assertEqual(p.outF.getvalue(), "hello, worldabc123",
"Output follows:\n"
"%s\n"
"Error message from process_twisted follows:\n"
"%s\n" % (p.outF.getvalue(), p.errF.getvalue()))
return d.addCallback(processEnded)
def test_unsetPid(self):
"""
Test that the pid is set while the process runs and is None after it
terminates. This reuses process_echoer.py to get a process that blocks
on stdin.
"""
finished = defer.Deferred()
p = TrivialProcessProtocol(finished)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_echoer.py")
procTrans = reactor.spawnProcess(p, exe,
[exe, scriptPath], env=None)
self.failUnless(procTrans.pid)
def afterProcessEnd(ignored):
self.assertEqual(procTrans.pid, None)
p.transport.closeStdin()
return finished.addCallback(afterProcessEnd)
def test_process(self):
"""
Test running a process: check its output, its exitCode, and some
properties of signalProcess.
"""
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_tester.py")
d = defer.Deferred()
p = TestProcessProtocol()
p.deferred = d
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None)
def check(ignored):
self.assertEqual(p.stages, [1, 2, 3, 4, 5])
f = p.reason
f.trap(error.ProcessTerminated)
self.assertEqual(f.value.exitCode, 23)
# would .signal be available on non-posix?
# self.assertEqual(f.value.signal, None)
self.assertRaises(
error.ProcessExitedAlready, p.transport.signalProcess, 'INT')
try:
import process_tester, glob
for f in glob.glob(process_tester.test_file_match):
os.remove(f)
except:
pass
d.addCallback(check)
return d
def testManyProcesses(self):
def _check(results, protocols):
for p in protocols:
self.assertEqual(p.stages, [1, 2, 3, 4, 5], "[%d] stages = %s" % (id(p.transport), str(p.stages)))
# test status code
f = p.reason
f.trap(error.ProcessTerminated)
self.assertEqual(f.value.exitCode, 23)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_tester.py")
args = [exe, "-u", scriptPath]
protocols = []
deferreds = []
for i in xrange(50):
p = TestManyProcessProtocol()
protocols.append(p)
reactor.spawnProcess(p, exe, args, env=None)
deferreds.append(p.deferred)
deferredList = defer.DeferredList(deferreds, consumeErrors=True)
deferredList.addCallback(_check, protocols)
return deferredList
def test_echo(self):
"""
Spawning a subprocess which echoes its stdin to its stdout via
C{reactor.spawnProcess} will result in that echoed output being
delivered to outReceived.
"""
finished = defer.Deferred()
p = EchoProtocol(finished)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_echoer.py")
reactor.spawnProcess(p, exe, [exe, scriptPath], env=None)
def asserts(ignored):
self.failIf(p.failure, p.failure)
self.failUnless(hasattr(p, 'buffer'))
self.assertEqual(len(''.join(p.buffer)), len(p.s * p.n))
def takedownProcess(err):
p.transport.closeStdin()
return err
return finished.addCallback(asserts).addErrback(takedownProcess)
def testCommandLine(self):
args = [r'a\"b ', r'a\b ', r' a\\"b', r' a\\b', r'"foo bar" "', '\tab', '"\\', 'a"b', "a'b"]
pyExe = sys.executable
scriptPath = util.sibpath(__file__, "process_cmdline.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, pyExe, [pyExe, "-u", scriptPath]+args, env=None,
path=None)
def processEnded(ign):
self.assertEqual(p.errF.getvalue(), "")
recvdArgs = p.outF.getvalue().splitlines()
self.assertEqual(recvdArgs, args)
return d.addCallback(processEnded)
def test_wrongArguments(self):
"""
Test invalid arguments to spawnProcess: arguments and environment
must only contain str or unicode objects, and no null bytes.
"""
exe = sys.executable
p = protocol.ProcessProtocol()
badEnvs = [
{"foo": 2},
{"foo": "egg\0a"},
{3: "bar"},
{"bar\0foo": "bar"}]
badArgs = [
[exe, 2],
"spam",
[exe, "foo\0bar"]]
# Sanity check - this will fail for people who have mucked with
# their site configuration in a stupid way, but there's nothing we
# can do about that.
badUnicode = u'\N{SNOWMAN}'
try:
badUnicode.encode(sys.getdefaultencoding())
except UnicodeEncodeError:
# Okay, that unicode doesn't encode, put it in as a bad environment
# key.
badEnvs.append({badUnicode: 'value for bad unicode key'})
badEnvs.append({'key for bad unicode value': badUnicode})
badArgs.append([exe, badUnicode])
else:
# It _did_ encode. Most likely, Gtk2 is being used and the
# default system encoding is UTF-8, which can encode anything.
# In any case, if implicit unicode -> str conversion works for
# that string, we can't test that TypeError gets raised instead,
# so just leave it off.
pass
for env in badEnvs:
self.assertRaises(
TypeError,
reactor.spawnProcess, p, exe, [exe, "-c", ""], env=env)
for args in badArgs:
self.assertRaises(
TypeError,
reactor.spawnProcess, p, exe, args, env=None)
# Use upper-case so that the environment key test uses an upper case
# name: some versions of Windows only support upper case environment
# variable names, and I think Python (as of 2.5) doesn't use the right
# syscall for lowercase or mixed case names to work anyway.
okayUnicode = u"UNICODE"
encodedValue = "UNICODE"
def _deprecatedUnicodeSupportTest(self, processProtocolClass, argv=[], env={}):
"""
Check that a deprecation warning is emitted when passing unicode to
spawnProcess for an argv value or an environment key or value.
Check that the warning is of the right type, has the right message,
and refers to the correct file. Unfortunately, the line number is not
checked, because it is too hard to determine reliably.
@param processProtocolClass: A L{UtilityProcessProtocol} subclass
which will be instantiated to communicate with the child process.
@param argv: The argv argument to spawnProcess.
@param env: The env argument to spawnProcess.
@return: A Deferred which fires when the test is complete.
"""
# Sanity check to make sure we can actually encode this unicode
# with the default system encoding. This may be excessively
# paranoid. -exarkun
self.assertEqual(
self.okayUnicode.encode(sys.getdefaultencoding()),
self.encodedValue)
p = self.assertWarns(DeprecationWarning,
"Argument strings and environment keys/values passed to "
"reactor.spawnProcess should be str, not unicode.", __file__,
processProtocolClass.run, reactor, argv, env)
return p.getResult()
def test_deprecatedUnicodeArgvSupport(self):
"""
Test that a unicode string passed for an argument value is allowed
if it can be encoded with the default system encoding, but that a
deprecation warning is emitted.
"""
d = self._deprecatedUnicodeSupportTest(GetArgumentVector, argv=[self.okayUnicode])
def gotArgVector(argv):
self.assertEqual(argv, ['-c', self.encodedValue])
d.addCallback(gotArgVector)
return d
def test_deprecatedUnicodeEnvKeySupport(self):
"""
Test that a unicode string passed for the key of the environment
dictionary is allowed if it can be encoded with the default system
encoding, but that a deprecation warning is emitted.
"""
d = self._deprecatedUnicodeSupportTest(
GetEnvironmentDictionary, env={self.okayUnicode: self.encodedValue})
def gotEnvironment(environ):
self.assertEqual(environ[self.encodedValue], self.encodedValue)
d.addCallback(gotEnvironment)
return d
def test_deprecatedUnicodeEnvValueSupport(self):
"""
Test that a unicode string passed for the value of the environment
dictionary is allowed if it can be encoded with the default system
encoding, but that a deprecation warning is emitted.
"""
d = self._deprecatedUnicodeSupportTest(
GetEnvironmentDictionary, env={self.encodedValue: self.okayUnicode})
def gotEnvironment(environ):
# On Windows, the environment contains more things than we
# specified, so only make sure that at least the key we wanted
# is there, rather than testing the dictionary for exact
# equality.
self.assertEqual(environ[self.encodedValue], self.encodedValue)
d.addCallback(gotEnvironment)
return d
class TwoProcessProtocol(protocol.ProcessProtocol):
num = -1
finished = 0
def __init__(self):
self.deferred = defer.Deferred()
def outReceived(self, data):
pass
def processEnded(self, reason):
self.finished = 1
self.deferred.callback(None)
class TestTwoProcessesBase:
def setUp(self):
self.processes = [None, None]
self.pp = [None, None]
self.done = 0
self.verbose = 0
def createProcesses(self, usePTY=0):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_reader.py")
for num in (0,1):
self.pp[num] = TwoProcessProtocol()
self.pp[num].num = num
p = reactor.spawnProcess(self.pp[num],
exe, [exe, "-u", scriptPath], env=None,
usePTY=usePTY)
self.processes[num] = p
def close(self, num):
if self.verbose: print "closing stdin [%d]" % num
p = self.processes[num]
pp = self.pp[num]
self.failIf(pp.finished, "Process finished too early")
p.loseConnection()
if self.verbose: print self.pp[0].finished, self.pp[1].finished
def _onClose(self):
return defer.gatherResults([ p.deferred for p in self.pp ])
def testClose(self):
if self.verbose: print "starting processes"
self.createProcesses()
reactor.callLater(1, self.close, 0)
reactor.callLater(2, self.close, 1)
return self._onClose()
class TestTwoProcessesNonPosix(TestTwoProcessesBase, unittest.TestCase):
pass
class TestTwoProcessesPosix(TestTwoProcessesBase, unittest.TestCase):
def tearDown(self):
for pp, pr in zip(self.pp, self.processes):
if not pp.finished:
try:
os.kill(pr.pid, signal.SIGTERM)
except OSError:
# If the test failed the process may already be dead
# The error here is only noise
pass
return self._onClose()
def kill(self, num):
if self.verbose: print "kill [%d] with SIGTERM" % num
p = self.processes[num]
pp = self.pp[num]
self.failIf(pp.finished, "Process finished too early")
os.kill(p.pid, signal.SIGTERM)
if self.verbose: print self.pp[0].finished, self.pp[1].finished
def testKill(self):
if self.verbose: print "starting processes"
self.createProcesses(usePTY=0)
reactor.callLater(1, self.kill, 0)
reactor.callLater(2, self.kill, 1)
return self._onClose()
def testClosePty(self):
if self.verbose: print "starting processes"
self.createProcesses(usePTY=1)
reactor.callLater(1, self.close, 0)
reactor.callLater(2, self.close, 1)
return self._onClose()
def testKillPty(self):
if self.verbose: print "starting processes"
self.createProcesses(usePTY=1)
reactor.callLater(1, self.kill, 0)
reactor.callLater(2, self.kill, 1)
return self._onClose()
class FDChecker(protocol.ProcessProtocol):
state = 0
data = ""
failed = None
def __init__(self, d):
self.deferred = d
def fail(self, why):
self.failed = why
self.deferred.callback(None)
def connectionMade(self):
self.transport.writeToChild(0, "abcd")
self.state = 1
def childDataReceived(self, childFD, data):
if self.state == 1:
if childFD != 1:
self.fail("read '%s' on fd %d (not 1) during state 1" \
% (data, childFD))
return
self.data += data
#print "len", len(self.data)
if len(self.data) == 6:
if self.data != "righto":
self.fail("got '%s' on fd1, expected 'righto'" \
% self.data)
return
self.data = ""
self.state = 2
#print "state2", self.state
self.transport.writeToChild(3, "efgh")
return
if self.state == 2:
self.fail("read '%s' on fd %s during state 2" % (childFD, data))
return
if self.state == 3:
if childFD != 1:
self.fail("read '%s' on fd %s (not 1) during state 3" \
% (data, childFD))
return
self.data += data
if len(self.data) == 6:
if self.data != "closed":
self.fail("got '%s' on fd1, expected 'closed'" \
% self.data)
return
self.state = 4
return
if self.state == 4:
self.fail("read '%s' on fd %s during state 4" % (childFD, data))
return
def childConnectionLost(self, childFD):
if self.state == 1:
self.fail("got connectionLost(%d) during state 1" % childFD)
return
if self.state == 2:
if childFD != 4:
self.fail("got connectionLost(%d) (not 4) during state 2" \
% childFD)
return
self.state = 3
self.transport.closeChildFD(5)
return
def processEnded(self, status):
rc = status.value.exitCode
if self.state != 4:
self.fail("processEnded early, rc %d" % rc)
return
if status.value.signal != None:
self.fail("processEnded with signal %s" % status.value.signal)
return
if rc != 0:
self.fail("processEnded with rc %d" % rc)
return
self.deferred.callback(None)
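# FDChecker's handshake with process_fds.py, summarized (illustration only):
#   state 1: parent wrote "abcd" to child fd 0; expects "righto" on fd 1
#   state 2: parent wrote "efgh" to child fd 3; expects connectionLost on fd 4
#   state 3: parent closed child fd 5; expects "closed" on fd 1
#   state 4: the child exits with status 0 and no signal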
class FDTest(unittest.TestCase):
def testFD(self):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_fds.py")
d = defer.Deferred()
p = FDChecker(d)
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
path=None,
childFDs={0:"w", 1:"r", 2:2,
3:"w", 4:"r", 5:"w"})
d.addCallback(lambda x : self.failIf(p.failed, p.failed))
return d
def testLinger(self):
# See what happens when all the pipes close before the process
# actually stops. This test *requires* SIGCHLD catching to work,
# as there is no other way to find out the process is done.
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_linger.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
path=None,
childFDs={1:"r", 2:2},
)
def processEnded(ign):
self.assertEqual(p.outF.getvalue(),
"here is some text\ngoodbye\n")
return d.addCallback(processEnded)
class Accumulator(protocol.ProcessProtocol):
"""Accumulate data from a process."""
closed = 0
endedDeferred = None
def connectionMade(self):
self.outF = StringIO.StringIO()
self.errF = StringIO.StringIO()
def outReceived(self, d):
self.outF.write(d)
def errReceived(self, d):
self.errF.write(d)
def outConnectionLost(self):
pass
def errConnectionLost(self):
pass
def processEnded(self, reason):
self.closed = 1
if self.endedDeferred is not None:
d, self.endedDeferred = self.endedDeferred, None
d.callback(None)
class PosixProcessBase:
"""
Test running processes.
"""
usePTY = False
def getCommand(self, commandName):
"""
Return the path of the shell command named C{commandName}, looking at
common locations.
"""
if os.path.exists('/bin/%s' % (commandName,)):
cmd = '/bin/%s' % (commandName,)
elif os.path.exists('/usr/bin/%s' % (commandName,)):
cmd = '/usr/bin/%s' % (commandName,)
else:
raise RuntimeError(
"%s not found in /bin or /usr/bin" % (commandName,))
return cmd
def testNormalTermination(self):
cmd = self.getCommand('true')
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, cmd, ['true'], env=None,
usePTY=self.usePTY)
def check(ignored):
p.reason.trap(error.ProcessDone)
self.assertEqual(p.reason.value.exitCode, 0)
self.assertEqual(p.reason.value.signal, None)
d.addCallback(check)
return d
def test_abnormalTermination(self):
"""
When a process terminates with a system exit code set to 1,
C{processEnded} is called with a L{error.ProcessTerminated} error,
with the C{exitCode} attribute reflecting the system exit code.
"""
exe = sys.executable
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, exe, [exe, '-c', 'import sys; sys.exit(1)'],
env=None, usePTY=self.usePTY)
def check(ignored):
p.reason.trap(error.ProcessTerminated)
self.assertEqual(p.reason.value.exitCode, 1)
self.assertEqual(p.reason.value.signal, None)
d.addCallback(check)
return d
def _testSignal(self, sig):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_signal.py")
d = defer.Deferred()
p = SignalProtocol(d, sig)
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
usePTY=self.usePTY)
return d
def test_signalHUP(self):
"""
Sending the SIGHUP signal to a running process interrupts it, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} set to C{None} and the C{signal} attribute set to
C{signal.SIGHUP}. C{os.WTERMSIG} can also be used on the C{status}
attribute to extract the signal value.
"""
return self._testSignal('HUP')
def test_signalINT(self):
"""
Sending the SIGINT signal to a running process interrupts it, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} set to C{None} and the C{signal} attribute set to
C{signal.SIGINT}. C{os.WTERMSIG} can also be used on the C{status}
attribute to extract the signal value.
"""
return self._testSignal('INT')
def test_signalKILL(self):
"""
Sending the SIGKILL signal to a running process interrupts it, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} set to C{None} and the C{signal} attribute set to
C{signal.SIGKILL}. C{os.WTERMSIG} can also be used on the C{status}
attribute to extract the signal value.
"""
return self._testSignal('KILL')
def test_signalTERM(self):
"""
Sending the SIGTERM signal to a running process interrupts it, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} set to C{None} and the C{signal} attribute set to
C{signal.SIGTERM}. C{os.WTERMSIG} can also be used on the C{status}
attribute to extract the signal value.
"""
return self._testSignal('TERM')
def test_childSignalHandling(self):
"""
The disposition of signals which are ignored in the parent
process is reset to the default behavior for the child
process.
"""
# Somewhat arbitrarily select SIGUSR1 here. It satisfies our
# requirements that:
# - The interpreter not fiddle around with the handler
# behind our backs at startup time (this disqualifies
# signals like SIGINT and SIGPIPE).
# - The default behavior is to exit.
#
# This lets us send the signal to the child and then verify
# that it exits with a status code indicating that it was
# indeed the signal which caused it to exit.
which = signal.SIGUSR1
# Ignore the signal in the parent (and make sure we clean it
# up).
handler = signal.signal(which, signal.SIG_IGN)
self.addCleanup(signal.signal, signal.SIGUSR1, handler)
# Now do the test.
return self._testSignal(signal.SIGUSR1)
def test_executionError(self):
"""
Raise an error during execvpe to check error management.
"""
cmd = self.getCommand('false')
d = defer.Deferred()
p = TrivialProcessProtocol(d)
def buggyexecvpe(command, args, environment):
raise RuntimeError("Ouch")
oldexecvpe = os.execvpe
os.execvpe = buggyexecvpe
try:
reactor.spawnProcess(p, cmd, ['false'], env=None,
usePTY=self.usePTY)
def check(ignored):
errData = "".join(p.errData + p.outData)
self.assertIn("Upon execvpe", errData)
self.assertIn("Ouch", errData)
d.addCallback(check)
finally:
os.execvpe = oldexecvpe
return d
def test_errorInProcessEnded(self):
"""
The handler which reaps a process is removed when the process is
reaped, even if the protocol's C{processEnded} method raises an
exception.
"""
connected = defer.Deferred()
ended = defer.Deferred()
# This script runs until we disconnect its transport.
pythonExecutable = sys.executable
scriptPath = util.sibpath(__file__, "process_echoer.py")
class ErrorInProcessEnded(protocol.ProcessProtocol):
"""
A protocol that raises an error in C{processEnded}.
"""
def makeConnection(self, transport):
connected.callback(transport)
def processEnded(self, reason):
reactor.callLater(0, ended.callback, None)
raise RuntimeError("Deliberate error")
# Launch the process.
reactor.spawnProcess(
ErrorInProcessEnded(), pythonExecutable,
[pythonExecutable, scriptPath],
env=None, path=None)
pid = []
def cbConnected(transport):
pid.append(transport.pid)
# There's now a reap process handler registered.
self.assertIn(transport.pid, process.reapProcessHandlers)
# Kill the process cleanly, triggering an error in the protocol.
transport.loseConnection()
connected.addCallback(cbConnected)
def checkTerminated(ignored):
# The exception was logged.
excs = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(excs), 1)
# The process is no longer scheduled for reaping.
self.assertNotIn(pid[0], process.reapProcessHandlers)
ended.addCallback(checkTerminated)
return ended
class MockSignal(object):
"""
Neuter L{signal.signal}, but pass other attributes unscathed
"""
def signal(self, sig, action):
return signal.getsignal(sig)
def __getattr__(self, attr):
return getattr(signal, attr)
class MockOS(object):
"""
The mock OS: overwrite L{os}, L{fcntl} and L{sys} functions with fake ones.
@ivar exited: set to True when C{_exit} is called.
@type exited: C{bool}
@ivar O_RDWR: dumb value faking C{os.O_RDWR}.
@type O_RDWR: C{int}
@ivar O_NOCTTY: dumb value faking C{os.O_NOCTTY}.
@type O_NOCTTY: C{int}
@ivar WNOHANG: dumb value faking C{os.WNOHANG}.
@type WNOHANG: C{int}
@ivar raiseFork: if not C{None}, subsequent calls to fork will raise this
object.
@type raiseFork: C{NoneType} or C{Exception}
@ivar raiseExec: if set, subsequent calls to execvpe will raise an error.
@type raiseExec: C{bool}
@ivar fdio: fake file object returned by calls to fdopen.
@type fdio: C{StringIO.StringIO}
@ivar actions: hold names of some actions executed by the object, in order
of execution.
@type actions: C{list} of C{str}
@ivar closed: keep track of the file descriptor closed.
@type closed: C{list} of C{int}
@ivar child: whether C{fork} returns as the child or as the parent.
@type child: C{bool}
@ivar pipeCount: the number of times that C{os.pipe} has been called.
@type pipeCount: C{int}
@ivar raiseWaitPid: if set, subsequent calls to waitpid will raise
the error specified.
@type raiseWaitPid: C{None} or a class
@ivar waitChild: if set, subsequent calls to waitpid will return it.
@type waitChild: C{None} or a tuple
@ivar euid: the uid returned by the fake C{os.geteuid}
@type euid: C{int}
@ivar egid: the gid returned by the fake C{os.getegid}
@type egid: C{int}
@ivar seteuidCalls: stored results of C{os.seteuid} calls.
@type seteuidCalls: C{list}
@ivar setegidCalls: stored results of C{os.setegid} calls.
@type setegidCalls: C{list}
@ivar path: the path returned by C{os.path.expanduser}.
@type path: C{str}
@ivar raiseKill: if set, subsequent call to kill will raise the error
specified.
@type raiseKill: C{None} or an exception instance.
@ivar readData: data returned by C{os.read}.
@type readData: C{str}
"""
exited = False
raiseExec = False
fdio = None
child = True
raiseWaitPid = None
raiseFork = None
waitChild = None
euid = 0
egid = 0
path = None
raiseKill = None
readData = ""
def __init__(self):
"""
Initialize data structures.
"""
self.actions = []
self.closed = []
self.pipeCount = 0
self.O_RDWR = -1
self.O_NOCTTY = -2
self.WNOHANG = -4
self.WEXITSTATUS = lambda x: 0
self.WIFEXITED = lambda x: 1
self.seteuidCalls = []
self.setegidCalls = []
def open(self, dev, flags):
"""
Fake C{os.open}. Return a non-fd number to be sure it's not used
elsewhere.
"""
return -3
def fstat(self, fd):
"""
Fake C{os.fstat}. Return a C{os.stat_result} filled with garbage.
"""
return os.stat_result((0,) * 10)
def fdopen(self, fd, flag):
"""
Fake C{os.fdopen}. Return a StringIO object whose content can be tested
later via C{self.fdio}.
"""
self.fdio = StringIO.StringIO()
return self.fdio
def setsid(self):
"""
Fake C{os.setsid}. Save action.
"""
self.actions.append('setsid')
def fork(self):
"""
Fake C{os.fork}. Save the action in C{self.actions}, and return 0 if
C{self.child} is set, or a dumb number.
"""
self.actions.append(('fork', gc.isenabled()))
if self.raiseFork is not None:
raise self.raiseFork
elif self.child:
# Child result is 0
return 0
else:
return 21
def close(self, fd):
"""
Fake C{os.close}, saving the closed fd in C{self.closed}.
"""
self.closed.append(fd)
def dup2(self, fd1, fd2):
"""
Fake C{os.dup2}. Do nothing.
"""
def write(self, fd, data):
"""
Fake C{os.write}. Save action.
"""
self.actions.append(("write", fd, data))
def read(self, fd, size):
"""
Fake C{os.read}: save action, and return C{readData} content.
@param fd: The file descriptor to read.
@param size: The maximum number of bytes to read.
@return: A fixed C{bytes} buffer.
"""
self.actions.append(('read', fd, size))
return self.readData
def execvpe(self, command, args, env):
"""
Fake C{os.execvpe}. Save the action, and raise an error if
C{self.raiseExec} is set.
"""
self.actions.append('exec')
if self.raiseExec:
raise RuntimeError("Bar")
def pipe(self):
"""
Fake C{os.pipe}. Return non-fd numbers to be sure they're not used
elsewhere, and increment C{self.pipeCount}. This is used to uniquify
the result.
"""
self.pipeCount += 1
return -2 * self.pipeCount + 1, -2 * self.pipeCount
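    # Illustration: successive pipe() calls return (-1, -2), (-3, -4),
    # (-5, -6), ..., the odd number standing in for the read end and the
    # even one for the write end; the mock-fork tests below rely on this
    # when asserting which descriptors were closed, e.g. set([-1, -4, -6]).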
def ttyname(self, fd):
"""
Fake C{os.ttyname}. Return a dumb string.
"""
return "foo"
def _exit(self, code):
"""
Fake C{os._exit}. Save the action, set the C{self.exited} flag, and
raise C{SystemError}.
"""
self.actions.append(('exit', code))
self.exited = True
# Don't forget to raise an error, or you'll end up in parent
# code path.
raise SystemError()
def ioctl(self, fd, flags, arg):
"""
Override C{fcntl.ioctl}. Do nothing.
"""
def setNonBlocking(self, fd):
"""
Override C{fdesc.setNonBlocking}. Do nothing.
"""
def waitpid(self, pid, options):
"""
Override C{os.waitpid}. Return values meaning that the child process
has exited, save executed action.
"""
self.actions.append('waitpid')
if self.raiseWaitPid is not None:
raise self.raiseWaitPid
if self.waitChild is not None:
return self.waitChild
return 1, 0
def settrace(self, arg):
"""
Override C{sys.settrace} to keep coverage working.
"""
def getgid(self):
"""
Override C{os.getgid}. Return a dumb number.
"""
return 1235
def getuid(self):
"""
Override C{os.getuid}. Return a dumb number.
"""
return 1237
def setuid(self, val):
"""
Override C{os.setuid}. Do nothing.
"""
self.actions.append(('setuid', val))
def setgid(self, val):
"""
Override C{os.setgid}. Do nothing.
"""
self.actions.append(('setgid', val))
def setregid(self, val1, val2):
"""
Override C{os.setregid}. Do nothing.
"""
self.actions.append(('setregid', val1, val2))
def setreuid(self, val1, val2):
"""
Override C{os.setreuid}. Save the action.
"""
self.actions.append(('setreuid', val1, val2))
def switchUID(self, uid, gid):
"""
Override C{util.switchUID}. Save the action.
"""
self.actions.append(('switchuid', uid, gid))
def openpty(self):
"""
Override C{pty.openpty}, returning fake file descriptors.
"""
return -12, -13
def chdir(self, path):
"""
Override C{os.chdir}. Save the action.
@param path: The path to change the current directory to.
"""
self.actions.append(('chdir', path))
def geteuid(self):
"""
Mock C{os.geteuid}, returning C{self.euid} instead.
"""
return self.euid
def getegid(self):
"""
Mock C{os.getegid}, returning C{self.egid} instead.
"""
return self.egid
def seteuid(self, egid):
"""
Mock C{os.seteuid}, store result.
"""
self.seteuidCalls.append(egid)
def setegid(self, egid):
"""
Mock C{os.setegid}, store result.
"""
self.setegidCalls.append(egid)
def expanduser(self, path):
"""
Mock C{os.path.expanduser}.
"""
return self.path
def getpwnam(self, user):
"""
Mock C{pwd.getpwnam}.
"""
return 0, 0, 1, 2
def listdir(self, path):
"""
Override C{os.listdir}, returning fake contents of '/dev/fd'
"""
return "-1", "-2"
def kill(self, pid, signalID):
"""
Override C{os.kill}: save the action and raise C{self.raiseKill} if
specified.
"""
self.actions.append(('kill', pid, signalID))
if self.raiseKill is not None:
raise self.raiseKill
def unlink(self, filename):
"""
Override C{os.unlink}. Save the action.
@param filename: The file name to remove.
"""
self.actions.append(('unlink', filename))
def umask(self, mask):
"""
Override C{os.umask}. Save the action.
@param mask: The new file mode creation mask.
"""
self.actions.append(('umask', mask))
def getpid(self):
"""
Return a fixed PID value.
@return: A fixed value.
"""
return 6789
if process is not None:
class DumbProcessWriter(process.ProcessWriter):
"""
A fake L{process.ProcessWriter} used for tests.
"""
def startReading(self):
"""
Here's the faking: don't do anything here.
"""
class DumbProcessReader(process.ProcessReader):
"""
A fake L{process.ProcessReader} used for tests.
"""
def startReading(self):
"""
Here's the faking: don't do anything here.
"""
class DumbPTYProcess(process.PTYProcess):
"""
A fake L{process.PTYProcess} used for tests.
"""
def startReading(self):
"""
Here's the faking: don't do anything here.
"""
class MockProcessTestCase(unittest.TestCase):
"""
Mock a process runner to test the forked child code path.
"""
if process is None:
skip = "twisted.internet.process is never used on Windows"
def setUp(self):
"""
Replace L{process} os, fcntl, sys, switchUID, fdesc and pty modules
with the mock class L{MockOS}.
"""
if gc.isenabled():
self.addCleanup(gc.enable)
else:
self.addCleanup(gc.disable)
self.mockos = MockOS()
self.mockos.euid = 1236
self.mockos.egid = 1234
self.patch(process, "os", self.mockos)
self.patch(process, "fcntl", self.mockos)
self.patch(process, "sys", self.mockos)
self.patch(process, "switchUID", self.mockos.switchUID)
self.patch(process, "fdesc", self.mockos)
self.patch(process.Process, "processReaderFactory", DumbProcessReader)
self.patch(process.Process, "processWriterFactory", DumbProcessWriter)
self.patch(process, "pty", self.mockos)
self.mocksig = MockSignal()
self.patch(process, "signal", self.mocksig)
def tearDown(self):
"""
Reset processes registered for reap.
"""
process.reapProcessHandlers = {}
def test_mockFork(self):
"""
Test a classic spawnProcess. Check the path of the child code:
fork, exec, exit.
"""
gc.enable()
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
try:
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
except SystemError:
self.assert_(self.mockos.exited)
self.assertEqual(
self.mockos.actions, [("fork", False), "exec", ("exit", 1)])
else:
self.fail("Should not be here")
# It should leave the garbage collector disabled.
self.assertFalse(gc.isenabled())
def _mockForkInParentTest(self):
"""
Assert that in the main process, spawnProcess disables the garbage
collector, calls fork, closes the pipe file descriptors it created for
the child process, and calls waitpid.
"""
self.mockos.child = False
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
# It should close the read end of the first pipe, and the write ends
# of the two last pipes
self.assertEqual(set(self.mockos.closed), set([-1, -4, -6]))
self.assertEqual(self.mockos.actions, [("fork", False), "waitpid"])
def test_mockForkInParentGarbageCollectorEnabled(self):
"""
The garbage collector should be enabled when L{reactor.spawnProcess}
returns if it was initially enabled.
@see L{_mockForkInParentTest}
"""
gc.enable()
self._mockForkInParentTest()
self.assertTrue(gc.isenabled())
def test_mockForkInParentGarbageCollectorDisabled(self):
"""
The garbage collector should be disabled when L{reactor.spawnProcess}
returns if it was initially disabled.
@see L{_mockForkInParentTest}
"""
gc.disable()
self._mockForkInParentTest()
self.assertFalse(gc.isenabled())
def test_mockForkTTY(self):
"""
Test a TTY spawnProcess: check the path of the child code:
fork, exec, exit.
"""
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
self.assertRaises(SystemError, reactor.spawnProcess, p, cmd, ['ouch'],
env=None, usePTY=True)
self.assertTrue(self.mockos.exited)
self.assertEqual(
self.mockos.actions,
[("fork", False), "setsid", "exec", ("exit", 1)])
def _mockWithForkError(self):
"""
Assert that if the fork call fails, no other process setup calls are
made and that spawnProcess raises the exception fork raised.
"""
self.mockos.raiseFork = OSError(errno.EAGAIN, None)
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None)
self.assertEqual(self.mockos.actions, [("fork", False)])
def test_mockWithForkErrorGarbageCollectorEnabled(self):
"""
The garbage collector should be enabled when L{reactor.spawnProcess}
raises because L{os.fork} raised, if it was initially enabled.
"""
gc.enable()
self._mockWithForkError()
self.assertTrue(gc.isenabled())
def test_mockWithForkErrorGarbageCollectorDisabled(self):
"""
The garbage collector should be disabled when
L{reactor.spawnProcess} raises because L{os.fork} raised, if it was
initially disabled.
"""
gc.disable()
self._mockWithForkError()
self.assertFalse(gc.isenabled())
def test_mockForkErrorCloseFDs(self):
"""
When C{os.fork} raises an exception, the file descriptors created
before are closed and don't leak.
"""
self._mockWithForkError()
self.assertEqual(set(self.mockos.closed), set([-1, -4, -6, -2, -3, -5]))
def test_mockForkErrorGivenFDs(self):
"""
When C{os.fork} raises an exception and file descriptors have been
specified with the C{childFDs} argument of
L{reactor.spawnProcess}, they are not closed.
"""
self.mockos.raiseFork = OSError(errno.EAGAIN, None)
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
childFDs={0: -10, 1: -11, 2: -13})
self.assertEqual(self.mockos.actions, [("fork", False)])
self.assertEqual(self.mockos.closed, [])
# We can also put "r" or "w" to let twisted create the pipes
self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
childFDs={0: "r", 1: -11, 2: -13})
self.assertEqual(set(self.mockos.closed), set([-1, -2]))
def test_mockForkErrorClosePTY(self):
"""
When C{os.fork} raises an exception, the file descriptors created by
C{pty.openpty} are closed and don't leak, when C{usePTY} is set to
C{True}.
"""
self.mockos.raiseFork = OSError(errno.EAGAIN, None)
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
usePTY=True)
self.assertEqual(self.mockos.actions, [("fork", False)])
self.assertEqual(set(self.mockos.closed), set([-12, -13]))
def test_mockForkErrorPTYGivenFDs(self):
"""
If a tuple is passed to C{usePTY} to specify slave and master file
descriptors and C{os.fork} raises an exception, these file
descriptors aren't closed.
"""
self.mockos.raiseFork = OSError(errno.EAGAIN, None)
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
usePTY=(-20, -21, 'foo'))
self.assertEqual(self.mockos.actions, [("fork", False)])
self.assertEqual(self.mockos.closed, [])
def test_mockWithExecError(self):
"""
Spawn a process but simulate an error during execution in the child
path: C{os.execvpe} raises an error. It should close all the standard
fds, try to print the error encountered, and exit cleanly.
"""
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
self.mockos.raiseExec = True
try:
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
except SystemError:
self.assert_(self.mockos.exited)
self.assertEqual(
self.mockos.actions, [("fork", False), "exec", ("exit", 1)])
# Check that fd have been closed
self.assertIn(0, self.mockos.closed)
self.assertIn(1, self.mockos.closed)
self.assertIn(2, self.mockos.closed)
# Check content of traceback
self.assertIn("RuntimeError: Bar", self.mockos.fdio.getvalue())
else:
self.fail("Should not be here")
def test_mockSetUid(self):
"""
Try creating a process while setting its uid: it's almost the same path
as the standard path, but with a C{switchUID} call before the exec.
"""
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
try:
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False, uid=8080)
except SystemError:
self.assert_(self.mockos.exited)
self.assertEqual(
self.mockos.actions,
[('fork', False), ('setuid', 0), ('setgid', 0),
('switchuid', 8080, 1234), 'exec', ('exit', 1)])
else:
self.fail("Should not be here")
def test_mockSetUidInParent(self):
"""
When spawning a child process with a UID different from the UID of the
current process, the current process does not have its UID changed.
"""
self.mockos.child = False
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False, uid=8080)
self.assertEqual(self.mockos.actions, [('fork', False), 'waitpid'])
def test_mockPTYSetUid(self):
"""
Try creating a PTY process while setting its uid: it's almost the same
path as the standard path, but with a C{switchUID} call before the
exec.
"""
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
try:
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=True, uid=8081)
except SystemError:
self.assertTrue(self.mockos.exited)
self.assertEqual(
self.mockos.actions,
[('fork', False), 'setsid', ('setuid', 0), ('setgid', 0),
('switchuid', 8081, 1234), 'exec', ('exit', 1)])
else:
self.fail("Should not be here")
def test_mockPTYSetUidInParent(self):
"""
When spawning a child process with PTY and a UID different from the UID
of the current process, the current process does not have its UID
changed.
"""
self.mockos.child = False
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
oldPTYProcess = process.PTYProcess
try:
process.PTYProcess = DumbPTYProcess
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=True, uid=8080)
finally:
process.PTYProcess = oldPTYProcess
self.assertEqual(self.mockos.actions, [('fork', False), 'waitpid'])
def test_mockWithWaitError(self):
"""
Test that reapProcess logs errors raised by waitpid.
"""
self.mockos.child = False
cmd = '/mock/ouch'
self.mockos.waitChild = (0, 0)
d = defer.Deferred()
p = TrivialProcessProtocol(d)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
self.assertEqual(self.mockos.actions, [("fork", False), "waitpid"])
self.mockos.raiseWaitPid = OSError()
proc.reapProcess()
errors = self.flushLoggedErrors()
self.assertEqual(len(errors), 1)
errors[0].trap(OSError)
def test_mockErrorECHILDInReapProcess(self):
"""
Test that reapProcess doesn't log anything when waitpid raises an
C{OSError} with errno C{ECHILD}.
"""
self.mockos.child = False
cmd = '/mock/ouch'
self.mockos.waitChild = (0, 0)
d = defer.Deferred()
p = TrivialProcessProtocol(d)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
self.assertEqual(self.mockos.actions, [("fork", False), "waitpid"])
self.mockos.raiseWaitPid = OSError()
self.mockos.raiseWaitPid.errno = errno.ECHILD
# This should not produce any errors
proc.reapProcess()
def test_mockErrorInPipe(self):
"""
If C{os.pipe} raises an exception after some pipes were created, the
created pipes are closed and don't leak.
"""
pipes = [-1, -2, -3, -4]
def pipe():
try:
return pipes.pop(0), pipes.pop(0)
except IndexError:
raise OSError()
self.mockos.pipe = pipe
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None)
self.assertEqual(self.mockos.actions, [])
self.assertEqual(set(self.mockos.closed), set([-4, -3, -2, -1]))
def test_kill(self):
"""
L{process.Process.signalProcess} calls C{os.kill} with the child's PID,
translating the given signal name into its numeric value.
"""
self.mockos.child = False
self.mockos.waitChild = (0, 0)
cmd = '/mock/ouch'
p = TrivialProcessProtocol(None)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
proc.signalProcess("KILL")
self.assertEqual(self.mockos.actions,
[('fork', False), 'waitpid', ('kill', 21, signal.SIGKILL)])
def test_killExited(self):
"""
L{process.Process.signalProcess} raises L{error.ProcessExitedAlready}
if the process has exited.
"""
self.mockos.child = False
cmd = '/mock/ouch'
p = TrivialProcessProtocol(None)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
# We didn't specify a waitpid value, so the waitpid call in
# registerReapProcessHandler has already reaped the process
self.assertRaises(error.ProcessExitedAlready,
proc.signalProcess, "KILL")
def test_killExitedButNotDetected(self):
"""
L{process.Process.signalProcess} raises L{error.ProcessExitedAlready}
if the process has exited but Twisted hasn't seen it (for example, if
the process was waited on outside of Twisted): C{os.kill} then raises
C{OSError} with C{errno.ESRCH} as errno.
"""
self.mockos.child = False
self.mockos.waitChild = (0, 0)
cmd = '/mock/ouch'
p = TrivialProcessProtocol(None)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
self.mockos.raiseKill = OSError(errno.ESRCH, "Not found")
self.assertRaises(error.ProcessExitedAlready,
proc.signalProcess, "KILL")
def test_killErrorInKill(self):
"""
L{process.Process.signalProcess} doesn't mask C{OSError} exceptions if
the errno is different from C{errno.ESRCH}.
"""
self.mockos.child = False
self.mockos.waitChild = (0, 0)
cmd = '/mock/ouch'
p = TrivialProcessProtocol(None)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
self.mockos.raiseKill = OSError(errno.EINVAL, "Invalid signal")
err = self.assertRaises(OSError,
proc.signalProcess, "KILL")
self.assertEquals(err.errno, errno.EINVAL)
class PosixProcessTestCase(unittest.TestCase, PosixProcessBase):
# add two non-pty test cases
def test_stderr(self):
"""
Bytes written to stderr by the spawned process are passed to the
C{errReceived} callback on the C{ProcessProtocol} passed to
C{spawnProcess}.
"""
cmd = sys.executable
value = "42"
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, cmd,
[cmd, "-c",
"import sys; sys.stderr.write('%s')" % (value,)],
env=None, path="/tmp",
usePTY=self.usePTY)
def processEnded(ign):
self.assertEqual(value, p.errF.getvalue())
return d.addCallback(processEnded)
def testProcess(self):
cmd = self.getCommand('gzip')
s = "there's no place like home!\n" * 3
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, cmd, [cmd, "-c"], env=None, path="/tmp",
usePTY=self.usePTY)
p.transport.write(s)
p.transport.closeStdin()
def processEnded(ign):
f = p.outF
f.seek(0, 0)
gf = gzip.GzipFile(fileobj=f)
self.assertEqual(gf.read(), s)
return d.addCallback(processEnded)
class PosixProcessTestCasePTY(unittest.TestCase, PosixProcessBase):
"""
Just like PosixProcessTestCase, but use ptys instead of pipes.
"""
usePTY = True
# PTYs only offer one input and one output. What still makes sense?
# testNormalTermination
# test_abnormalTermination
# testSignal
# testProcess, but not without p.transport.closeStdin
# might be solvable: TODO: add test if so
def testOpeningTTY(self):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_tty.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
path=None, usePTY=self.usePTY)
p.transport.write("hello world!\n")
def processEnded(ign):
self.assertRaises(
error.ProcessExitedAlready, p.transport.signalProcess, 'HUP')
self.assertEqual(
p.outF.getvalue(),
"hello world!\r\nhello world!\r\n",
"Error message from process_tty follows:\n\n%s\n\n" % p.outF.getvalue())
return d.addCallback(processEnded)
def testBadArgs(self):
pyExe = sys.executable
pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
p = Accumulator()
self.assertRaises(ValueError, reactor.spawnProcess, p, pyExe, pyArgs,
usePTY=1, childFDs={1:'r'})
class Win32SignalProtocol(SignalProtocol):
"""
A win32-specific process protocol that handles C{processEnded}
differently: processes should exit with exit code 1.
"""
def processEnded(self, reason):
"""
Callback C{self.deferred} with C{None} if C{reason} is a
L{error.ProcessTerminated} failure with C{exitCode} set to 1.
Otherwise, errback with a C{ValueError} describing the problem.
"""
if not reason.check(error.ProcessTerminated):
return self.deferred.errback(
ValueError("wrong termination: %s" % (reason,)))
v = reason.value
if v.exitCode != 1:
return self.deferred.errback(
ValueError("Wrong exit code: %s" % (reason.exitCode,)))
self.deferred.callback(None)
class Win32ProcessTestCase(unittest.TestCase):
"""
Test process programs that are packaged with twisted.
"""
def testStdinReader(self):
pyExe = sys.executable
scriptPath = util.sibpath(__file__, "process_stdinreader.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, pyExe, [pyExe, "-u", scriptPath], env=None,
path=None)
p.transport.write("hello, world")
p.transport.closeStdin()
def processEnded(ign):
self.assertEqual(p.errF.getvalue(), "err\nerr\n")
self.assertEqual(p.outF.getvalue(), "out\nhello, world\nout\n")
return d.addCallback(processEnded)
def testBadArgs(self):
pyExe = sys.executable
pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
p = Accumulator()
self.assertRaises(ValueError,
reactor.spawnProcess, p, pyExe, pyArgs, uid=1)
self.assertRaises(ValueError,
reactor.spawnProcess, p, pyExe, pyArgs, gid=1)
self.assertRaises(ValueError,
reactor.spawnProcess, p, pyExe, pyArgs, usePTY=1)
self.assertRaises(ValueError,
reactor.spawnProcess, p, pyExe, pyArgs, childFDs={1:'r'})
def _testSignal(self, sig):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_signal.py")
d = defer.Deferred()
p = Win32SignalProtocol(d, sig)
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None)
return d
def test_signalTERM(self):
"""
Sending the SIGTERM signal terminates a created process, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} attribute set to 1.
"""
return self._testSignal('TERM')
def test_signalINT(self):
"""
Sending the SIGINT signal terminates a created process, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} attribute set to 1.
"""
return self._testSignal('INT')
def test_signalKILL(self):
"""
Sending the SIGKILL signal terminates a created process, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} attribute set to 1.
"""
return self._testSignal('KILL')
def test_closeHandles(self):
"""
The win32 handles should be properly closed when the process exits.
"""
import win32api
connected = defer.Deferred()
ended = defer.Deferred()
class SimpleProtocol(protocol.ProcessProtocol):
"""
A protocol that fires deferreds when connected and disconnected.
"""
def makeConnection(self, transport):
connected.callback(transport)
def processEnded(self, reason):
ended.callback(None)
p = SimpleProtocol()
pyExe = sys.executable
pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
proc = reactor.spawnProcess(p, pyExe, pyArgs)
def cbConnected(transport):
self.assertIdentical(transport, proc)
# perform a basic validity test on the handles
win32api.GetHandleInformation(proc.hProcess)
win32api.GetHandleInformation(proc.hThread)
# And save their values for later
self.hProcess = proc.hProcess
self.hThread = proc.hThread
connected.addCallback(cbConnected)
def checkTerminated(ignored):
# The attributes on the process object must be reset...
self.assertIdentical(proc.pid, None)
self.assertIdentical(proc.hProcess, None)
self.assertIdentical(proc.hThread, None)
# ...and the handles must be closed.
self.assertRaises(win32api.error,
win32api.GetHandleInformation, self.hProcess)
self.assertRaises(win32api.error,
win32api.GetHandleInformation, self.hThread)
ended.addCallback(checkTerminated)
return defer.gatherResults([connected, ended])
class Win32UnicodeEnvironmentTest(unittest.TestCase):
"""
Tests for Unicode environment on Windows
"""
goodKey = u'UNICODE'
goodValue = u'UNICODE'
def test_encodableUnicodeEnvironment(self):
"""
Test that C{os.environ} (inherited by every subprocess on Windows) may
contain an ascii-encodable Unicode string. This is different from
passing a Unicode environment explicitly to spawnProcess (which is not
supported).
"""
os.environ[self.goodKey] = self.goodValue
self.addCleanup(operator.delitem, os.environ, self.goodKey)
p = GetEnvironmentDictionary.run(reactor, [], {})
def gotEnvironment(environ):
self.assertEqual(
environ[self.goodKey.encode('ascii')],
self.goodValue.encode('ascii'))
return p.getResult().addCallback(gotEnvironment)
class Dumbwin32procPidTest(unittest.TestCase):
"""
Simple test for the pid attribute of Process on win32.
"""
def test_pid(self):
"""
Launch process with mock win32process. The only mock aspect of this
module is that the pid of the process created will always be 42.
"""
from twisted.internet import _dumbwin32proc
from twisted.test import mock_win32process
self.patch(_dumbwin32proc, "win32process", mock_win32process)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_cmdline.py")
d = defer.Deferred()
processProto = TrivialProcessProtocol(d)
comspec = str(os.environ["COMSPEC"])
cmd = [comspec, "/c", exe, scriptPath]
p = _dumbwin32proc.Process(reactor,
processProto,
None,
cmd,
{},
None)
self.assertEqual(42, p.pid)
self.assertEqual("<Process pid=42>", repr(p))
def pidCompleteCb(result):
self.assertEqual(None, p.pid)
return d.addCallback(pidCompleteCb)
class UtilTestCase(unittest.TestCase):
"""
Tests for process-related helper functions (currently only
L{procutils.which}).
"""
def setUp(self):
"""
Create several directories and files, some of which are executable
and some of which are not. Save the current PATH setting.
"""
j = os.path.join
base = self.mktemp()
self.foo = j(base, "foo")
self.baz = j(base, "baz")
self.foobar = j(self.foo, "bar")
self.foobaz = j(self.foo, "baz")
self.bazfoo = j(self.baz, "foo")
self.bazbar = j(self.baz, "bar")
for d in self.foobar, self.foobaz, self.bazfoo, self.bazbar:
os.makedirs(d)
for name, mode in [(j(self.foobaz, "executable"), 0700),
(j(self.foo, "executable"), 0700),
(j(self.bazfoo, "executable"), 0700),
(j(self.bazfoo, "executable.bin"), 0700),
(j(self.bazbar, "executable"), 0)]:
f = file(name, "w")
f.close()
os.chmod(name, mode)
self.oldPath = os.environ.get('PATH', None)
os.environ['PATH'] = os.pathsep.join((
self.foobar, self.foobaz, self.bazfoo, self.bazbar))
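        # Resulting layout (illustrative; "*" marks files created with mode 0700):
        #   base/foo/executable*           (base/foo itself is not on PATH)
        #   base/foo/bar/                  (on PATH, empty)
        #   base/foo/baz/executable*       (on PATH)
        #   base/baz/foo/executable*       (on PATH)
        #   base/baz/foo/executable.bin*   (on PATH)
        #   base/baz/bar/executable        (on PATH, mode 0, not executable)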
def tearDown(self):
"""
Restore the saved PATH setting, and set all created files readable
again so that they can be deleted easily.
"""
os.chmod(os.path.join(self.bazbar, "executable"), stat.S_IWUSR)
if self.oldPath is None:
try:
del os.environ['PATH']
except KeyError:
pass
else:
os.environ['PATH'] = self.oldPath
def test_whichWithoutPATH(self):
"""
Test that if C{os.environ} does not have a C{'PATH'} key,
L{procutils.which} returns an empty list.
"""
del os.environ['PATH']
self.assertEqual(procutils.which("executable"), [])
def testWhich(self):
j = os.path.join
paths = procutils.which("executable")
expectedPaths = [j(self.foobaz, "executable"),
j(self.bazfoo, "executable")]
if runtime.platform.isWindows():
expectedPaths.append(j(self.bazbar, "executable"))
self.assertEqual(paths, expectedPaths)
def testWhichPathExt(self):
j = os.path.join
old = os.environ.get('PATHEXT', None)
os.environ['PATHEXT'] = os.pathsep.join(('.bin', '.exe', '.sh'))
try:
paths = procutils.which("executable")
finally:
if old is None:
del os.environ['PATHEXT']
else:
os.environ['PATHEXT'] = old
expectedPaths = [j(self.foobaz, "executable"),
j(self.bazfoo, "executable"),
j(self.bazfoo, "executable.bin")]
if runtime.platform.isWindows():
expectedPaths.append(j(self.bazbar, "executable"))
self.assertEqual(paths, expectedPaths)
class ClosingPipesProcessProtocol(protocol.ProcessProtocol):
output = ''
errput = ''
def __init__(self, outOrErr):
self.deferred = defer.Deferred()
self.outOrErr = outOrErr
def processEnded(self, reason):
self.deferred.callback(reason)
def outReceived(self, data):
self.output += data
def errReceived(self, data):
self.errput += data
class ClosingPipes(unittest.TestCase):
def doit(self, fd):
"""
Create a child process and close one of its output descriptors using
L{IProcessTransport.closeStdout} or L{IProcessTransport.closeStderr}.
Return a L{Deferred} which fires after verifying that the descriptor was
really closed.
"""
p = ClosingPipesProcessProtocol(True)
self.assertFailure(p.deferred, error.ProcessTerminated)
p.deferred.addCallback(self._endProcess, p)
reactor.spawnProcess(
p, sys.executable, [
sys.executable, '-u', '-c',
'raw_input()\n'
'import sys, os, time\n'
# Give the system a bit of time to notice the closed
# descriptor. Another option would be to poll() for HUP
# instead of relying on an os.write to fail with SIGPIPE.
# However, that wouldn't work on OS X (or Windows?).
'for i in range(1000):\n'
' os.write(%d, "foo\\n")\n'
' time.sleep(0.01)\n'
'sys.exit(42)\n' % (fd,)
],
env=None)
if fd == 1:
p.transport.closeStdout()
elif fd == 2:
p.transport.closeStderr()
else:
raise RuntimeError
# Give the close time to propagate
p.transport.write('go\n')
# make the buggy case not hang
p.transport.closeStdin()
return p.deferred
def _endProcess(self, reason, p):
"""
Check that a failed write prevented the process from getting to its
custom exit code.
"""
# child must not get past that write without raising
self.assertNotEquals(
reason.exitCode, 42, 'process reason was %r' % reason)
self.assertEqual(p.output, '')
return p.errput
def test_stdout(self):
"""
ProcessProtocol.transport.closeStdout actually closes the pipe.
"""
d = self.doit(1)
def _check(errput):
self.assertIn('OSError', errput)
if runtime.platform.getType() != 'win32':
self.assertIn('Broken pipe', errput)
d.addCallback(_check)
return d
def test_stderr(self):
"""
ProcessProtocol.transport.closeStderr actually closes the pipe.
"""
d = self.doit(2)
def _check(errput):
# there should be no stderr open, so nothing for it to
# write the error to.
self.assertEqual(errput, '')
d.addCallback(_check)
return d
skipMessage = "wrong platform or reactor doesn't support IReactorProcess"
if (runtime.platform.getType() != 'posix') or (not interfaces.IReactorProcess(reactor, None)):
PosixProcessTestCase.skip = skipMessage
PosixProcessTestCasePTY.skip = skipMessage
TestTwoProcessesPosix.skip = skipMessage
FDTest.skip = skipMessage
if (runtime.platform.getType() != 'win32') or (not interfaces.IReactorProcess(reactor, None)):
Win32ProcessTestCase.skip = skipMessage
TestTwoProcessesNonPosix.skip = skipMessage
Dumbwin32procPidTest.skip = skipMessage
Win32UnicodeEnvironmentTest.skip = skipMessage
if not interfaces.IReactorProcess(reactor, None):
ProcessTestCase.skip = skipMessage
ClosingPipes.skip = skipMessage
|
XiaodunServerGroup/xiaodun-platform
|
refs/heads/master
|
common/test/acceptance/pages/studio/overview.py
|
6
|
"""
Course Outline page in Studio.
"""
from bok_choy.page_object import PageObject
from bok_choy.query import SubQuery
from bok_choy.promise import EmptyPromise, fulfill
from .course_page import CoursePage
from .unit import UnitPage
class CourseOutlineContainer(object):
"""
A mixin to a CourseOutline page object that adds the ability to load
a child page object by title.
CHILD_CLASS must be a :class:`CourseOutlineChild` subclass.
"""
CHILD_CLASS = None
def child(self, title, child_class=None):
if not child_class:
child_class = self.CHILD_CLASS
return child_class(
self.browser,
self.q(css=child_class.BODY_SELECTOR).filter(
SubQuery(css=child_class.NAME_SELECTOR).filter(text=title)
)[0]['data-locator']
)
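# A minimal usage sketch (hypothetical titles; assumes `outline` is a
# CourseOutlinePage that has already been visited):
#
#   section = outline.section("Introduction")
#   subsection = section.subsection("Lesson 1")
#   unit_page = subsection.unit("Getting Started").go_to()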
class CourseOutlineChild(PageObject):
"""
A mixin to a CourseOutline page object that will be used as a child of
:class:`CourseOutlineContainer`.
"""
NAME_SELECTOR = None
BODY_SELECTOR = None
def __init__(self, browser, locator):
super(CourseOutlineChild, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
@property
def name(self):
"""
Return the display name of this object.
"""
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
def __repr__(self):
return "{}(<browser>, {!r})".format(self.__class__.__name__, self.locator)
def _bounded_selector(self, selector):
"""
        Return `selector`, but limited to this particular `CourseOutlineChild` context.
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
class CourseOutlineUnit(CourseOutlineChild):
"""
PageObject that wraps a unit link on the Studio Course Overview page.
"""
url = None
BODY_SELECTOR = '.courseware-unit'
NAME_SELECTOR = '.unit-name'
def go_to(self):
"""
Open the unit page linked to by this unit link, and return
an initialized :class:`.UnitPage` for that unit.
"""
return UnitPage(self.browser, self.locator).visit()
class CourseOutlineSubsection(CourseOutlineChild, CourseOutlineContainer):
"""
    :class:`.PageObject` that wraps a subsection block on the Studio Course Overview page.
"""
url = None
BODY_SELECTOR = '.courseware-subsection'
NAME_SELECTOR = '.subsection-name-value'
CHILD_CLASS = CourseOutlineUnit
def unit(self, title):
"""
        Return the :class:`.CourseOutlineUnit` with the title `title`.
"""
return self.child(title)
def toggle_expand(self):
"""
Toggle the expansion of this subsection.
"""
self.disable_jquery_animations()
def subsection_expanded():
return all(
self.q(css=self._bounded_selector('.new-unit-item'))
.map(lambda el: el.visible)
.results
)
currently_expanded = subsection_expanded()
self.css_click(self._bounded_selector('.expand-collapse'))
fulfill(EmptyPromise(
lambda: subsection_expanded() != currently_expanded,
"Check that the subsection {} has been toggled".format(self.locator),
))
return self
class CourseOutlineSection(CourseOutlineChild, CourseOutlineContainer):
"""
    :class:`.PageObject` that wraps a section block on the Studio Course Overview page.
"""
url = None
BODY_SELECTOR = '.courseware-section'
NAME_SELECTOR = '.section-name-span'
CHILD_CLASS = CourseOutlineSubsection
def subsection(self, title):
"""
Return the :class:`.CourseOutlineSubsection` with the title `title`.
"""
return self.child(title)
class CourseOutlinePage(CoursePage, CourseOutlineContainer):
"""
Course Outline page in Studio.
"""
url_path = "course"
CHILD_CLASS = CourseOutlineSection
def is_browser_on_page(self):
return self.is_css_present('body.view-outline')
def section(self, title):
"""
Return the :class:`.CourseOutlineSection` with the title `title`.
"""
return self.child(title)
|
mjirayu/sit_academy
|
refs/heads/master
|
common/lib/xmodule/xmodule/static_content.py
|
70
|
#!/usr/bin/env python
"""
This module has utility functions for gathering up the static content
that is defined by XModules and XModuleDescriptors (javascript and css)
"""
import logging
import hashlib
import os
import errno
import sys
from collections import defaultdict
from docopt import docopt
from path import path
from xmodule.x_module import XModuleDescriptor
LOG = logging.getLogger(__name__)
def write_module_styles(output_root):
"""Write all registered XModule css, sass, and scss files to output root."""
return _write_styles('.xmodule_display', output_root, _list_modules())
def write_module_js(output_root):
"""Write all registered XModule js and coffee files to output root."""
return _write_js(output_root, _list_modules())
def write_descriptor_styles(output_root):
"""Write all registered XModuleDescriptor css, sass, and scss files to output root."""
return _write_styles('.xmodule_edit', output_root, _list_descriptors())
def write_descriptor_js(output_root):
"""Write all registered XModuleDescriptor js and coffee files to output root."""
return _write_js(output_root, _list_descriptors())
def _list_descriptors():
    """Return a list of all registered XModuleDescriptor classes."""
    return [desc for (_, desc) in XModuleDescriptor.load_classes()]
def _list_modules():
"""Return a list of all registered XModule classes."""
return [
desc.module_class
for desc
in _list_descriptors()
]
def _ensure_dir(directory):
"""Ensure that `directory` exists."""
try:
os.makedirs(directory)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def _write_styles(selector, output_root, classes):
"""
Write the css fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
css_fragments = defaultdict(set)
for class_ in classes:
class_css = class_.get_css()
for filetype in ('sass', 'scss', 'css'):
for idx, fragment in enumerate(class_css.get(filetype, [])):
css_fragments[idx, filetype, fragment].add(class_.__name__)
css_imports = defaultdict(set)
    for (idx, filetype, fragment), fragment_classes in sorted(css_fragments.items()):
fragment_name = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
# Prepend _ so that sass just includes the files into a single file
filename = '_' + fragment_name
contents[filename] = fragment
        for class_ in fragment_classes:
css_imports[class_].add(fragment_name)
module_styles_lines = []
module_styles_lines.append("@import 'bourbon/bourbon';")
module_styles_lines.append("@import 'bourbon/addons/button';")
module_styles_lines.append("@import 'assets/anims';")
for class_, fragment_names in css_imports.items():
module_styles_lines.append("""{selector}.xmodule_{class_} {{""".format(
class_=class_, selector=selector
))
module_styles_lines.extend(' @import "{0}";'.format(name) for name in fragment_names)
module_styles_lines.append('}')
contents['_module-styles.scss'] = '\n'.join(module_styles_lines)
_write_files(output_root, contents)
def _write_js(output_root, classes):
"""
Write the javascript fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
js_fragments = set()
for class_ in classes:
module_js = class_.get_javascript()
        # Force a 000 index prefix for xmodule_js so it sorts before all other fragments.
js_fragments.add((0, 'js', module_js.get('xmodule_js')))
for filetype in ('coffee', 'js'):
for idx, fragment in enumerate(module_js.get(filetype, [])):
js_fragments.add((idx + 1, filetype, fragment))
for idx, filetype, fragment in sorted(js_fragments):
filename = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
contents[filename] = fragment
_write_files(output_root, contents, {'.coffee': '.js'})
return [output_root / filename for filename in contents.keys()]
def _write_files(output_root, contents, generated_suffix_map=None):
"""
Write file contents to output root.
    Any files not listed in contents that exist in output_root will be deleted,
    unless they match one of the patterns in `generated_suffix_map`.
output_root (path): The root directory to write the file contents in
contents (dict): A map from filenames to file contents to be written to the output_root
generated_suffix_map (dict): Optional. Maps file suffix to generated file suffix.
For any file in contents, if the suffix matches a key in `generated_suffix_map`,
then the same filename with the suffix replaced by the value from `generated_suffix_map`
will be ignored
"""
_ensure_dir(output_root)
to_delete = set(file.basename() for file in output_root.files()) - set(contents.keys())
if generated_suffix_map:
for output_file in contents.keys():
for suffix, generated_suffix in generated_suffix_map.items():
if output_file.endswith(suffix):
to_delete.discard(output_file.replace(suffix, generated_suffix))
for extra_file in to_delete:
(output_root / extra_file).remove_p()
for filename, file_content in contents.iteritems():
output_file = output_root / filename
not_file = not output_file.isfile()
# not_file is included to short-circuit this check, because
# read_md5 depends on the file already existing
write_file = not_file or output_file.read_md5() != hashlib.md5(file_content).digest() # pylint: disable=too-many-function-args
if write_file:
LOG.debug("Writing %s", output_file)
output_file.write_bytes(file_content)
else:
LOG.debug("%s unchanged, skipping", output_file)
def main():
"""
    Generate static assets for all registered XModules and XModuleDescriptors.
Usage: static_content.py <output_root>
"""
args = docopt(main.__doc__)
root = path(args['<output_root>'])
write_descriptor_js(root / 'descriptors/js')
write_descriptor_styles(root / 'descriptors/css')
write_module_js(root / 'modules/js')
write_module_styles(root / 'modules/css')
if __name__ == '__main__':
sys.exit(main())
|
memtoko/django
|
refs/heads/master
|
tests/utils_tests/test_no_submodule.py
|
737
|
# Used to test for modules which don't have submodules.
|
cuongnv23/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
|
19
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: ec2_scaling_policy
short_description: Create or delete AWS scaling policies for Autoscaling groups
description:
- Can create or delete scaling policies for autoscaling groups
- Referenced autoscaling groups must already exist
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the policy
    required: false
    default: 'present'
choices: ['present', 'absent']
name:
description:
- Unique name for the scaling policy
required: true
asg_name:
description:
- Name of the associated autoscaling group
required: true
adjustment_type:
description:
- The type of change in capacity of the autoscaling group
required: false
choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
scaling_adjustment:
description:
- The amount by which the autoscaling group is adjusted by the policy
required: false
min_adjustment_step:
description:
- Minimum amount of adjustment when policy is triggered
required: false
cooldown:
description:
      - The minimum period of time (in seconds) between autoscaling actions
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- ec2_scaling_policy:
state: present
region: US-XXX
name: "scaledown-policy"
adjustment_type: "ChangeInCapacity"
asg_name: "slave-pool"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 300
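
# A removal sketch (illustrative, reusing the values from the example above):
- ec2_scaling_policy:
    state: absent
    region: US-XXX
    name: "scaledown-policy"
    asg_name: "slave-pool"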
'''
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import ScalingPolicy
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_scaling_policy(connection, module):
sp_name = module.params.get('name')
adjustment_type = module.params.get('adjustment_type')
asg_name = module.params.get('asg_name')
scaling_adjustment = module.params.get('scaling_adjustment')
min_adjustment_step = module.params.get('min_adjustment_step')
cooldown = module.params.get('cooldown')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
if not scalingPolicies:
sp = ScalingPolicy(
name=sp_name,
adjustment_type=adjustment_type,
as_name=asg_name,
scaling_adjustment=scaling_adjustment,
min_adjustment_step=min_adjustment_step,
cooldown=cooldown)
try:
connection.create_scaling_policy(sp)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
policy = scalingPolicies[0]
changed = False
# min_adjustment_step attribute is only relevant if the adjustment_type
# is set to percentage change in capacity, so it is a special case
if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
changed = True
# set the min adjustment step in case the user decided to change their
# adjustment type to percentage
setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
# check the remaining attributes
for attr in ('adjustment_type','scaling_adjustment','cooldown'):
if getattr(policy, attr) != module.params.get(attr):
changed = True
setattr(policy, attr, module.params.get(attr))
try:
if changed:
connection.create_scaling_policy(policy)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
module.fail_json(msg=str(e))
def delete_scaling_policy(connection, module):
sp_name = module.params.get('name')
asg_name = module.params.get('asg_name')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
if scalingPolicies:
try:
connection.delete_policy(sp_name, asg_name)
module.exit_json(changed=True)
except BotoServerError as e:
module.exit_json(changed=False, msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=True, type='str'),
adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
asg_name = dict(required=True, type='str'),
scaling_adjustment = dict(type='int'),
min_adjustment_step = dict(type='int'),
cooldown = dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
state = module.params.get('state')
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg = str(e))
if state == 'present':
create_scaling_policy(connection, module)
elif state == 'absent':
delete_scaling_policy(connection, module)
if __name__ == '__main__':
main()
|
andyraib/data-storage
|
refs/heads/master
|
python_scripts/env/lib/python3.6/site-packages/matplotlib/_version.py
|
6
|
# This file was generated by 'versioneer.py' (0.15) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
{
"dirty": false,
"error": null,
"full-revisionid": "1bfc7551f32f7b42ba50620a837f03e51d5b7c77",
"version": "2.0.0"
}
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
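# Usage sketch (values correspond to version_json above):
#   >>> get_versions()['version']
#   '2.0.0'
#   >>> get_versions()['dirty']
#   False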
|
tornadozou/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/utils/gc.py
|
45
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# Create the directories.
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# Create a simple parser that pulls the export_version from the directory.
path_regex = "^" + re.escape(base_dir) + "/(\\d+)$"
def parser(path):
match = re.match(path_regex, path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
  all_paths = gc.get_paths("/tmp", parser)  # contains all ten Paths
  every_fifth = gc.mod_export_version(5)
  print(every_fifth(all_paths))  # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print(largest_three(all_paths)) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print(both(all_paths)) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# Delete everything not in 'both'.
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
Path = collections.namedtuple('Path', 'path export_version')
def largest_export_versions(n):
"""Creates a filter that keeps the largest n export versions.
Args:
n: number of versions to keep.
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
heap = []
for idx, path in enumerate(paths):
if path.export_version is not None:
heapq.heappush(heap, (path.export_version, idx))
keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
return sorted(keepers)
return keep
def one_of_every_n_export_versions(n):
"""Creates a filter that keeps one of every n export versions.
Args:
n: interval size.
Returns:
A filter function that keeps exactly one path from each interval
[0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
interval the largest is kept.
"""
def keep(paths):
"""A filter function that keeps exactly one out of every n paths."""
keeper_map = {} # map from interval to largest path seen in that interval
for p in paths:
if p.export_version is None:
# Skip missing export_versions.
continue
# Find the interval (with a special case to map export_version = 0 to
      # interval 0).
interval = math.floor(
(p.export_version - 1) / n) if p.export_version else 0
existing = keeper_map.get(interval, None)
if (not existing) or (existing.export_version < p.export_version):
keeper_map[interval] = p
return sorted(keeper_map.values())
return keep
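# Worked example (illustrative): with n=2 and export versions [0, 1, 2, 3, 4],
# versions 0-2 fall into interval 0 and versions 3-4 into interval 1, so the
# filter keeps the paths with export_version 2 and 4.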
def mod_export_version(n):
"""Creates a filter that keeps every export that is a multiple of n.
Args:
n: step size.
Returns:
A filter function that keeps paths where export_version % n == 0.
"""
def keep(paths):
keepers = []
for p in paths:
if p.export_version % n == 0:
keepers.append(p)
return sorted(keepers)
return keep
def union(lf, rf):
"""Creates a filter that keeps the union of two filters.
Args:
lf: first filter
rf: second filter
Returns:
    A filter function that keeps the union of the paths kept by lf and rf.
"""
def keep(paths):
l = set(lf(paths))
r = set(rf(paths))
return sorted(list(l|r))
return keep
def negation(f):
"""Negate a filter.
Args:
f: filter function to invert
Returns:
A filter function that returns the negation of f.
"""
def keep(paths):
l = set(paths)
r = set(f(paths))
return sorted(list(l-r))
return keep
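# Example (illustrative): negation(mod_export_version(5)) keeps exactly those
# paths whose export_version is not a multiple of 5.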
def get_paths(base_dir, parser):
"""Gets a list of Paths in a given directory.
Args:
base_dir: directory.
parser: a function which gets the raw Path and can augment it with
information such as the export_version, or ignore the path by returning
None. An example parser may extract the export version from a path
such as "/tmp/exports/100" an another may extract from a full file
name such as "/tmp/checkpoint-99.out".
Returns:
A list of Paths contained in the base directory with the parsing function
applied.
By default the following fields are populated,
- Path.path
The parsing function is responsible for populating,
- Path.export_version
"""
raw_paths = gfile.ListDirectory(base_dir)
paths = []
for r in raw_paths:
p = parser(Path(os.path.join(compat.as_str_any(base_dir),
compat.as_str_any(r)),
None))
if p:
paths.append(p)
return sorted(paths)
|
rkn2/generate3DEC
|
refs/heads/master
|
low_poly_wrl.py
|
1
|
# Works; suppresses dialog boxes by using the scripted ("-_") command forms.
import rhinoscriptsyntax as rs
# iterates through layers
layers = rs.LayerNames()
success = True  # initialize so the failure flag set below is always defined
for layer in layers:
if layer != 'concrete': #can be altered to exclude anything in deformablekeys!
# select layer
rs.Command("-_SelLayer " + layer)
rs.Command("-_Mesh DetailedOptions SimplePlane=Yes Enter")
directory = 'C:\\Users\\Rebecca Napolitano\\Documents\\datafiles\\Romanbondingcourses\\2017_10_26_experiments\\'
filename = 'test'
filetype = '.wrl'
# make cmdstr, include layer if there are multiple layers
if len(layers) > 1:
path = "\"" + directory + filename + "_" + layer + filetype + "\""
else:
path = "\"" + directory + filename + filetype + "\""
rs.Command("-_SelNone ")
rs.Command("-_SelLayer " + layer)
rs.Command("-_Invert ")
rs.Command("Hide Enter")
rs.Command("-_SelMesh ")
cmdstr = "-_Export " + path
if filetype == ".wrl":
cmdstr += " Enter Enter"
# execute command
cmd = rs.Command(cmdstr)
if not(cmd):
success = False
rs.Command("-_SelNone" )
rs.Command("Show" )
|
bottompawn/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_email/test__header_value_parser.py
|
68
|
import string
import unittest
from email import _header_value_parser as parser
from email import errors
from email import policy
from test.test_email import TestEmailBase, parameterize
class TestTokens(TestEmailBase):
# EWWhiteSpaceTerminal
def test_EWWhiteSpaceTerminal(self):
x = parser.EWWhiteSpaceTerminal(' \t', 'fws')
self.assertEqual(x, ' \t')
self.assertEqual(str(x), '')
self.assertEqual(x.value, '')
self.assertEqual(x.encoded, ' \t')
# UnstructuredTokenList
def test_undecodable_bytes_error_preserved(self):
badstr = b"le pouf c\xaflebre".decode('ascii', 'surrogateescape')
unst = parser.get_unstructured(badstr)
self.assertDefectsEqual(unst.all_defects, [errors.UndecodableBytesDefect])
parts = list(unst.parts)
self.assertDefectsEqual(parts[0].all_defects, [])
self.assertDefectsEqual(parts[1].all_defects, [])
self.assertDefectsEqual(parts[2].all_defects, [errors.UndecodableBytesDefect])
class TestParserMixin:
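    # Shared helpers: _test_get_x runs a get_* parser over `source` and checks,
    # in order, the str() of the returned token list, its decoded .value, its
    # defects, and the unparsed remainder; _test_parse_x does the same for
    # parse_* methods, which consume their entire input.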
def _assert_results(self, tl, rest, string, value, defects, remainder,
comments=None):
self.assertEqual(str(tl), string)
self.assertEqual(tl.value, value)
self.assertDefectsEqual(tl.all_defects, defects)
self.assertEqual(rest, remainder)
if comments is not None:
self.assertEqual(tl.comments, comments)
def _test_get_x(self, method, source, string, value, defects,
remainder, comments=None):
tl, rest = method(source)
self._assert_results(tl, rest, string, value, defects, remainder,
                             comments=comments)
return tl
def _test_parse_x(self, method, input, string, value, defects,
comments=None):
tl = method(input)
self._assert_results(tl, '', string, value, defects, '', comments)
return tl
class TestParser(TestParserMixin, TestEmailBase):
# _wsp_splitter
rfc_printable_ascii = bytes(range(33, 127)).decode('ascii')
rfc_atext_chars = (string.ascii_letters + string.digits +
"!#$%&\'*+-/=?^_`{}|~")
rfc_dtext_chars = rfc_printable_ascii.translate(str.maketrans('','',r'\[]'))
def test__wsp_splitter_one_word(self):
self.assertEqual(parser._wsp_splitter('foo', 1), ['foo'])
def test__wsp_splitter_two_words(self):
self.assertEqual(parser._wsp_splitter('foo def', 1),
['foo', ' ', 'def'])
def test__wsp_splitter_ws_runs(self):
self.assertEqual(parser._wsp_splitter('foo \t def jik', 1),
['foo', ' \t ', 'def jik'])
# get_fws
def test_get_fws_only(self):
fws = self._test_get_x(parser.get_fws, ' \t ', ' \t ', ' ', [], '')
self.assertEqual(fws.token_type, 'fws')
def test_get_fws_space(self):
self._test_get_x(parser.get_fws, ' foo', ' ', ' ', [], 'foo')
def test_get_fws_ws_run(self):
self._test_get_x(parser.get_fws, ' \t foo ', ' \t ', ' ', [], 'foo ')
# get_encoded_word
def test_get_encoded_word_missing_start_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('abc')
def test_get_encoded_word_missing_end_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('=?abc')
def test_get_encoded_word_missing_middle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('=?abc?=')
def test_get_encoded_word_valid_ew(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?this_is_a_test?= bird',
'this is a test',
'this is a test',
[],
' bird')
def test_get_encoded_word_internal_spaces(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?this is a test?= bird',
'this is a test',
'this is a test',
[errors.InvalidHeaderDefect],
' bird')
def test_get_encoded_word_gets_first(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first?= =?utf-8?q?second?=',
'first',
'first',
[],
' =?utf-8?q?second?=')
def test_get_encoded_word_gets_first_even_if_no_space(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first?==?utf-8?q?second?=',
'first',
'first',
[],
'=?utf-8?q?second?=')
def test_get_encoded_word_sets_extra_attributes(self):
ew = self._test_get_x(parser.get_encoded_word,
'=?us-ascii*jive?q?first_second?=',
'first second',
'first second',
[],
'')
self.assertEqual(ew.encoded, '=?us-ascii*jive?q?first_second?=')
self.assertEqual(ew.charset, 'us-ascii')
self.assertEqual(ew.lang, 'jive')
def test_get_encoded_word_lang_default_is_blank(self):
ew = self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first_second?=',
'first second',
'first second',
[],
'')
self.assertEqual(ew.encoded, '=?us-ascii?q?first_second?=')
self.assertEqual(ew.charset, 'us-ascii')
self.assertEqual(ew.lang, '')
def test_get_encoded_word_non_printable_defect(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first\x02second?=',
'first\x02second',
'first\x02second',
[errors.NonPrintableDefect],
'')
def test_get_encoded_word_leading_internal_space(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?=20foo?=',
' foo',
' foo',
[],
'')
def test_get_encoded_word_quopri_utf_escape_follows_cte(self):
# Issue 18044
self._test_get_x(parser.get_encoded_word,
'=?utf-8?q?=C3=89ric?=',
'Éric',
'Éric',
[],
'')
# get_unstructured
def _get_unst(self, value):
token = parser.get_unstructured(value)
return token, ''
def test_get_unstructured_null(self):
self._test_get_x(self._get_unst, '', '', '', [], '')
def test_get_unstructured_one_word(self):
self._test_get_x(self._get_unst, 'foo', 'foo', 'foo', [], '')
def test_get_unstructured_normal_phrase(self):
self._test_get_x(self._get_unst, 'foo bar bird',
'foo bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_normal_phrase_with_whitespace(self):
self._test_get_x(self._get_unst, 'foo \t bar bird',
'foo \t bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_leading_whitespace(self):
self._test_get_x(self._get_unst, ' foo bar',
' foo bar',
' foo bar',
[],
'')
def test_get_unstructured_trailing_whitespace(self):
self._test_get_x(self._get_unst, 'foo bar ',
'foo bar ',
'foo bar ',
[],
'')
def test_get_unstructured_leading_and_trailing_whitespace(self):
self._test_get_x(self._get_unst, ' foo bar ',
' foo bar ',
' foo bar ',
[],
'')
def test_get_unstructured_one_valid_ew_no_ws(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?=',
'bar',
'bar',
[],
'')
def test_get_unstructured_one_ew_trailing_ws(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?= ',
'bar ',
'bar ',
[],
'')
def test_get_unstructured_one_valid_ew_trailing_text(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?= bird',
'bar bird',
'bar bird',
[],
'')
def test_get_unstructured_phrase_with_ew_in_middle_of_text(self):
self._test_get_x(self._get_unst, 'foo =?us-ascii?q?bar?= bird',
'foo bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_phrase_with_two_ew(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= =?us-ascii?q?bird?=',
'foo barbird',
'foo barbird',
[],
'')
def test_get_unstructured_phrase_with_two_ew_trailing_ws(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= =?us-ascii?q?bird?= ',
'foo barbird ',
'foo barbird ',
[],
'')
def test_get_unstructured_phrase_with_ew_with_leading_ws(self):
self._test_get_x(self._get_unst,
' =?us-ascii?q?bar?=',
' bar',
' bar',
[],
'')
def test_get_unstructured_phrase_with_two_ew_extra_ws(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= \t =?us-ascii?q?bird?=',
'foo barbird',
'foo barbird',
[],
'')
def test_get_unstructured_two_ew_extra_ws_trailing_text(self):
self._test_get_x(self._get_unst,
'=?us-ascii?q?test?= =?us-ascii?q?foo?= val',
'testfoo val',
'testfoo val',
[],
'')
def test_get_unstructured_ew_with_internal_ws(self):
self._test_get_x(self._get_unst,
'=?iso-8859-1?q?hello=20world?=',
'hello world',
'hello world',
[],
'')
def test_get_unstructured_ew_with_internal_leading_ws(self):
self._test_get_x(self._get_unst,
' =?us-ascii?q?=20test?= =?us-ascii?q?=20foo?= val',
' test foo val',
' test foo val',
[],
'')
    def test_get_unstructured_invalid_ew(self):
self._test_get_x(self._get_unst,
'=?test val',
'=?test val',
'=?test val',
[],
'')
def test_get_unstructured_undecodable_bytes(self):
self._test_get_x(self._get_unst,
b'test \xACfoo val'.decode('ascii', 'surrogateescape'),
'test \uDCACfoo val',
'test \uDCACfoo val',
[errors.UndecodableBytesDefect],
'')
def test_get_unstructured_undecodable_bytes_in_EW(self):
self._test_get_x(self._get_unst,
(b'=?us-ascii?q?=20test?= =?us-ascii?q?=20\xACfoo?='
b' val').decode('ascii', 'surrogateescape'),
' test \uDCACfoo val',
' test \uDCACfoo val',
[errors.UndecodableBytesDefect]*2,
'')
def test_get_unstructured_missing_base64_padding(self):
self._test_get_x(self._get_unst,
'=?utf-8?b?dmk?=',
'vi',
'vi',
[errors.InvalidBase64PaddingDefect],
'')
def test_get_unstructured_invalid_base64_character(self):
self._test_get_x(self._get_unst,
'=?utf-8?b?dm\x01k===?=',
'vi',
'vi',
[errors.InvalidBase64CharactersDefect],
'')
def test_get_unstructured_invalid_base64_character_and_bad_padding(self):
self._test_get_x(self._get_unst,
'=?utf-8?b?dm\x01k?=',
'vi',
'vi',
[errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect],
'')
def test_get_unstructured_no_whitespace_between_ews(self):
self._test_get_x(self._get_unst,
'=?utf-8?q?foo?==?utf-8?q?bar?=',
'foobar',
'foobar',
[errors.InvalidHeaderDefect],
'')
# get_qp_ctext
def test_get_qp_ctext_only(self):
ptext = self._test_get_x(parser.get_qp_ctext,
'foobar', 'foobar', ' ', [], '')
self.assertEqual(ptext.token_type, 'ptext')
def test_get_qp_ctext_all_printables(self):
with_qp = self.rfc_printable_ascii.replace('\\', '\\\\')
        with_qp = with_qp.replace('(', r'\(')
with_qp = with_qp.replace(')', r'\)')
ptext = self._test_get_x(parser.get_qp_ctext,
with_qp, self.rfc_printable_ascii, ' ', [], '')
def test_get_qp_ctext_two_words_gets_first(self):
self._test_get_x(parser.get_qp_ctext,
'foo de', 'foo', ' ', [], ' de')
def test_get_qp_ctext_following_wsp_preserved(self):
self._test_get_x(parser.get_qp_ctext,
'foo \t\tde', 'foo', ' ', [], ' \t\tde')
def test_get_qp_ctext_up_to_close_paren_only(self):
self._test_get_x(parser.get_qp_ctext,
'foo)', 'foo', ' ', [], ')')
def test_get_qp_ctext_wsp_before_close_paren_preserved(self):
self._test_get_x(parser.get_qp_ctext,
'foo )', 'foo', ' ', [], ' )')
def test_get_qp_ctext_close_paren_mid_word(self):
self._test_get_x(parser.get_qp_ctext,
'foo)bar', 'foo', ' ', [], ')bar')
def test_get_qp_ctext_up_to_open_paren_only(self):
self._test_get_x(parser.get_qp_ctext,
'foo(', 'foo', ' ', [], '(')
def test_get_qp_ctext_wsp_before_open_paren_preserved(self):
self._test_get_x(parser.get_qp_ctext,
'foo (', 'foo', ' ', [], ' (')
def test_get_qp_ctext_open_paren_mid_word(self):
self._test_get_x(parser.get_qp_ctext,
'foo(bar', 'foo', ' ', [], '(bar')
def test_get_qp_ctext_non_printables(self):
ptext = self._test_get_x(parser.get_qp_ctext,
'foo\x00bar)', 'foo\x00bar', ' ',
[errors.NonPrintableDefect], ')')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
# get_qcontent
def test_get_qcontent_only(self):
ptext = self._test_get_x(parser.get_qcontent,
'foobar', 'foobar', 'foobar', [], '')
self.assertEqual(ptext.token_type, 'ptext')
def test_get_qcontent_all_printables(self):
with_qp = self.rfc_printable_ascii.replace('\\', '\\\\')
        with_qp = with_qp.replace('"', r'\"')
ptext = self._test_get_x(parser.get_qcontent, with_qp,
self.rfc_printable_ascii,
self.rfc_printable_ascii, [], '')
def test_get_qcontent_two_words_gets_first(self):
self._test_get_x(parser.get_qcontent,
'foo de', 'foo', 'foo', [], ' de')
def test_get_qcontent_following_wsp_preserved(self):
self._test_get_x(parser.get_qcontent,
'foo \t\tde', 'foo', 'foo', [], ' \t\tde')
def test_get_qcontent_up_to_dquote_only(self):
self._test_get_x(parser.get_qcontent,
'foo"', 'foo', 'foo', [], '"')
def test_get_qcontent_wsp_before_close_paren_preserved(self):
self._test_get_x(parser.get_qcontent,
'foo "', 'foo', 'foo', [], ' "')
def test_get_qcontent_close_paren_mid_word(self):
self._test_get_x(parser.get_qcontent,
'foo"bar', 'foo', 'foo', [], '"bar')
def test_get_qcontent_non_printables(self):
ptext = self._test_get_x(parser.get_qcontent,
'foo\x00fg"', 'foo\x00fg', 'foo\x00fg',
[errors.NonPrintableDefect], '"')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
# get_atext
def test_get_atext_only(self):
atext = self._test_get_x(parser.get_atext,
'foobar', 'foobar', 'foobar', [], '')
self.assertEqual(atext.token_type, 'atext')
def test_get_atext_all_atext(self):
atext = self._test_get_x(parser.get_atext, self.rfc_atext_chars,
self.rfc_atext_chars,
self.rfc_atext_chars, [], '')
def test_get_atext_two_words_gets_first(self):
self._test_get_x(parser.get_atext,
'foo bar', 'foo', 'foo', [], ' bar')
def test_get_atext_following_wsp_preserved(self):
self._test_get_x(parser.get_atext,
'foo \t\tbar', 'foo', 'foo', [], ' \t\tbar')
def test_get_atext_up_to_special(self):
self._test_get_x(parser.get_atext,
'foo@bar', 'foo', 'foo', [], '@bar')
def test_get_atext_non_printables(self):
atext = self._test_get_x(parser.get_atext,
'foo\x00bar(', 'foo\x00bar', 'foo\x00bar',
[errors.NonPrintableDefect], '(')
self.assertEqual(atext.defects[0].non_printables[0], '\x00')
# get_bare_quoted_string
def test_get_bare_quoted_string_only(self):
bqs = self._test_get_x(parser.get_bare_quoted_string,
'"foo"', '"foo"', 'foo', [], '')
self.assertEqual(bqs.token_type, 'bare-quoted-string')
def test_get_bare_quoted_string_must_start_with_dquote(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_bare_quoted_string('foo"')
with self.assertRaises(errors.HeaderParseError):
parser.get_bare_quoted_string(' "foo"')
def test_get_bare_quoted_string_following_wsp_preserved(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo"\t bar', '"foo"', 'foo', [], '\t bar')
def test_get_bare_quoted_string_multiple_words(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo bar moo"', '"foo bar moo"', 'foo bar moo', [], '')
def test_get_bare_quoted_string_multiple_words_wsp_preserved(self):
self._test_get_x(parser.get_bare_quoted_string,
'" foo moo\t"', '" foo moo\t"', ' foo moo\t', [], '')
def test_get_bare_quoted_string_end_dquote_mid_word(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo"bar', '"foo"', 'foo', [], 'bar')
def test_get_bare_quoted_string_quoted_dquote(self):
self._test_get_x(parser.get_bare_quoted_string,
r'"foo\"in"a', r'"foo\"in"', 'foo"in', [], 'a')
def test_get_bare_quoted_string_non_printables(self):
self._test_get_x(parser.get_bare_quoted_string,
'"a\x01a"', '"a\x01a"', 'a\x01a',
[errors.NonPrintableDefect], '')
def test_get_bare_quoted_string_no_end_dquote(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo', '"foo"', 'foo',
[errors.InvalidHeaderDefect], '')
self._test_get_x(parser.get_bare_quoted_string,
'"foo ', '"foo "', 'foo ',
[errors.InvalidHeaderDefect], '')
def test_get_bare_quoted_string_empty_quotes(self):
self._test_get_x(parser.get_bare_quoted_string,
'""', '""', '', [], '')
# Issue 16983: apply postel's law to some bad encoding.
def test_encoded_word_inside_quotes(self):
self._test_get_x(parser.get_bare_quoted_string,
'"=?utf-8?Q?not_really_valid?="',
'"not really valid"',
'not really valid',
[errors.InvalidHeaderDefect],
'')
# get_comment
def test_get_comment_only(self):
comment = self._test_get_x(parser.get_comment,
'(comment)', '(comment)', ' ', [], '', ['comment'])
self.assertEqual(comment.token_type, 'comment')
def test_get_comment_must_start_with_paren(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_comment('foo"')
with self.assertRaises(errors.HeaderParseError):
parser.get_comment(' (foo"')
def test_get_comment_following_wsp_preserved(self):
self._test_get_x(parser.get_comment,
'(comment) \t', '(comment)', ' ', [], ' \t', ['comment'])
def test_get_comment_multiple_words(self):
self._test_get_x(parser.get_comment,
'(foo bar) \t', '(foo bar)', ' ', [], ' \t', ['foo bar'])
def test_get_comment_multiple_words_wsp_preserved(self):
self._test_get_x(parser.get_comment,
'( foo bar\t ) \t', '( foo bar\t )', ' ', [], ' \t',
[' foo bar\t '])
def test_get_comment_end_paren_mid_word(self):
self._test_get_x(parser.get_comment,
'(foo)bar', '(foo)', ' ', [], 'bar', ['foo'])
def test_get_comment_quoted_parens(self):
self._test_get_x(parser.get_comment,
'(foo\) \(\)bar)', '(foo\) \(\)bar)', ' ', [], '', ['foo) ()bar'])
def test_get_comment_non_printable(self):
self._test_get_x(parser.get_comment,
'(foo\x7Fbar)', '(foo\x7Fbar)', ' ',
[errors.NonPrintableDefect], '', ['foo\x7Fbar'])
def test_get_comment_no_end_paren(self):
self._test_get_x(parser.get_comment,
'(foo bar', '(foo bar)', ' ',
[errors.InvalidHeaderDefect], '', ['foo bar'])
self._test_get_x(parser.get_comment,
'(foo bar ', '(foo bar )', ' ',
[errors.InvalidHeaderDefect], '', ['foo bar '])
def test_get_comment_nested_comment(self):
comment = self._test_get_x(parser.get_comment,
'(foo(bar))', '(foo(bar))', ' ', [], '', ['foo(bar)'])
self.assertEqual(comment[1].content, 'bar')
def test_get_comment_nested_comment_wsp(self):
comment = self._test_get_x(parser.get_comment,
'(foo ( bar ) )', '(foo ( bar ) )', ' ', [], '', ['foo ( bar ) '])
self.assertEqual(comment[2].content, ' bar ')
def test_get_comment_empty_comment(self):
self._test_get_x(parser.get_comment,
'()', '()', ' ', [], '', [''])
def test_get_comment_multiple_nesting(self):
comment = self._test_get_x(parser.get_comment,
'(((((foo)))))', '(((((foo)))))', ' ', [], '', ['((((foo))))'])
for i in range(4, 0, -1):
self.assertEqual(comment[0].content, '('*(i-1)+'foo'+')'*(i-1))
comment = comment[0]
self.assertEqual(comment.content, 'foo')
def test_get_comment_missing_end_of_nesting(self):
self._test_get_x(parser.get_comment,
'(((((foo)))', '(((((foo)))))', ' ',
[errors.InvalidHeaderDefect]*2, '', ['((((foo))))'])
def test_get_comment_qs_in_nested_comment(self):
comment = self._test_get_x(parser.get_comment,
'(foo (b\)))', '(foo (b\)))', ' ', [], '', ['foo (b\))'])
self.assertEqual(comment[2].content, 'b)')
# get_cfws
def test_get_cfws_only_ws(self):
cfws = self._test_get_x(parser.get_cfws,
' \t \t', ' \t \t', ' ', [], '', [])
self.assertEqual(cfws.token_type, 'cfws')
def test_get_cfws_only_comment(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo)', '(foo)', ' ', [], '', ['foo'])
self.assertEqual(cfws[0].content, 'foo')
def test_get_cfws_only_mixed(self):
cfws = self._test_get_x(parser.get_cfws,
' (foo ) ( bar) ', ' (foo ) ( bar) ', ' ', [], '',
['foo ', ' bar'])
self.assertEqual(cfws[1].content, 'foo ')
self.assertEqual(cfws[3].content, ' bar')
def test_get_cfws_ends_at_non_leader(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo) bar', '(foo) ', ' ', [], 'bar', ['foo'])
self.assertEqual(cfws[0].content, 'foo')
def test_get_cfws_ends_at_non_printable(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo) \x07', '(foo) ', ' ', [], '\x07', ['foo'])
self.assertEqual(cfws[0].content, 'foo')
def test_get_cfws_non_printable_in_comment(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo \x07) "test"', '(foo \x07) ', ' ',
[errors.NonPrintableDefect], '"test"', ['foo \x07'])
self.assertEqual(cfws[0].content, 'foo \x07')
def test_get_cfws_header_ends_in_comment(self):
cfws = self._test_get_x(parser.get_cfws,
' (foo ', ' (foo )', ' ',
[errors.InvalidHeaderDefect], '', ['foo '])
self.assertEqual(cfws[1].content, 'foo ')
def test_get_cfws_multiple_nested_comments(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo (bar)) ((a)(a))', '(foo (bar)) ((a)(a))', ' ', [],
'', ['foo (bar)', '(a)(a)'])
self.assertEqual(cfws[0].comments, ['foo (bar)'])
self.assertEqual(cfws[2].comments, ['(a)(a)'])
# get_quoted_string
def test_get_quoted_string_only(self):
qs = self._test_get_x(parser.get_quoted_string,
'"bob"', '"bob"', 'bob', [], '')
self.assertEqual(qs.token_type, 'quoted-string')
self.assertEqual(qs.quoted_value, '"bob"')
self.assertEqual(qs.content, 'bob')
def test_get_quoted_string_with_wsp(self):
qs = self._test_get_x(parser.get_quoted_string,
'\t "bob" ', '\t "bob" ', ' bob ', [], '')
self.assertEqual(qs.quoted_value, ' "bob" ')
self.assertEqual(qs.content, 'bob')
def test_get_quoted_string_with_comments_and_wsp(self):
qs = self._test_get_x(parser.get_quoted_string,
' (foo) "bob"(bar)', ' (foo) "bob"(bar)', ' bob ', [], '')
self.assertEqual(qs[0][1].content, 'foo')
self.assertEqual(qs[2][0].content, 'bar')
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
def test_get_quoted_string_with_multiple_comments(self):
qs = self._test_get_x(parser.get_quoted_string,
' (foo) (bar) "bob"(bird)', ' (foo) (bar) "bob"(bird)', ' bob ',
[], '')
self.assertEqual(qs[0].comments, ['foo', 'bar'])
self.assertEqual(qs[2].comments, ['bird'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
def test_get_quoted_string_non_printable_in_comment(self):
qs = self._test_get_x(parser.get_quoted_string,
' (\x0A) "bob"', ' (\x0A) "bob"', ' bob',
[errors.NonPrintableDefect], '')
self.assertEqual(qs[0].comments, ['\x0A'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob"')
def test_get_quoted_string_non_printable_in_qcontent(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "a\x0B"', ' (a) "a\x0B"', ' a\x0B',
[errors.NonPrintableDefect], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs.content, 'a\x0B')
self.assertEqual(qs.quoted_value, ' "a\x0B"')
def test_get_quoted_string_internal_ws(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "foo bar "', ' (a) "foo bar "', ' foo bar ',
[], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs.content, 'foo bar ')
self.assertEqual(qs.quoted_value, ' "foo bar "')
def test_get_quoted_string_header_ends_in_comment(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "bob" (a', ' (a) "bob" (a)', ' bob ',
[errors.InvalidHeaderDefect], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs[2].comments, ['a'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
def test_get_quoted_string_header_ends_in_qcontent(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "bob', ' (a) "bob"', ' bob',
[errors.InvalidHeaderDefect], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob"')
def test_get_quoted_string_no_quoted_string(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_quoted_string(' (ab) xyz')
def test_get_quoted_string_qs_ends_at_noncfws(self):
qs = self._test_get_x(parser.get_quoted_string,
'\t "bob" fee', '\t "bob" ', ' bob ', [], 'fee')
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
# get_atom
def test_get_atom_only(self):
atom = self._test_get_x(parser.get_atom,
'bob', 'bob', 'bob', [], '')
self.assertEqual(atom.token_type, 'atom')
def test_get_atom_with_wsp(self):
self._test_get_x(parser.get_atom,
'\t bob ', '\t bob ', ' bob ', [], '')
def test_get_atom_with_comments_and_wsp(self):
atom = self._test_get_x(parser.get_atom,
' (foo) bob(bar)', ' (foo) bob(bar)', ' bob ', [], '')
self.assertEqual(atom[0][1].content, 'foo')
self.assertEqual(atom[2][0].content, 'bar')
def test_get_atom_with_multiple_comments(self):
atom = self._test_get_x(parser.get_atom,
' (foo) (bar) bob(bird)', ' (foo) (bar) bob(bird)', ' bob ',
[], '')
self.assertEqual(atom[0].comments, ['foo', 'bar'])
self.assertEqual(atom[2].comments, ['bird'])
def test_get_atom_non_printable_in_comment(self):
atom = self._test_get_x(parser.get_atom,
' (\x0A) bob', ' (\x0A) bob', ' bob',
[errors.NonPrintableDefect], '')
self.assertEqual(atom[0].comments, ['\x0A'])
def test_get_atom_non_printable_in_atext(self):
atom = self._test_get_x(parser.get_atom,
' (a) a\x0B', ' (a) a\x0B', ' a\x0B',
[errors.NonPrintableDefect], '')
self.assertEqual(atom[0].comments, ['a'])
def test_get_atom_header_ends_in_comment(self):
atom = self._test_get_x(parser.get_atom,
' (a) bob (a', ' (a) bob (a)', ' bob ',
[errors.InvalidHeaderDefect], '')
self.assertEqual(atom[0].comments, ['a'])
self.assertEqual(atom[2].comments, ['a'])
def test_get_atom_no_atom(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_atom(' (ab) ')
def test_get_atom_no_atom_before_special(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_atom(' (ab) @')
def test_get_atom_atom_ends_at_special(self):
atom = self._test_get_x(parser.get_atom,
' (foo) bob(bar) @bang', ' (foo) bob(bar) ', ' bob ', [], '@bang')
self.assertEqual(atom[0].comments, ['foo'])
self.assertEqual(atom[2].comments, ['bar'])
def test_get_atom_atom_ends_at_noncfws(self):
self._test_get_x(parser.get_atom,
'bob fred', 'bob ', 'bob ', [], 'fred')
def test_get_atom_rfc2047_atom(self):
self._test_get_x(parser.get_atom,
'=?utf-8?q?=20bob?=', ' bob', ' bob', [], '')
# get_dot_atom_text
def test_get_dot_atom_text(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo.bar.bang', 'foo.bar.bang', 'foo.bar.bang', [], '')
self.assertEqual(dot_atom_text.token_type, 'dot-atom-text')
self.assertEqual(len(dot_atom_text), 5)
def test_get_dot_atom_text_lone_atom_is_valid(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo', 'foo', 'foo', [], '')
def test_get_dot_atom_text_raises_on_leading_dot(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('.foo.bar')
def test_get_dot_atom_text_raises_on_trailing_dot(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('foo.bar.')
def test_get_dot_atom_text_raises_on_leading_non_atext(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text(' foo.bar')
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('@foo.bar')
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('"foo.bar"')
def test_get_dot_atom_text_trailing_text_preserved(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo@bar', 'foo', 'foo', [], '@bar')
def test_get_dot_atom_text_trailing_ws_preserved(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo .bar', 'foo', 'foo', [], ' .bar')
# get_dot_atom
def test_get_dot_atom_only(self):
dot_atom = self._test_get_x(parser.get_dot_atom,
'foo.bar.bing', 'foo.bar.bing', 'foo.bar.bing', [], '')
self.assertEqual(dot_atom.token_type, 'dot-atom')
self.assertEqual(len(dot_atom), 1)
def test_get_dot_atom_with_wsp(self):
self._test_get_x(parser.get_dot_atom,
'\t foo.bar.bing ', '\t foo.bar.bing ', ' foo.bar.bing ', [], '')
def test_get_dot_atom_with_comments_and_wsp(self):
self._test_get_x(parser.get_dot_atom,
' (sing) foo.bar.bing (here) ', ' (sing) foo.bar.bing (here) ',
' foo.bar.bing ', [], '')
def test_get_dot_atom_space_ends_dot_atom(self):
self._test_get_x(parser.get_dot_atom,
' (sing) foo.bar .bing (here) ', ' (sing) foo.bar ',
' foo.bar ', [], '.bing (here) ')
def test_get_dot_atom_no_atom_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom(' (foo) ')
def test_get_dot_atom_leading_dot_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom(' (foo) .bar')
def test_get_dot_atom_two_dots_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom('bar..bang')
def test_get_dot_atom_trailing_dot_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom(' (foo) bar.bang. foo')
def test_get_dot_atom_rfc2047_atom(self):
self._test_get_x(parser.get_dot_atom,
'=?utf-8?q?=20bob?=', ' bob', ' bob', [], '')
# get_word (if this were black box we'd repeat all the qs/atom tests)
def test_get_word_atom_yields_atom(self):
word = self._test_get_x(parser.get_word,
' (foo) bar (bang) :ah', ' (foo) bar (bang) ', ' bar ', [], ':ah')
self.assertEqual(word.token_type, 'atom')
self.assertEqual(word[0].token_type, 'cfws')
def test_get_word_qs_yields_qs(self):
word = self._test_get_x(parser.get_word,
'"bar " (bang) ah', '"bar " (bang) ', 'bar ', [], 'ah')
self.assertEqual(word.token_type, 'quoted-string')
self.assertEqual(word[0].token_type, 'bare-quoted-string')
self.assertEqual(word[0].value, 'bar ')
self.assertEqual(word.content, 'bar ')
def test_get_word_ends_at_dot(self):
self._test_get_x(parser.get_word,
'foo.', 'foo', 'foo', [], '.')
# get_phrase
def test_get_phrase_simple(self):
phrase = self._test_get_x(parser.get_phrase,
'"Fred A. Johnson" is his name, oh.',
'"Fred A. Johnson" is his name',
'Fred A. Johnson is his name',
[],
', oh.')
self.assertEqual(phrase.token_type, 'phrase')
def test_get_phrase_complex(self):
phrase = self._test_get_x(parser.get_phrase,
' (A) bird (in (my|your)) "hand " is messy\t<>\t',
' (A) bird (in (my|your)) "hand " is messy\t',
' bird hand is messy ',
[],
'<>\t')
self.assertEqual(phrase[0][0].comments, ['A'])
self.assertEqual(phrase[0][2].comments, ['in (my|your)'])
def test_get_phrase_obsolete(self):
phrase = self._test_get_x(parser.get_phrase,
'Fred A.(weird).O Johnson',
'Fred A.(weird).O Johnson',
'Fred A. .O Johnson',
[errors.ObsoleteHeaderDefect]*3,
'')
self.assertEqual(len(phrase), 7)
self.assertEqual(phrase[3].comments, ['weird'])
    def test_get_phrase_must_start_with_word(self):
phrase = self._test_get_x(parser.get_phrase,
'(even weirder).name',
'(even weirder).name',
' .name',
[errors.InvalidHeaderDefect] + [errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(len(phrase), 3)
self.assertEqual(phrase[0].comments, ['even weirder'])
def test_get_phrase_ending_with_obsolete(self):
phrase = self._test_get_x(parser.get_phrase,
'simple phrase.(with trailing comment):boo',
'simple phrase.(with trailing comment)',
'simple phrase. ',
[errors.ObsoleteHeaderDefect]*2,
':boo')
self.assertEqual(len(phrase), 4)
self.assertEqual(phrase[3].comments, ['with trailing comment'])
    def test_get_phrase_cfws_only_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_phrase(' (foo) ')
# get_local_part
def test_get_local_part_simple(self):
local_part = self._test_get_x(parser.get_local_part,
'dinsdale@python.org', 'dinsdale', 'dinsdale', [], '@python.org')
self.assertEqual(local_part.token_type, 'local-part')
self.assertEqual(local_part.local_part, 'dinsdale')
def test_get_local_part_with_dot(self):
local_part = self._test_get_x(parser.get_local_part,
'Fred.A.Johnson@python.org',
'Fred.A.Johnson',
'Fred.A.Johnson',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_with_whitespace(self):
local_part = self._test_get_x(parser.get_local_part,
' Fred.A.Johnson @python.org',
' Fred.A.Johnson ',
' Fred.A.Johnson ',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_with_cfws(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo) Fred.A.Johnson (bar (bird)) @python.org',
' (foo) Fred.A.Johnson (bar (bird)) ',
' Fred.A.Johnson ',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
self.assertEqual(local_part[0][0].comments, ['foo'])
self.assertEqual(local_part[0][2].comments, ['bar (bird)'])
def test_get_local_part_simple_quoted(self):
local_part = self._test_get_x(parser.get_local_part,
'"dinsdale"@python.org', '"dinsdale"', '"dinsdale"', [], '@python.org')
self.assertEqual(local_part.token_type, 'local-part')
self.assertEqual(local_part.local_part, 'dinsdale')
def test_get_local_part_with_quoted_dot(self):
local_part = self._test_get_x(parser.get_local_part,
'"Fred.A.Johnson"@python.org',
'"Fred.A.Johnson"',
'"Fred.A.Johnson"',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_quoted_with_whitespace(self):
local_part = self._test_get_x(parser.get_local_part,
' "Fred A. Johnson" @python.org',
' "Fred A. Johnson" ',
' "Fred A. Johnson" ',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred A. Johnson')
def test_get_local_part_quoted_with_cfws(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo) " Fred A. Johnson " (bar (bird)) @python.org',
' (foo) " Fred A. Johnson " (bar (bird)) ',
' " Fred A. Johnson " ',
[],
'@python.org')
self.assertEqual(local_part.local_part, ' Fred A. Johnson ')
self.assertEqual(local_part[0][0].comments, ['foo'])
self.assertEqual(local_part[0][2].comments, ['bar (bird)'])
def test_get_local_part_simple_obsolete(self):
local_part = self._test_get_x(parser.get_local_part,
'Fred. A.Johnson@python.org',
'Fred. A.Johnson',
'Fred. A.Johnson',
[errors.ObsoleteHeaderDefect],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_complex_obsolete_1(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo )Fred (bar).(bird) A.(sheep)Johnson."and dogs "@python.org',
' (foo )Fred (bar).(bird) A.(sheep)Johnson."and dogs "',
' Fred . A. Johnson.and dogs ',
[errors.ObsoleteHeaderDefect],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson.and dogs ')
def test_get_local_part_complex_obsolete_invalid(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo )Fred (bar).(bird) A.(sheep)Johnson "and dogs"@python.org',
' (foo )Fred (bar).(bird) A.(sheep)Johnson "and dogs"',
' Fred . A. Johnson and dogs',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson and dogs')
def test_get_local_part_no_part_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_local_part(' (foo) ')
def test_get_local_part_special_instead_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_local_part(' (foo) @python.org')
def test_get_local_part_trailing_dot(self):
local_part = self._test_get_x(parser.get_local_part,
' borris.@python.org',
' borris.',
' borris.',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'borris.')
def test_get_local_part_trailing_dot_with_ws(self):
local_part = self._test_get_x(parser.get_local_part,
' borris. @python.org',
' borris. ',
' borris. ',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'borris.')
def test_get_local_part_leading_dot(self):
local_part = self._test_get_x(parser.get_local_part,
'.borris@python.org',
'.borris',
'.borris',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, '.borris')
def test_get_local_part_leading_dot_after_ws(self):
local_part = self._test_get_x(parser.get_local_part,
' .borris@python.org',
' .borris',
' .borris',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, '.borris')
def test_get_local_part_double_dot_raises(self):
local_part = self._test_get_x(parser.get_local_part,
' borris.(foo).natasha@python.org',
' borris.(foo).natasha',
' borris. .natasha',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'borris..natasha')
def test_get_local_part_quoted_strings_in_atom_list(self):
local_part = self._test_get_x(parser.get_local_part,
'""example" example"@example.com',
'""example" example"',
'example example',
[errors.InvalidHeaderDefect]*3,
'@example.com')
self.assertEqual(local_part.local_part, 'example example')
def test_get_local_part_valid_and_invalid_qp_in_atom_list(self):
local_part = self._test_get_x(parser.get_local_part,
r'"\\"example\\" example"@example.com',
r'"\\"example\\" example"',
r'\example\\ example',
[errors.InvalidHeaderDefect]*5,
'@example.com')
self.assertEqual(local_part.local_part, r'\example\\ example')
def test_get_local_part_unicode_defect(self):
# Currently this only happens when parsing unicode, not when parsing
# stuff that was originally binary.
local_part = self._test_get_x(parser.get_local_part,
'exámple@example.com',
'exámple',
'exámple',
[errors.NonASCIILocalPartDefect],
'@example.com')
self.assertEqual(local_part.local_part, 'exámple')
# get_dtext
def test_get_dtext_only(self):
dtext = self._test_get_x(parser.get_dtext,
'foobar', 'foobar', 'foobar', [], '')
self.assertEqual(dtext.token_type, 'ptext')
def test_get_dtext_all_dtext(self):
dtext = self._test_get_x(parser.get_dtext, self.rfc_dtext_chars,
self.rfc_dtext_chars,
self.rfc_dtext_chars, [], '')
def test_get_dtext_two_words_gets_first(self):
self._test_get_x(parser.get_dtext,
'foo bar', 'foo', 'foo', [], ' bar')
def test_get_dtext_following_wsp_preserved(self):
self._test_get_x(parser.get_dtext,
'foo \t\tbar', 'foo', 'foo', [], ' \t\tbar')
def test_get_dtext_non_printables(self):
dtext = self._test_get_x(parser.get_dtext,
'foo\x00bar]', 'foo\x00bar', 'foo\x00bar',
[errors.NonPrintableDefect], ']')
self.assertEqual(dtext.defects[0].non_printables[0], '\x00')
def test_get_dtext_with_qp(self):
ptext = self._test_get_x(parser.get_dtext,
r'foo\]\[\\bar\b\e\l\l',
r'foo][\barbell',
r'foo][\barbell',
[errors.ObsoleteHeaderDefect],
'')
def test_get_dtext_up_to_close_bracket_only(self):
self._test_get_x(parser.get_dtext,
'foo]', 'foo', 'foo', [], ']')
def test_get_dtext_wsp_before_close_bracket_preserved(self):
self._test_get_x(parser.get_dtext,
'foo ]', 'foo', 'foo', [], ' ]')
def test_get_dtext_close_bracket_mid_word(self):
self._test_get_x(parser.get_dtext,
'foo]bar', 'foo', 'foo', [], ']bar')
def test_get_dtext_up_to_open_bracket_only(self):
self._test_get_x(parser.get_dtext,
'foo[', 'foo', 'foo', [], '[')
def test_get_dtext_wsp_before_open_bracket_preserved(self):
self._test_get_x(parser.get_dtext,
'foo [', 'foo', 'foo', [], ' [')
def test_get_dtext_open_bracket_mid_word(self):
self._test_get_x(parser.get_dtext,
'foo[bar', 'foo', 'foo', [], '[bar')
# get_domain_literal
def test_get_domain_literal_only(self):
        domain_literal = self._test_get_x(parser.get_domain_literal,
'[127.0.0.1]',
'[127.0.0.1]',
'[127.0.0.1]',
[],
'')
self.assertEqual(domain_literal.token_type, 'domain-literal')
self.assertEqual(domain_literal.domain, '[127.0.0.1]')
self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_with_internal_ws(self):
domain_literal = self._test_get_x(parser.get_domain_literal,
'[ 127.0.0.1\t ]',
'[ 127.0.0.1\t ]',
'[ 127.0.0.1 ]',
[],
'')
self.assertEqual(domain_literal.domain, '[127.0.0.1]')
self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_with_surrounding_cfws(self):
domain_literal = self._test_get_x(parser.get_domain_literal,
'(foo)[ 127.0.0.1] (bar)',
'(foo)[ 127.0.0.1] (bar)',
' [ 127.0.0.1] ',
[],
'')
self.assertEqual(domain_literal.domain, '[127.0.0.1]')
self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_no_start_char_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain_literal('(foo) ')
def test_get_domain_literal_no_start_char_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain_literal('(foo) @')
def test_get_domain_literal_bad_dtext_char_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain_literal('(foo) [abc[@')
# get_domain
def test_get_domain_regular_domain_only(self):
domain = self._test_get_x(parser.get_domain,
'example.com',
'example.com',
'example.com',
[],
'')
self.assertEqual(domain.token_type, 'domain')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_only(self):
domain = self._test_get_x(parser.get_domain,
'[127.0.0.1]',
'[127.0.0.1]',
'[127.0.0.1]',
[],
'')
self.assertEqual(domain.token_type, 'domain')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_with_cfws(self):
domain = self._test_get_x(parser.get_domain,
'(foo) example.com(bar)\t',
'(foo) example.com(bar)\t',
' example.com ',
[],
'')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_with_cfws(self):
domain = self._test_get_x(parser.get_domain,
'(foo)[127.0.0.1]\t(bar)',
'(foo)[127.0.0.1]\t(bar)',
' [127.0.0.1] ',
[],
'')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_domain_with_cfws_ends_at_special(self):
domain = self._test_get_x(parser.get_domain,
'(foo)example.com\t(bar), next',
'(foo)example.com\t(bar)',
' example.com ',
[],
', next')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_with_cfws_ends_at_special(self):
domain = self._test_get_x(parser.get_domain,
'(foo)[127.0.0.1]\t(bar), next',
'(foo)[127.0.0.1]\t(bar)',
' [127.0.0.1] ',
[],
', next')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_obsolete(self):
domain = self._test_get_x(parser.get_domain,
'(foo) example . (bird)com(bar)\t',
'(foo) example . (bird)com(bar)\t',
' example . com ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_no_non_cfws_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain(" (foo)\t")
def test_get_domain_no_atom_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain(" (foo)\t, broken")
# get_addr_spec
def test_get_addr_spec_normal(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(addr_spec.token_type, 'addr-spec')
self.assertEqual(addr_spec.local_part, 'dinsdale')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, 'dinsdale@example.com')
    def test_get_addr_spec_with_domain_literal(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'dinsdale@[127.0.0.1]',
'dinsdale@[127.0.0.1]',
'dinsdale@[127.0.0.1]',
[],
'')
self.assertEqual(addr_spec.local_part, 'dinsdale')
self.assertEqual(addr_spec.domain, '[127.0.0.1]')
self.assertEqual(addr_spec.addr_spec, 'dinsdale@[127.0.0.1]')
def test_get_addr_spec_with_cfws(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'(foo) dinsdale(bar)@ (bird) example.com (bog)',
'(foo) dinsdale(bar)@ (bird) example.com (bog)',
' dinsdale@example.com ',
[],
'')
self.assertEqual(addr_spec.local_part, 'dinsdale')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, 'dinsdale@example.com')
    def test_get_addr_spec_with_quoted_string_and_cfws(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'(foo) "roy a bug"(bar)@ (bird) example.com (bog)',
'(foo) "roy a bug"(bar)@ (bird) example.com (bog)',
' "roy a bug"@example.com ',
[],
'')
self.assertEqual(addr_spec.local_part, 'roy a bug')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, '"roy a bug"@example.com')
def test_get_addr_spec_ends_at_special(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'(foo) "roy a bug"(bar)@ (bird) example.com (bog) , next',
'(foo) "roy a bug"(bar)@ (bird) example.com (bog) ',
' "roy a bug"@example.com ',
[],
', next')
self.assertEqual(addr_spec.local_part, 'roy a bug')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, '"roy a bug"@example.com')
def test_get_addr_spec_quoted_strings_in_atom_list(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'""example" example"@example.com',
'""example" example"@example.com',
'example example@example.com',
[errors.InvalidHeaderDefect]*3,
'')
self.assertEqual(addr_spec.local_part, 'example example')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, '"example example"@example.com')
def test_get_addr_spec_dot_atom(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'star.a.star@example.com',
'star.a.star@example.com',
'star.a.star@example.com',
[],
'')
self.assertEqual(addr_spec.local_part, 'star.a.star')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, 'star.a.star@example.com')
# get_obs_route
def test_get_obs_route_simple(self):
obs_route = self._test_get_x(parser.get_obs_route,
'@example.com, @two.example.com:',
'@example.com, @two.example.com:',
'@example.com, @two.example.com:',
[],
'')
self.assertEqual(obs_route.token_type, 'obs-route')
self.assertEqual(obs_route.domains, ['example.com', 'two.example.com'])
def test_get_obs_route_complex(self):
obs_route = self._test_get_x(parser.get_obs_route,
'(foo),, (blue)@example.com (bar),@two.(foo) example.com (bird):',
'(foo),, (blue)@example.com (bar),@two.(foo) example.com (bird):',
' ,, @example.com ,@two. example.com :',
[errors.ObsoleteHeaderDefect], # This is the obs-domain
'')
self.assertEqual(obs_route.token_type, 'obs-route')
self.assertEqual(obs_route.domains, ['example.com', 'two.example.com'])
def test_get_obs_route_no_route_before_end_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_obs_route('(foo) @example.com,')
def test_get_obs_route_no_route_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_obs_route('(foo) [abc],')
def test_get_obs_route_no_route_before_special_raises2(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_obs_route('(foo) @example.com [abc],')
# get_angle_addr
def test_get_angle_addr_simple(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com>',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_empty(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<>',
'<>',
'<>',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertIsNone(angle_addr.local_part)
self.assertIsNone(angle_addr.domain)
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, '<>')
def test_get_angle_addr_with_cfws(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
' (foo) <dinsdale@example.com>(bar)',
' (foo) <dinsdale@example.com>(bar)',
' <dinsdale@example.com> ',
[],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_qs_and_domain_literal(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<"Fred Perfect"@[127.0.0.1]>',
'<"Fred Perfect"@[127.0.0.1]>',
'<"Fred Perfect"@[127.0.0.1]>',
[],
'')
self.assertEqual(angle_addr.local_part, 'Fred Perfect')
self.assertEqual(angle_addr.domain, '[127.0.0.1]')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, '"Fred Perfect"@[127.0.0.1]')
def test_get_angle_addr_internal_cfws(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<(foo) dinsdale@example.com(bar)>',
'<(foo) dinsdale@example.com(bar)>',
'< dinsdale@example.com >',
[],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_obs_route(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'(foo)<@example.com, (bird) @two.example.com: dinsdale@example.com> (bar) ',
'(foo)<@example.com, (bird) @two.example.com: dinsdale@example.com> (bar) ',
' <@example.com, @two.example.com: dinsdale@example.com> ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertEqual(angle_addr.route, ['example.com', 'two.example.com'])
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_missing_closing_angle(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_missing_closing_angle_with_cfws(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com (foo)',
'<dinsdale@example.com (foo)>',
'<dinsdale@example.com >',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_ends_at_special(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com> (foo), next',
'<dinsdale@example.com> (foo)',
'<dinsdale@example.com> ',
[],
', next')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_no_angle_raise(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('(foo) ')
def test_get_angle_addr_no_angle_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('(foo) , next')
def test_get_angle_addr_no_angle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('bar')
def test_get_angle_addr_special_after_angle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('(foo) <, bar')
    # get_display_name.  This is a phrase, but with a different value.
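    # (Illustrative contrast, drawn from the tests below: for the source
    # 'Fred A.(weird).O Johnson', get_phrase produces the value
    # 'Fred A. .O Johnson', while get_display_name produces the quoted form
    # '"Fred A. .O Johnson"' and exposes display_name == 'Fred A. .O Johnson'.)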
def test_get_display_name_simple(self):
display_name = self._test_get_x(parser.get_display_name,
'Fred A Johnson',
'Fred A Johnson',
'Fred A Johnson',
[],
'')
self.assertEqual(display_name.token_type, 'display-name')
self.assertEqual(display_name.display_name, 'Fred A Johnson')
def test_get_display_name_complex1(self):
display_name = self._test_get_x(parser.get_display_name,
'"Fred A. Johnson" is his name, oh.',
'"Fred A. Johnson" is his name',
'"Fred A. Johnson is his name"',
[],
', oh.')
self.assertEqual(display_name.token_type, 'display-name')
self.assertEqual(display_name.display_name, 'Fred A. Johnson is his name')
def test_get_display_name_complex2(self):
display_name = self._test_get_x(parser.get_display_name,
' (A) bird (in (my|your)) "hand " is messy\t<>\t',
' (A) bird (in (my|your)) "hand " is messy\t',
' "bird hand is messy" ',
[],
'<>\t')
self.assertEqual(display_name[0][0].comments, ['A'])
self.assertEqual(display_name[0][2].comments, ['in (my|your)'])
self.assertEqual(display_name.display_name, 'bird hand is messy')
def test_get_display_name_obsolete(self):
display_name = self._test_get_x(parser.get_display_name,
'Fred A.(weird).O Johnson',
'Fred A.(weird).O Johnson',
'"Fred A. .O Johnson"',
[errors.ObsoleteHeaderDefect]*3,
'')
self.assertEqual(len(display_name), 7)
self.assertEqual(display_name[3].comments, ['weird'])
self.assertEqual(display_name.display_name, 'Fred A. .O Johnson')
    def test_get_display_name_must_start_with_word(self):
display_name = self._test_get_x(parser.get_display_name,
'(even weirder).name',
'(even weirder).name',
' ".name"',
[errors.InvalidHeaderDefect] + [errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(len(display_name), 3)
self.assertEqual(display_name[0].comments, ['even weirder'])
self.assertEqual(display_name.display_name, '.name')
def test_get_display_name_ending_with_obsolete(self):
display_name = self._test_get_x(parser.get_display_name,
'simple phrase.(with trailing comment):boo',
'simple phrase.(with trailing comment)',
'"simple phrase." ',
[errors.ObsoleteHeaderDefect]*2,
':boo')
self.assertEqual(len(display_name), 4)
self.assertEqual(display_name[3].comments, ['with trailing comment'])
self.assertEqual(display_name.display_name, 'simple phrase.')
# get_name_addr
def test_get_name_addr_angle_addr_only(self):
name_addr = self._test_get_x(parser.get_name_addr,
'<dinsdale@example.com>',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[],
'')
self.assertEqual(name_addr.token_type, 'name-addr')
self.assertIsNone(name_addr.display_name)
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_atom_name(self):
name_addr = self._test_get_x(parser.get_name_addr,
'Dinsdale <dinsdale@example.com>',
'Dinsdale <dinsdale@example.com>',
'Dinsdale <dinsdale@example.com>',
[],
'')
self.assertEqual(name_addr.token_type, 'name-addr')
self.assertEqual(name_addr.display_name, 'Dinsdale')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_atom_name_with_cfws(self):
name_addr = self._test_get_x(parser.get_name_addr,
'(foo) Dinsdale (bar) <dinsdale@example.com> (bird)',
'(foo) Dinsdale (bar) <dinsdale@example.com> (bird)',
' Dinsdale <dinsdale@example.com> ',
[],
'')
self.assertEqual(name_addr.display_name, 'Dinsdale')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_name_with_cfws_and_dots(self):
name_addr = self._test_get_x(parser.get_name_addr,
'(foo) Roy.A.Bear (bar) <dinsdale@example.com> (bird)',
'(foo) Roy.A.Bear (bar) <dinsdale@example.com> (bird)',
' "Roy.A.Bear" <dinsdale@example.com> ',
[errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_qs_name(self):
name_addr = self._test_get_x(parser.get_name_addr,
'"Roy.A.Bear" <dinsdale@example.com>',
'"Roy.A.Bear" <dinsdale@example.com>',
'"Roy.A.Bear" <dinsdale@example.com>',
[],
'')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_with_route(self):
name_addr = self._test_get_x(parser.get_name_addr,
'"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
'"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
'"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertEqual(name_addr.route, ['two.example.com'])
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_ends_at_special(self):
name_addr = self._test_get_x(parser.get_name_addr,
'"Roy.A.Bear" <dinsdale@example.com>, next',
'"Roy.A.Bear" <dinsdale@example.com>',
'"Roy.A.Bear" <dinsdale@example.com>',
[],
', next')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_no_content_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_name_addr(' (foo) ')
def test_get_name_addr_no_content_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_name_addr(' (foo) ,')
def test_get_name_addr_no_angle_after_display_name_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_name_addr('foo bar')
# get_mailbox
def test_get_mailbox_addr_spec_only(self):
mailbox = self._test_get_x(parser.get_mailbox,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertIsNone(mailbox.display_name)
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_angle_addr_only(self):
mailbox = self._test_get_x(parser.get_mailbox,
'<dinsdale@example.com>',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[],
'')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertIsNone(mailbox.display_name)
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_name_addr(self):
mailbox = self._test_get_x(parser.get_mailbox,
'"Roy A. Bear" <dinsdale@example.com>',
'"Roy A. Bear" <dinsdale@example.com>',
'"Roy A. Bear" <dinsdale@example.com>',
[],
'')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertEqual(mailbox.display_name, 'Roy A. Bear')
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_ends_at_special(self):
mailbox = self._test_get_x(parser.get_mailbox,
'"Roy A. Bear" <dinsdale@example.com>, rest',
'"Roy A. Bear" <dinsdale@example.com>',
'"Roy A. Bear" <dinsdale@example.com>',
[],
', rest')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertEqual(mailbox.display_name, 'Roy A. Bear')
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_quoted_strings_in_atom_list(self):
mailbox = self._test_get_x(parser.get_mailbox,
'""example" example"@example.com',
'""example" example"@example.com',
'example example@example.com',
[errors.InvalidHeaderDefect]*3,
'')
self.assertEqual(mailbox.local_part, 'example example')
self.assertEqual(mailbox.domain, 'example.com')
self.assertEqual(mailbox.addr_spec, '"example example"@example.com')
# get_mailbox_list
def test_get_mailbox_list_single_addr(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(mailbox_list.token_type, 'mailbox-list')
self.assertEqual(len(mailbox_list.mailboxes), 1)
mailbox = mailbox_list.mailboxes[0]
self.assertIsNone(mailbox.display_name)
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_simple_addr(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
'dinsdale@example.com, dinsdale@test.example.com',
'dinsdale@example.com, dinsdale@test.example.com',
'dinsdale@example.com, dinsdale@test.example.com',
[],
'')
self.assertEqual(mailbox_list.token_type, 'mailbox-list')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_name_addr(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear" <dinsdale@example.com>,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
[],
'')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[1].display_name,
'Fred Flintstone')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_complex(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('(foo) "Roy A. Bear" <dinsdale@example.com>(bar),'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
('(foo) "Roy A. Bear" <dinsdale@example.com>(bar),'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
(' "Roy A. Bear" <dinsdale@example.com> ,'
' "Fred Flintstone" <dinsdale@test. example.com>'),
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[1].display_name,
'Fred Flintstone')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_unparseable_mailbox_null(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear"[] dinsdale@example.com,'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
('"Roy A. Bear"[] dinsdale@example.com,'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
('"Roy A. Bear"[] dinsdale@example.com,'
' "Fred Flintstone" <dinsdale@test. example.com>'),
[errors.InvalidHeaderDefect, # the 'extra' text after the local part
errors.InvalidHeaderDefect, # the local part with no angle-addr
errors.ObsoleteHeaderDefect, # period in extra text (example.com)
errors.ObsoleteHeaderDefect], # (bird) in valid address.
'')
self.assertEqual(len(mailbox_list.mailboxes), 1)
self.assertEqual(len(mailbox_list.all_mailboxes), 2)
self.assertEqual(mailbox_list.all_mailboxes[0].token_type,
'invalid-mailbox')
self.assertIsNone(mailbox_list.all_mailboxes[0].display_name)
self.assertEqual(mailbox_list.all_mailboxes[0].local_part,
'Roy A. Bear')
self.assertIsNone(mailbox_list.all_mailboxes[0].domain)
self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
'"Roy A. Bear"')
self.assertIs(mailbox_list.all_mailboxes[1],
mailbox_list.mailboxes[0])
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Fred Flintstone')
def test_get_mailbox_list_junk_after_valid_address(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear" <dinsdale@example.com>@@,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>@@,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>@@,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
[errors.InvalidHeaderDefect],
'')
self.assertEqual(len(mailbox_list.mailboxes), 1)
self.assertEqual(len(mailbox_list.all_mailboxes), 2)
self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.all_mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.all_mailboxes[0].token_type,
'invalid-mailbox')
self.assertIs(mailbox_list.all_mailboxes[1],
mailbox_list.mailboxes[0])
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Fred Flintstone')
def test_get_mailbox_list_empty_list_element(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear" <dinsdale@example.com>, (bird),,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>, (bird),,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>, ,,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
[errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.all_mailboxes,
mailbox_list.mailboxes)
self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.all_mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[1].display_name,
'Fred Flintstone')
def test_get_mailbox_list_only_empty_elements(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
'(foo),, (bar)',
'(foo),, (bar)',
' ,, ',
[errors.ObsoleteHeaderDefect]*3,
'')
self.assertEqual(len(mailbox_list.mailboxes), 0)
self.assertEqual(mailbox_list.all_mailboxes,
mailbox_list.mailboxes)
# get_group_list
def test_get_group_list_cfws_only(self):
group_list = self._test_get_x(parser.get_group_list,
'(hidden);',
'(hidden)',
' ',
[],
';')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 0)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
def test_get_group_list_mailbox_list(self):
group_list = self._test_get_x(parser.get_group_list,
'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
[],
'')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 2)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
self.assertEqual(group_list.mailboxes[1].display_name,
'Fred A. Bear')
def test_get_group_list_obs_group_list(self):
group_list = self._test_get_x(parser.get_group_list,
', (foo),,(bar)',
', (foo),,(bar)',
', ,, ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 0)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
def test_get_group_list_comment_only_invalid(self):
group_list = self._test_get_x(parser.get_group_list,
'(bar)',
'(bar)',
' ',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 0)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
# get_group
def test_get_group_empty(self):
group = self._test_get_x(parser.get_group,
'Monty Python:;',
'Monty Python:;',
'Monty Python:;',
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 0)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
def test_get_group_null_addr_spec(self):
group = self._test_get_x(parser.get_group,
'foo: <>;',
'foo: <>;',
'foo: <>;',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(group.display_name, 'foo')
self.assertEqual(len(group.mailboxes), 0)
self.assertEqual(len(group.all_mailboxes), 1)
self.assertEqual(group.all_mailboxes[0].value, '<>')
def test_get_group_cfws_only(self):
group = self._test_get_x(parser.get_group,
'Monty Python: (hidden);',
'Monty Python: (hidden);',
'Monty Python: ;',
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 0)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
def test_get_group_single_mailbox(self):
group = self._test_get_x(parser.get_group,
'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 1)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
self.assertEqual(group.mailboxes[0].addr_spec,
'dinsdale@example.com')
def test_get_group_mixed_list(self):
group = self._test_get_x(parser.get_group,
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger <ping@exampele.com>, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger <ping@exampele.com>, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
' Roger <ping@exampele.com>, x@test.example.com;'),
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 3)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
self.assertEqual(group.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(group.mailboxes[1].display_name,
'Roger')
self.assertEqual(group.mailboxes[2].local_part, 'x')
def test_get_group_one_invalid(self):
group = self._test_get_x(parser.get_group,
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger ping@exampele.com, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger ping@exampele.com, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
' Roger ping@exampele.com, x@test.example.com;'),
[errors.InvalidHeaderDefect, # non-angle addr makes local part invalid
             errors.InvalidHeaderDefect], # and it's not obs-local either: no dots.
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 2)
self.assertEqual(len(group.all_mailboxes), 3)
self.assertEqual(group.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(group.mailboxes[1].local_part, 'x')
self.assertIsNone(group.all_mailboxes[1].display_name)
# get_address
def test_get_address_simple(self):
address = self._test_get_x(parser.get_address,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_complex(self):
address = self._test_get_x(parser.get_address,
'(foo) "Fred A. Bear" <(bird)dinsdale@example.com>',
'(foo) "Fred A. Bear" <(bird)dinsdale@example.com>',
' "Fred A. Bear" < dinsdale@example.com>',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_rfc2047_display_name(self):
address = self._test_get_x(parser.get_address,
'=?utf-8?q?=C3=89ric?= <foo@example.com>',
'Éric <foo@example.com>',
'Éric <foo@example.com>',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].display_name,
'Éric')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_empty_group(self):
address = self._test_get_x(parser.get_address,
'Monty Python:;',
'Monty Python:;',
'Monty Python:;',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 0)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address[0].token_type,
'group')
self.assertEqual(address[0].display_name,
'Monty Python')
def test_get_address_group(self):
address = self._test_get_x(parser.get_address,
'Monty Python: x@example.com, y@example.com;',
'Monty Python: x@example.com, y@example.com;',
'Monty Python: x@example.com, y@example.com;',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 2)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address[0].token_type,
'group')
self.assertEqual(address[0].display_name,
'Monty Python')
self.assertEqual(address.mailboxes[0].local_part, 'x')
def test_get_address_quoted_local_part(self):
address = self._test_get_x(parser.get_address,
'"foo bar"@example.com',
'"foo bar"@example.com',
'"foo bar"@example.com',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address.mailboxes[0].local_part,
'foo bar')
self.assertEqual(address[0].token_type, 'mailbox')
def test_get_address_ends_at_special(self):
address = self._test_get_x(parser.get_address,
'dinsdale@example.com, next',
'dinsdale@example.com',
'dinsdale@example.com',
[],
', next')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address[0].token_type, 'mailbox')
def test_get_address_invalid_mailbox_invalid(self):
address = self._test_get_x(parser.get_address,
'ping example.com, next',
'ping example.com',
'ping example.com',
[errors.InvalidHeaderDefect, # addr-spec with no domain
errors.InvalidHeaderDefect, # invalid local-part
errors.InvalidHeaderDefect, # missing .s in local-part
],
', next')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 0)
self.assertEqual(len(address.all_mailboxes), 1)
self.assertIsNone(address.all_mailboxes[0].domain)
self.assertEqual(address.all_mailboxes[0].local_part, 'ping example.com')
self.assertEqual(address[0].token_type, 'invalid-mailbox')
def test_get_address_quoted_strings_in_atom_list(self):
address = self._test_get_x(parser.get_address,
'""example" example"@example.com',
'""example" example"@example.com',
'example example@example.com',
[errors.InvalidHeaderDefect]*3,
'')
self.assertEqual(address.all_mailboxes[0].local_part, 'example example')
self.assertEqual(address.all_mailboxes[0].domain, 'example.com')
self.assertEqual(address.all_mailboxes[0].addr_spec, '"example example"@example.com')
# get_address_list
def test_get_address_list_mailboxes_simple(self):
address_list = self._test_get_x(parser.get_address_list,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 1)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual([str(x) for x in address_list.mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list[0].token_type, 'address')
self.assertIsNone(address_list[0].display_name)
def test_get_address_list_mailboxes_two_simple(self):
address_list = self._test_get_x(parser.get_address_list,
'foo@example.com, "Fred A. Bar" <bar@example.com>',
'foo@example.com, "Fred A. Bar" <bar@example.com>',
'foo@example.com, "Fred A. Bar" <bar@example.com>',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 2)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual([str(x) for x in address_list.mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].local_part, 'foo')
self.assertEqual(address_list.mailboxes[1].display_name, "Fred A. Bar")
def test_get_address_list_mailboxes_complex(self):
address_list = self._test_get_x(parser.get_address_list,
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo <x@example.com>,'
'Nobody Is. Special <y@(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo <x@example.com>,'
'Nobody Is. Special <y@(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'Foo <x@example.com>,'
'"Nobody Is. Special" <y@example. com>'),
[errors.ObsoleteHeaderDefect, # period in Is.
errors.ObsoleteHeaderDefect], # cfws in domain
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 3)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual([str(x) for x in address_list.mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list.mailboxes[0].token_type, 'mailbox')
self.assertEqual(address_list.addresses[0].token_type, 'address')
self.assertEqual(address_list.mailboxes[1].local_part, 'x')
self.assertEqual(address_list.mailboxes[2].display_name,
'Nobody Is. Special')
def test_get_address_list_mailboxes_invalid_addresses(self):
address_list = self._test_get_x(parser.get_address_list,
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo x@example.com[],'
'Nobody Is. Special <(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo x@example.com[],'
'Nobody Is. Special <(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'Foo x@example.com[],'
'"Nobody Is. Special" < example. com>'),
[errors.InvalidHeaderDefect, # invalid address in list
errors.InvalidHeaderDefect, # 'Foo x' local part invalid.
errors.InvalidHeaderDefect, # Missing . in 'Foo x' local part
errors.ObsoleteHeaderDefect, # period in 'Is.' disp-name phrase
errors.InvalidHeaderDefect, # no domain part in addr-spec
errors.ObsoleteHeaderDefect], # addr-spec has comment in it
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 1)
self.assertEqual(len(address_list.all_mailboxes), 3)
self.assertEqual([str(x) for x in address_list.all_mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list.mailboxes[0].token_type, 'mailbox')
self.assertEqual(address_list.addresses[0].token_type, 'address')
self.assertEqual(address_list.addresses[1].token_type, 'address')
self.assertEqual(len(address_list.addresses[0].mailboxes), 1)
self.assertEqual(len(address_list.addresses[1].mailboxes), 0)
        self.assertEqual(len(address_list.addresses[2].mailboxes), 0)
self.assertEqual(
address_list.addresses[1].all_mailboxes[0].local_part, 'Foo x')
self.assertEqual(
address_list.addresses[2].all_mailboxes[0].display_name,
"Nobody Is. Special")
def test_get_address_list_group_empty(self):
address_list = self._test_get_x(parser.get_address_list,
'Monty Python: ;',
'Monty Python: ;',
'Monty Python: ;',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 0)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual(len(address_list.addresses), 1)
self.assertEqual(address_list.addresses[0].token_type, 'address')
self.assertEqual(address_list.addresses[0].display_name, 'Monty Python')
self.assertEqual(len(address_list.addresses[0].mailboxes), 0)
def test_get_address_list_group_simple(self):
address_list = self._test_get_x(parser.get_address_list,
'Monty Python: dinsdale@example.com;',
'Monty Python: dinsdale@example.com;',
'Monty Python: dinsdale@example.com;',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 1)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list.addresses[0].display_name,
'Monty Python')
self.assertEqual(address_list.addresses[0].mailboxes[0].domain,
'example.com')
def test_get_address_list_group_and_mailboxes(self):
address_list = self._test_get_x(parser.get_address_list,
('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
'Abe <x@example.com>, Bee <y@example.com>'),
('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
'Abe <x@example.com>, Bee <y@example.com>'),
('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
'Abe <x@example.com>, Bee <y@example.com>'),
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 4)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual(len(address_list.addresses), 3)
self.assertEqual(address_list.mailboxes[0].local_part, 'dinsdale')
self.assertEqual(address_list.addresses[0].display_name,
'Monty Python')
self.assertEqual(address_list.addresses[0].mailboxes[0].domain,
'example.com')
self.assertEqual(address_list.addresses[0].mailboxes[1].local_part,
'flint')
self.assertEqual(address_list.addresses[1].mailboxes[0].local_part,
'x')
self.assertEqual(address_list.addresses[2].mailboxes[0].local_part,
'y')
self.assertEqual(str(address_list.addresses[1]),
str(address_list.mailboxes[2]))
def test_invalid_content_disposition(self):
content_disp = self._test_parse_x(
parser.parse_content_disposition_header,
";attachment", "; attachment", ";attachment",
[errors.InvalidHeaderDefect]*2
)
def test_invalid_content_transfer_encoding(self):
cte = self._test_parse_x(
parser.parse_content_transfer_encoding_header,
";foo", ";foo", ";foo", [errors.InvalidHeaderDefect]*3
)
@parameterize
class Test_parse_mime_version(TestParserMixin, TestEmailBase):
def mime_version_as_value(self,
value,
tl_str,
tl_value,
major,
minor,
defects):
mime_version = self._test_parse_x(parser.parse_mime_version,
value, tl_str, tl_value, defects)
self.assertEqual(mime_version.major, major)
self.assertEqual(mime_version.minor, minor)
mime_version_params = {
'rfc_2045_1': (
'1.0',
'1.0',
'1.0',
1,
0,
[]),
'RFC_2045_2': (
'1.0 (produced by MetaSend Vx.x)',
'1.0 (produced by MetaSend Vx.x)',
'1.0 ',
1,
0,
[]),
'RFC_2045_3': (
'(produced by MetaSend Vx.x) 1.0',
'(produced by MetaSend Vx.x) 1.0',
' 1.0',
1,
0,
[]),
'RFC_2045_4': (
'1.(produced by MetaSend Vx.x)0',
'1.(produced by MetaSend Vx.x)0',
'1. 0',
1,
0,
[]),
'empty': (
'',
'',
'',
None,
None,
[errors.HeaderMissingRequiredValue]),
}
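    # (The parameterize decorator pairs the mime_version_params dict with the
    # mime_version_as_value method above, generating one test method per key;
    # e.g. the 'rfc_2045_1' entry becomes a test that parses '1.0' and checks
    # major == 1 and minor == 0.)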
class TestFolding(TestEmailBase):
policy = policy.default
def _test(self, tl, folded, policy=policy):
self.assertEqual(tl.fold(policy=policy), folded, tl.ppstr())
def test_simple_unstructured_no_folds(self):
self._test(parser.get_unstructured("This is a test"),
"This is a test\n")
def test_simple_unstructured_folded(self):
self._test(parser.get_unstructured("This is also a test, but this "
"time there are enough words (and even some "
"symbols) to make it wrap; at least in theory."),
"This is also a test, but this time there are enough "
"words (and even some\n"
" symbols) to make it wrap; at least in theory.\n")
def test_unstructured_with_unicode_no_folds(self):
self._test(parser.get_unstructured("hübsch kleiner beißt"),
"=?utf-8?q?h=C3=BCbsch_kleiner_bei=C3=9Ft?=\n")
def test_one_ew_on_each_of_two_wrapped_lines(self):
self._test(parser.get_unstructured("Mein kleiner Kaktus ist sehr "
"hübsch. Es hat viele Stacheln "
"und oft beißt mich."),
"Mein kleiner Kaktus ist sehr =?utf-8?q?h=C3=BCbsch=2E?= "
"Es hat viele Stacheln\n"
" und oft =?utf-8?q?bei=C3=9Ft?= mich.\n")
def test_ews_combined_before_wrap(self):
self._test(parser.get_unstructured("Mein Kaktus ist hübsch. "
"Es beißt mich. "
"And that's all I'm sayin."),
"Mein Kaktus ist =?utf-8?q?h=C3=BCbsch=2E__Es_bei=C3=9Ft?= "
"mich. And that's\n"
" all I'm sayin.\n")
# XXX Need test of an encoded word so long that it needs to be wrapped
def test_simple_address(self):
self._test(parser.get_address_list("abc <xyz@example.com>")[0],
"abc <xyz@example.com>\n")
def test_address_list_folding_at_commas(self):
self._test(parser.get_address_list('abc <xyz@example.com>, '
'"Fred Blunt" <sharp@example.com>, '
'"J.P.Cool" <hot@example.com>, '
'"K<>y" <key@example.com>, '
'Firesale <cheap@example.com>, '
'<end@example.com>')[0],
'abc <xyz@example.com>, "Fred Blunt" <sharp@example.com>,\n'
' "J.P.Cool" <hot@example.com>, "K<>y" <key@example.com>,\n'
' Firesale <cheap@example.com>, <end@example.com>\n')
def test_address_list_with_unicode_names(self):
self._test(parser.get_address_list(
'Hübsch Kaktus <beautiful@example.com>, '
'beißt beißt <biter@example.com>')[0],
'=?utf-8?q?H=C3=BCbsch?= Kaktus <beautiful@example.com>,\n'
' =?utf-8?q?bei=C3=9Ft_bei=C3=9Ft?= <biter@example.com>\n')
def test_address_list_with_unicode_names_in_quotes(self):
self._test(parser.get_address_list(
'"Hübsch Kaktus" <beautiful@example.com>, '
'"beißt" beißt <biter@example.com>')[0],
'=?utf-8?q?H=C3=BCbsch?= Kaktus <beautiful@example.com>,\n'
' =?utf-8?q?bei=C3=9Ft_bei=C3=9Ft?= <biter@example.com>\n')
# XXX Need tests with comments on various sides of a unicode token,
# and with unicode tokens in the comments. Spaces inside the quotes
# currently don't do the right thing.
def test_initial_whitespace_splitting(self):
body = parser.get_unstructured(' ' + 'x'*77)
header = parser.Header([
parser.HeaderLabel([parser.ValueTerminal('test:', 'atext')]),
parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]), body])
self._test(header, 'test: \n ' + 'x'*77 + '\n')
def test_whitespace_splitting(self):
self._test(parser.get_unstructured('xxx ' + 'y'*77),
'xxx \n ' + 'y'*77 + '\n')
if __name__ == '__main__':
unittest.main()
|
robclewley/fovea
|
refs/heads/master
|
tests/track_text_test.py
|
1
|
"""
Tests for prototype code to track variable values with callbacks
"""
from __future__ import division
import PyDSTool as dst
from PyDSTool.Toolbox import phaseplane as pp
import numpy as np
from matplotlib import pyplot as plt
import fovea
import fovea.graphics as gx
from fovea.graphics import tracker_manager
tracker = tracker_manager()
class LogWrappedFunction(object):
def __init__(self, function):
import inspect
self.function = function
self.args, self.varargs, self.keywords, self.defaults = inspect.getargspec(function)
# ArgSpec(args=['gen', 'subdomain', 'n', 'maxsearch', 'eps', 't', 'jac'], varargs=None, keywords=None, defaults=(None, 5, 1000, 1e-08, 0, None))
def logAndCall(self, *arguments, **namedArguments):
print("Calling %s with arguments %s and named arguments %s" %\
(self.function.func_name, arguments, namedArguments))
self.function.__call__(*arguments, **namedArguments)
def logwrap(function):
return LogWrappedFunction(function).logAndCall
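# (Note: logwrap replaces the decorated function with the bound logAndCall
# method of a LogWrappedFunction instance, so the decorated name is no longer a
# plain function, but it is called the same way.)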
@logwrap
def doSomething(spam, eggs, foo, bar):
print("Doing something totally awesome with %s and %s." % (spam, eggs))
doSomething("beans","rice", foo="wiggity", bar="wack")
# ============================
def track_attribute(calc_con, attr_name):
"""
Create decorator to track named attribute used in a calculation
"""
    def decorator(fn):
        # unfinished prototype: the intent is to register the named attribute
        # with the tracker, roughly:
        #   obj = ...
        #   tracker.track_list = [getattr(obj, attr_name)]
        # For now the bare expression below is only a no-op placeholder.
        calc_con.workspace
        return fn
    return decorator
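# A hedged sketch (illustrative assumption, not the authors' design): one way
# the decorator above could be completed -- wrap fn so the named attribute of
# each result is recorded in the calc context's workspace. The name
# 'track_attribute_sketch' and the 'tracked_' key prefix are hypothetical.
def track_attribute_sketch(calc_con, attr_name):
    def decorator(fn):
        def wrapped(*args, **kwargs):
            res = fn(*args, **kwargs)
            # record the attribute next to the other workspace entries
            setattr(calc_con.workspace, 'tracked_' + attr_name,
                    getattr(res, attr_name, None))
            return res
        return wrapped
    return decorator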
def test_func_noattr(x, eps=1e-8):
"""
mock function that would use a tolerance eps and return a numerical
object that doesn't contain reference to that tolerance
"""
return x
# this doesn't let us get at the defaults unless we re-specify a default value
# in the wrapper (see logger above)
def wrap_test_func_noattr(x, eps=1e-8):
x = test_func_noattr(x, eps)
res = dst.args(x=x, eps=eps)
return res
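# A minimal sketch (added for illustration): recovering the default for eps
# from the original function's argspec instead of re-specifying it; the name
# 'wrap_test_func_noattr2' is hypothetical.
def wrap_test_func_noattr2(x, eps=None):
    if eps is None:
        import inspect
        spec = inspect.getargspec(test_func_noattr)
        # map the trailing arguments onto their defaults, then look up eps
        eps = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))['eps']
    return dst.args(x=test_func_noattr(x, eps), eps=eps)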
#@track_attribute(cc, 'eps')
def test_func_attr(x, eps=1e-8):
"""
mock function that would use a tolerance eps and return a numerical
object that does contain reference to that tolerance
"""
res = dst.args(val=x, eps=eps)
return res
x1 = wrap_test_func_noattr(1.0, 1e-8)
x2 = test_func_attr(3.1, 1e-5)
cc = fovea.calc_context(dst.args(tracked_objects=[],
name='saddles'), 'saddles') # None would be sim object
wksp = cc.workspace
wksp.x1 = x1
wksp.x2 = x2
tracker(cc, 1, text_metadata='eps')
tracker.show()
|
UCL-InfoSec/loopix
|
refs/heads/master
|
loopix/loopix_provider.py
|
1
|
import os
import random
import itertools
import petlib.pack
from loopix_mixnode import LoopixMixNode
from provider_core import ProviderCore
from core import generate_random_string
from json_reader import JSONReader
class LoopixProvider(LoopixMixNode):
jsonReader = JSONReader(os.path.join(os.path.dirname(__file__), 'config.json'))
config_params = jsonReader.get_provider_config_params()
storage_inbox = {}
clients = {}
def __init__(self, sec_params, name, port, host, privk=None, pubk=None):
LoopixMixNode.__init__(self, sec_params, name, port, host, privk, pubk)
self.privk = privk or sec_params.group.G.order().random()
self.pubk = pubk or (self.privk * sec_params.group.G.generator())
self.crypto_node = ProviderCore((sec_params, self.config_params), self.name,
self.port, self.host, self.privk, self.pubk)
def subscribe_client(self, client_data):
subscribe_key, subscribe_host, subscribe_port = client_data
self.clients[subscribe_key] = (subscribe_host, subscribe_port)
print "[%s] > Subscribed client" % self.name
def read_packet(self, packet):
try:
decoded_packet = petlib.pack.decode(packet)
if decoded_packet[0] == 'SUBSCRIBE':
self.subscribe_client(decoded_packet[1:])
elif decoded_packet[0] == 'PULL':
pulled_messages = self.pull_messages(client_id=decoded_packet[1])
map(lambda (packet, addr): self.send(packet, addr),
zip(pulled_messages, itertools.repeat(self.clients[decoded_packet[1]])))
else:
flag, decrypted_packet = self.crypto_node.process_packet(decoded_packet)
if flag == "ROUT":
delay, new_header, new_body, next_addr, next_name = decrypted_packet
if self.is_assigned_client(next_name):
self.put_into_storage(next_name, (new_header, new_body))
else:
self.reactor.callFromThread(self.send_or_delay,
delay,
(new_header, new_body),
next_addr)
elif flag == "LOOP":
print "[%s] > Received loop message" % self.name
elif flag == "DROP":
print "[%s] > Received drop message" % self.name
except Exception, exp:
print "ERROR: ", str(exp)
    def is_assigned_client(self, client_id):
        return client_id in self.clients
def put_into_storage(self, client_id, packet):
try:
self.storage_inbox[client_id].append(packet)
except KeyError, _:
self.storage_inbox[client_id] = [packet]
def pull_messages(self, client_id):
dummy_messages = []
popped_messages = self.get_clients_messages(client_id)
if len(popped_messages) < self.config_params.MAX_RETRIEVE:
dummy_messages = self.generate_dummy_messages(
self.config_params.MAX_RETRIEVE - len(popped_messages))
return popped_messages + dummy_messages
def get_clients_messages(self, client_id):
if client_id in self.storage_inbox.keys():
messages = self.storage_inbox[client_id]
popped, rest = messages[:self.config_params.MAX_RETRIEVE], messages[self.config_params.MAX_RETRIEVE:]
self.storage_inbox[client_id] = rest
return popped
return []
def generate_dummy_messages(self, num):
dummy_messages = [('DUMMY', generate_random_string(self.config_params.NOISE_LENGTH),
generate_random_string(self.config_params.NOISE_LENGTH)) for _ in range(num)]
return dummy_messages
def generate_random_path(self):
return self.construct_full_path()
def construct_full_path(self):
sequence = []
num_all_layers = len(self.pubs_mixes)
for i in range(num_all_layers):
mix = random.choice(self.pubs_mixes[i])
sequence.append(mix)
return sequence
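# Illustrative sketch (not part of the protocol code): the inbox semantics that
# put_into_storage()/get_clients_messages() implement, shown on plain dicts --
# each pull pops at most MAX_RETRIEVE messages in FIFO order. The value 2 is a
# hypothetical stand-in for the config.json setting.
if __name__ == '__main__':
    inbox = {'alice': ['m0', 'm1', 'm2']}
    MAX_RETRIEVE = 2
    messages = inbox['alice']
    popped, inbox['alice'] = messages[:MAX_RETRIEVE], messages[MAX_RETRIEVE:]
    print popped          # ['m0', 'm1']
    print inbox['alice']  # ['m2']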
|
cuboxi/android_external_chromium_org
|
refs/heads/kitkat
|
tools/valgrind/unused_suppressions.py
|
187
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import urllib2
import suppressions
def main():
supp = suppressions.GetSuppressions()
all_supps = []
for supps in supp.values():
all_supps += [s.description for s in supps]
sys.stdout.write(urllib2.urlopen(
'http://chromium-build-logs.appspot.com/unused_suppressions',
'\n'.join(all_supps)).read())
return 0
if __name__ == "__main__":
sys.exit(main())
|
agry/NGECore2
|
refs/heads/master
|
scripts/object/tangible/deed/vehicle_deed/speeder_stap_deed.py
|
2
|
import sys
def setup(core, object):
object.setAttachment('radial_filename', 'deeds/vehicleDeed')
return
def use(core, actor, object):
core.mountService.generateVehicle(actor, object, 'object/mobile/vehicle/shared_stap_speeder.iff', 'object/intangible/vehicle/shared_stap_speeder_pcd.iff')
return
|
KMK73/gradesavers
|
refs/heads/master
|
node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py
|
1558
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
    pretty: True if we want pretty printing with indents and new lines.
    win32: True to force Windows-style ('\r\n') line endings in the file.
  """
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
try:
xml_string = xml_string.encode(encoding)
except Exception:
xml_string = unicode(xml_string, 'latin-1').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
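# Illustrative sketch (not part of the original module): the docstring's
# Example 2 rendered through XmlToString, plus the escaping _XmlEscape applies.
# Runs only when this file is executed directly.
if __name__ == '__main__':
  spec = ['myelement', {'a': 'value1', 'b': 'value2'},
          ['childtype', 'This is'],
          ['childtype', 'it!']]
  print XmlToString(spec, pretty=True)
  print _XmlEscape('a < b & "c"')  # a &lt; b &amp; &quot;c&quot;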
|
dahlstrom-g/intellij-community
|
refs/heads/master
|
python/testData/completion/mockPatchObject2Py2/lib/mock/__init__.py
|
72
|
from mock.mock import *
|
alsoicode/cmsplugin-filer
|
refs/heads/develop
|
cmsplugin_filer_video/cms_plugins.py
|
6
|
from __future__ import unicode_literals
import os
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cmsplugin_filer_video import settings
from cmsplugin_filer_video.models import FilerVideo
from cmsplugin_filer_video.forms import VideoForm
from filer.settings import FILER_STATICMEDIA_PREFIX
class FilerVideoPlugin(CMSPluginBase):
module = 'Filer'
model = FilerVideo
name = _("Video")
form = VideoForm
render_template = "cmsplugin_filer_video/video.html"
text_enabled = True
general_fields = [
('movie', 'movie_url'),
'image',
('width', 'height'),
'auto_play',
'auto_hide',
'fullscreen',
'loop',
]
color_fields = [
'bgcolor',
'textcolor',
'seekbarcolor',
'seekbarbgcolor',
'loadingbarcolor',
'buttonoutcolor',
'buttonovercolor',
'buttonhighlightcolor',
]
fieldsets = [
(None, {
'fields': general_fields,
}),
]
if settings.VIDEO_PLUGIN_ENABLE_ADVANCED_SETTINGS:
fieldsets += [
(_('Color Settings'), {
'fields': color_fields,
'classes': ('collapse',),
}),
]
def render(self, context, instance, placeholder):
context.update({
'object': instance,
'placeholder': placeholder,
})
return context
def icon_src(self, instance):
return os.path.normpath("%s/icons/video_%sx%s.png" % (FILER_STATICMEDIA_PREFIX, 32, 32,))
plugin_pool.register_plugin(FilerVideoPlugin)
|
sgraham/nope
|
refs/heads/master
|
third_party/closure_linter/closure_linter/ecmalintrules.py
|
82
|
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking EcmaScript files for common style guide violations.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
import gflags as flags
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errorrules
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
FLAGS = flags.FLAGS
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
# TODO(robbyw): Ensure inline jsDoc is in {}
# TODO(robbyw): Check for valid JS types in parameter docs
# Shorthand
Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
"""EmcaScript lint style checking rules.
Can be used to find common style errors in JavaScript, ActionScript and other
Ecma like scripting languages. Style checkers for Ecma scripting languages
should inherit from this style checker.
Please do not add any state to EcmaScriptLintRules or to any subclasses.
All state should be added to the StateTracker subclass used for a particular
language.
"""
  # Initialized in the constructor, once the flags have been parsed.
max_line_length = -1
# Static constants.
MISSING_PARAMETER_SPACE = re.compile(r',\S')
EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
ENDS_WITH_SPACE = re.compile(r'\s$')
ILLEGAL_TAB = re.compile(r'\t')
# Regex used to split up complex types to check for invalid use of ? and |.
TYPE_SPLIT = re.compile(r'[,<>()]')
# Regex for form of author lines after the @author tag.
AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
# Acceptable tokens to remove for line too long testing.
LONG_LINE_IGNORE = frozenset(
['*', '//', '@see'] +
['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
'@param', '@return', '@returns'])
def __init__(self):
"""Initialize this lint rule object."""
checkerbase.LintRulesBase.__init__(self)
if EcmaScriptLintRules.max_line_length == -1:
EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initialize this lint rule object before parsing a new file."""
checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
is_html)
self._indentation = indentation.IndentationRules()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a @param tag."""
raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
def _CheckLineLength(self, last_token, state):
"""Checks whether the line is too long.
Args:
last_token: The last token in the line.
state: parser_state object that indicates the current state in the page
"""
    # Start from the last token so that we have the flag object attached to
    # any DOC_FLAG tokens.
line_number = last_token.line_number
token = last_token
# Build a representation of the string where spaces indicate potential
# line-break locations.
line = []
while token and token.line_number == line_number:
if state.IsTypeToken(token):
line.insert(0, 'x' * len(token.string))
elif token.type in (Type.IDENTIFIER, Type.NORMAL):
# Dots are acceptable places to wrap.
line.insert(0, token.string.replace('.', ' '))
else:
line.insert(0, token.string)
token = token.previous
line = ''.join(line)
line = line.rstrip('\n\r\f')
try:
length = len(unicode(line, 'utf-8'))
except (LookupError, UnicodeDecodeError):
# Unknown encoding. The line length may be wrong, as was originally the
# case for utf-8 (see bug 1735846). For now just accept the default
      # length, but as we find problems we can either add tests for other
# possible encodings or return without an error to protect against
# false positives at the cost of more false negatives.
length = len(line)
if length > EcmaScriptLintRules.max_line_length:
# If the line matches one of the exceptions, then it's ok.
for long_line_regexp in self.GetLongLineExceptions():
if long_line_regexp.match(last_token.line):
return
      # If the line consists of only one "word", or multiple words but all
      # except one are ignorable, then it's ok.
parts = set(line.split())
# We allow two "words" (type and name) when the line contains @param
max_parts = 1
if '@param' in parts:
max_parts = 2
# Custom tags like @requires may have url like descriptions, so ignore
# the tag, similar to how we handle @see.
custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
> max_parts):
self._HandleError(
errors.LINE_TOO_LONG,
'Line too long (%d characters).' % len(line), last_token)
def _CheckJsDocType(self, token):
"""Checks the given type for style errors.
Args:
token: The DOC_FLAG token for the flag whose type to check.
"""
flag = token.attached_object
flag_type = flag.type
    if flag_type and not flag_type.isspace():
pieces = self.TYPE_SPLIT.split(flag_type)
if len(pieces) == 1 and flag_type.count('|') == 1 and (
flag_type.endswith('|null') or flag_type.startswith('null|')):
self._HandleError(
errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
'Prefer "?Type" to "Type|null": "%s"' % flag_type, token)
# TODO(user): We should do actual parsing of JsDoc types to report an
# error for wrong usage of '?' and '|' e.g. {?number|string|null} etc.
if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(
errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
def _CheckForMissingSpaceBeforeToken(self, token):
"""Checks for a missing space at the beginning of a token.
Reports a MISSING_SPACE error if the token does not begin with a space or
the previous token doesn't end with a space and the previous token is on the
same line as the token.
Args:
token: The token being checked
"""
# TODO(user): Check if too many spaces?
if (len(token.string) == len(token.string.lstrip()) and
token.previous and token.line_number == token.previous.line_number and
len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
self._HandleError(
errors.MISSING_SPACE,
'Missing space before "%s"' % token.string,
token,
position=Position.AtBeginning())
def _CheckOperator(self, token):
"""Checks an operator for spacing and line style.
Args:
token: The operator token.
"""
last_code = token.metadata.last_code
if not self._ExpectSpaceBeforeOperator(token):
if (token.previous and token.previous.type == Type.WHITESPACE and
last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
token.previous, position=Position.All(token.previous.string))
elif (token.previous and
not token.previous.IsComment() and
token.previous.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.MISSING_SPACE,
'Missing space before "%s"' % token.string, token,
position=Position.AtBeginning())
# Check that binary operators are not used to start lines.
if ((not last_code or last_code.line_number != token.line_number) and
not token.metadata.IsUnaryOperator()):
self._HandleError(
errors.LINE_STARTS_WITH_OPERATOR,
'Binary operator should go on previous line "%s"' % token.string,
token)
def _ExpectSpaceBeforeOperator(self, token):
"""Returns whether a space should appear before the given operator token.
Args:
token: The operator token.
Returns:
Whether there should be a space before the token.
"""
if token.string == ',' or token.metadata.IsUnaryPostOperator():
return False
# Colons should appear in labels, object literals, the case of a switch
# statement, and ternary operator. Only want a space in the case of the
# ternary operator.
if (token.string == ':' and
token.metadata.context.type in (Context.LITERAL_ELEMENT,
Context.CASE_BLOCK,
Context.STATEMENT)):
return False
if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
return False
return True
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
# Store some convenience variables
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
last_non_space_token = state.GetLastNonSpaceToken()
token_type = token.type
# Process the line change.
if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
self._HandleError(*indentation_error)
if last_in_line:
self._CheckLineLength(token, state)
if token_type == Type.PARAMETERS:
# Find missing spaces in parameter lists.
if self.MISSING_PARAMETER_SPACE.search(token.string):
fix_data = ', '.join([s.strip() for s in token.string.split(',')])
self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
token, position=None, fix_data=fix_data.strip())
# Find extra spaces at the beginning of parameter lists. Make sure
# we aren't at the beginning of a continuing multi-line list.
if not first_in_line:
space_count = len(token.string) - len(token.string.lstrip())
if space_count:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
token, position=Position(0, space_count))
elif (token_type == Type.START_BLOCK and
token.metadata.context.type == Context.BLOCK):
self._CheckForMissingSpaceBeforeToken(token)
elif token_type == Type.END_BLOCK:
# This check is for object literal end block tokens, but there is no need
# to test that condition since a comma at the end of any other kind of
# block is undoubtedly a parse error.
last_code = token.metadata.last_code
if last_code.IsOperator(','):
self._HandleError(
errors.COMMA_AT_END_OF_LITERAL,
'Illegal comma at end of object literal', last_code,
position=Position.All(last_code.string))
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
if state.InTopLevelFunction():
          # A semicolon should not be included at the end of a function
          # declaration.
if not state.InAssignedFunction():
if not last_in_line and token.next.type == Type.SEMICOLON:
self._HandleError(
errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
'Illegal semicolon after function declaration',
token.next, position=Position.All(token.next.string))
# A semicolon should be included at the end of a function expression
# that is not immediately called.
if state.InAssignedFunction():
if not is_immediately_called and (
last_in_line or token.next.type != Type.SEMICOLON):
self._HandleError(
errors.MISSING_SEMICOLON_AFTER_FUNCTION,
'Missing semicolon after function assigned to a variable',
token, position=Position.AtEnd(token.string))
if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
'Interface methods cannot contain code', last_code)
elif (state.IsBlockClose() and
token.next and token.next.type == Type.SEMICOLON):
if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
and last_code.metadata.context.type != Context.OBJECT_LITERAL):
self._HandleError(
errors.REDUNDANT_SEMICOLON,
'No semicolon is required to end a code block',
token.next, position=Position.All(token.next.string))
elif token_type == Type.SEMICOLON:
if token.previous and token.previous.type == Type.WHITESPACE:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before ";"',
token.previous, position=Position.All(token.previous.string))
if token.next and token.next.line_number == token.line_number:
if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
# TODO(robbyw): Error about no multi-statement lines.
pass
elif token.next.type not in (
Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
self._HandleError(
errors.MISSING_SPACE,
'Missing space after ";" in for statement',
token.next,
position=Position.AtBeginning())
last_code = token.metadata.last_code
if last_code and last_code.type == Type.SEMICOLON:
        # Allow a single double semicolon in for loops for cases like:
        # for (;;) { }.
# NOTE(user): This is not a perfect check, and will not throw an error
# for cases like: for (var i = 0;; i < n; i++) {}, but then your code
# probably won't work either.
for_token = tokenutil.CustomSearch(
last_code,
lambda token: token.type == Type.KEYWORD and token.string == 'for',
end_func=lambda token: token.type == Type.SEMICOLON,
distance=None,
reverse=True)
if not for_token:
self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
token, position=Position.All(token.string))
elif token_type == Type.START_PAREN:
if token.previous and token.previous.type == Type.KEYWORD:
self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
token, position=Position.AtBeginning())
elif token.previous and token.previous.type == Type.WHITESPACE:
before_space = token.previous.previous
if (before_space and before_space.line_number == token.line_number and
before_space.type == Type.IDENTIFIER):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "("',
token.previous, position=Position.All(token.previous.string))
elif token_type == Type.START_BRACKET:
self._HandleStartBracket(token, last_non_space_token)
elif token_type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
# beginning of a line.
if (token.previous and token.previous.type == Type.WHITESPACE and
not token.previous.IsFirstInLine() and
not (last_non_space_token and last_non_space_token.line_number ==
token.line_number and
last_non_space_token.type == Type.SEMICOLON)):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' %
token.string, token.previous,
position=Position.All(token.previous.string))
if token.type == Type.END_BRACKET:
last_code = token.metadata.last_code
if last_code.IsOperator(','):
self._HandleError(
errors.COMMA_AT_END_OF_LITERAL,
'Illegal comma at end of array literal', last_code,
position=Position.All(last_code.string))
elif token_type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
if token.next:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string,
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace',
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
token, position=Position.All(token.string))
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if last_in_line:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
token, position=Position.All(token.string))
elif not first_in_line and not token.next.IsComment():
if token.length > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after "%s"' %
token.previous.string, token,
position=Position(1, len(token.string) - 1))
elif token_type == Type.OPERATOR:
self._CheckOperator(token)
elif token_type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'bug':
# TODO(robbyw): Check for exactly 1 space on the left.
string = token.next.string.lstrip()
string = string.split(' ', 1)[0]
if not string.isdigit():
self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
'@bug should be followed by a bug number', token)
elif flag.flag_type == 'suppress':
if flag.type is None:
# A syntactically invalid suppress tag will get tokenized as a normal
# flag, indicating an error.
self._HandleError(
errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
else:
for suppress_type in re.split(r'\||,', flag.type):
if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(
errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % suppress_type, token)
elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
result = self.AUTHOR_SPEC.match(string)
if not result:
self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
'Author tag line should be of the form: '
'@author foo@somewhere.com (Your Name)',
token.next)
else:
# Check spacing between email address and name. Do this before
# checking earlier spacing so positions are easier to calculate for
# autofixing.
num_spaces = len(result.group(2))
if num_spaces < 1:
self._HandleError(errors.MISSING_SPACE,
'Missing space after email address',
token.next, position=Position(result.start(2), 0))
elif num_spaces > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after email address',
token.next,
position=Position(result.start(2) + 1, num_spaces - 1))
# Check for extra spaces before email address. Can't be too few, if
# not at least one we wouldn't match @author tag.
num_spaces = len(result.group(1))
if num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space before email address',
token.next, position=Position(1, num_spaces - 1))
elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
not self._limited_doc_checks):
if flag.flag_type == 'param':
if flag.name is None:
self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
'Missing name in @param tag', token)
        if not flag.description:
flag_name = token.type
if 'name' in token.values:
flag_name = '@' + token.values['name']
if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
self._HandleError(
errors.MISSING_JSDOC_TAG_DESCRIPTION,
'Missing description in %s tag' % flag_name, token)
else:
self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
if flag.flag_type in state.GetDocFlag().HAS_TYPE:
if flag.type_start_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.type_start_token)
if flag.type and not flag.type.isspace():
self._CheckJsDocType(token)
if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
token.values['name'] not in FLAGS.custom_jsdoc_tags):
self._HandleError(
errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token)
if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
token.values['name'] == 'inheritDoc' and
token_type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc',
token)
elif token_type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
if ((not state.InFunction() or state.InConstructor()) and
state.InTopLevel() and not state.InObjectLiteralDescendant()):
jsdoc = state.GetDocComment()
if not state.HasDocComment(identifier):
# Only test for documentation on identifiers with .s in them to
# avoid checking things like simple variables. We don't require
# documenting assignments to .prototype itself (bug 1880803).
if (not state.InConstructor() and
identifier.find('.') != -1 and not
identifier.endswith('.prototype') and not
self._limited_doc_checks):
comment = state.GetLastComment()
if not (comment and comment.lower().count('jsdoc inherited')):
self._HandleError(
errors.MISSING_MEMBER_DOCUMENTATION,
"No docs found for member '%s'" % identifier,
token)
elif jsdoc and (not state.InConstructor() or
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
            # Can have a private class which inherits documentation from a
            # public superclass.
            #
            # @inheritDoc is deprecated in favor of using @override, and the
            # two flags are checked identically below.
if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token)
if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
('underscore' not in jsdoc.suppressions) and not
((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
('accessControls' in jsdoc.suppressions))):
self._HandleError(
errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
self._HandleError(
errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
elif (jsdoc.HasFlag('private') and
not self.InExplicitlyTypedLanguage()):
            # Some ECMA implementations conventionally hide public fields
            # from documentation using the @private tag.
self._HandleError(
errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
# These flags are only legal on localizable message definitions;
# such variables always begin with the prefix MSG_.
for f in ('desc', 'hidden', 'meaning'):
if (jsdoc.HasFlag(f)
and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1):
self._HandleError(
errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @%s JsDoc' % (identifier, f),
token)
      # Check for illegally assigning live objects as prototype property values.
index = identifier.find('.prototype.')
# Ignore anything with additional .s after the prototype.
if index != -1 and identifier.find('.', index + 11) == -1:
equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
if next_code and (
next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
next_code.IsOperator('new')):
self._HandleError(
errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
'Member %s cannot have a non-primitive value' % identifier,
token)
elif token_type == Type.END_PARAMETERS:
# Find extra space at the end of parameter lists. We check the token
# prior to the current one when it is a closing paren.
if (token.previous and token.previous.type == Type.PARAMETERS
and self.ENDS_WITH_SPACE.search(token.previous.string)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
token.previous)
jsdoc = state.GetDocComment()
if state.GetFunction().is_interface:
if token.previous and token.previous.type == Type.PARAMETERS:
self._HandleError(
errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
'Interface constructor cannot have parameters',
token.previous)
elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
and not jsdoc.InheritsDocumentation()
and not state.InObjectLiteralDescendant() and not
jsdoc.IsInvalidated()):
distance, edit = jsdoc.CompareParameters(state.GetParams())
if distance:
params_iter = iter(state.GetParams())
docs_iter = iter(jsdoc.ordered_params)
for op in edit:
if op == 'I':
# Insertion.
# Parsing doc comments is the same for all languages
# but some languages care about parameters that don't have
# doc comments and some languages don't care.
              # Languages that don't allow variables to be typed, such as
              # JavaScript, care; languages such as ActionScript or Java
              # that allow variables to be typed don't care.
if not self._limited_doc_checks:
self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
'Found docs for non-existing parameter: "%s"' %
docs_iter.next(), token)
elif op == 'S':
# Substitution
if not self._limited_doc_checks:
self._HandleError(
errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
params_iter.next()
docs_iter.next()
elif token_type == Type.STRING_TEXT:
# If this is the first token after the start of the string, but it's at
# the end of a line, we know we have a multi-line string.
if token.previous.type in (
Type.SINGLE_QUOTE_STRING_START,
Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
self._HandleError(errors.MULTI_LINE_STRING,
'Multi-line strings are not allowed', token)
# This check is orthogonal to the ones above, and repeats some types, so
# it is a plain if and not an elif.
if token.type in Type.COMMENT_TYPES:
if self.ILLEGAL_TAB.search(token.string):
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in comment "%s"' % token.string, token)
trimmed = token.string.rstrip()
if last_in_line and token.string != trimmed:
# Check for extra whitespace at the end of a line.
self._HandleError(
errors.EXTRA_SPACE, 'Extra space at end of line', token,
position=Position(len(trimmed), len(token.string) - len(trimmed)))
# This check is also orthogonal since it is based on metadata.
if token.metadata.is_implied_semicolon:
self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token)
def _HandleStartBracket(self, token, last_non_space_token):
"""Handles a token that is an open bracket.
Args:
token: The token to handle.
last_non_space_token: The last token that was not a space.
"""
if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, position=Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
    # In the future, when rules for computing exactly how many spaces
# lines should be indented are added, then we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not token.IsFirstInLine() and token.previous and
token.previous.type not in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, position=Position.AtBeginning())
def Finalize(self, state):
"""Perform all checks that need to occur after all lines are processed.
Args:
state: State of the parser after parsing all tokens
Raises:
TypeError: If not overridden.
"""
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
if state.GetLastLine() and not (
state.GetLastLine().isspace() or
state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
self._HandleError(
errors.FILE_MISSING_NEWLINE,
'File does not end with new line. (%s)' % state.GetLastLine(),
last_non_space_token)
try:
self._indentation.Finalize()
except Exception, e:
self._HandleError(
errors.FILE_DOES_NOT_PARSE,
str(e),
last_non_space_token)
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit.
Returns:
A list of regexps, used as matches (rather than searches).
"""
return []
def InExplicitlyTypedLanguage(self):
"""Returns whether this ecma implementation is explicitly typed."""
return False
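# A hedged sketch (added for illustration): the smallest concrete subclass, as
# the class docstring prescribes -- language-specific checkers override the
# abstract hook. '_ExampleLintRules' is a hypothetical name, and the error
# code/message are assumptions rather than the real JavaScript rules.
class _ExampleLintRules(EcmaScriptLintRules):
  """Minimal concrete checker, for demonstration purposes only."""
  def HandleMissingParameterDoc(self, token, param_name):
    self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
                      'Missing docs for parameter: "%s"' % param_name, token)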
|
bmannix/selenium
|
refs/heads/master
|
py/selenium/webdriver/safari/__init__.py
|
2454
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
kbauskar/mysql-server
|
refs/heads/5.7
|
storage/ndb/memcache/extra/memcached/testsuite/breakdancer/breakdancer.py
|
201
|
#!/usr/bin/env python
import itertools
class Condition(object):
"""Something asserted to be true during the test.
A given condition may be used as a precondition or a
postcondition."""
def __call__(self, k, state):
"""Called with a key and a state. True if the condition is met."""
return True
class Effect(object):
"""The affect an action will perform."""
def __call__(self, k, state):
"""Called with a key and a state.
The effect modifies the state as appropriate."""
class Action(object):
"""Actions are the operations that will be permuted into test cases.
Each action has a collection of preconditions and postconditions
that will be evaluated for checking input and output state for the
action.
Action.preconditions is the collection of conditions that must all
be true upon input to the action. If any condition is not true,
the effect is not executed and the action state is considered
"errored."
Action.effect is the callable that is expected to alter the state
to satisfy the postconditions of the action.
Action.postconditions is the collection of conditions that must
all be true after the effect of the action completes.
"""
preconditions = []
effect = None
postconditions = []
enabled = True
@property
def name(self):
"""The name of this action (default derived from class name)"""
n = self.__class__.__name__
return n[0].lower() + n[1:]
class Driver(object):
"""The driver "performs" the test."""
def newState(self):
"""Initialize and return the state for a test."""
return {}
def preSuite(self, seq):
"""Invoked with the sequence of tests before any are run."""
def startSequence(self, seq):
"""Invoked with the sequence of actions in a single test
before it is performed."""
def startAction(self, action):
"""Invoked when before starting an action."""
def endAction(self, action, state, errored):
"""Invoked after the action is performed."""
def endSequence(self, seq, state):
"""Invoked at the end of a sequence of tests."""
def postSuite(self, seq):
"""Invoked with the sequence of tests after all of them are run."""
def runTest(actions, driver, duplicates=3, length=4):
"""Run a test with the given collection of actions and driver.
The optional argument `duplicates' specifies how many times a
given action may be duplicated in a sequence.
The optional argument `length` specifies how long each test
sequence is.
"""
instances = itertools.chain(*itertools.repeat([a() for a in actions],
duplicates))
tests = set(itertools.permutations(instances, length))
driver.preSuite(tests)
for seq in sorted(tests):
state = driver.newState()
driver.startSequence(seq)
for a in seq:
driver.startAction(a)
haserror = not all(p(state) for p in a.preconditions)
if not haserror:
try:
a.effect(state)
haserror = not all(p(state) for p in a.postconditions)
except:
haserror = True
driver.endAction(a, state, haserror)
driver.endSequence(seq, state)
driver.postSuite(tests)
def findActions(classes):
"""Helper function to extract action subclasses from a collection
of classes."""
actions = []
for __t in (t for t in classes if isinstance(type, type(t))):
if Action in __t.__mro__ and __t != Action and __t.enabled:
actions.append(__t)
return actions
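# Illustrative sketch (not part of the original module): a tiny suite wired the
# way runTest() expects. Note runTest() invokes preconditions, effects and
# postconditions with the state alone, so the demo methods take (self, state).
# Class names are hypothetical; assumes Python 2, since runTest() sorts action
# instances directly.
if __name__ == '__main__':
    class Marked(Condition):
        def __call__(self, state):
            return state.get('mark', False)
    class Mark(Action):
        postconditions = [Marked()]
        def effect(self, state):
            state['mark'] = True
    class Unmark(Action):
        preconditions = [Marked()]
        def effect(self, state):
            del state['mark']
    class PrintingDriver(Driver):
        def endAction(self, action, state, errored):
            print '%s -> %s (errored=%s)' % (action.name, state, errored)
    runTest(findActions([Mark, Unmark]), PrintingDriver(),
            duplicates=1, length=2)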
|
eob/synckit-research
|
refs/heads/master
|
perf/usage_generator_2010_02_01.py
|
1
|
# Site Model
import networkx as nx
from gen_model import *
from random import *
import datetime
import pickle
import os
BLOG_TEST = 1
WIKI_TEST = 0
if BLOG_TEST:
VISIT_RATE = 4
VISIT_UNIT = "days"
NUM_USERS = 20
PERCENT_NEW = 0.3
FROM_DATE = datetime.datetime(2010, 05, 01)
TO_DATE = datetime.datetime(2010, 05, 07)
SITE = OnePageBlog('')
    TEMPLATE_ENDPOINT = "/static/pages/blog.html"
    DATA_ENDPOINT = "/blog/entries"
    PRERENDERED_ENDPOINT = "/blog/traditional"
if WIKI_TEST:
# VISIT_RATE = 4
VISIT_UNIT = "days"
# NUM_USERS = 20
# PERCENT_NEW = 0.3
FROM_DATE = datetime.datetime(2010, 05, 01)
TO_DATE = datetime.datetime(2010, 05, 15)
SITE = Wiki("", 0.5)
    # TEMPLATE_ENDPOINT = "/static/pages/blog.html"
    # DATA_ENDPOINT = "/blog/entries"
    # PRERENDERED_ENDPOINT = "/blog/traditional"
tick_hash = {VISIT_UNIT : 1}
def create_users(number, percent_new, visit_rate, visit_unit):
# Generate the users
users = []
for i in range(number):
last_time = None
if (random() >= percent_new):
time_delta = expovariate(visit_rate)
delta_hash = {visit_unit : time_delta}
last_time = FROM_DATE - datetime.timedelta(**delta_hash)
user = User(visit_rate, visit_unit, last_time)
user.plan_next_visit()
users.append(user)
else:
last_time = None
user = User(visit_rate, visit_unit, last_time)
            # pick a first visit uniformly at random within the test window
            span = TO_DATE - FROM_DATE
            delta_seconds = random() * (span.days * 86400 + span.seconds)
            user.next_visit_time = FROM_DATE + datetime.timedelta(seconds=delta_seconds)
users.append(user)
return users
def saveToFile(obj, filename):
output = open(filename, 'wb')
pickle.dump(obj, output)
output.close()
def loadFromFile(filename):
input = open(filename, 'rb')
data = pickle.load(input)
input.close()
return data
def run_test(site, users):
all_visits = []
# Now step forward in time
now = FROM_DATE
while now < TO_DATE:
# For all users who have a planned visit
for user in users:
if user.next_visit_time <= now:
all_visits.append(user.perform_next_visit(SITE))
# Advance the clock
now += datetime.timedelta(**tick_hash)
return all_visits
def query_for_visit(visit, strategy):
if strategy == 'tokyo':
return 'queries={"Posts":{"now":"%s"}}' % (str(visit.this_time))
elif strategy == 'traditional':
return 'queries={"Posts":{"now":"%s"}}' % (str(visit.this_time))
else:
        if visit.last_time is None:
return 'queries={"Posts":{"now":"%s"}}' % (str(visit.this_time))
else:
return 'queries={"Posts":{"now":"%s", "max":"%s"}}' % (str(visit.this_time), str(visit.last_time))
def url_strings_for_visit(visit, strategy):
page = visit.click_trail.path[0]
strings = []
if strategy == 'traditional':
        strings.append("%s%s method=POST contents='%s'" % (page.url, PRERENDERED_ENDPOINT, query_for_visit(visit, strategy)))
else:
        if visit.last_time is None:
            strings.append(page.url + TEMPLATE_ENDPOINT)
            strings.append(" %s%s method=POST contents='%s'" % (page.url, DATA_ENDPOINT, query_for_visit(visit, strategy)))
            strings.append(" /static/manifest")
        else:
            strings.append("%s%s method=POST contents='%s'" % (page.url, DATA_ENDPOINT, query_for_visit(visit, strategy)))
return strings
def write_httperf_file(urls, filename, header=""):
output = open(filename, 'wb')
if len(header) > 0:
output.write(header)
for url in urls:
output.write(url + '\n')
output.close()
def write_json_file(users, filename, baseurl, header=""):
output = open(filename, 'wb')
if len(header) > 0:
output.write("// %s" % (header) + '\n')
output.write("runTestWithUsers([\n")
visits = [user.visits_to_json(baseurl) for user in users]
output.write(",".join(visits))
output.write("]);\n")
output.close()
# --------------------------------------------------------------------------------
# | The Test
# --------------------------------------------------------------------------------
def ensure_directory(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def write_test_files(directory_name, test_name, num_users, percent_new, num_visits, in_period):
ensure_directory(directory_name)
users = create_users(num_users, percent_new, num_visits, in_period)
visits = run_test(SITE,users)
for strategy in ('synckit', 'tokyo', 'traditional'):
# Write The Server-Oriented Tests
urls = []
for visit in visits:
urls.extend(url_strings_for_visit(visit, strategy))
urls.append("")
comments = "# Test Name: %s\n# Strategy: %s\n# Number Users: %s\n# Percent New: %s\n# Number Visits: %s / %s\n" % (test_name, strategy, str(num_users), str(percent_new), str(num_visits), str(in_period))
write_httperf_file(urls, "%s/%s_%s.txt" % (directory_name, test_name, strategy), header=comments)
# Write The Client-Oriented Tests
if strategy == 'synckit':
url = '/static/pages/blog.html'
elif strategy == 'tokyo':
url = '/static/pages/blog-flying.html'
elif strategy == 'traditional':
url = '/blog/traditional'
comments = "# Test Name: %s # Strategy: %s # Number Users: %s # Percent New: %s # Number Visits: %s / %s\n" % (test_name, strategy, str(num_users), str(percent_new), str(num_visits), str(in_period))
write_json_file(users, "%s/%s_%s.js" % (directory_name, test_name, strategy), url, header=comments)
now = datetime.datetime.now()
dirname = now.strftime("%Y-%m-%d.%H:%M:%S")
if BLOG_TEST:
# print "NOTE! need to make num users 100 and new usrs rate .5"
write_test_files(dirname, "test_freq_4_per_update", 100, 0.5, 48, "days")
write_test_files(dirname, "test_freq_3_per_update", 100, 0.5, 36, "days")
write_test_files(dirname, "test_freq_2_per_update", 100, 0.5, 24, "days")
write_test_files(dirname, "test_freq_1_per_update", 100, 0.5, 12, "days")
write_test_files(dirname, "test_freq_0.5_per_update", 100, 0.5, 6, "days")
write_test_files(dirname, "test_freq_0.42_per_update", 100, 0.5, 5, "days")
write_test_files(dirname, "test_freq_0.33_per_update", 100, 0.5, 4, "days")
write_test_files(dirname, "test_freq_0.25_per_update", 100, 0.5, 3, "days")
write_test_files(dirname, "test_freq_0.16_per_update", 100, 0.5, 2, "days")
write_test_files(dirname, "test_freq_0.08_per_update", 100, 0.5, 1, "days")
if WIKI_TEST:
write_test_files(dirname, "test_freq_6_per_day", 40, 0.0, 1, "days")
# if WIKI_TEST:
# PRINT ALL USERS
#for i in range(len(users)):
# user = users[i]
# print "User " + str(i)
# print "----------------------------"
# for visit in user.visits:
# print str(visit)
# PRINT ALL VISITS
#for visit in all_visits:
# page = visit.click_trail.path[0]
# if visit.last_time == None:
# print page.url + "template_file.html"
# print " " + page.url + "data_endpoint.json method=POST contents=queries={\"Posts\":{\"now\":\"" + str(visit.this_time) + "\"}}"
# print
# else:
# print page.url + "data_endpoint.json method=POST contents=queries={\"Posts\":{\"max\":\"" + str(visit.last_time) + "\", \"now\":\"" + str(visit.this_time) + "\"}}"
# print
# print str(visit)
|
dirkcuys/save4life
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# save4life-api documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 22 09:07:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'save4life-api'
copyright = u'2015, Dirk Uys'
author = u'Dirk Uys'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open('../VERSION') as fp:
version = fp.read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'save4life-apidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'save4life-api.tex', u'save4life-api Documentation',
u'Dirk Uys', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'save4life-api', u'save4life-api Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'save4life-api', u'save4life-api Documentation',
author, 'save4life-api', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
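# With this mapping, a cross-reference such as :py:class:`list` can resolve
# against the online Python standard library documentation.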
|
huangyh09/brie
|
refs/heads/master
|
brie/models/__init__.py
|
1
|
from .model_TFProb import BRIE2
from .model_wrap import fit_BRIE_matrix, fitBRIE
from .base_model import get_CI95, BRIE_base_lik, LogitNormal
|
vegitron/pyodbc
|
refs/heads/master
|
tests2/mysqltests.py
|
22
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
usage = """\
usage: %prog [options] connection_string
Unit tests for MySQL. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and
t2 as necessary. The default installation of MySQL allows you to connect locally with no password and already contains
a 'test' database, so you can probably use the following. (Update the driver name as appropriate.)
./mysqltests DRIVER={MySQL};DATABASE=test
These tests use the pyodbc library from the build directory, not the version installed in your
Python directories. You must run `python setup.py build` before running these tests.
"""
import sys, os, re
import unittest
from decimal import Decimal
from datetime import datetime, date, time
from os.path import join, getsize, dirname, abspath, basename
from testutils import *
_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-'
def _generate_test_string(length):
"""
    Returns a string composed of `_TESTSTR`, repeated as necessary, that is `length` characters long.
To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are
tested with 3 lengths. This function helps us generate the test data.
We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will
be hidden and to help us manually identify where a break occurs.
"""
if length <= len(_TESTSTR):
return _TESTSTR[:length]
c = (length + len(_TESTSTR)-1) / len(_TESTSTR)
v = _TESTSTR * c
return v[:length]
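# A quick sketch of the helper above:
#
#   _generate_test_string(6)   # returns '012345'
#   _generate_test_string(50)  # repeats _TESTSTR and truncates to 50 chars
#
# Using a recognizable repeating pattern means a corrupted round-trip shows
# up at an identifiable offset instead of being hidden.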
class MySqlTestCase(unittest.TestCase):
SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ]
LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ]
ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ]
UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ]
BLOB_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ]
def __init__(self, method_name, connection_string):
unittest.TestCase.__init__(self, method_name)
self.connection_string = connection_string
def setUp(self):
self.cnxn = pyodbc.connect(self.connection_string)
self.cursor = self.cnxn.cursor()
for i in range(3):
try:
self.cursor.execute("drop table t%d" % i)
self.cnxn.commit()
except:
pass
for i in range(3):
try:
self.cursor.execute("drop procedure proc%d" % i)
self.cnxn.commit()
except:
pass
self.cnxn.rollback()
def tearDown(self):
try:
self.cursor.close()
self.cnxn.close()
except:
# If we've already closed the cursor or connection, exceptions are thrown.
pass
def test_multiple_bindings(self):
"More than one bind and select on a cursor"
self.cursor.execute("create table t1(n int)")
self.cursor.execute("insert into t1 values (?)", 1)
self.cursor.execute("insert into t1 values (?)", 2)
self.cursor.execute("insert into t1 values (?)", 3)
for i in range(3):
self.cursor.execute("select n from t1 where n < ?", 10)
self.cursor.execute("select n from t1 where n < 3")
def test_different_bindings(self):
self.cursor.execute("create table t1(n int)")
self.cursor.execute("create table t2(d datetime)")
self.cursor.execute("insert into t1 values (?)", 1)
self.cursor.execute("insert into t2 values (?)", datetime.now())
def test_datasources(self):
p = pyodbc.dataSources()
self.assert_(isinstance(p, dict))
def test_getinfo_string(self):
value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
self.assert_(isinstance(value, str))
def test_getinfo_bool(self):
value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
self.assert_(isinstance(value, bool))
def test_getinfo_int(self):
value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
self.assert_(isinstance(value, (int, long)))
def test_getinfo_smallint(self):
value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
self.assert_(isinstance(value, int))
def _test_strtype(self, sqltype, value, colsize=None):
"""
The implementation for string, Unicode, and binary tests.
"""
assert colsize is None or (value is None or colsize >= len(value))
if colsize:
sql = "create table t1(s %s(%s))" % (sqltype, colsize)
else:
sql = "create table t1(s %s)" % sqltype
try:
self.cursor.execute(sql)
except:
print '>>>>', sql
self.cursor.execute("insert into t1 values(?)", value)
v = self.cursor.execute("select * from t1").fetchone()[0]
# Removing this check for now until I get the charset working properly.
# If we use latin1, results are 'str' instead of 'unicode', which would be
# correct. Setting charset to ucs-2 causes a crash in SQLGetTypeInfo(SQL_DATETIME).
# self.assertEqual(type(v), type(value))
if value is not None:
self.assertEqual(len(v), len(value))
self.assertEqual(v, value)
#
# varchar
#
def test_varchar_null(self):
self._test_strtype('varchar', None, 100)
# Generate a test for each fencepost size: test_varchar_0, etc.
def _maketest(value):
def t(self):
self._test_strtype('varchar', value, max(1, len(value)))
return t
for value in ANSI_FENCEPOSTS:
locals()['test_varchar_%s' % len(value)] = _maketest(value)
# Generate a test using Unicode.
for value in UNICODE_FENCEPOSTS:
locals()['test_wvarchar_%s' % len(value)] = _maketest(value)
def test_varchar_many(self):
self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))")
v1 = 'ABCDEFGHIJ' * 30
v2 = '0123456789' * 30
v3 = '9876543210' * 30
        self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3)
row = self.cursor.execute("select c1, c2, c3 from t1").fetchone()
self.assertEqual(v1, row.c1)
self.assertEqual(v2, row.c2)
self.assertEqual(v3, row.c3)
def test_varchar_upperlatin(self):
self._test_strtype('varchar', 'á', colsize=3)
#
# binary
#
def test_null_binary(self):
self._test_strtype('varbinary', None, 100)
def test_large_null_binary(self):
# Bug 1575064
self._test_strtype('varbinary', None, 4000)
# Generate a test for each fencepost size: test_binary_0, etc.
def _maketest(value):
def t(self):
self._test_strtype('varbinary', bytearray(value), max(1, len(value)))
return t
for value in ANSI_FENCEPOSTS:
locals()['test_binary_%s' % len(value)] = _maketest(value)
#
# blob
#
def test_blob_null(self):
self._test_strtype('blob', None)
# Generate a test for each fencepost size: test_blob_0, etc.
def _maketest(value):
def t(self):
self._test_strtype('blob', bytearray(value))
return t
for value in BLOB_FENCEPOSTS:
locals()['test_blob_%s' % len(value)] = _maketest(value)
def test_blob_upperlatin(self):
self._test_strtype('blob', bytearray('á'))
#
# text
#
def test_null_text(self):
self._test_strtype('text', None)
# Generate a test for each fencepost size: test_text_0, etc.
def _maketest(value):
def t(self):
self._test_strtype('text', value)
return t
for value in ANSI_FENCEPOSTS:
locals()['test_text_%s' % len(value)] = _maketest(value)
def test_text_upperlatin(self):
self._test_strtype('text', 'á')
#
# unicode
#
def test_unicode_query(self):
self.cursor.execute(u"select 1")
#
# bit
#
    # The MySQL driver maps BIT columns to the ODBC bit data type, but they don't behave quite like a Boolean value
# (which is what the ODBC bit data type really represents). The MySQL BOOL data type is just an alias for a small
# integer, so pyodbc can't recognize it and map it back to True/False.
#
# You can use both BIT and BOOL and they will act as you expect if you treat them as integers. You can write 0 and
# 1 to them and they will work.
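    # A minimal sketch (same cursor as in setUp) of treating BIT as an integer:
    #
    #   self.cursor.execute("create table t1(b bit)")
    #   self.cursor.execute("insert into t1 values (?)", 1)
    #   v = self.cursor.execute("select b from t1").fetchone()[0]
    #   # Writing 0/1 and comparing as integers works; a bool mapping does not.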
# def test_bit(self):
# value = True
# self.cursor.execute("create table t1(b bit)")
# self.cursor.execute("insert into t1 values (?)", value)
# v = self.cursor.execute("select b from t1").fetchone()[0]
# self.assertEqual(type(v), bool)
# self.assertEqual(v, value)
#
# def test_bit_string_true(self):
# self.cursor.execute("create table t1(b bit)")
# self.cursor.execute("insert into t1 values (?)", "xyzzy")
# v = self.cursor.execute("select b from t1").fetchone()[0]
# self.assertEqual(type(v), bool)
# self.assertEqual(v, True)
#
# def test_bit_string_false(self):
# self.cursor.execute("create table t1(b bit)")
# self.cursor.execute("insert into t1 values (?)", "")
# v = self.cursor.execute("select b from t1").fetchone()[0]
# self.assertEqual(type(v), bool)
# self.assertEqual(v, False)
#
# decimal
#
def test_small_decimal(self):
# value = Decimal('1234567890987654321')
value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct)
self.cursor.execute("create table t1(d numeric(19))")
self.cursor.execute("insert into t1 values(?)", value)
v = self.cursor.execute("select * from t1").fetchone()[0]
self.assertEqual(type(v), Decimal)
self.assertEqual(v, value)
def test_small_decimal_scale(self):
# The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation
# example in the C Data Types appendix.
value = '1000.10'
value = Decimal(value)
self.cursor.execute("create table t1(d numeric(20,6))")
self.cursor.execute("insert into t1 values(?)", value)
v = self.cursor.execute("select * from t1").fetchone()[0]
self.assertEqual(type(v), Decimal)
self.assertEqual(v, value)
def test_negative_decimal_scale(self):
value = Decimal('-10.0010')
self.cursor.execute("create table t1(d numeric(19,4))")
self.cursor.execute("insert into t1 values(?)", value)
v = self.cursor.execute("select * from t1").fetchone()[0]
self.assertEqual(type(v), Decimal)
self.assertEqual(v, value)
def test_subquery_params(self):
"""Ensure parameter markers work in a subquery"""
self.cursor.execute("create table t1(id integer, s varchar(20))")
self.cursor.execute("insert into t1 values (?,?)", 1, 'test')
row = self.cursor.execute("""
select x.id
from (
select id
from t1
where s = ?
and id between ? and ?
) x
""", 'test', 1, 10).fetchone()
self.assertNotEqual(row, None)
self.assertEqual(row[0], 1)
def _exec(self):
self.cursor.execute(self.sql)
def test_close_cnxn(self):
"""Make sure using a Cursor after closing its connection doesn't crash."""
self.cursor.execute("create table t1(id integer, s varchar(20))")
self.cursor.execute("insert into t1 values (?,?)", 1, 'test')
self.cursor.execute("select * from t1")
self.cnxn.close()
# Now that the connection is closed, we expect an exception. (If the code attempts to use
# the HSTMT, we'll get an access violation instead.)
self.sql = "select * from t1"
self.assertRaises(pyodbc.ProgrammingError, self._exec)
def test_empty_string(self):
self.cursor.execute("create table t1(s varchar(20))")
self.cursor.execute("insert into t1 values(?)", "")
def test_fixed_str(self):
value = "testing"
self.cursor.execute("create table t1(s char(7))")
self.cursor.execute("insert into t1 values(?)", "testing")
v = self.cursor.execute("select * from t1").fetchone()[0]
self.assertEqual(type(v), str)
self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL
self.assertEqual(v, value)
def test_negative_row_index(self):
self.cursor.execute("create table t1(s varchar(20))")
self.cursor.execute("insert into t1 values(?)", "1")
row = self.cursor.execute("select * from t1").fetchone()
self.assertEquals(row[0], "1")
self.assertEquals(row[-1], "1")
def test_version(self):
self.assertEquals(3, len(pyodbc.version.split('.'))) # 1.3.1 etc.
#
# date, time, datetime
#
def test_datetime(self):
value = datetime(2007, 1, 15, 3, 4, 5)
self.cursor.execute("create table t1(dt datetime)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select dt from t1").fetchone()[0]
self.assertEquals(value, result)
def test_date(self):
value = date(2001, 1, 1)
self.cursor.execute("create table t1(dt date)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select dt from t1").fetchone()[0]
self.assertEquals(type(result), type(value))
self.assertEquals(result, value)
#
# ints and floats
#
def test_int(self):
value = 1234
self.cursor.execute("create table t1(n int)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select n from t1").fetchone()[0]
self.assertEquals(result, value)
def test_negative_int(self):
value = -1
self.cursor.execute("create table t1(n int)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select n from t1").fetchone()[0]
self.assertEquals(result, value)
def test_bigint(self):
# This fails on 64-bit Fedora with 5.1.
# Should return 0x0123456789
# Does return 0x0000000000
#
# Top 4 bytes are returned as 0x00 00 00 00. If the input is high enough, they are returned as 0xFF FF FF FF.
input = 0x123456789
self.cursor.execute("create table t1(d bigint)")
self.cursor.execute("insert into t1 values (?)", input)
result = self.cursor.execute("select d from t1").fetchone()[0]
self.assertEqual(result, input)
def test_float(self):
value = 1234.5
self.cursor.execute("create table t1(n float)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select n from t1").fetchone()[0]
self.assertEquals(result, value)
def test_negative_float(self):
value = -200
self.cursor.execute("create table t1(n float)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select n from t1").fetchone()[0]
self.assertEqual(value, result)
    def test_date_today(self):
value = date.today()
self.cursor.execute("create table t1(d date)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select d from t1").fetchone()[0]
self.assertEquals(value, result)
def test_time(self):
value = datetime.now().time()
# We aren't yet writing values using the new extended time type so the value written to the database is only
# down to the second.
value = value.replace(microsecond=0)
self.cursor.execute("create table t1(t time)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select t from t1").fetchone()[0]
self.assertEquals(value, result)
#
# misc
#
def test_rowcount_delete(self):
self.assertEquals(self.cursor.rowcount, -1)
self.cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
self.cursor.execute("insert into t1 values (?)", i)
self.cursor.execute("delete from t1")
self.assertEquals(self.cursor.rowcount, count)
def test_rowcount_nodata(self):
"""
        This represents a different code path than a delete that actually deleted something.
        The return value is SQL_NO_DATA, and the code after it was causing an error. We could use SQL_NO_DATA to step
        over the code that errors out and fall through to the same SQLRowCount code, or we could hard-code a zero
        return value.
"""
self.cursor.execute("create table t1(i int)")
# This is a different code path internally.
self.cursor.execute("delete from t1")
self.assertEquals(self.cursor.rowcount, 0)
def test_rowcount_select(self):
"""
Ensure Cursor.rowcount is set properly after a select statement.
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount. Databases can return the actual rowcount
or they can return -1 if it would help performance. MySQL seems to always return the correct rowcount.
"""
self.cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
self.cursor.execute("insert into t1 values (?)", i)
self.cursor.execute("select * from t1")
self.assertEquals(self.cursor.rowcount, count)
rows = self.cursor.fetchall()
self.assertEquals(len(rows), count)
self.assertEquals(self.cursor.rowcount, count)
def test_rowcount_reset(self):
"Ensure rowcount is reset to -1"
# The Python DB API says that rowcount should be set to -1 and most ODBC drivers let us know there are no
# records. MySQL always returns 0, however. Without parsing the SQL (which we are not going to do), I'm not
# sure how we can tell the difference and set the value to -1. For now, I'll have this test check for 0.
self.cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
self.cursor.execute("insert into t1 values (?)", i)
self.assertEquals(self.cursor.rowcount, 1)
self.cursor.execute("create table t2(i int)")
self.assertEquals(self.cursor.rowcount, 0)
def test_lower_case(self):
"Ensure pyodbc.lowercase forces returned column names to lowercase."
# Has to be set before creating the cursor, so we must recreate self.cursor.
pyodbc.lowercase = True
self.cursor = self.cnxn.cursor()
self.cursor.execute("create table t1(Abc int, dEf int)")
self.cursor.execute("select * from t1")
names = [ t[0] for t in self.cursor.description ]
names.sort()
self.assertEquals(names, [ "abc", "def" ])
# Put it back so other tests don't fail.
pyodbc.lowercase = False
def test_row_description(self):
"""
Ensure Cursor.description is accessible as Row.cursor_description.
"""
self.cursor = self.cnxn.cursor()
self.cursor.execute("create table t1(a int, b char(3))")
self.cnxn.commit()
self.cursor.execute("insert into t1 values(1, 'abc')")
row = self.cursor.execute("select * from t1").fetchone()
self.assertEquals(self.cursor.description, row.cursor_description)
def test_executemany(self):
self.cursor.execute("create table t1(a int, b varchar(10))")
params = [ (i, str(i)) for i in range(1, 6) ]
self.cursor.executemany("insert into t1(a, b) values (?,?)", params)
count = self.cursor.execute("select count(*) from t1").fetchone()[0]
self.assertEqual(count, len(params))
self.cursor.execute("select a, b from t1 order by a")
rows = self.cursor.fetchall()
self.assertEqual(count, len(rows))
for param, row in zip(params, rows):
self.assertEqual(param[0], row[0])
self.assertEqual(param[1], row[1])
def test_executemany_one(self):
"Pass executemany a single sequence"
self.cursor.execute("create table t1(a int, b varchar(10))")
params = [ (1, "test") ]
self.cursor.executemany("insert into t1(a, b) values (?,?)", params)
count = self.cursor.execute("select count(*) from t1").fetchone()[0]
self.assertEqual(count, len(params))
self.cursor.execute("select a, b from t1 order by a")
rows = self.cursor.fetchall()
self.assertEqual(count, len(rows))
for param, row in zip(params, rows):
self.assertEqual(param[0], row[0])
self.assertEqual(param[1], row[1])
# REVIEW: The following fails. Research.
# def test_executemany_failure(self):
# """
# Ensure that an exception is raised if one query in an executemany fails.
# """
# self.cursor.execute("create table t1(a int, b varchar(10))")
#
# params = [ (1, 'good'),
# ('error', 'not an int'),
# (3, 'good') ]
#
# self.failUnlessRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params)
def test_row_slicing(self):
        self.cursor.execute("create table t1(a int, b int, c int, d int)")
self.cursor.execute("insert into t1 values(1,2,3,4)")
row = self.cursor.execute("select * from t1").fetchone()
result = row[:]
self.failUnless(result is row)
result = row[:-1]
self.assertEqual(result, (1,2,3))
result = row[0:4]
self.failUnless(result is row)
def test_row_repr(self):
        self.cursor.execute("create table t1(a int, b int, c int, d int)")
self.cursor.execute("insert into t1 values(1,2,3,4)")
row = self.cursor.execute("select * from t1").fetchone()
result = str(row)
self.assertEqual(result, "(1, 2, 3, 4)")
result = str(row[:-1])
self.assertEqual(result, "(1, 2, 3)")
result = str(row[:1])
self.assertEqual(result, "(1,)")
def test_autocommit(self):
self.assertEqual(self.cnxn.autocommit, False)
othercnxn = pyodbc.connect(self.connection_string, autocommit=True)
self.assertEqual(othercnxn.autocommit, True)
othercnxn.autocommit = False
self.assertEqual(othercnxn.autocommit, False)
def main():
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)")
parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items")
parser.add_option("-t", "--test", help="Run only the named test")
(options, args) = parser.parse_args()
if len(args) > 1:
parser.error('Only one argument is allowed. Do you need quotes around the connection string?')
if not args:
filename = basename(sys.argv[0])
assert filename.endswith('.py')
connection_string = load_setup_connection_string(filename[:-3])
if not connection_string:
parser.print_help()
raise SystemExit()
else:
connection_string = args[0]
cnxn = pyodbc.connect(connection_string)
print_library_info(cnxn)
cnxn.close()
suite = load_tests(MySqlTestCase, options.test, connection_string)
testRunner = unittest.TextTestRunner(verbosity=options.verbose)
result = testRunner.run(suite)
if __name__ == '__main__':
# Add the build directory to the path so we're testing the latest build, not the installed version.
add_to_path()
import pyodbc
main()
|
kevthehermit/YaraManager
|
refs/heads/master
|
rule_manager/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
EpicCM/SPH-D700-Kernel
|
refs/heads/SPH-D700
|
external/webkit/WebKitTools/Scripts/webkitpy/style/checker.py
|
3
|
# Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Front end of some style-checker modules."""
import codecs
import getopt
import os.path
import sys
from .. style_references import parse_patch
from error_handlers import DefaultStyleErrorHandler
from error_handlers import PatchStyleErrorHandler
from filter import validate_filter_rules
from filter import FilterConfiguration
from processors.common import check_no_carriage_return
from processors.common import categories as CommonCategories
from processors.cpp import CppProcessor
from processors.text import TextProcessor
# These defaults are used by check-webkit-style.
WEBKIT_DEFAULT_VERBOSITY = 1
WEBKIT_DEFAULT_OUTPUT_FORMAT = 'emacs'
# FIXME: For style categories we will never want to have, remove them.
# For categories for which we want to have similar functionality,
# modify the implementation and enable them.
#
# Throughout this module, we use "filter rule" rather than "filter"
# for an individual boolean filter flag like "+foo". This allows us to
# reserve "filter" for what one gets by collectively applying all of
# the filter rules.
#
# The _WEBKIT_FILTER_RULES are prepended to any user-specified filter
# rules. Since by default all errors are on, only include rules that
# begin with a - sign.
WEBKIT_DEFAULT_FILTER_RULES = [
'-build/endif_comment',
'-build/include_what_you_use', # <string> for std::string
'-build/storage_class', # const static
'-legal/copyright',
'-readability/multiline_comment',
'-readability/braces', # int foo() {};
'-readability/fn_size',
'-readability/casting',
'-readability/function',
'-runtime/arrays', # variable length array
'-runtime/casting',
'-runtime/sizeof',
'-runtime/explicit', # explicit
'-runtime/virtual', # virtual dtor
'-runtime/printf',
'-runtime/threadsafe_fn',
'-runtime/rtti',
'-whitespace/blank_line',
'-whitespace/end_of_line',
'-whitespace/labels',
]
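# For example, with the user filter "-whitespace,+whitespace/braces" the
# category "whitespace/tab" is filtered out while "whitespace/braces" is
# still checked, since later rules take precedence over earlier ones.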
# FIXME: Change the second value of each tuple from a tuple to a list,
# and alter the filter code so it accepts lists instead. (The
# filter code will need to convert incoming values from a list
# to a tuple prior to caching). This will make this
# configuration setting a bit simpler since tuples have an
# unusual syntax case.
#
# The path-specific filter rules.
#
# This list is order sensitive. Only the first path substring match
# is used. See the FilterConfiguration documentation in filter.py
# for more information on this list.
_PATH_RULES_SPECIFIER = [
# Files in these directories are consumers of the WebKit
# API and therefore do not follow the same header including
# discipline as WebCore.
(["WebKitTools/WebKitAPITest/",
"WebKit/qt/QGVLauncher/"],
("-build/include",
"-readability/streams")),
([# The GTK+ APIs use GTK+ naming style, which includes
# lower-cased, underscore-separated values.
"WebKit/gtk/webkit/",
# There is no clean way to avoid "yy_*" names used by flex.
"WebCore/css/CSSParser.cpp",
# There is no clean way to avoid "xxx_data" methods inside
# Qt's autotests since they are called automatically by the
# QtTest module.
"WebKit/qt/tests/",
"JavaScriptCore/qt/tests"],
("-readability/naming",)),
# These are test file patterns.
(["_test.cpp",
"_unittest.cpp",
"_regtest.cpp"],
("-readability/streams", # Many unit tests use cout.
"-runtime/rtti")),
]
# Some files should be skipped when checking style. For example,
# WebKit maintains some files in Mozilla style on purpose to ease
# future merges.
#
# Include a warning for skipped files that are less obvious.
SKIPPED_FILES_WITH_WARNING = [
# The Qt API and tests do not follow WebKit style.
# They follow Qt style. :)
"gtk2drawing.c", # WebCore/platform/gtk/gtk2drawing.c
"gtk2drawing.h", # WebCore/platform/gtk/gtk2drawing.h
"JavaScriptCore/qt/api/",
"WebKit/gtk/tests/",
"WebKit/qt/Api/",
"WebKit/qt/tests/",
]
# Don't include a warning for skipped files that are more common
# and more obvious.
SKIPPED_FILES_WITHOUT_WARNING = [
"LayoutTests/"
]
# The maximum number of errors to report per file, per category.
# If a category is not a key, then it has no maximum.
MAX_REPORTS_PER_CATEGORY = {
"whitespace/carriage_return": 1
}
def style_categories():
"""Return the set of all categories used by check-webkit-style."""
# Take the union across all processors.
return CommonCategories.union(CppProcessor.categories)
def webkit_argument_defaults():
"""Return the DefaultArguments instance for use by check-webkit-style."""
return ArgumentDefaults(WEBKIT_DEFAULT_OUTPUT_FORMAT,
WEBKIT_DEFAULT_VERBOSITY,
WEBKIT_DEFAULT_FILTER_RULES)
def _create_usage(defaults):
"""Return the usage string to display for command help.
Args:
defaults: An ArgumentDefaults instance.
"""
usage = """
Syntax: %(program_name)s [--verbose=#] [--git-commit=<SingleCommit>] [--output=vs7]
[--filter=-x,+y,...] [file] ...
The style guidelines this tries to follow are here:
http://webkit.org/coding/coding-style.html
Every style error is given a confidence score from 1-5, with 5 meaning
we are certain of the problem, and 1 meaning it could be a legitimate
construct. This can miss some errors and does not substitute for
code review.
To prevent specific lines from being linted, add a '// NOLINT' comment to the
end of the line.
Linted extensions are .cpp, .c and .h. Other file types are ignored.
The file parameter is optional and accepts multiple files. Leaving
out the file parameter applies the check to all files considered changed
by your source control management system.
Flags:
verbose=#
A number 1-5 that restricts output to errors with a confidence
score at or above this value. In particular, the value 1 displays
all errors. The default is %(default_verbosity)s.
git-commit=<SingleCommit>
Checks the style of everything from the given commit to the local tree.
output=vs7
The output format, which may be one of
emacs : to ease emacs parsing
vs7 : compatible with Visual Studio
Defaults to "%(default_output_format)s". Other formats are unsupported.
filter=-x,+y,...
A comma-separated list of boolean filter rules used to filter
which categories of style guidelines to check. The script checks
a category if the category passes the filter rules, as follows.
Any webkit category starts out passing. All filter rules are then
evaluated left to right, with later rules taking precedence. For
example, the rule "+foo" passes any category that starts with "foo",
and "-foo" fails any such category. The filter input "-whitespace,
+whitespace/braces" fails the category "whitespace/tab" and passes
"whitespace/braces".
Examples: --filter=-whitespace,+whitespace/braces
--filter=-whitespace,-runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
Category names appear in error messages in brackets, for example
[whitespace/indent]. To see a list of all categories available to
%(program_name)s, along with which are enabled by default, pass
the empty filter as follows:
--filter=
""" % {'program_name': os.path.basename(sys.argv[0]),
'default_verbosity': defaults.verbosity,
'default_output_format': defaults.output_format}
return usage
# FIXME: Eliminate support for "extra_flag_values".
#
# FIXME: Remove everything from ProcessorOptions except for the
# information that can be passed via the command line, and
# rename to something like CheckWebKitStyleOptions. This
# includes, but is not limited to, removing the
# max_reports_per_error attribute and the is_reportable()
# method. See also the FIXME below to create a new class
# called something like CheckerConfiguration.
#
# This class should not have knowledge of the flag key names.
class ProcessorOptions(object):
"""A container to store options passed via the command line.
Attributes:
extra_flag_values: A string-string dictionary of all flag key-value
pairs that are not otherwise represented by this
class. The default is the empty dictionary.
filter_configuration: A FilterConfiguration instance. The default
is the "empty" filter configuration, which
means that all errors should be checked.
git_commit: A string representing the git commit to check.
The default is None.
max_reports_per_error: The maximum number of errors to report
per file, per category.
output_format: A string that is the output format. The supported
output formats are "emacs" which emacs can parse
and "vs7" which Microsoft Visual Studio 7 can parse.
verbosity: An integer between 1-5 inclusive that restricts output
to errors with a confidence score at or above this value.
The default is 1, which reports all errors.
"""
def __init__(self,
extra_flag_values=None,
filter_configuration = None,
git_commit=None,
max_reports_per_category=None,
output_format="emacs",
verbosity=1):
if extra_flag_values is None:
extra_flag_values = {}
if filter_configuration is None:
filter_configuration = FilterConfiguration()
if max_reports_per_category is None:
max_reports_per_category = {}
if output_format not in ("emacs", "vs7"):
raise ValueError('Invalid "output_format" parameter: '
'value must be "emacs" or "vs7". '
'Value given: "%s".' % output_format)
if (verbosity < 1) or (verbosity > 5):
raise ValueError('Invalid "verbosity" parameter: '
"value must be an integer between 1-5 inclusive. "
'Value given: "%s".' % verbosity)
self.extra_flag_values = extra_flag_values
self.filter_configuration = filter_configuration
self.git_commit = git_commit
self.max_reports_per_category = max_reports_per_category
self.output_format = output_format
self.verbosity = verbosity
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this ProcessorOptions instance is equal to another."""
if self.extra_flag_values != other.extra_flag_values:
return False
if self.filter_configuration != other.filter_configuration:
return False
if self.git_commit != other.git_commit:
return False
if self.max_reports_per_category != other.max_reports_per_category:
return False
if self.output_format != other.output_format:
return False
if self.verbosity != other.verbosity:
return False
return True
# Useful for unit testing.
def __ne__(self, other):
# Python does not automatically deduce this from __eq__().
return not self.__eq__(other)
def is_reportable(self, category, confidence_in_error, path):
"""Return whether an error is reportable.
An error is reportable if the confidence in the error
is at least the current verbosity level, and if the current
filter says that the category should be checked for the
given path.
Args:
category: A string that is a style category.
confidence_in_error: An integer between 1 and 5, inclusive, that
represents the application's confidence in
the error. A higher number signifies greater
confidence.
path: The path of the file being checked
"""
if confidence_in_error < self.verbosity:
return False
return self.filter_configuration.should_check(category, path)
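    # A minimal usage sketch (values are illustrative):
    #
    #   options = ProcessorOptions(verbosity=3)
    #   options.is_reportable("whitespace/tab", 5, "WebCore/foo.cpp")  # True
    #   options.is_reportable("whitespace/tab", 1, "WebCore/foo.cpp")  # False
    #
    # The default FilterConfiguration checks every category, so only the
    # confidence/verbosity comparison matters in this sketch.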
# This class should not have knowledge of the flag key names.
class ArgumentDefaults(object):
"""A container to store default argument values.
Attributes:
output_format: A string that is the default output format.
verbosity: An integer that is the default verbosity level.
base_filter_rules: A list of strings that are boolean filter rules
to prepend to any user-specified rules.
"""
def __init__(self, default_output_format, default_verbosity,
default_base_filter_rules):
self.output_format = default_output_format
self.verbosity = default_verbosity
self.base_filter_rules = default_base_filter_rules
class ArgumentPrinter(object):
"""Supports the printing of check-webkit-style command arguments."""
def _flag_pair_to_string(self, flag_key, flag_value):
return '--%(key)s=%(val)s' % {'key': flag_key, 'val': flag_value }
def to_flag_string(self, options):
"""Return a flag string yielding the given ProcessorOptions instance.
This method orders the flag values alphabetically by the flag key.
Args:
options: A ProcessorOptions instance.
"""
flags = options.extra_flag_values.copy()
flags['output'] = options.output_format
flags['verbose'] = options.verbosity
# Only include the filter flag if user-provided rules are present.
user_rules = options.filter_configuration.user_rules
if user_rules:
flags['filter'] = ",".join(user_rules)
if options.git_commit:
flags['git-commit'] = options.git_commit
flag_string = ''
# Alphabetizing lets us unit test this method.
for key in sorted(flags.keys()):
flag_string += self._flag_pair_to_string(key, flags[key]) + ' '
return flag_string.strip()
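    # For example (a sketch): default options with verbosity 3 yield the
    # string '--output=emacs --verbose=3', with flags ordered alphabetically
    # by key.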
class ArgumentParser(object):
"""Supports the parsing of check-webkit-style command arguments.
Attributes:
defaults: An ArgumentDefaults instance.
create_usage: A function that accepts an ArgumentDefaults instance
and returns a string of usage instructions.
This defaults to the function used to generate the
usage string for check-webkit-style.
doc_print: A function that accepts a string parameter and that is
called to display help messages. This defaults to
sys.stderr.write().
"""
def __init__(self, argument_defaults, create_usage=None, doc_print=None):
if create_usage is None:
create_usage = _create_usage
if doc_print is None:
doc_print = sys.stderr.write
self.defaults = argument_defaults
self.create_usage = create_usage
self.doc_print = doc_print
def _exit_with_usage(self, error_message=''):
"""Exit and print a usage string with an optional error message.
Args:
error_message: A string that is an error message to print.
"""
usage = self.create_usage(self.defaults)
self.doc_print(usage)
if error_message:
sys.exit('\nFATAL ERROR: ' + error_message)
else:
sys.exit(1)
def _exit_with_categories(self):
"""Exit and print the style categories and default filter rules."""
self.doc_print('\nAll categories:\n')
categories = style_categories()
for category in sorted(categories):
self.doc_print(' ' + category + '\n')
self.doc_print('\nDefault filter rules**:\n')
for filter_rule in sorted(self.defaults.base_filter_rules):
self.doc_print(' ' + filter_rule + '\n')
self.doc_print('\n**The command always evaluates the above rules, '
                       'before applying any --filter flag.\n\n')
sys.exit(0)
def _parse_filter_flag(self, flag_value):
"""Parse the --filter flag, and return a list of filter rules.
Args:
flag_value: A string of comma-separated filter rules, for
example "-whitespace,+whitespace/indent".
"""
filters = []
for uncleaned_filter in flag_value.split(','):
filter = uncleaned_filter.strip()
if not filter:
continue
filters.append(filter)
return filters
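    # For example, _parse_filter_flag("-whitespace, +whitespace/braces")
    # returns ['-whitespace', '+whitespace/braces']; surrounding whitespace
    # is stripped and empty entries are dropped.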
def parse(self, args, extra_flags=None):
"""Parse the command line arguments to check-webkit-style.
Args:
args: A list of command-line arguments as returned by sys.argv[1:].
extra_flags: A list of flags whose values we want to extract, but
are not supported by the ProcessorOptions class.
An example flag "new_flag=". This defaults to the
empty list.
Returns:
A tuple of (filenames, options)
filenames: The list of filenames to check.
options: A ProcessorOptions instance.
"""
if extra_flags is None:
extra_flags = []
output_format = self.defaults.output_format
verbosity = self.defaults.verbosity
base_rules = self.defaults.base_filter_rules
# The flags already supported by the ProcessorOptions class.
flags = ['help', 'output=', 'verbose=', 'filter=', 'git-commit=']
for extra_flag in extra_flags:
if extra_flag in flags:
                raise ValueError('Flag \'%(extra_flag)s\' is duplicated '
'or already supported.' %
{'extra_flag': extra_flag})
flags.append(extra_flag)
try:
(opts, filenames) = getopt.getopt(args, '', flags)
except getopt.GetoptError:
# FIXME: Settle on an error handling approach: come up
# with a consistent guideline as to when and whether
# a ValueError should be raised versus calling
# sys.exit when needing to interrupt execution.
self._exit_with_usage('Invalid arguments.')
extra_flag_values = {}
git_commit = None
user_rules = []
for (opt, val) in opts:
if opt == '--help':
self._exit_with_usage()
elif opt == '--output':
output_format = val
elif opt == '--verbose':
verbosity = val
elif opt == '--git-commit':
git_commit = val
elif opt == '--filter':
if not val:
self._exit_with_categories()
# Prepend the defaults.
user_rules = self._parse_filter_flag(val)
else:
extra_flag_values[opt] = val
# Check validity of resulting values.
if filenames and (git_commit != None):
self._exit_with_usage('It is not possible to check files and a '
'specific commit at the same time.')
if output_format not in ('emacs', 'vs7'):
raise ValueError('Invalid --output value "%s": The only '
'allowed output formats are emacs and vs7.' %
output_format)
all_categories = style_categories()
validate_filter_rules(user_rules, all_categories)
verbosity = int(verbosity)
if (verbosity < 1) or (verbosity > 5):
raise ValueError('Invalid --verbose value %s: value must '
'be between 1-5.' % verbosity)
filter_configuration = FilterConfiguration(base_rules=base_rules,
path_specific=_PATH_RULES_SPECIFIER,
user_rules=user_rules)
options = ProcessorOptions(extra_flag_values=extra_flag_values,
filter_configuration=filter_configuration,
git_commit=git_commit,
max_reports_per_category=MAX_REPORTS_PER_CATEGORY,
output_format=output_format,
verbosity=verbosity)
return (filenames, options)
# Enum-like idiom
class FileType:
NONE = 1
# Alphabetize remaining types
CPP = 2
TEXT = 3
class ProcessorDispatcher(object):
"""Supports determining whether and how to check style, based on path."""
cpp_file_extensions = (
'c',
'cpp',
'h',
)
text_file_extensions = (
'css',
'html',
'idl',
'js',
'mm',
'php',
'pm',
'py',
'txt',
)
def _file_extension(self, file_path):
"""Return the file extension without the leading dot."""
return os.path.splitext(file_path)[1].lstrip(".")
def should_skip_with_warning(self, file_path):
"""Return whether the given file should be skipped with a warning."""
for skipped_file in SKIPPED_FILES_WITH_WARNING:
if file_path.find(skipped_file) >= 0:
return True
return False
def should_skip_without_warning(self, file_path):
"""Return whether the given file should be skipped without a warning."""
for skipped_file in SKIPPED_FILES_WITHOUT_WARNING:
if file_path.find(skipped_file) >= 0:
return True
return False
def _file_type(self, file_path):
"""Return the file type corresponding to the given file."""
file_extension = self._file_extension(file_path)
if (file_extension in self.cpp_file_extensions) or (file_path == '-'):
# FIXME: Do something about the comment below and the issue it
# raises since cpp_style already relies on the extension.
#
# Treat stdin as C++. Since the extension is unknown when
# reading from stdin, cpp_style tests should not rely on
# the extension.
return FileType.CPP
elif ("ChangeLog" in file_path
or "WebKitTools/Scripts/" in file_path
or file_extension in self.text_file_extensions):
return FileType.TEXT
else:
return FileType.NONE
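    # For example, "WebCore/Foo.cpp" dispatches as CPP, "ChangeLog" or
    # anything under "WebKitTools/Scripts/" as TEXT, and an unrecognized
    # extension as NONE (no style check).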
def _create_processor(self, file_type, file_path, handle_style_error, verbosity):
"""Instantiate and return a style processor based on file type."""
if file_type == FileType.NONE:
processor = None
elif file_type == FileType.CPP:
file_extension = self._file_extension(file_path)
processor = CppProcessor(file_path, file_extension, handle_style_error, verbosity)
elif file_type == FileType.TEXT:
processor = TextProcessor(file_path, handle_style_error)
else:
raise ValueError('Invalid file type "%(file_type)s": the only valid file types '
"are %(NONE)s, %(CPP)s, and %(TEXT)s."
% {"file_type": file_type,
"NONE": FileType.NONE,
"CPP": FileType.CPP,
"TEXT": FileType.TEXT})
return processor
def dispatch_processor(self, file_path, handle_style_error, verbosity):
"""Instantiate and return a style processor based on file path."""
file_type = self._file_type(file_path)
processor = self._create_processor(file_type,
file_path,
handle_style_error,
verbosity)
return processor
# FIXME: When creating the new CheckWebKitStyleOptions class as
# described in a FIXME above, add a new class here called
# something like CheckerConfiguration. The class should contain
# attributes for options needed to process a file. This includes
# a subset of the CheckWebKitStyleOptions attributes, a
# FilterConfiguration attribute, an stderr_write attribute, a
# max_reports_per_category attribute, etc. It can also include
# the is_reportable() method. The StyleChecker should accept
# an instance of this class rather than a ProcessorOptions
# instance.
class StyleChecker(object):
"""Supports checking style in files and patches.
Attributes:
error_count: An integer that is the total number of reported
errors for the lifetime of this StyleChecker
instance.
options: A ProcessorOptions instance that controls the behavior
of style checking.
"""
def __init__(self, options, stderr_write=None):
"""Create a StyleChecker instance.
Args:
options: See options attribute.
stderr_write: A function that takes a string as a parameter
and that is called when a style error occurs.
Defaults to sys.stderr.write. This should be
used only for unit tests.
"""
if stderr_write is None:
stderr_write = sys.stderr.write
self._stderr_write = stderr_write
self.error_count = 0
self.options = options
def _increment_error_count(self):
"""Increment the total count of reported errors."""
self.error_count += 1
def _process_file(self, processor, file_path, handle_style_error):
"""Process the file using the given processor."""
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if file_path == '-':
file = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
else:
file = codecs.open(file_path, 'r', 'utf8', 'replace')
contents = file.read()
except IOError:
self._stderr_write("Skipping input '%s': Can't open for reading\n" % file_path)
return
lines = contents.split("\n")
for line_number in range(len(lines)):
# FIXME: We should probably use the SVN "eol-style" property
# or a white list to decide whether or not to do
# the carriage-return check. Originally, we did the
# check only if (os.linesep != '\r\n').
#
# FIXME: As a minor optimization, we can have
# check_no_carriage_return() return whether
# the line ends with "\r".
check_no_carriage_return(lines[line_number], line_number,
handle_style_error)
if lines[line_number].endswith("\r"):
lines[line_number] = lines[line_number].rstrip("\r")
processor.process(lines)
def check_file(self, file_path, handle_style_error=None, process_file=None):
"""Check style in the given file.
Args:
file_path: A string that is the path of the file to process.
handle_style_error: The function to call when a style error
occurs. This parameter is meant for internal
use within this class. Defaults to a
DefaultStyleErrorHandler instance.
process_file: The function to call to process the file. This
parameter should be used only for unit tests.
Defaults to the file processing method of this class.
"""
if handle_style_error is None:
handle_style_error = DefaultStyleErrorHandler(file_path,
self.options,
self._increment_error_count,
self._stderr_write)
if process_file is None:
process_file = self._process_file
dispatcher = ProcessorDispatcher()
if dispatcher.should_skip_without_warning(file_path):
return
if dispatcher.should_skip_with_warning(file_path):
self._stderr_write('Ignoring "%s": this file is exempt from the '
"style guide.\n" % file_path)
return
verbosity = self.options.verbosity
processor = dispatcher.dispatch_processor(file_path,
handle_style_error,
verbosity)
if processor is None:
return
process_file(processor, file_path, handle_style_error)
def check_patch(self, patch_string):
"""Check style in the given patch.
Args:
patch_string: A string that is a patch string.
"""
patch_files = parse_patch(patch_string)
for file_path, diff in patch_files.iteritems():
style_error_handler = PatchStyleErrorHandler(diff,
file_path,
self.options,
self._increment_error_count,
self._stderr_write)
self.check_file(file_path, style_error_handler)
|
jaseg/python-prompt-toolkit
|
refs/heads/master
|
prompt_toolkit/application.py
|
2
|
from __future__ import unicode_literals
from .buffer import Buffer, AcceptAction
from .clipboard import Clipboard, InMemoryClipboard
from .filters import CLIFilter, Never, to_cli_filter
from .key_binding.bindings.basic import load_basic_bindings
from .key_binding.bindings.emacs import load_emacs_bindings
from .key_binding.registry import Registry
from .layout import Window
from .layout.controls import BufferControl
from .styles import DefaultStyle
from .utils import Callback
from .enums import DEFAULT_BUFFER
from .layout.containers import Layout
__all__ = (
'AbortAction',
'Application',
)
class AbortAction:
"""
Actions to take on an Exit or Abort exception.
"""
IGNORE = 'ignore'
RETRY = 'retry'
RAISE_EXCEPTION = 'raise-exception'
RETURN_NONE = 'return-none'
_all = (IGNORE, RETRY, RAISE_EXCEPTION, RETURN_NONE)
class Application(object):
"""
Application class to be passed to a `CommandLineInterface`.
This contains all customizable logic that is not I/O dependent.
(So, what is independent of event loops, input and output.)
    This way, such an `Application` can run easily on several
    `CommandLineInterface`s, each with a different I/O backend: one that
    runs, for instance, over telnet, SSH or any other transport.
:param layout: A :class:`Layout` instance.
:param buffer: A :class:`Buffer` instance for the default buffer.
:param initial_focussed_buffer: Name of the buffer that is focussed during start-up.
:param key_bindings_registry: :class:`Registry` instance for the key bindings.
:param clipboard: Clipboard to use.
:param on_abort: What to do when Control-C is pressed.
:param on_exit: What to do when Control-D is pressed.
:param use_alternate_screen: When True, run the application on the alternate screen buffer.
:param get_title: Callable that returns the current title to be displayed in the terminal.
Filters:
:param paste_mode: Filter.
:param ignore_case: Filter.
Callbacks:
:param on_input_timeout: Called when there is no input for x seconds.
(Fired when any eventloop.onInputTimeout is fired.)
:param on_start: Called when reading input starts.
:param on_stop: Called when reading input ends.
:param on_reset: Called during reset.
:param on_buffer_changed: Called when another buffer gets the focus.
:param on_initialize: Called after the `CommandLineInterface` initializes.
"""
def __init__(self, layout=None, buffer=None, buffers=None,
initial_focussed_buffer=DEFAULT_BUFFER,
style=None, get_style=None,
key_bindings_registry=None, clipboard=None,
on_abort=AbortAction.RETRY, on_exit=AbortAction.IGNORE,
use_alternate_screen=False,
get_title=None,
paste_mode=Never(), ignore_case=Never(),
on_input_timeout=None, on_start=None, on_stop=None,
on_reset=None, on_initialize=None, on_buffer_changed=None):
paste_mode = to_cli_filter(paste_mode)
ignore_case = to_cli_filter(ignore_case)
assert layout is None or isinstance(layout, Layout)
assert buffer is None or isinstance(buffer, Buffer)
assert buffers is None or isinstance(buffers, dict)
assert key_bindings_registry is None or isinstance(key_bindings_registry, Registry)
assert clipboard is None or isinstance(clipboard, Clipboard)
assert on_abort in AbortAction._all
assert on_exit in AbortAction._all
assert isinstance(use_alternate_screen, bool)
assert get_title is None or callable(get_title)
assert isinstance(paste_mode, CLIFilter)
assert isinstance(ignore_case, CLIFilter)
assert on_start is None or isinstance(on_start, Callback)
assert on_stop is None or isinstance(on_stop, Callback)
assert on_reset is None or isinstance(on_reset, Callback)
assert on_buffer_changed is None or isinstance(on_buffer_changed, Callback)
assert on_initialize is None or isinstance(on_initialize, Callback)
assert not (style and get_style)
self.layout = layout or Window(BufferControl())
self.buffer = buffer or Buffer(accept_action=AcceptAction.RETURN_DOCUMENT)
self.buffers = buffers or {}
self.initial_focussed_buffer = initial_focussed_buffer
if style:
self.get_style = lambda: style
elif get_style:
self.get_style = get_style
else:
self.get_style = lambda: DefaultStyle
if key_bindings_registry is None:
key_bindings_registry = Registry()
load_basic_bindings(key_bindings_registry)
load_emacs_bindings(key_bindings_registry)
if get_title is None:
get_title = lambda: None
self.key_bindings_registry = key_bindings_registry
self.clipboard = clipboard or InMemoryClipboard()
self.on_abort = on_abort
self.on_exit = on_exit
self.use_alternate_screen = use_alternate_screen
self.get_title = get_title
self.paste_mode = paste_mode
self.ignore_case = ignore_case
self.on_input_timeout = on_input_timeout or Callback()
self.on_start = on_start or Callback()
self.on_stop = on_stop or Callback()
self.on_reset = on_reset or Callback()
self.on_initialize = on_initialize or Callback()
self.on_buffer_changed = on_buffer_changed or Callback()
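# A minimal usage sketch (assuming only the defaults above):
#
#   app = Application()  # default window, buffer and Emacs key bindings
#
# The instance is then handed to a `CommandLineInterface`, which supplies
# the event loop and the actual input/output.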
|
WangDequan/fast-rcnn
|
refs/heads/master
|
lib/datasets/pascal_voc.py
|
45
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import datasets
import datasets.pascal_voc
import os
import datasets.imdb
import xml.dom.minidom as minidom
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import cPickle
import subprocess
class pascal_voc(datasets.imdb):
def __init__(self, image_set, year, devkit_path=None):
datasets.imdb.__init__(self, 'voc_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.selective_search_roidb
# PASCAL specific config options
self.config = {'cleanup' : True,
'use_salt' : True,
'top_k' : 2000}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(self.cache_path, '..',
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in xrange(raw_data.shape[0]):
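            # Note: the MATLAB selective-search boxes are stored 1-based as
            # (y1, x1, y2, x2); the column reorder below yields (x1, y1, x2, y2)
            # and the -1 converts to 0-based pixel indices.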
box_list.append(raw_data[i][:, (1, 0, 3, 2)] - 1)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def selective_search_IJCV_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
'{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.
format(self.name, self.config['top_k']))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)
roidb = datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def _load_selective_search_IJCV_roidb(self, gt_roidb):
IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
'selective_search_IJCV_data',
'voc_' + self._year))
assert os.path.exists(IJCV_path), \
'Selective search IJCV data not found at: {}'.format(IJCV_path)
top_k = self.config['top_k']
box_list = []
for i in xrange(self.num_images):
filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
raw_data = sio.loadmat(filename)
box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
# print 'Loading: {}'.format(filename)
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
# Make pixel indexes 0-based
x1 = float(get_data_from_tag(obj, 'xmin')) - 1
y1 = float(get_data_from_tag(obj, 'ymin')) - 1
x2 = float(get_data_from_tag(obj, 'xmax')) - 1
y2 = float(get_data_from_tag(obj, 'ymax')) - 1
cls = self._class_to_ind[
str(get_data_from_tag(obj, "name")).lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False}
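    # Illustrative sketch (hypothetical values): for an image with two objects
    # of classes 3 and 7, the dict above would hold boxes of shape (2, 4),
    # gt_classes == [3, 7], and a sparse (2, num_classes) gt_overlaps matrix
    # with 1.0 in columns 3 and 7.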
def _write_voc_results_file(self, all_boxes):
use_salt = self.config['use_salt']
comp_id = 'comp4'
if use_salt:
comp_id += '-{}'.format(os.getpid())
# VOCdevkit/results/VOC2007/Main/comp4-44503_det_test_aeroplane.txt
path = os.path.join(self._devkit_path, 'results', 'VOC' + self._year,
'Main', comp_id + '_')
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = path + 'det_' + self._image_set + '_' + cls + '.txt'
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
                if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in xrange(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
return comp_id
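    # Each line written above has the form (values hypothetical):
    #   <index> <score> <x1> <y1> <x2> <y2>
    #   000005 0.912 12.0 34.0 56.0 78.0
    # with coordinates converted back to the 1-based VOC convention.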
def _do_matlab_eval(self, comp_id, output_dir='output'):
rm_results = self.config['cleanup']
path = os.path.join(os.path.dirname(__file__),
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(datasets.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\',{:d}); quit;"' \
.format(self._devkit_path, comp_id,
self._image_set, output_dir, int(rm_results))
print('Running:\n{}'.format(cmd))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
comp_id = self._write_voc_results_file(all_boxes)
self._do_matlab_eval(comp_id, output_dir)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
d = datasets.pascal_voc('trainval', '2007')
res = d.roidb
from IPython import embed; embed()
|
jasonwzhy/django
|
refs/heads/master
|
tests/generic_relations_regress/models.py
|
269
|
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.deletion import ProtectedError
from django.utils.encoding import python_2_unicode_compatible
__all__ = ('Link', 'Place', 'Restaurant', 'Person', 'Address',
'CharLink', 'TextLink', 'OddRelation1', 'OddRelation2',
'Contact', 'Organization', 'Note', 'Company')
@python_2_unicode_compatible
class Link(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
def __str__(self):
return "Link to %s id=%s" % (self.content_type, self.object_id)
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=100)
links = GenericRelation(Link)
def __str__(self):
return "Place: %s" % self.name
@python_2_unicode_compatible
class Restaurant(Place):
def __str__(self):
return "Restaurant: %s" % self.name
@python_2_unicode_compatible
class Address(models.Model):
street = models.CharField(max_length=80)
city = models.CharField(max_length=50)
state = models.CharField(max_length=2)
zipcode = models.CharField(max_length=5)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
def __str__(self):
return '%s %s, %s %s' % (self.street, self.city, self.state, self.zipcode)
@python_2_unicode_compatible
class Person(models.Model):
account = models.IntegerField(primary_key=True)
name = models.CharField(max_length=128)
addresses = GenericRelation(Address)
def __str__(self):
return self.name
class CharLink(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.CharField(max_length=100)
content_object = GenericForeignKey()
class TextLink(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.TextField()
content_object = GenericForeignKey()
class OddRelation1(models.Model):
name = models.CharField(max_length=100)
clinks = GenericRelation(CharLink)
class OddRelation2(models.Model):
name = models.CharField(max_length=100)
tlinks = GenericRelation(TextLink)
# models for test_q_object_or:
class Note(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
note = models.TextField()
class Contact(models.Model):
notes = GenericRelation(Note)
class Organization(models.Model):
name = models.CharField(max_length=255)
contacts = models.ManyToManyField(Contact, related_name='organizations')
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=100)
links = GenericRelation(Link)
def __str__(self):
return "Company: %s" % self.name
# For testing #13085 fix, we also use Note model defined above
class Developer(models.Model):
name = models.CharField(max_length=15)
@python_2_unicode_compatible
class Team(models.Model):
name = models.CharField(max_length=15)
members = models.ManyToManyField(Developer)
def __str__(self):
return "%s team" % self.name
def __len__(self):
return self.members.count()
class Guild(models.Model):
name = models.CharField(max_length=15)
members = models.ManyToManyField(Developer)
def __nonzero__(self):
return self.members.count()
class Tag(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='g_r_r_tags')
object_id = models.CharField(max_length=15)
content_object = GenericForeignKey()
label = models.CharField(max_length=15)
class Board(models.Model):
name = models.CharField(primary_key=True, max_length=15)
class SpecialGenericRelation(GenericRelation):
def __init__(self, *args, **kwargs):
super(SpecialGenericRelation, self).__init__(*args, **kwargs)
self.editable = True
self.save_form_data_calls = 0
def save_form_data(self, *args, **kwargs):
self.save_form_data_calls += 1
class HasLinks(models.Model):
links = SpecialGenericRelation(Link)
class Meta:
abstract = True
class HasLinkThing(HasLinks):
pass
class A(models.Model):
flag = models.NullBooleanField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class B(models.Model):
a = GenericRelation(A)
class Meta:
ordering = ('id',)
class C(models.Model):
b = models.ForeignKey(B, models.CASCADE)
class Meta:
ordering = ('id',)
class D(models.Model):
b = models.ForeignKey(B, models.SET_NULL, null=True)
class Meta:
ordering = ('id',)
# Ticket #22998
class Node(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content = GenericForeignKey('content_type', 'object_id')
class Content(models.Model):
nodes = GenericRelation(Node)
related_obj = models.ForeignKey('Related', models.CASCADE)
class Related(models.Model):
pass
def prevent_deletes(sender, instance, **kwargs):
raise ProtectedError("Not allowed to delete.", [instance])
models.signals.pre_delete.connect(prevent_deletes, sender=Node)
|
MikeAmy/django
|
refs/heads/master
|
tests/migrations/test_add_many_to_many_field_initial/0002_initial.py
|
65
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("migrations", "0001_initial"),
]
operations = [
migrations.AddField(
model_name='task',
name='projects',
field=models.ManyToManyField(to='Project'),
),
]
|
eleonrk/SickRage
|
refs/heads/master
|
lib/future/backports/misc.py
|
36
|
"""
Miscellaneous function (re)definitions from the Py3.4+ standard library
for Python 2.6/2.7.
- math.ceil (for Python 2.7)
- collections.OrderedDict (for Python 2.6)
- collections.Counter (for Python 2.6)
- collections.ChainMap (for all versions prior to Python 3.3)
- itertools.count (for Python 2.6, with step parameter)
- subprocess.check_output (for Python 2.6)
- reprlib.recursive_repr (for Python 2.6+)
- functools.cmp_to_key (for Python 2.6)
"""
from __future__ import absolute_import
import subprocess
from math import ceil as oldceil
from collections import Mapping, MutableMapping
from operator import itemgetter as _itemgetter, eq as _eq
import sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from socket import getaddrinfo, SOCK_STREAM, error, socket
from future.utils import iteritems, itervalues, PY26, PY3
def ceil(x):
"""
Return the ceiling of x as an int.
This is the smallest integral value >= x.
"""
return int(oldceil(x))
########################################################################
### reprlib.recursive_repr decorator from Py3.4
########################################################################
from itertools import islice
if PY3:
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
else:
try:
from thread import get_ident
except ImportError:
from dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
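# Usage sketch for recursive_repr (assumes a self-referential container):
#
#     class Wrapper(object):
#         @recursive_repr()
#         def __repr__(self):
#             return 'Wrapper(%r)' % (self.item,)
#
#     w = Wrapper(); w.item = w
#     repr(w)    # -> "Wrapper('...')" instead of infinite recursion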
################################################################################
### OrderedDict
################################################################################
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(*args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if not args:
raise TypeError("descriptor '__init__' of 'OrderedDict' object "
"needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
last.next = root.prev = link
else:
first = root.next
link.prev = root
link.next = first
root.next = first.prev = link
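    # Example:
    #   od = OrderedDict.fromkeys('abc')
    #   od.move_to_end('a');             list(od) -> ['b', 'c', 'a']
    #   od.move_to_end('c', last=False); list(od) -> ['c', 'b', 'a']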
def __sizeof__(self):
sizeof = sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = MutableMapping.update
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
__ne__ = MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
@recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
return self.__class__, (), inst_dict or None, None, iter(self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
# {{{ http://code.activestate.com/recipes/576611/ (r11)
try:
from operator import itemgetter
from heapq import nlargest
except ImportError:
pass
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(*args, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
if not args:
raise TypeError("descriptor '__init__' of 'Counter' object "
"needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
super(Counter, self).__init__()
self.update(*args, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(*args, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
        # replace behavior would mix some of the original, untouched counts
        # with all of the other counts, producing a mishmash that doesn't
        # have a straightforward interpretation in most counting
        # contexts.  Instead, we implement straight addition.  Both the inputs
# and outputs are allowed to contain zero and negative counts.
if not args:
raise TypeError("descriptor 'update' of 'Counter' object "
"needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super(Counter, self).update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(*args, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if not args:
raise TypeError("descriptor 'subtract' of 'Counter' object "
"needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super(Counter, self).__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
        '''Subtract counts, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
def __pos__(self):
'Adds an empty counter, effectively stripping negative and zero counts'
return self + Counter()
def __neg__(self):
'''Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
'''
return Counter() - self
def _keep_positive(self):
'''Internal method to strip elements with a negative or zero count'''
nonpositive = [elem for elem, count in self.items() if not count > 0]
for elem in nonpositive:
del self[elem]
return self
def __iadd__(self, other):
'''Inplace add from another counter, keeping only positive counts.
>>> c = Counter('abbb')
>>> c += Counter('bcc')
>>> c
Counter({'b': 4, 'c': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] += count
return self._keep_positive()
def __isub__(self, other):
'''Inplace subtract counter, but keep only results with positive counts.
>>> c = Counter('abbbc')
>>> c -= Counter('bccd')
>>> c
Counter({'b': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] -= count
return self._keep_positive()
def __ior__(self, other):
'''Inplace union is the maximum of value from either counter.
>>> c = Counter('abbb')
>>> c |= Counter('bcc')
>>> c
Counter({'b': 3, 'c': 2, 'a': 1})
'''
for elem, other_count in other.items():
count = self[elem]
if other_count > count:
self[elem] = other_count
return self._keep_positive()
def __iand__(self, other):
'''Inplace intersection is the minimum of corresponding counts.
>>> c = Counter('abbb')
>>> c &= Counter('bcc')
>>> c
Counter({'b': 1})
'''
for elem, count in self.items():
other_count = other[elem]
if other_count < count:
self[elem] = other_count
return self._keep_positive()
def check_output(*popenargs, **kwargs):
"""
For Python 2.6 compatibility: see
http://stackoverflow.com/questions/4814970/
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
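# Usage sketch (assumes POSIX `echo`/`false` on PATH):
#
#     check_output(['echo', 'hello'])   # -> 'hello\n'
#     check_output(['false'])           # raises subprocess.CalledProcessError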
def count(start=0, step=1):
"""
    ``itertools.count`` in Python 2.6 doesn't accept a step parameter.
    This backport adds one, making it equivalent to ``itertools.count``
    in Python 2.7+.
"""
while True:
yield start
start += step
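# Example (islice is imported above):
#
#     list(islice(count(10, 2), 3))   # -> [10, 12, 14]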
########################################################################
### ChainMap (helper for configparser and string.Template)
### From the Py3.4 source code. See also:
### https://github.com/kkxue/Py2ChainMap/blob/master/py2chainmap.py
########################################################################
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
    The underlying mappings are stored in a list.  That list is public and can
    be accessed or updated using the *maps* attribute.  There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
# Py2 compatibility:
__nonzero__ = __bool__
@recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self, m=None): # like Django's Context.push()
'''
New ChainMap with a new map followed by all previous maps. If no
map is provided, an empty dict is used.
'''
if m is None:
m = {}
return self.__class__(m, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
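# Usage sketch:
#
#     cm = ChainMap({'a': 1}, {'a': 2, 'b': 3})
#     cm['a']        # -> 1  (first mapping wins)
#     cm['b']        # -> 3  (found in a later mapping)
#     cm['c'] = 4    # writes go to maps[0] only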
# Re-use the same sentinel as in the Python stdlib socket module:
from socket import _GLOBAL_DEFAULT_TIMEOUT
# Was: _GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Backport of 3-argument create_connection() for Py2.6.
Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
# Backport from Py2.7 for Py2.6:
def cmp_to_key(mycmp):
"""Convert a cmp= function into a key= function"""
class K(object):
__slots__ = ['obj']
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
def __hash__(self):
raise TypeError('hash not implemented')
return K
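# Example:
#
#     sorted([3, 1, 2], key=cmp_to_key(lambda a, b: a - b))   # -> [1, 2, 3]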
# Back up our definitions above in case they're useful
_OrderedDict = OrderedDict
_Counter = Counter
_check_output = check_output
_count = count
_ceil = ceil
__count_elements = _count_elements
_recursive_repr = recursive_repr
_ChainMap = ChainMap
_create_connection = create_connection
_cmp_to_key = cmp_to_key
# Overwrite the definitions above with the usual ones
# from the standard library:
if sys.version_info >= (2, 7):
from collections import OrderedDict, Counter
from itertools import count
from functools import cmp_to_key
try:
from subprocess import check_output
except ImportError:
# Not available. This happens with Google App Engine: see issue #231
pass
from socket import create_connection
if sys.version_info >= (3, 0):
from math import ceil
from collections import _count_elements
if sys.version_info >= (3, 3):
from reprlib import recursive_repr
from collections import ChainMap
|
alheinecke/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/learn_io/dask_io.py
|
138
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow dask.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
try:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
allowed_classes = (dd.Series, dd.DataFrame)
HAS_DASK = True
except ImportError:
HAS_DASK = False
def _add_to_index(df, start):
"""New dask.dataframe with values added to index of each subdataframe."""
df = df.copy()
df.index += start
return df
def _get_divisions(df):
"""Number of rows in each sub-dataframe."""
lengths = df.map_partitions(len).compute()
divisions = np.cumsum(lengths).tolist()
divisions.insert(0, 0)
return divisions
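# Worked example: for partitions of lengths [2, 3], np.cumsum gives [2, 5],
# so _get_divisions returns [0, 2, 5] -- each partition's starting offset
# plus the total row count.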
def _construct_dask_df_with_divisions(df):
"""Construct the new task graph and make a new dask.dataframe around it."""
divisions = _get_divisions(df)
# pylint: disable=protected-access
name = 'csv-index' + df._name
dsk = {(name, i): (_add_to_index, (df._name, i), divisions[i])
for i in range(df.npartitions)}
# pylint: enable=protected-access
from toolz import merge # pylint: disable=g-import-not-at-top
if isinstance(df, dd.DataFrame):
return dd.DataFrame(merge(dsk, df.dask), name, df.columns, divisions)
elif isinstance(df, dd.Series):
return dd.Series(merge(dsk, df.dask), name, df.name, divisions)
def extract_dask_data(data):
"""Extract data from dask.Series or dask.DataFrame for predictors.
Given a distributed dask.DataFrame or dask.Series containing columns or names
for one or more predictors, this operation returns a single dask.DataFrame or
dask.Series that can be iterated over.
Args:
data: A distributed dask.DataFrame or dask.Series.
Returns:
A dask.DataFrame or dask.Series that can be iterated over.
If the supplied argument is neither a dask.DataFrame nor a dask.Series this
operation returns it without modification.
"""
if isinstance(data, allowed_classes):
return _construct_dask_df_with_divisions(data)
else:
return data
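# Usage sketch (`pdf` is a hypothetical pandas.DataFrame):
#
#     ddf = dd.from_pandas(pdf, npartitions=2)
#     ddf = extract_dask_data(ddf)  # partition indexes are offset by the
#                                   # cumulative row counts computed above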
def extract_dask_labels(labels):
"""Extract data from dask.Series or dask.DataFrame for labels.
Given a distributed dask.DataFrame or dask.Series containing exactly one
column or name, this operation returns a single dask.DataFrame or dask.Series
that can be iterated over.
Args:
labels: A distributed dask.DataFrame or dask.Series with exactly one
column or name.
Returns:
A dask.DataFrame or dask.Series that can be iterated over.
If the supplied argument is neither a dask.DataFrame nor a dask.Series this
operation returns it without modification.
Raises:
ValueError: If the supplied dask.DataFrame contains more than one
column or the supplied dask.Series contains more than
one name.
"""
if isinstance(labels, dd.DataFrame):
ncol = labels.columns
  elif isinstance(labels, dd.Series):
    # A Series carries exactly one name; wrap it in a list so the len(ncol)
    # check below counts names rather than characters of the name string.
    ncol = [labels.name]
if isinstance(labels, allowed_classes):
if len(ncol) > 1:
raise ValueError('Only one column for labels is allowed.')
return _construct_dask_df_with_divisions(labels)
else:
return labels
|
rpappalax/jenkins-job-manager
|
refs/heads/dev
|
manager/jenkins.py
|
1
|
import os
import ast
from manager import (
DIR_CACHE
)
from manager.utils import read_file
def job_list():
jobs_list = read_file(DIR_CACHE, 'jobs_list.txt')
return ast.literal_eval(jobs_list)
|
ammarkhann/FinalSeniorCode
|
refs/heads/master
|
lib/python2.7/site-packages/django/utils/jslex.py
|
251
|
"""JsLex: a lexer for Javascript"""
# Originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
import re
class Tok(object):
"""
A specification for a token class.
"""
num = 0
def __init__(self, name, regex, next=None):
self.id = Tok.num
Tok.num += 1
self.name = name
self.regex = regex
self.next = next
def literals(choices, prefix="", suffix=""):
"""
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
class Lexer(object):
"""
A generic multi-state regex-based lexer.
"""
def __init__(self, states, first):
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)
self.state = first
def lex(self, text):
"""
Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
"""
end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while start < end:
for match in regexes[state].finditer(text, start):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
start += len(toktext)
yield (tok.name, toktext)
if tok.next:
state = tok.next
break
self.state = state
class JsLexer(Lexer):
"""
A Javascript lexer
>>> lexer = JsLexer()
>>> list(lexer.lex("a = 1"))
[('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
This doesn't properly handle non-ASCII characters in the Javascript source.
"""
# Because these tokens are matched as alternatives in a regex, longer
# possibilities must appear in the list before shorter ones, for example,
# '>>' before '>'.
#
# Note that we don't have to detect malformed Javascript, only properly
# lex correct Javascript, so much of this is simplified.
# Details of Javascript lexical structure are taken from
# http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
# A useful explanation of automatic semicolon insertion is at
# http://inimino.org/~inimino/blog/javascript_semicolons
both_before = [
Tok("comment", r"/\*(.|\n)*?\*/"),
Tok("linecomment", r"//.*?$"),
Tok("ws", r"\s+"),
Tok("keyword", literals("""
break case catch class const continue debugger
default delete do else enum export extends
finally for function if import in instanceof
new return super switch this throw try typeof
var void while with
""", suffix=r"\b"), next='reg'),
Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
Tok("id", r"""
              ([a-zA-Z_$]|\\u[0-9a-fA-F]{4}) # first char
([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
""", next='div'),
Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
Tok("onum", r"0[0-7]+"),
Tok("dnum", r"""
( (0|[1-9][0-9]*) # DecimalIntegerLiteral
\. # dot
[0-9]* # DecimalDigits-opt
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
\. # dot
[0-9]+ # DecimalDigits
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
(0|[1-9][0-9]*) # DecimalIntegerLiteral
([eE][-+]?[0-9]+)? # ExponentPart-opt
)
""", next='div'),
Tok("punct", literals("""
>>>= === !== >>> <<= >>= <= >= == != << >> &&
|| += -= *= %= &= |= ^=
"""), next="reg"),
Tok("punct", literals("++ -- ) ]"), next='div'),
Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
]
both_after = [
Tok("other", r"."),
]
states = {
# slash will mean division
'div': both_before + [
Tok("punct", literals("/= /"), next='reg'),
] + both_after,
# slash will mean regex
'reg': both_before + [
Tok("regex",
r"""
/ # opening slash
# First character is..
( [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
( [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
""", next='div'),
] + both_after,
}
def __init__(self):
super(JsLexer, self).__init__(self.states, 'reg')
def prepare_js_for_gettext(js):
"""
Convert the Javascript source `js` into something resembling C for
xgettext.
What actually happens is that all the regex literals are replaced with
"REGEX".
"""
def escape_quotes(m):
"""Used in a regex to properly escape double quotes."""
s = m.group(0)
if s == '"':
return r'\"'
else:
return s
lexer = JsLexer()
c = []
for name, tok in lexer.lex(js):
if name == 'regex':
# C doesn't grok regexes, and they aren't needed for gettext,
# so just output a string instead.
tok = '"REGEX"'
elif name == 'string':
# C doesn't have single-quoted strings, so make all strings
# double-quoted.
if tok.startswith("'"):
guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
tok = '"' + guts + '"'
elif name == 'id':
# C can't deal with Unicode escapes in identifiers. We don't
# need them for gettext anyway, so replace them with something
# innocuous
tok = tok.replace("\\", "U")
c.append(tok)
return ''.join(c)
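# Example:
#
#     prepare_js_for_gettext("a = /x/ + 'hi';")   # -> 'a = "REGEX" + "hi";'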
|
laiqiqi886/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/idlelib/idle_test/test_pathbrowser.py
|
117
|
import unittest
import idlelib.PathBrowser as PathBrowser
class PathBrowserTest(unittest.TestCase):
def test_DirBrowserTreeItem(self):
# Issue16226 - make sure that getting a sublist works
d = PathBrowser.DirBrowserTreeItem('')
d.GetSubList()
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
cpython/271_test_peepholer.py
|
6
|
import dis
import sys
from cStringIO import StringIO
import unittest
def disassemble(func):
f = StringIO()
tmp = sys.stdout
sys.stdout = f
dis.dis(func)
sys.stdout = tmp
result = f.getvalue()
f.close()
return result
def dis_single(line):
return disassemble(compile(line, '', 'single'))
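# Example: the peephole optimizer folds constants, so dis_single('a = 2+3')
# yields disassembly containing LOAD_CONST with (5) and no BINARY_ADD.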
class TestTransforms(unittest.TestCase):
def test_unot(self):
# UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE
def unot(x):
if not x == 2:
del x
asm = disassemble(unot)
for elem in ('UNARY_NOT', 'POP_JUMP_IF_FALSE'):
self.assertNotIn(elem, asm)
self.assertIn('POP_JUMP_IF_TRUE', asm)
def test_elim_inversion_of_is_or_in(self):
for line, elem in (
('not a is b', '(is not)',),
('not a in b', '(not in)',),
('not a is not b', '(is)',),
('not a not in b', '(in)',),
):
asm = dis_single(line)
self.assertIn(elem, asm)
def test_none_as_constant(self):
# LOAD_GLOBAL None --> LOAD_CONST None
def f(x):
None
return x
asm = disassemble(f)
for elem in ('LOAD_GLOBAL',):
self.assertNotIn(elem, asm)
for elem in ('LOAD_CONST', '(None)'):
self.assertIn(elem, asm)
def f():
'Adding a docstring made this test fail in Py2.5.0'
return None
self.assertIn('LOAD_CONST', disassemble(f))
self.assertNotIn('LOAD_GLOBAL', disassemble(f))
def test_while_one(self):
# Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx
def f():
while 1:
pass
return list
asm = disassemble(f)
for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'):
self.assertNotIn(elem, asm)
for elem in ('JUMP_ABSOLUTE',):
self.assertIn(elem, asm)
def test_pack_unpack(self):
for line, elem in (
('a, = a,', 'LOAD_CONST',),
('a, b = a, b', 'ROT_TWO',),
('a, b, c = a, b, c', 'ROT_THREE',),
):
asm = dis_single(line)
self.assertIn(elem, asm)
self.assertNotIn('BUILD_TUPLE', asm)
self.assertNotIn('UNPACK_TUPLE', asm)
def test_folding_of_tuples_of_constants(self):
for line, elem in (
('a = 1,2,3', '((1, 2, 3))'),
('("a","b","c")', "(('a', 'b', 'c'))"),
('a,b,c = 1,2,3', '((1, 2, 3))'),
('(None, 1, None)', '((None, 1, None))'),
('((1, 2), 3, 4)', '(((1, 2), 3, 4))'),
):
asm = dis_single(line)
self.assertIn(elem, asm)
self.assertNotIn('BUILD_TUPLE', asm)
# Bug 1053819: Tuple of constants misidentified when presented with:
# . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . .
# The following would segfault upon compilation
def crater():
(~[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
],)
def test_folding_of_binops_on_constants(self):
for line, elem in (
('a = 2+3+4', '(9)'), # chained fold
('"@"*4', "('@@@@')"), # check string ops
('a="abc" + "def"', "('abcdef')"), # check string ops
('a = 3**4', '(81)'), # binary power
('a = 3*4', '(12)'), # binary multiply
('a = 13//4', '(3)'), # binary floor divide
('a = 14%4', '(2)'), # binary modulo
('a = 2+3', '(5)'), # binary add
('a = 13-4', '(9)'), # binary subtract
('a = (12,13)[1]', '(13)'), # binary subscr
('a = 13 << 2', '(52)'), # binary lshift
('a = 13 >> 2', '(3)'), # binary rshift
('a = 13 & 7', '(5)'), # binary and
('a = 13 ^ 7', '(10)'), # binary xor
('a = 13 | 7', '(15)'), # binary or
):
asm = dis_single(line)
self.assertIn(elem, asm, asm)
self.assertNotIn('BINARY_', asm)
# Verify that unfoldables are skipped
asm = dis_single('a=2+"b"')
self.assertIn('(2)', asm)
self.assertIn("('b')", asm)
# Verify that large sequences do not result from folding
asm = dis_single('a="x"*1000')
self.assertIn('(1000)', asm)
def test_folding_of_unaryops_on_constants(self):
for line, elem in (
('`1`', "('1')"), # unary convert
('-0.5', '(-0.5)'), # unary negative
('~-2', '(1)'), # unary invert
):
asm = dis_single(line)
self.assertIn(elem, asm, asm)
self.assertNotIn('UNARY_', asm)
# Verify that unfoldables are skipped
for line, elem in (
('-"abc"', "('abc')"), # unary negative
('~"abc"', "('abc')"), # unary invert
):
asm = dis_single(line)
self.assertIn(elem, asm, asm)
self.assertIn('UNARY_', asm)
def test_elim_extra_return(self):
# RETURN LOAD_CONST None RETURN --> RETURN
def f(x):
return x
asm = disassemble(f)
self.assertNotIn('LOAD_CONST', asm)
self.assertNotIn('(None)', asm)
self.assertEqual(asm.split().count('RETURN_VALUE'), 1)
def test_elim_jump_to_return(self):
# JUMP_FORWARD to RETURN --> RETURN
def f(cond, true_value, false_value):
return true_value if cond else false_value
asm = disassemble(f)
self.assertNotIn('JUMP_FORWARD', asm)
self.assertNotIn('JUMP_ABSOLUTE', asm)
self.assertEqual(asm.split().count('RETURN_VALUE'), 2)
def test_elim_jump_after_return1(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
if cond1: return 1
if cond2: return 2
while 1:
return 3
while 1:
if cond1: return 4
return 5
return 6
asm = disassemble(f)
self.assertNotIn('JUMP_FORWARD', asm)
self.assertNotIn('JUMP_ABSOLUTE', asm)
self.assertEqual(asm.split().count('RETURN_VALUE'), 6)
def test_elim_jump_after_return2(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
while 1:
if cond1: return 4
asm = disassemble(f)
self.assertNotIn('JUMP_FORWARD', asm)
# There should be one jump for the while loop.
self.assertEqual(asm.split().count('JUMP_ABSOLUTE'), 1)
self.assertEqual(asm.split().count('RETURN_VALUE'), 2)
def test_main(verbose=None):
import sys
from test import test_support
    test_classes = (TestTransforms,)
with test_support.check_py3k_warnings(
("backquote not supported", SyntaxWarning)):
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
|
yunojuno/django-test
|
refs/heads/master
|
test_app/signals.py
|
2
|
# -*- coding: utf-8 -*-
import logging
from os import path, listdir
from django.conf import settings
from django.dispatch import receiver
from trello_webhooks.signals import callback_received
from test_app.hipchat import send_to_hipchat
logger = logging.getLogger(__name__)
def get_supported_events():
"""Returns the list of available _local_ templates.
If a template exists in the local app, it will take precedence
over the default trello_webhooks template. The base assumption
for this function is that _if_ a local template exists, then this
is an event we are interested in.
"""
app_template_path = path.join(
path.realpath(path.dirname(__file__)),
'templates/trello_webhooks'
)
return [t.split('.')[0] for t in listdir(app_template_path)]
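# Example (file names hypothetical): if templates/trello_webhooks contains
# 'createCard.html' and 'commentCard.html', this returns
# ['createCard', 'commentCard'] (in directory-listing order).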
@receiver(callback_received, dispatch_uid="callback_received")
def on_callback_received(sender, **kwargs):
# if a template exists for the event_type, then send the output
# as a normal notification, in 'yellow'
# if no template exists, send a notification in 'red'
event = kwargs.pop('event')
html = event.render()
if settings.HIPCHAT_ENABLED:
logger.debug(
u"Message sent to HipChat [%s]: %r",
send_to_hipchat(html), event, event.webhook
)
else:
logger.debug(
u"HipChat is DISABLED, logging message instead: '%s'",
html
)
|
jkorell/PTVS
|
refs/heads/master
|
Python/Tests/TestData/RemoveImport/ImportDup.py
|
25
|
import oar, oar
|
gangadhar-kadam/powapp
|
refs/heads/master
|
setup/doctype/jobs_email_settings/jobs_email_settings.py
|
60
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _
from webnotes.utils import cint
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def validate(self):
if cint(self.doc.extract_emails) and not (self.doc.email_id and self.doc.host and \
self.doc.username and self.doc.password):
webnotes.msgprint(_("""Host, Email and Password required if emails are to be pulled"""),
raise_exception=True)
|
eduNEXT/edunext-platform
|
refs/heads/master
|
openedx/core/djangoapps/ace_common/settings/devstack.py
|
9
|
"""
Settings for edX ACE on devstack.
"""
from openedx.core.djangoapps.ace_common.settings import common
def plugin_settings(settings):
"""
Override common settings and use `file_email` for better debugging.
"""
common.plugin_settings(settings)
settings.ACE_ENABLED_CHANNELS = [
'file_email'
]
settings.ACE_CHANNEL_DEFAULT_EMAIL = 'file_email'
settings.ACE_CHANNEL_TRANSACTIONAL_EMAIL = 'file_email'
|
open-craft/xblock-group-project-v2
|
refs/heads/master
|
group_project_v2/stage_components.py
|
1
|
import json
import logging
from collections import namedtuple
from datetime import date
from xml.etree import ElementTree
import webob
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils import html
from lazy.lazy import lazy
from upload_validator import FileTypeValidator
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.fields import UNIQUE_ID, Boolean, Scope, String
from xblock.validation import ValidationMessage
from xblockutils.studio_editable import StudioEditableXBlockMixin, XBlockWithPreviewMixin
from group_project_v2 import messages
from group_project_v2.api_error import ApiError
from group_project_v2.mixins import (
CompletionMixin,
NoStudioEditableSettingsMixin,
UserAwareXBlockMixin,
WorkgroupAwareXBlockMixin,
XBlockWithTranslationServiceMixin,
)
from group_project_v2.project_api import ProjectAPIXBlockMixin
from group_project_v2.project_navigator import ResourcesViewXBlock, SubmissionsViewXBlock
from group_project_v2.upload_file import UploadFile
from group_project_v2.utils import (
MUST_BE_OVERRIDDEN,
FieldValuesContextManager,
I18NService,
add_resource,
build_date_field,
format_date,
get_link_to_block,
)
from group_project_v2.utils import gettext as _
from group_project_v2.utils import (
groupwork_protected_view,
loader,
make_s3_link_temporary,
make_user_caption,
mean,
outer_html,
round_half_up,
)
log = logging.getLogger(__name__)
@XBlock.needs("i18n")
class BaseStageComponentXBlock(CompletionMixin, XBlock, XBlockWithTranslationServiceMixin, I18NService):
@lazy
def stage(self):
"""
:rtype: group_project_v2.stage.base.BaseGroupActivityStage
"""
return self.get_parent()
class BaseGroupProjectResourceXBlock(BaseStageComponentXBlock, StudioEditableXBlockMixin, XBlockWithPreviewMixin):
display_name = String(
display_name=_(u"Display Name"),
help=_(U"This is a name of the resource"),
scope=Scope.settings,
default=_(u"Group Project V2 Resource")
)
description = String(
display_name=_(u"Resource Description"),
scope=Scope.settings
)
editable_fields = ('display_name', 'description')
def student_view(self, _context): # pylint: disable=no-self-use
return Fragment()
def resources_view(self, context):
fragment = Fragment()
render_context = {'resource': self}
render_context.update(context)
fragment.add_content(loader.render_template(self.PROJECT_NAVIGATOR_VIEW_TEMPLATE, render_context))
return fragment
class GroupProjectResourceXBlock(BaseGroupProjectResourceXBlock):
CATEGORY = "gp-v2-resource"
STUDIO_LABEL = _(u"Resource")
PROJECT_NAVIGATOR_VIEW_TEMPLATE = 'templates/html/components/resource.html'
resource_location = String(
display_name=_(u"Resource Location"),
help=_(u"A url to download/view the resource"),
scope=Scope.settings,
)
grading_criteria = Boolean(
display_name=_(u"Grading Criteria?"),
help=_(u"If true, resource will be treated as grading criteria"),
scope=Scope.settings,
default=False
)
editable_fields = ('display_name', 'description', 'resource_location', )
def author_view(self, context):
return self.resources_view(context)
class GroupProjectVideoResourceXBlock(BaseGroupProjectResourceXBlock):
CATEGORY = "gp-v2-video-resource"
STUDIO_LABEL = _(u"Video Resource")
PROJECT_NAVIGATOR_VIEW_TEMPLATE = 'templates/html/components/video_resource.html'
video_id = String(
display_name=_(u"Ooyala/Brightcove content ID"),
help=_(u"This is the Ooyala/Brightcove Content Identifier"),
default="Q1eXg5NzpKqUUzBm5WTIb6bXuiWHrRMi",
scope=Scope.content,
)
editable_fields = ('display_name', 'description', 'video_id')
@classmethod
def is_available(cls):
return True # TODO: restore conditional availability when switched to use actual Ooyala XBlock
@classmethod
def brightcove_account_id(cls):
"""
Gets bcove account id from settings
"""
xblock_settings = settings.XBLOCK_SETTINGS if hasattr(settings, "XBLOCK_SETTINGS") else {}
return xblock_settings.get('OoyalaPlayerBlock', {}).get('BCOVE_ACCOUNT_ID')
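    # Illustrative shape of the settings consumed above; the key names come
    # from the lookup, the values are hypothetical:
    # XBLOCK_SETTINGS = {'OoyalaPlayerBlock': {'BCOVE_ACCOUNT_ID': '1234567890001'}}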
@property
def video_type(self):
"""
Checks if video_id belongs to Brightcove or Ooyala
"""
try:
# Brightcove IDs are numeric
int(self.video_id)
return 'brightcove'
except (ValueError, TypeError):
return 'ooyala'
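    # Illustrative: a numeric ID such as '5500000000001' (hypothetical) yields
    # 'brightcove', while the default 'Q1eXg5NzpKqUUzBm5WTIb6bXuiWHrRMi'
    # yields 'ooyala'.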
def resources_view(self, context):
render_context = {
'video_id': self.video_id,
'player_type': self.video_type,
'bc_account_id': self.brightcove_account_id(),
}
render_context.update(context)
fragment = super(GroupProjectVideoResourceXBlock, self).resources_view(render_context)
fragment.add_javascript_url(url='//players.brightcove.net/{}/default_default/index.min.js'
.format(self.brightcove_account_id()))
return fragment
def author_view(self, context):
return self.resources_view(context)
def validate_field_data(self, validation, data):
if not data.video_id:
validation.add(ValidationMessage(ValidationMessage.ERROR, self._(messages.MUST_CONTAIN_CONTENT_ID)))
return validation
class StaticContentBaseXBlock(BaseStageComponentXBlock, XBlockWithPreviewMixin, NoStudioEditableSettingsMixin):
TARGET_PROJECT_NAVIGATOR_VIEW = None
TEXT_TEMPLATE = None
TEMPLATE_PATH = "templates/html/components/static_content.html"
def student_view(self, context):
try:
activity = self.stage.activity
target_block = activity.project.navigator.get_child_of_category(self.TARGET_PROJECT_NAVIGATOR_VIEW)
except AttributeError:
activity = None
target_block = None
if target_block is None:
return Fragment()
render_context = {
'block': self,
'block_link': get_link_to_block(target_block),
'block_text': self._(self.TEXT_TEMPLATE).format(activity_name=self._(activity.display_name)),
'target_block_id': str(target_block.scope_ids.usage_id),
'view_icon': target_block.icon
}
render_context.update(context)
fragment = Fragment()
fragment.add_content(loader.render_template(self.TEMPLATE_PATH, render_context))
return fragment
class SubmissionsStaticContentXBlock(StaticContentBaseXBlock):
DISPLAY_NAME = _(u"Submissions Help Text")
STUDIO_LABEL = DISPLAY_NAME
CATEGORY = "gp-v2-static-submissions"
display_name_with_default = DISPLAY_NAME
TARGET_PROJECT_NAVIGATOR_VIEW = SubmissionsViewXBlock.CATEGORY
TEXT_TEMPLATE = _(
u"You can upload (or replace) your file(s) before the due date in the project navigator panel"
u" at right by clicking the upload button"
)
class GradeRubricStaticContentXBlock(StaticContentBaseXBlock):
DISPLAY_NAME = _(u"Grade Rubric Help Text")
STUDIO_LABEL = DISPLAY_NAME
CATEGORY = "gp-v2-static-grade-rubric"
display_name_with_default = DISPLAY_NAME
TARGET_PROJECT_NAVIGATOR_VIEW = ResourcesViewXBlock.CATEGORY
TEXT_TEMPLATE = _(
u"The {activity_name} grading rubric is provided in the project navigator panel"
u" at right by clicking the resources button"""
)
# pylint: disable=invalid-name
SubmissionUpload = namedtuple("SubmissionUpload", "location file_name submission_date user_details")
@XBlock.needs('user')
@XBlock.wants('notifications')
class GroupProjectSubmissionXBlock(
BaseStageComponentXBlock, ProjectAPIXBlockMixin, StudioEditableXBlockMixin, XBlockWithPreviewMixin
):
CATEGORY = "gp-v2-submission"
STUDIO_LABEL = _(u"Submission")
PROJECT_NAVIGATOR_VIEW_TEMPLATE = 'templates/html/components/submission_navigator_view.html'
REVIEW_VIEW_TEMPLATE = 'templates/html/components/submission_review_view.html'
display_name = String(
display_name=_(u"Display Name"),
help=_(U"This is a name of the submission"),
scope=Scope.settings,
default=_(u"Group Project V2 Submission")
)
description = String(
display_name=_(u"Submission Description"),
scope=Scope.settings
)
upload_id = String(
display_name=_(u"Upload ID"),
help=_(U"This string is used as an identifier for an upload. "
U"Submissions sharing the same Upload ID will be updated simultaneously"),
)
editable_fields = ('display_name', 'description', 'upload_id')
SUBMISSION_RECEIVED_EVENT = "activity.received_submission"
# TODO: Make configurable via XBlock settings
DEFAULT_FILE_FILTERS = {
"mime-types": (
# Images
"image/png", "image/jpeg", "image/tiff",
# Excel
"application/vnd.ms-excel", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
# Word
"application/msword", "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
# PowerPoint
"application/vnd.ms-powerpoint",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
# PDF
"application/pdf"
),
"extensions": ("png", "jpg", "jpeg", "tif", "tiff", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "pdf",)
}
validator = FileTypeValidator(
allowed_types=DEFAULT_FILE_FILTERS["mime-types"],
allowed_extensions=[".{}".format(ext) for ext in DEFAULT_FILE_FILTERS["extensions"]]
)
def get_upload(self, group_id):
submission_map = self.project_api.get_latest_workgroup_submissions_by_id(group_id)
submission_data = submission_map.get(self.upload_id, None)
if submission_data is None:
return None
document_signed_url = make_s3_link_temporary(
submission_data.get('workgroup'),
submission_data['document_url'].split('/')[-2],
submission_data['document_filename'],
submission_data["document_url"]
)
return SubmissionUpload(
document_signed_url,
submission_data["document_filename"],
format_date(build_date_field(submission_data["modified"])),
submission_data.get("user_details", None)
)
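    # For reference, the submission_data dict consumed above is assumed to
    # carry at least these keys (values are hypothetical):
    # {'workgroup': 42, 'document_url': 'uploads/<sha1>/report.pdf',
    #  'document_filename': 'report.pdf', 'modified': '2016-01-01T00:00:00Z',
    #  'user_details': {...}}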
@property
def upload(self):
return self.get_upload(self.stage.activity.workgroup.id)
def student_view(self, _context): # pylint: disable=no-self-use
return Fragment()
def submissions_view(self, context):
fragment = Fragment()
# pylint: disable=consider-using-ternary
uploading_allowed = (self.stage.available_now and self.stage.is_group_member) or self.stage.is_admin_grader
render_context = {'submission': self, 'upload': self.upload, 'disabled': not uploading_allowed}
render_context.update(context)
fragment.add_content(loader.render_django_template(
self.PROJECT_NAVIGATOR_VIEW_TEMPLATE,
render_context,
i18n_service=self.i18n_service,
))
add_resource(self, 'javascript', 'public/js/components/submission.js', fragment)
fragment.initialize_js("GroupProjectSubmissionBlock")
return fragment
def submission_review_view(self, context):
group_id = context.get('group_id', self.stage.activity.workgroup.id)
fragment = Fragment()
render_context = {'submission': self, 'upload': self.get_upload(group_id)}
render_context.update(context)
fragment.add_content(loader.render_django_template(
self.REVIEW_VIEW_TEMPLATE,
render_context,
i18n_service=self.i18n_service,
))
# NOTE: adding js/css likely won't work here, as the result of this view is added as an HTML to an existing DOM
# element
return fragment
def _validate_upload(self, request):
if not self.stage.available_now:
if self.stage.is_open:
template = self._(messages.STAGE_CLOSED_TEMPLATE)
else:
template = self._(messages.STAGE_NOT_OPEN_TEMPLATE)
# 422 = unprocessable entity
return 422, {'result': 'error', 'message': template.format(action=self._(self.stage.STAGE_ACTION))}
if not self.stage.is_group_member and not self.stage.is_admin_grader:
# 403 - forbidden
return 403, {'result': 'error', 'message': self._(messages.NON_GROUP_MEMBER_UPLOAD)}
try:
self.validator(request.params[self.upload_id].file)
except ValidationError as validationError:
message = validationError.message % validationError.params
# 400 - BAD REQUEST
return 400, {'result': 'error', 'message': message}
return None, None
@XBlock.handler
def upload_submission(self, request, _suffix=''):
"""
Handles submission upload and marks stage as completed if all submissions in stage have uploads.
:param request: HTTP request
:param str _suffix:
"""
failure_code, response_data = self._validate_upload(request)
if failure_code is None and response_data is None:
target_activity = self.stage.activity
response_data = {
"title": self._(messages.SUCCESSFUL_UPLOAD_TITLE),
"message": self._(messages.SUCCESSFUL_UPLOAD_MESSAGE_TPL).format(icon='fa fa-paperclip')
}
failure_code = 0
try:
context = {
"user_id": target_activity.user_id,
"group_id": target_activity.workgroup.id,
"project_api": self.project_api,
"course_id": target_activity.course_id
}
uploaded_file = self.persist_and_submit_file(
target_activity, context, request.params[self.upload_id].file
)
response_data["submissions"] = {
uploaded_file.submission_id: make_s3_link_temporary(
uploaded_file.group_id,
uploaded_file.sha1,
uploaded_file.file.name,
uploaded_file.file_url,
)
}
self.stage.check_submissions_and_mark_complete()
response_data["new_stage_states"] = [self.stage.get_new_stage_state_data()]
response_data['user_label'] = self.project_api.get_user_details(target_activity.user_id).user_label
response_data['submission_date'] = format_date(date.today())
except Exception as exception: # pylint: disable=broad-except
log.exception(exception)
failure_code = 500
if isinstance(exception, ApiError):
failure_code = exception.code
error_message = str(exception).strip()
if error_message == '':
error_message = self._(messages.UNKNOWN_ERROR)
response_data.update({
"title": self._(messages.FAILED_UPLOAD_TITLE),
"message": self._(messages.FAILED_UPLOAD_MESSAGE_TPL).format(error_goes_here=error_message)
})
response = webob.response.Response(body=json.dumps(response_data))
if failure_code:
response.status_code = failure_code
return response
def persist_and_submit_file(self, activity, context, file_stream):
"""
Saves uploaded files to their permanent location, sends them to submissions backend and emits submission events
"""
uploaded_file = UploadFile(file_stream, self.upload_id, context)
# Save the files first
try:
uploaded_file.save_file()
except Exception as save_file_error: # pylint: disable=broad-except
original_message = save_file_error.message if hasattr(save_file_error, "message") else ""
save_file_error.message = _("Error storing file {} - {}").format(uploaded_file.file.name, original_message)
raise
        # It has been saved... note the submission
try:
uploaded_file.submit()
# Emit analytics event...
self.runtime.publish(
self,
self.SUBMISSION_RECEIVED_EVENT,
{
"submission_id": uploaded_file.submission_id,
"filename": uploaded_file.file.name,
"content_id": activity.content_id,
"group_id": activity.workgroup.id,
"user_id": activity.user_id,
}
)
except Exception as save_record_error: # pylint: disable=broad-except
original_message = save_record_error.message if hasattr(save_record_error, "message") else ""
save_record_error.message = _("Error recording file information {} - {}").format(
uploaded_file.file.name, original_message
)
raise
# See if the xBlock Notification Service is available, and - if so -
# dispatch a notification to the entire workgroup that a file has been uploaded
# Note that the NotificationService can be disabled, so it might not be available
# in the list of services
notifications_service = self.runtime.service(self, 'notifications')
if notifications_service:
self.stage.fire_file_upload_notification(notifications_service)
return uploaded_file
class ReviewSubjectSelectorXBlockBase(BaseStageComponentXBlock, XBlockWithPreviewMixin, NoStudioEditableSettingsMixin):
"""
Base class for review selector blocks
"""
@property
def review_subjects(self):
raise NotImplementedError(MUST_BE_OVERRIDDEN)
@XBlock.handler
def get_statuses(self, _request, _suffix=''):
response_data = {
review_subject.id: self.stage.get_review_state(review_subject.id)
for review_subject in self.review_subjects
}
return webob.response.Response(body=json.dumps(response_data))
def student_view(self, context):
fragment = Fragment()
render_context = {'selector': self, 'review_subjects': self.get_review_subject_repr()}
render_context.update(context)
add_resource(self, 'css', "public/css/components/review_subject_selector.css", fragment)
add_resource(self, 'javascript', "public/js/components/review_subject_selector.js", fragment)
fragment.add_content(loader.render_django_template(
self.STUDENT_TEMPLATE,
render_context,
i18n_service=self.i18n_service,
))
fragment.initialize_js('ReviewSubjectSelectorXBlock')
return fragment
class PeerSelectorXBlock(ReviewSubjectSelectorXBlockBase, UserAwareXBlockMixin):
CATEGORY = "gp-v2-peer-selector"
STUDIO_LABEL = _(u"Teammate Selector")
display_name_with_default = _(u"Teammate Selector XBlock")
STUDENT_TEMPLATE = "templates/html/components/peer_selector.html"
@property
def review_subjects(self):
return self.stage.team_members
def get_review_subject_repr(self):
return [
{
'id': peer.id,
'username': peer.username,
'user_label': make_user_caption(peer),
'profile_image_url': peer.profile_image_url
}
for peer in self.review_subjects
]
def author_view(self, context):
fake_peers = [
{"id": 1, "username": "Jack"},
{"id": 2, "username": "Jill"},
]
render_context = {
'demo': True,
'review_subjects': fake_peers
}
render_context.update(context)
return self.student_view(render_context)
class GroupSelectorXBlock(ReviewSubjectSelectorXBlockBase):
CATEGORY = "gp-v2-group-selector"
STUDIO_LABEL = _(u"Group Selector")
display_name_with_default = _(u"Group Selector XBlock")
STUDENT_TEMPLATE = "templates/html/components/group_selector.html"
@property
def review_subjects(self):
return self.stage.review_groups
def get_review_subject_repr(self):
return [{'id': group.id} for group in self.review_subjects]
def author_view(self, context):
fake_groups = [
{"id": 1},
{"id": 2},
]
render_context = {
'demo': True,
'review_subjects': fake_groups
}
render_context.update(context)
return self.student_view(render_context)
class GroupProjectReviewQuestionXBlock(BaseStageComponentXBlock, StudioEditableXBlockMixin, XBlockWithPreviewMixin):
CATEGORY = "gp-v2-review-question"
STUDIO_LABEL = _(u"Review Question")
@property
def display_name_with_default(self):
return self.title or _(u"Review Question")
question_id = String(
display_name=_(u"Question ID"),
default=UNIQUE_ID,
scope=Scope.content,
force_export=True
)
title = String(
display_name=_(u"Question Text"),
default=_(u""),
scope=Scope.content
)
assessment_title = String(
display_name=_(u"Assessment Question Text"),
help=_(u"Overrides question title when displayed in assessment mode"),
default=None,
scope=Scope.content
)
question_content = String(
display_name=_(u"Question Content"),
help=_(u"HTML control"),
default=_(u""),
scope=Scope.content,
multiline_editor="xml",
xml_node=True
)
required = Boolean(
display_name=_(u"Required"),
default=False,
scope=Scope.content
)
grade = Boolean(
display_name=_(u"Grading"),
help=_(u"IF True, answers to this question will be used to calculate student grade for Group Project."),
default=False,
scope=Scope.content
)
single_line = Boolean(
display_name=_(u"Single Line"),
help=_(u"If True question label and content will be displayed on single line, allowing for more compact layout."
u"Only affects presentation."),
default=False,
scope=Scope.content
)
question_css_classes = String(
display_name=_(u"CSS Classes"),
help=_(u"CSS classes to be set on question element. Only affects presentation."),
scope=Scope.content
)
editable_fields = (
"question_id", "title", "assessment_title", "question_content", "required", "grade", "single_line",
"question_css_classes"
)
has_author_view = True
@lazy
def stage(self):
return self.get_parent()
def render_content(self):
try:
answer_node = ElementTree.fromstring(self.question_content)
except ElementTree.ParseError:
message_tpl = "Exception when parsing question content for question {question_id}. Content is [{content}]."
            message = message_tpl.format(question_id=self.question_id, content=self.question_content)
            log.exception(message)
return ""
answer_node.set('name', self.question_id)
answer_node.set('id', self.question_id)
current_class = answer_node.get('class')
answer_classes = ['answer']
if current_class:
answer_classes.append(current_class)
if self.single_line:
answer_classes.append('side')
if self.stage.is_closed:
answer_node.set('disabled', 'disabled')
else:
answer_classes.append('editable')
answer_node.set('class', ' '.join(answer_classes))
return outer_html(answer_node)
def student_view(self, context):
question_classes = ["question"]
if self.required:
question_classes.append("required")
if self.question_css_classes:
question_classes.append(self.question_css_classes)
fragment = Fragment()
render_context = {
'question': self,
'question_classes': " ".join(question_classes),
'question_content': self.render_content()
}
render_context.update(context)
fragment.add_content(
loader.render_django_template("templates/html/components/review_question.html", render_context))
return fragment
def studio_view(self, context):
fragment = super(GroupProjectReviewQuestionXBlock, self).studio_view(context)
# TODO: StudioEditableXBlockMixin should really support Codemirror XML editor
add_resource(self, 'css', "public/css/components/question_edit.css", fragment)
add_resource(self, 'javascript', "public/js/components/question_edit.js", fragment)
fragment.initialize_js("GroupProjectQuestionEdit")
return fragment
def author_view(self, context):
fragment = self.student_view(context)
add_resource(self, 'css', "public/css/components/question_edit.css", fragment)
return fragment
class GroupProjectBaseFeedbackDisplayXBlock(
BaseStageComponentXBlock, StudioEditableXBlockMixin, XBlockWithPreviewMixin, WorkgroupAwareXBlockMixin
):
DEFAULT_QUESTION_ID_VALUE = None
NO_QUESTION_SELECTED = _(u"No question selected")
QUESTION_NOT_FOUND = _(u"Selected question not found")
QUESTION_ID_IS_NOT_UNIQUE = _(u"Question ID is not unique")
question_id = String(
display_name=_(u"Question ID"),
help=_(u"Question to be assessed"),
scope=Scope.content,
default=DEFAULT_QUESTION_ID_VALUE
)
show_mean = Boolean(
display_name=_(u"Show Mean Value"),
help=_(u"If True, converts review answers to numbers and calculates mean value"),
default=False,
scope=Scope.content
)
editable_fields = ("question_id", "show_mean")
has_author_view = True
@property
def activity_questions(self):
raise NotImplementedError(MUST_BE_OVERRIDDEN)
@property
def display_name_with_default(self):
if self.question:
return _(u'Review Assessment for question "{question_title}"').format(question_title=self.question.title)
return _(u"Review Assessment")
@lazy
def question(self):
matching_questions = [
question for question in self.activity_questions if question.question_id == self.question_id
]
if len(matching_questions) > 1:
raise ValueError(self.QUESTION_ID_IS_NOT_UNIQUE)
if not matching_questions:
return None
return matching_questions[0]
@groupwork_protected_view
def student_view(self, context):
if self.question is None:
return Fragment(self._(messages.COMPONENT_MISCONFIGURED))
raw_feedback = self.get_feedback()
feedback = []
for item in raw_feedback:
feedback.append(html.escape(item['answer']))
fragment = Fragment()
title = self.question.assessment_title if self.question.assessment_title else self.question.title
render_context = {'assessment': self, 'question_title': title, 'feedback': feedback}
if self.show_mean:
try:
if feedback:
render_context['mean'] = round_half_up(mean(feedback))
else:
render_context['mean'] = self._(u"N/A")
except ValueError as exc:
                log.warning(exc)
render_context['mean'] = self._(u"N/A")
render_context.update(context)
fragment.add_content(loader.render_django_template(
"templates/html/components/review_assessment.html",
render_context,
i18n_service=self.i18n_service,
))
return fragment
def validate(self):
validation = super(GroupProjectBaseFeedbackDisplayXBlock, self).validate()
if not self.question_id:
validation.add(ValidationMessage(
ValidationMessage.ERROR,
self.NO_QUESTION_SELECTED
))
if self.question_id and self.question is None:
validation.add(ValidationMessage(
ValidationMessage.ERROR,
self.QUESTION_NOT_FOUND
))
return validation
def author_view(self, context):
if self.question:
return self.student_view(context)
fragment = Fragment()
fragment.add_content(self._(messages.QUESTION_NOT_SELECTED))
return fragment
def studio_view(self, context):
# can't use values_provider as we need it to be bound to current block instance
with FieldValuesContextManager(self, 'question_id', self.question_ids_values_provider):
return super(GroupProjectBaseFeedbackDisplayXBlock, self).studio_view(context)
def question_ids_values_provider(self):
not_selected = {
"display_name": _(u"--- Not selected ---"), "value": self.DEFAULT_QUESTION_ID_VALUE
}
question_values = [
{"display_name": question.title, "value": question.question_id}
for question in self.activity_questions
]
return [not_selected] + question_values
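    # Illustrative return value (titles and IDs are hypothetical):
    # [{'display_name': _(u"--- Not selected ---"), 'value': None},
    #  {'display_name': u'How well did the team collaborate?', 'value': 'q1'}]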
class GroupProjectTeamEvaluationDisplayXBlock(GroupProjectBaseFeedbackDisplayXBlock):
CATEGORY = "gp-v2-peer-assessment"
STUDIO_LABEL = _(u"Team Evaluation Display")
@property
def activity_questions(self):
return self.stage.activity.team_evaluation_questions
def get_feedback(self):
all_feedback = self.project_api.get_user_peer_review_items(
self.user_id,
self.group_id,
self.stage.activity_content_id,
)
return [item for item in all_feedback if item["question"] == self.question_id]
class GroupProjectGradeEvaluationDisplayXBlock(GroupProjectBaseFeedbackDisplayXBlock):
CATEGORY = "gp-v2-group-assessment"
STUDIO_LABEL = _(u"Grade Evaluation Display")
@property
def activity_questions(self):
return self.stage.activity.peer_review_questions
def get_feedback(self):
all_feedback = self.project_api.get_workgroup_review_items_for_group(
self.group_id,
self.stage.activity_content_id,
)
return [item for item in all_feedback if item["question"] == self.question_id]
class ProjectTeamXBlock(
BaseStageComponentXBlock, XBlockWithPreviewMixin, NoStudioEditableSettingsMixin, StudioEditableXBlockMixin,
):
CATEGORY = 'gp-v2-project-team'
STUDIO_LABEL = _(u"Project Team")
display_name_with_default = STUDIO_LABEL
def student_view(self, context):
fragment = Fragment()
# Could be a TA not in the group.
if self.stage.is_group_member:
user_details = [self.stage.project_api.get_member_data(self.stage.user_id)]
else:
user_details = []
render_context = {
'team_members': user_details + self.stage.team_members,
'course_id': self.stage.course_id,
'group_id': self.stage.workgroup.id
}
render_context.update(context)
fragment.add_content(loader.render_django_template(
"templates/html/components/project_team.html",
render_context,
i18n_service=self.i18n_service,
))
add_resource(self, 'css', "public/css/components/project_team.css", fragment)
add_resource(self, 'javascript', "public/js/components/project_team.js", fragment)
fragment.initialize_js("ProjectTeamXBlock")
return fragment
|
ianunruh/hvac
|
refs/heads/master
|
hvac/constants/identity.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Constants related to the Identity secrets engine."""
ALLOWED_GROUP_TYPES = [
'internal',
'external',
]
|
linuxwhatelse/plugin.audio.linuxwhatelse.gmusic
|
refs/heads/master
|
resources/libs/lib/google/protobuf/internal/_parameterized.py
|
87
|
#! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Adds support for parameterized tests to Python's unittest TestCase class.
A parameterized test is a method in a test case that is invoked with different
argument tuples.
A simple example:
class AdditionExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
(1, 2, 3),
(4, 5, 9),
(1, 1, 3))
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
Each invocation is a separate test case and properly isolated just
like a normal test method, with its own setUp/tearDown cycle. In the
example above, there are three separate testcases, one of which will
fail due to an assertion error (1 + 1 != 3).
Parameters for individual test cases can be tuples (with positional parameters)
or dictionaries (with named parameters):
class AdditionExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
{'op1': 1, 'op2': 2, 'result': 3},
{'op1': 4, 'op2': 5, 'result': 9},
)
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
If a parameterized test fails, the error message will show the
original test name (which is modified internally) and the arguments
for the specific invocation, which are part of the string returned by
the shortDescription() method on test cases.
The id method of the test, used internally by the unittest framework,
is also modified to show the arguments. To make sure that test names
stay the same across several invocations, object representations like
>>> class Foo(object):
... pass
>>> repr(Foo())
'<__main__.Foo object at 0x23d8610>'
are turned into '<__main__.Foo>'. For even more descriptive names,
especially in test logs, you can use the NamedParameters decorator. In
this case, only tuples are supported, and the first parameter has to
be a string (or an object that returns an apt name when converted via
str()):
class NamedExample(parameterized.ParameterizedTestCase):
@parameterized.NamedParameters(
('Normal', 'aa', 'aaa', True),
('EmptyPrefix', '', 'abc', True),
('BothEmpty', '', '', True))
def testStartsWith(self, prefix, string, result):
      self.assertEqual(result, string.startswith(prefix))
Named tests also have the benefit that they can be run individually
from the command line:
$ testmodule.py NamedExample.testStartsWithNormal
.
--------------------------------------------------------------------
Ran 1 test in 0.000s
OK
Parameterized Classes
=====================
If invocation arguments are shared across test methods in a single
ParameterizedTestCase class, instead of decorating all test methods
individually, the class itself can be decorated:
@parameterized.Parameters(
    (1, 2, 3),
(4, 5, 9))
class ArithmeticTest(parameterized.ParameterizedTestCase):
def testAdd(self, arg1, arg2, result):
self.assertEqual(arg1 + arg2, result)
    def testSubtract(self, arg1, arg2, result):
self.assertEqual(result - arg1, arg2)
Inputs from Iterables
=====================
If parameters should be shared across several test cases, or are dynamically
created from other sources, a single non-tuple iterable can be passed into
the decorator. This iterable will be used to obtain the test cases:
class AdditionExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
      (c.op1, c.op2, c.result) for c in testcases
)
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
Single-Argument Test Methods
============================
If a test method takes only one argument, the single argument does not need to
be wrapped into a tuple:
class NegativeNumberExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
-1, -3, -4, -5
)
def testIsNegative(self, arg):
self.assertTrue(IsNegative(arg))
"""
__author__ = 'tmarek@google.com (Torsten Marek)'
import collections
import functools
import re
import types
try:
import unittest2 as unittest
except ImportError:
import unittest
import uuid
import six
ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>')
_SEPARATOR = uuid.uuid1().hex
_FIRST_ARG = object()
_ARGUMENT_REPR = object()
def _CleanRepr(obj):
return ADDR_RE.sub(r'<\1>', repr(obj))
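# Illustrative: _CleanRepr(object()) returns '<object>' rather than
# '<object object at 0x7f...>', keeping generated test names stable
# across runs.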
# Helper function formerly from the unittest module, removed from it in
# Python 2.7.
def _StrClass(cls):
return '%s.%s' % (cls.__module__, cls.__name__)
def _NonStringIterable(obj):
return (isinstance(obj, collections.Iterable) and not
isinstance(obj, six.string_types))
def _FormatParameterList(testcase_params):
if isinstance(testcase_params, collections.Mapping):
return ', '.join('%s=%s' % (argname, _CleanRepr(value))
for argname, value in testcase_params.items())
elif _NonStringIterable(testcase_params):
return ', '.join(map(_CleanRepr, testcase_params))
else:
return _FormatParameterList((testcase_params,))
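# Illustrative: _FormatParameterList({'op1': 1, 'op2': 2}) yields
# "op1=1, op2=2" (dict order may vary), _FormatParameterList((1, 'a'))
# yields "1, 'a'", and a bare scalar like 5 is wrapped and yields "5".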
class _ParameterizedTestIter(object):
"""Callable and iterable class for producing new test cases."""
def __init__(self, test_method, testcases, naming_type):
"""Returns concrete test functions for a test and a list of parameters.
The naming_type is used to determine the name of the concrete
functions as reported by the unittest framework. If naming_type is
_FIRST_ARG, the testcases must be tuples, and the first element must
have a string representation that is a valid Python identifier.
Args:
test_method: The decorated test method.
testcases: (list of tuple/dict) A list of parameter
tuples/dicts for individual test invocations.
naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR.
"""
self._test_method = test_method
self.testcases = testcases
self._naming_type = naming_type
def __call__(self, *args, **kwargs):
raise RuntimeError('You appear to be running a parameterized test case '
'without having inherited from parameterized.'
'ParameterizedTestCase. This is bad because none of '
'your test cases are actually being run.')
def __iter__(self):
test_method = self._test_method
naming_type = self._naming_type
def MakeBoundParamTest(testcase_params):
@functools.wraps(test_method)
def BoundParamTest(self):
if isinstance(testcase_params, collections.Mapping):
test_method(self, **testcase_params)
elif _NonStringIterable(testcase_params):
test_method(self, *testcase_params)
else:
test_method(self, testcase_params)
if naming_type is _FIRST_ARG:
# Signal the metaclass that the name of the test function is unique
# and descriptive.
BoundParamTest.__x_use_name__ = True
BoundParamTest.__name__ += str(testcase_params[0])
testcase_params = testcase_params[1:]
elif naming_type is _ARGUMENT_REPR:
# __x_extra_id__ is used to pass naming information to the __new__
# method of TestGeneratorMetaclass.
# The metaclass will make sure to create a unique, but nondescriptive
# name for this test.
BoundParamTest.__x_extra_id__ = '(%s)' % (
_FormatParameterList(testcase_params),)
else:
raise RuntimeError('%s is not a valid naming type.' % (naming_type,))
BoundParamTest.__doc__ = '%s(%s)' % (
BoundParamTest.__name__, _FormatParameterList(testcase_params))
if test_method.__doc__:
BoundParamTest.__doc__ += '\n%s' % (test_method.__doc__,)
return BoundParamTest
return (MakeBoundParamTest(c) for c in self.testcases)
def _IsSingletonList(testcases):
"""True iff testcases contains only a single non-tuple element."""
return len(testcases) == 1 and not isinstance(testcases[0], tuple)
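# Illustrative: Parameters([(1, 2), (3, 4)]) receives testcases == ([(1, 2),
# (3, 4)],), a singleton list holding one iterable, while
# Parameters((1, 2), (3, 4)) passes two tuples and is not a singleton.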
def _ModifyClass(class_object, testcases, naming_type):
assert not getattr(class_object, '_id_suffix', None), (
'Cannot add parameters to %s,'
' which already has parameterized methods.' % (class_object,))
class_object._id_suffix = id_suffix = {}
# We change the size of __dict__ while we iterate over it,
# which Python 3.x will complain about, so use copy().
for name, obj in class_object.__dict__.copy().items():
if (name.startswith(unittest.TestLoader.testMethodPrefix)
and isinstance(obj, types.FunctionType)):
delattr(class_object, name)
methods = {}
_UpdateClassDictForParamTestCase(
methods, id_suffix, name,
_ParameterizedTestIter(obj, testcases, naming_type))
for name, meth in methods.items():
setattr(class_object, name, meth)
def _ParameterDecorator(naming_type, testcases):
"""Implementation of the parameterization decorators.
Args:
naming_type: The naming type.
testcases: Testcase parameters.
Returns:
A function for modifying the decorated object.
"""
def _Apply(obj):
if isinstance(obj, type):
_ModifyClass(
obj,
list(testcases) if not isinstance(testcases, collections.Sequence)
else testcases,
naming_type)
return obj
else:
return _ParameterizedTestIter(obj, testcases, naming_type)
if _IsSingletonList(testcases):
assert _NonStringIterable(testcases[0]), (
'Single parameter argument must be a non-string iterable')
testcases = testcases[0]
return _Apply
def Parameters(*testcases):
"""A decorator for creating parameterized tests.
See the module docstring for a usage example.
Args:
*testcases: Parameters for the decorated method, either a single
iterable, or a list of tuples/dicts/objects (for tests
with only one argument).
Returns:
A test generator to be handled by TestGeneratorMetaclass.
"""
return _ParameterDecorator(_ARGUMENT_REPR, testcases)
def NamedParameters(*testcases):
"""A decorator for creating parameterized tests.
See the module docstring for a usage example. The first element of
each parameter tuple should be a string and will be appended to the
name of the test method.
Args:
*testcases: Parameters for the decorated method, either a single
iterable, or a list of tuples.
Returns:
A test generator to be handled by TestGeneratorMetaclass.
"""
return _ParameterDecorator(_FIRST_ARG, testcases)
class TestGeneratorMetaclass(type):
"""Metaclass for test cases with test generators.
A test generator is an iterable in a testcase that produces callables. These
callables must be single-argument methods. These methods are injected into
the class namespace and the original iterable is removed. If the name of the
iterable conforms to the test pattern, the injected methods will be picked
up as tests by the unittest framework.
In general, it is supposed to be used in conjunction with the
Parameters decorator.
"""
def __new__(mcs, class_name, bases, dct):
dct['_id_suffix'] = id_suffix = {}
for name, obj in dct.items():
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
_NonStringIterable(obj)):
iterator = iter(obj)
dct.pop(name)
_UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator)
return type.__new__(mcs, class_name, bases, dct)
def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator):
"""Adds individual test cases to a dictionary.
Args:
dct: The target dictionary.
id_suffix: The dictionary for mapping names to test IDs.
name: The original name of the test case.
iterator: The iterator generating the individual test cases.
"""
for idx, func in enumerate(iterator):
assert callable(func), 'Test generators must yield callables, got %r' % (
func,)
if getattr(func, '__x_use_name__', False):
new_name = func.__name__
else:
new_name = '%s%s%d' % (name, _SEPARATOR, idx)
assert new_name not in dct, (
'Name of parameterized test case "%s" not unique' % (new_name,))
dct[new_name] = func
id_suffix[new_name] = getattr(func, '__x_extra_id__', '')
class ParameterizedTestCase(unittest.TestCase):
"""Base class for test cases using the Parameters decorator."""
__metaclass__ = TestGeneratorMetaclass
def _OriginalName(self):
return self._testMethodName.split(_SEPARATOR)[0]
def __str__(self):
return '%s (%s)' % (self._OriginalName(), _StrClass(self.__class__))
def id(self): # pylint: disable=invalid-name
"""Returns the descriptive ID of the test.
This is used internally by the unittesting framework to get a name
for the test to be used in reports.
Returns:
The test id.
"""
return '%s.%s%s' % (_StrClass(self.__class__),
self._OriginalName(),
self._id_suffix.get(self._testMethodName, ''))
def CoopParameterizedTestCase(other_base_class):
"""Returns a new base class with a cooperative metaclass base.
This enables the ParameterizedTestCase to be used in combination
with other base classes that have custom metaclasses, such as
mox.MoxTestBase.
Only works with metaclasses that do not override type.__new__.
Example:
import google3
import mox
from google3.testing.pybase import parameterized
class ExampleTest(parameterized.CoopParameterizedTestCase(mox.MoxTestBase)):
...
Args:
other_base_class: (class) A test case base class.
Returns:
A new class object.
"""
metaclass = type(
'CoopMetaclass',
(other_base_class.__metaclass__,
TestGeneratorMetaclass), {})
return metaclass(
'CoopParameterizedTestCase',
(other_base_class, ParameterizedTestCase), {})
|
ruibarreira/linuxtrail
|
refs/heads/master
|
usr/lib/python2.7/dist-packages/docutils/transforms/references.py
|
113
|
# $Id: references.py 7624 2013-03-07 14:10:26Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Transforms for resolving references.
"""
__docformat__ = 'reStructuredText'
import sys
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class PropagateTargets(Transform):
"""
Propagate empty internal targets to the next element.
Given the following nodes::
<target ids="internal1" names="internal1">
<target anonymous="1" ids="id1">
<target ids="internal2" names="internal2">
<paragraph>
This is a test.
PropagateTargets propagates the ids and names of the internal
targets preceding the paragraph to the paragraph itself::
<target refid="internal1">
<target anonymous="1" refid="id1">
<target refid="internal2">
<paragraph ids="internal2 id1 internal1" names="internal2 internal1">
This is a test.
"""
default_priority = 260
def apply(self):
for target in self.document.traverse(nodes.target):
# Only block-level targets without reference (like ".. target:"):
if (isinstance(target.parent, nodes.TextElement) or
(target.hasattr('refid') or target.hasattr('refuri') or
target.hasattr('refname'))):
continue
assert len(target) == 0, 'error: block-level target has children'
next_node = target.next_node(ascend=True)
# Do not move names and ids into Invisibles (we'd lose the
# attributes) or different Targetables (e.g. footnotes).
if (next_node is not None and
((not isinstance(next_node, nodes.Invisible) and
not isinstance(next_node, nodes.Targetable)) or
isinstance(next_node, nodes.target))):
next_node['ids'].extend(target['ids'])
next_node['names'].extend(target['names'])
# Set defaults for next_node.expect_referenced_by_name/id.
if not hasattr(next_node, 'expect_referenced_by_name'):
next_node.expect_referenced_by_name = {}
if not hasattr(next_node, 'expect_referenced_by_id'):
next_node.expect_referenced_by_id = {}
for id in target['ids']:
# Update IDs to node mapping.
self.document.ids[id] = next_node
# If next_node is referenced by id ``id``, this
# target shall be marked as referenced.
next_node.expect_referenced_by_id[id] = target
for name in target['names']:
next_node.expect_referenced_by_name[name] = target
# If there are any expect_referenced_by_... attributes
# in target set, copy them to next_node.
next_node.expect_referenced_by_name.update(
getattr(target, 'expect_referenced_by_name', {}))
next_node.expect_referenced_by_id.update(
getattr(target, 'expect_referenced_by_id', {}))
# Set refid to point to the first former ID of target
# which is now an ID of next_node.
target['refid'] = target['ids'][0]
# Clear ids and names; they have been moved to
# next_node.
target['ids'] = []
target['names'] = []
self.document.note_refid(target)
class AnonymousHyperlinks(Transform):
"""
Link anonymous references to targets. Given::
<paragraph>
<reference anonymous="1">
internal
<reference anonymous="1">
external
<target anonymous="1" ids="id1">
<target anonymous="1" ids="id2" refuri="http://external">
Corresponding references are linked via "refid" or resolved via "refuri"::
<paragraph>
<reference anonymous="1" refid="id1">
text
<reference anonymous="1" refuri="http://external">
external
<target anonymous="1" ids="id1">
<target anonymous="1" ids="id2" refuri="http://external">
"""
default_priority = 440
def apply(self):
anonymous_refs = []
anonymous_targets = []
for node in self.document.traverse(nodes.reference):
if node.get('anonymous'):
anonymous_refs.append(node)
for node in self.document.traverse(nodes.target):
if node.get('anonymous'):
anonymous_targets.append(node)
        if len(anonymous_refs) != len(anonymous_targets):
msg = self.document.reporter.error(
'Anonymous hyperlink mismatch: %s references but %s '
'targets.\nSee "backrefs" attribute for IDs.'
% (len(anonymous_refs), len(anonymous_targets)))
msgid = self.document.set_id(msg)
for ref in anonymous_refs:
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
return
for ref, target in zip(anonymous_refs, anonymous_targets):
target.referenced = 1
while True:
if target.hasattr('refuri'):
ref['refuri'] = target['refuri']
ref.resolved = 1
break
else:
if not target['ids']:
# Propagated target.
target = self.document.ids[target['refid']]
continue
ref['refid'] = target['ids'][0]
self.document.note_refid(ref)
break
class IndirectHyperlinks(Transform):
"""
a) Indirect external references::
<paragraph>
<reference refname="indirect external">
indirect external
<target id="id1" name="direct external"
refuri="http://indirect">
<target id="id2" name="indirect external"
refname="direct external">
The "refuri" attribute is migrated back to all indirect targets
from the final direct target (i.e. a target not referring to
another indirect target)::
<paragraph>
<reference refname="indirect external">
indirect external
<target id="id1" name="direct external"
refuri="http://indirect">
<target id="id2" name="indirect external"
refuri="http://indirect">
Once the attribute is migrated, the preexisting "refname" attribute
is dropped.
b) Indirect internal references::
<target id="id1" name="final target">
<paragraph>
<reference refname="indirect internal">
indirect internal
<target id="id2" name="indirect internal 2"
refname="final target">
<target id="id3" name="indirect internal"
refname="indirect internal 2">
Targets which indirectly refer to an internal target become one-hop
indirect (their "refid" attributes are directly set to the internal
target's "id"). References which indirectly refer to an internal
target become direct internal references::
<target id="id1" name="final target">
<paragraph>
<reference refid="id1">
indirect internal
<target id="id2" name="indirect internal 2" refid="id1">
<target id="id3" name="indirect internal" refid="id1">
"""
default_priority = 460
def apply(self):
for target in self.document.indirect_targets:
if not target.resolved:
self.resolve_indirect_target(target)
self.resolve_indirect_references(target)
def resolve_indirect_target(self, target):
refname = target.get('refname')
if refname is None:
reftarget_id = target['refid']
else:
reftarget_id = self.document.nameids.get(refname)
if not reftarget_id:
# Check the unknown_reference_resolvers
for resolver_function in \
self.document.transformer.unknown_reference_resolvers:
if resolver_function(target):
break
else:
self.nonexistent_indirect_target(target)
return
reftarget = self.document.ids[reftarget_id]
reftarget.note_referenced_by(id=reftarget_id)
if isinstance(reftarget, nodes.target) \
and not reftarget.resolved and reftarget.hasattr('refname'):
if hasattr(target, 'multiply_indirect'):
#and target.multiply_indirect):
#del target.multiply_indirect
self.circular_indirect_reference(target)
return
target.multiply_indirect = 1
self.resolve_indirect_target(reftarget) # multiply indirect
del target.multiply_indirect
if reftarget.hasattr('refuri'):
target['refuri'] = reftarget['refuri']
if 'refid' in target:
del target['refid']
elif reftarget.hasattr('refid'):
target['refid'] = reftarget['refid']
self.document.note_refid(target)
else:
if reftarget['ids']:
target['refid'] = reftarget_id
self.document.note_refid(target)
else:
self.nonexistent_indirect_target(target)
return
if refname is not None:
del target['refname']
target.resolved = 1
def nonexistent_indirect_target(self, target):
if target['refname'] in self.document.nameids:
self.indirect_target_error(target, 'which is a duplicate, and '
'cannot be used as a unique reference')
else:
self.indirect_target_error(target, 'which does not exist')
def circular_indirect_reference(self, target):
self.indirect_target_error(target, 'forming a circular reference')
def indirect_target_error(self, target, explanation):
naming = ''
reflist = []
if target['names']:
naming = '"%s" ' % target['names'][0]
for name in target['names']:
reflist.extend(self.document.refnames.get(name, []))
for id in target['ids']:
reflist.extend(self.document.refids.get(id, []))
if target['ids']:
naming += '(id="%s")' % target['ids'][0]
msg = self.document.reporter.error(
'Indirect hyperlink target %s refers to target "%s", %s.'
% (naming, target['refname'], explanation), base_node=target)
msgid = self.document.set_id(msg)
for ref in utils.uniq(reflist):
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
target.resolved = 1
def resolve_indirect_references(self, target):
if target.hasattr('refid'):
attname = 'refid'
call_method = self.document.note_refid
elif target.hasattr('refuri'):
attname = 'refuri'
call_method = None
else:
return
attval = target[attname]
for name in target['names']:
reflist = self.document.refnames.get(name, [])
if reflist:
target.note_referenced_by(name=name)
for ref in reflist:
if ref.resolved:
continue
del ref['refname']
ref[attname] = attval
if call_method:
call_method(ref)
ref.resolved = 1
if isinstance(ref, nodes.target):
self.resolve_indirect_references(ref)
for id in target['ids']:
reflist = self.document.refids.get(id, [])
if reflist:
target.note_referenced_by(id=id)
for ref in reflist:
if ref.resolved:
continue
del ref['refid']
ref[attname] = attval
if call_method:
call_method(ref)
ref.resolved = 1
if isinstance(ref, nodes.target):
self.resolve_indirect_references(ref)
class ExternalTargets(Transform):
"""
Given::
<paragraph>
<reference refname="direct external">
direct external
<target id="id1" name="direct external" refuri="http://direct">
The "refname" attribute is replaced by the direct "refuri" attribute::
<paragraph>
<reference refuri="http://direct">
direct external
<target id="id1" name="direct external" refuri="http://direct">
"""
default_priority = 640
def apply(self):
for target in self.document.traverse(nodes.target):
if target.hasattr('refuri'):
refuri = target['refuri']
for name in target['names']:
reflist = self.document.refnames.get(name, [])
if reflist:
target.note_referenced_by(name=name)
for ref in reflist:
if ref.resolved:
continue
del ref['refname']
ref['refuri'] = refuri
ref.resolved = 1
class InternalTargets(Transform):
default_priority = 660
def apply(self):
for target in self.document.traverse(nodes.target):
if not target.hasattr('refuri') and not target.hasattr('refid'):
self.resolve_reference_ids(target)
def resolve_reference_ids(self, target):
"""
Given::
<paragraph>
<reference refname="direct internal">
direct internal
<target id="id1" name="direct internal">
The "refname" attribute is replaced by "refid" linking to the target's
"id"::
<paragraph>
<reference refid="id1">
direct internal
<target id="id1" name="direct internal">
"""
for name in target['names']:
refid = self.document.nameids.get(name)
reflist = self.document.refnames.get(name, [])
if reflist:
target.note_referenced_by(name=name)
for ref in reflist:
if ref.resolved:
continue
if refid:
del ref['refname']
ref['refid'] = refid
ref.resolved = 1
class Footnotes(Transform):
"""
Assign numbers to autonumbered footnotes, and resolve links to footnotes,
citations, and their references.
Given the following ``document`` as input::
<document>
<paragraph>
                A labeled autonumbered footnote reference:
<footnote_reference auto="1" id="id1" refname="footnote">
<paragraph>
                An unlabeled autonumbered footnote reference:
<footnote_reference auto="1" id="id2">
<footnote auto="1" id="id3">
<paragraph>
Unlabeled autonumbered footnote.
<footnote auto="1" id="footnote" name="footnote">
<paragraph>
Labeled autonumbered footnote.
Auto-numbered footnotes have attribute ``auto="1"`` and no label.
Auto-numbered footnote_references have no reference text (they're
empty elements). When resolving the numbering, a ``label`` element
is added to the beginning of the ``footnote``, and reference text
to the ``footnote_reference``.
The transformed result will be::
<document>
<paragraph>
                A labeled autonumbered footnote reference:
<footnote_reference auto="1" id="id1" refid="footnote">
2
<paragraph>
                An unlabeled autonumbered footnote reference:
<footnote_reference auto="1" id="id2" refid="id3">
1
<footnote auto="1" id="id3" backrefs="id2">
<label>
1
<paragraph>
Unlabeled autonumbered footnote.
<footnote auto="1" id="footnote" name="footnote" backrefs="id1">
<label>
2
<paragraph>
Labeled autonumbered footnote.
Note that the footnotes are not in the same order as the references.
The labels and reference text are added to the auto-numbered ``footnote``
and ``footnote_reference`` elements. Footnote elements are backlinked to
their references via "refids" attributes. References are assigned "id"
and "refid" attributes.
After adding labels and reference text, the "auto" attributes can be
ignored.
"""
default_priority = 620
autofootnote_labels = None
"""Keep track of unlabeled autonumbered footnotes."""
symbols = [
# Entries 1-4 and 6 below are from section 12.51 of
# The Chicago Manual of Style, 14th edition.
'*', # asterisk/star
u'\u2020', # dagger †
u'\u2021', # double dagger ‡
u'\u00A7', # section mark §
u'\u00B6', # paragraph mark (pilcrow) ¶
# (parallels ['||'] in CMoS)
'#', # number sign
# The entries below were chosen arbitrarily.
u'\u2660', # spade suit ♠
u'\u2665', # heart suit ♥
u'\u2666', # diamond suit ♦
u'\u2663', # club suit ♣
]
def apply(self):
self.autofootnote_labels = []
startnum = self.document.autofootnote_start
self.document.autofootnote_start = self.number_footnotes(startnum)
self.number_footnote_references(startnum)
self.symbolize_footnotes()
self.resolve_footnotes_and_citations()
def number_footnotes(self, startnum):
"""
Assign numbers to autonumbered footnotes.
For labeled autonumbered footnotes, copy the number over to
corresponding footnote references.
"""
for footnote in self.document.autofootnotes:
while True:
label = str(startnum)
startnum += 1
if label not in self.document.nameids:
break
footnote.insert(0, nodes.label('', label))
for name in footnote['names']:
for ref in self.document.footnote_refs.get(name, []):
ref += nodes.Text(label)
ref.delattr('refname')
assert len(footnote['ids']) == len(ref['ids']) == 1
ref['refid'] = footnote['ids'][0]
footnote.add_backref(ref['ids'][0])
self.document.note_refid(ref)
ref.resolved = 1
if not footnote['names'] and not footnote['dupnames']:
footnote['names'].append(label)
self.document.note_explicit_target(footnote, footnote)
self.autofootnote_labels.append(label)
return startnum
def number_footnote_references(self, startnum):
"""Assign numbers to autonumbered footnote references."""
i = 0
for ref in self.document.autofootnote_refs:
if ref.resolved or ref.hasattr('refid'):
continue
try:
label = self.autofootnote_labels[i]
except IndexError:
msg = self.document.reporter.error(
'Too many autonumbered footnote references: only %s '
'corresponding footnotes available.'
% len(self.autofootnote_labels), base_node=ref)
msgid = self.document.set_id(msg)
for ref in self.document.autofootnote_refs[i:]:
if ref.resolved or ref.hasattr('refname'):
continue
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
break
ref += nodes.Text(label)
id = self.document.nameids[label]
footnote = self.document.ids[id]
ref['refid'] = id
self.document.note_refid(ref)
assert len(ref['ids']) == 1
footnote.add_backref(ref['ids'][0])
ref.resolved = 1
i += 1
def symbolize_footnotes(self):
"""Add symbols indexes to "[*]"-style footnotes and references."""
labels = []
for footnote in self.document.symbol_footnotes:
reps, index = divmod(self.document.symbol_footnote_start,
len(self.symbols))
labeltext = self.symbols[index] * (reps + 1)
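            # Example: with the 10 symbols above, symbol_footnote_start == 10
            # gives divmod(10, 10) == (1, 0), i.e. a doubled first symbol '**'.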
labels.append(labeltext)
footnote.insert(0, nodes.label('', labeltext))
self.document.symbol_footnote_start += 1
self.document.set_id(footnote)
i = 0
for ref in self.document.symbol_footnote_refs:
try:
ref += nodes.Text(labels[i])
except IndexError:
msg = self.document.reporter.error(
'Too many symbol footnote references: only %s '
'corresponding footnotes available.' % len(labels),
base_node=ref)
msgid = self.document.set_id(msg)
for ref in self.document.symbol_footnote_refs[i:]:
if ref.resolved or ref.hasattr('refid'):
continue
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
break
footnote = self.document.symbol_footnotes[i]
assert len(footnote['ids']) == 1
ref['refid'] = footnote['ids'][0]
self.document.note_refid(ref)
footnote.add_backref(ref['ids'][0])
i += 1
def resolve_footnotes_and_citations(self):
"""
Link manually-labeled footnotes and citations to/from their
references.
"""
for footnote in self.document.footnotes:
for label in footnote['names']:
if label in self.document.footnote_refs:
reflist = self.document.footnote_refs[label]
self.resolve_references(footnote, reflist)
for citation in self.document.citations:
for label in citation['names']:
if label in self.document.citation_refs:
reflist = self.document.citation_refs[label]
self.resolve_references(citation, reflist)
def resolve_references(self, note, reflist):
assert len(note['ids']) == 1
id = note['ids'][0]
for ref in reflist:
if ref.resolved:
continue
ref.delattr('refname')
ref['refid'] = id
assert len(ref['ids']) == 1
note.add_backref(ref['ids'][0])
ref.resolved = 1
note.resolved = 1
class CircularSubstitutionDefinitionError(Exception): pass
class Substitutions(Transform):
"""
Given the following ``document`` as input::
<document>
<paragraph>
The
<substitution_reference refname="biohazard">
biohazard
symbol is deservedly scary-looking.
<substitution_definition name="biohazard">
<image alt="biohazard" uri="biohazard.png">
The ``substitution_reference`` will simply be replaced by the
contents of the corresponding ``substitution_definition``.
The transformed result will be::
<document>
<paragraph>
The
<image alt="biohazard" uri="biohazard.png">
symbol is deservedly scary-looking.
<substitution_definition name="biohazard">
<image alt="biohazard" uri="biohazard.png">
"""
default_priority = 220
"""The Substitutions transform has to be applied very early, before
    `docutils.transforms.frontmatter.DocTitle` and others."""
def apply(self):
defs = self.document.substitution_defs
normed = self.document.substitution_names
subreflist = self.document.traverse(nodes.substitution_reference)
nested = {}
for ref in subreflist:
refname = ref['refname']
key = None
if refname in defs:
key = refname
else:
normed_name = refname.lower()
if normed_name in normed:
key = normed[normed_name]
if key is None:
msg = self.document.reporter.error(
'Undefined substitution referenced: "%s".'
% refname, base_node=ref)
msgid = self.document.set_id(msg)
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
else:
subdef = defs[key]
parent = ref.parent
index = parent.index(ref)
if ('ltrim' in subdef.attributes
or 'trim' in subdef.attributes):
if index > 0 and isinstance(parent[index - 1],
nodes.Text):
parent.replace(parent[index - 1],
parent[index - 1].rstrip())
if ('rtrim' in subdef.attributes
or 'trim' in subdef.attributes):
if (len(parent) > index + 1
and isinstance(parent[index + 1], nodes.Text)):
parent.replace(parent[index + 1],
parent[index + 1].lstrip())
subdef_copy = subdef.deepcopy()
try:
# Take care of nested substitution references:
for nested_ref in subdef_copy.traverse(
nodes.substitution_reference):
nested_name = normed[nested_ref['refname'].lower()]
if nested_name in nested.setdefault(nested_name, []):
raise CircularSubstitutionDefinitionError
else:
nested[nested_name].append(key)
subreflist.append(nested_ref)
except CircularSubstitutionDefinitionError:
parent = ref.parent
if isinstance(parent, nodes.substitution_definition):
msg = self.document.reporter.error(
'Circular substitution definition detected:',
nodes.literal_block(parent.rawsource,
parent.rawsource),
line=parent.line, base_node=parent)
parent.replace_self(msg)
else:
msg = self.document.reporter.error(
'Circular substitution definition referenced: "%s".'
% refname, base_node=ref)
msgid = self.document.set_id(msg)
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
else:
ref.replace_self(subdef_copy.children)
                    # register refname of the replacement node(s)
# (needed for resolution of references)
for node in subdef_copy.children:
if isinstance(node, nodes.Referential):
# HACK: verify refname attribute exists.
                            # Test with docs/dev/todo.txt; see |donate|.
if 'refname' in node:
self.document.note_refname(node)
class TargetNotes(Transform):
"""
    Creates a footnote for each external target in the text, and a
    corresponding footnote reference after each reference to that target.
"""
default_priority = 540
"""The TargetNotes transform has to be applied after `IndirectHyperlinks`
but before `Footnotes`."""
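    # Note: this transform is normally attached by the reST "target-notes"
    # directive; the pending ``startnode`` the directive leaves in the tree
    # is replaced with the generated footnotes in apply() below.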
def __init__(self, document, startnode):
Transform.__init__(self, document, startnode=startnode)
self.classes = startnode.details.get('class', [])
def apply(self):
notes = {}
nodelist = []
for target in self.document.traverse(nodes.target):
# Only external targets.
if not target.hasattr('refuri'):
continue
names = target['names']
refs = []
for name in names:
refs.extend(self.document.refnames.get(name, []))
if not refs:
continue
footnote = self.make_target_footnote(target['refuri'], refs,
notes)
if target['refuri'] not in notes:
notes[target['refuri']] = footnote
nodelist.append(footnote)
# Take care of anonymous references.
for ref in self.document.traverse(nodes.reference):
if not ref.get('anonymous'):
continue
if ref.hasattr('refuri'):
footnote = self.make_target_footnote(ref['refuri'], [ref],
notes)
if ref['refuri'] not in notes:
notes[ref['refuri']] = footnote
nodelist.append(footnote)
self.startnode.replace_self(nodelist)
def make_target_footnote(self, refuri, refs, notes):
if refuri in notes: # duplicate?
footnote = notes[refuri]
assert len(footnote['names']) == 1
footnote_name = footnote['names'][0]
else: # original
footnote = nodes.footnote()
footnote_id = self.document.set_id(footnote)
# Use uppercase letters and a colon; they can't be
# produced inside names by the parser.
footnote_name = 'TARGET_NOTE: ' + footnote_id
footnote['auto'] = 1
footnote['names'] = [footnote_name]
footnote_paragraph = nodes.paragraph()
footnote_paragraph += nodes.reference('', refuri, refuri=refuri)
footnote += footnote_paragraph
self.document.note_autofootnote(footnote)
self.document.note_explicit_target(footnote, footnote)
for ref in refs:
if isinstance(ref, nodes.target):
continue
refnode = nodes.footnote_reference(refname=footnote_name, auto=1)
refnode['classes'] += self.classes
self.document.note_autofootnote_ref(refnode)
self.document.note_footnote_ref(refnode)
index = ref.parent.index(ref) + 1
reflist = [refnode]
if not utils.get_trim_footnote_ref_space(self.document.settings):
if self.classes:
reflist.insert(0, nodes.inline(text=' ', Classes=self.classes))
else:
reflist.insert(0, nodes.Text(' '))
ref.parent.insert(index, reflist)
return footnote
class DanglingReferences(Transform):
"""
Check for dangling references (incl. footnote & citation) and for
unreferenced targets.
"""
default_priority = 850
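    # For example, a reference like `missing`_ with no matching target ends
    # up as a ``problematic`` node with an error message attached (see
    # DanglingReferencesVisitor.visit_reference below).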
def apply(self):
visitor = DanglingReferencesVisitor(
self.document,
self.document.transformer.unknown_reference_resolvers)
self.document.walk(visitor)
# *After* resolving all references, check for unreferenced
# targets:
for target in self.document.traverse(nodes.target):
if not target.referenced:
if target.get('anonymous'):
# If we have unreferenced anonymous targets, there
# is already an error message about anonymous
# hyperlink mismatch; no need to generate another
# message.
continue
if target['names']:
naming = target['names'][0]
elif target['ids']:
naming = target['ids'][0]
else:
# Hack: Propagated targets always have their refid
# attribute set.
naming = target['refid']
self.document.reporter.info(
'Hyperlink target "%s" is not referenced.'
% naming, base_node=target)
class DanglingReferencesVisitor(nodes.SparseNodeVisitor):
def __init__(self, document, unknown_reference_resolvers):
nodes.SparseNodeVisitor.__init__(self, document)
self.document = document
self.unknown_reference_resolvers = unknown_reference_resolvers
def unknown_visit(self, node):
pass
def visit_reference(self, node):
if node.resolved or not node.hasattr('refname'):
return
refname = node['refname']
id = self.document.nameids.get(refname)
if id is None:
for resolver_function in self.unknown_reference_resolvers:
if resolver_function(node):
break
else:
if refname in self.document.nameids:
msg = self.document.reporter.error(
'Duplicate target name, cannot be used as a unique '
'reference: "%s".' % (node['refname']), base_node=node)
else:
msg = self.document.reporter.error(
'Unknown target name: "%s".' % (node['refname']),
base_node=node)
msgid = self.document.set_id(msg)
prb = nodes.problematic(
node.rawsource, node.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
node.replace_self(prb)
else:
del node['refname']
node['refid'] = id
self.document.ids[id].note_referenced_by(id=id)
node.resolved = 1
visit_footnote_reference = visit_citation_reference = visit_reference
|
blazek/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsnullsymbolrenderer.py
|
45
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsnullsymbolrenderer.py
-----------------------------
Date : April 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'April 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize
from qgis.core import (QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsNullSymbolRenderer)
from qgis.testing import start_app, unittest
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
# Convenience instances in case you need them; not used in this test.
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsNullSymbolRenderer(unittest.TestCase):
def setUp(self):
self.iface = get_iface()
myShpFile = os.path.join(TEST_DATA_DIR, 'polys.shp')
self.layer = QgsVectorLayer(myShpFile, 'Polys', 'ogr')
QgsProject.instance().addMapLayer(self.layer)
self.renderer = QgsNullSymbolRenderer()
self.layer.setRenderer(self.renderer)
rendered_layers = [self.layer]
self.mapsettings = self.iface.mapCanvas().mapSettings()
self.mapsettings.setOutputSize(QSize(400, 400))
self.mapsettings.setOutputDpi(96)
self.mapsettings.setExtent(QgsRectangle(-163, 22, -70, 52))
self.mapsettings.setLayers(rendered_layers)
def tearDown(self):
QgsProject.instance().removeAllMapLayers()
def testRender(self):
# test no features are rendered
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('null_renderer')
renderchecker.setControlName('expected_nullrenderer_render')
result = renderchecker.runTest('nullrenderer_render')
assert result
def testSelected(self):
# select a feature and render
self.layer.select([1, 2, 3])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('null_renderer')
renderchecker.setControlName('expected_nullrenderer_selected')
result = renderchecker.runTest('nullrenderer_selected')
assert result
if __name__ == '__main__':
unittest.main()
|
tarzan0820/odoo
|
refs/heads/8.0
|
addons/mrp_repair/wizard/__init__.py
|
445
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cancel_repair
import make_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
LLNL/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/py-google-auth-oauthlib/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyGoogleAuthOauthlib(PythonPackage):
"""This library provides oauthlib integration with google-auth."""
homepage = "https://github.com/googleapis/google-auth-library-python-oauthlib"
url = "https://pypi.io/packages/source/g/google-auth-oauthlib/google-auth-oauthlib-0.4.1.tar.gz"
version('0.4.1', sha256='88d2cd115e3391eb85e1243ac6902e76e77c5fe438b7276b297fbe68015458dd')
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-google-auth', type=('build', 'run'))
depends_on('py-requests-oauthlib@0.7.0:', type=('build', 'run'))
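    # Usage sketch (assuming this file lives in Spack's builtin repo):
    #   $ spack install py-google-auth-oauthlib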
|
tqtran7/horizon
|
refs/heads/master
|
openstack_dashboard/test/integration_tests/helpers.py
|
43
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
import traceback
import uuid
import testtools
import xvfbwrapper
from openstack_dashboard.test.integration_tests import config
from openstack_dashboard.test.integration_tests.pages import loginpage
from openstack_dashboard.test.integration_tests import webdriver
def gen_random_resource_name(resource="", timestamp=True):
"""Generate random resource name using uuid and timestamp.
Input fields are usually limited to 255 or 80 characters hence their
provide enough space for quite long resource names, but it might be
the case that maximum field length is quite restricted, it is then
necessary to consider using shorter resource argument or avoid using
timestamp by setting timestamp argument to False.
"""
fields = ["horizon"]
if resource:
fields.append(resource)
if timestamp:
tstamp = time.strftime("%d-%m-%H-%M-%S")
fields.append(tstamp)
fields.append(str(uuid.uuid4()).replace("-", ""))
return "_".join(fields)
class BaseTestCase(testtools.TestCase):
CONFIG = config.get_config()
def setUp(self):
if os.environ.get('INTEGRATION_TESTS', False):
# Start a virtual display server for running the tests headless.
if os.environ.get('SELENIUM_HEADLESS', False):
self.vdisplay = xvfbwrapper.Xvfb(width=1280, height=720)
# workaround for memory leak in Xvfb taken from: http://blog.
# jeffterrace.com/2012/07/xvfb-memory-leak-workaround.html
self.vdisplay.xvfb_cmd.append("-noreset")
# disables X access control
self.vdisplay.xvfb_cmd.append("-ac")
self.vdisplay.start()
# Start the Selenium webdriver and setup configuration.
self.driver = webdriver.WebDriverWrapper()
self.driver.maximize_window()
self.driver.implicitly_wait(self.CONFIG.selenium.implicit_wait)
self.driver.set_page_load_timeout(
self.CONFIG.selenium.page_timeout)
self.addOnException(self._dump_page_html_source)
else:
msg = "The INTEGRATION_TESTS env variable is not set."
raise self.skipException(msg)
super(BaseTestCase, self).setUp()
def _dump_page_html_source(self, exc_info):
content = None
try:
pg_source = self._get_page_html_source()
content = testtools.content.Content(
testtools.content_type.ContentType('text', 'html'),
lambda: pg_source)
except Exception:
exc_traceback = traceback.format_exc()
content = testtools.content.text_content(exc_traceback)
finally:
self.addDetail("PageHTMLSource.html", content)
def _get_page_html_source(self):
"""Gets html page source.
self.driver.page_source is not used on purpose because it does not
display html code generated/changed by javascript.
"""
html_elem = self.driver.find_element_by_tag_name("html")
return html_elem.get_attribute("innerHTML").encode("UTF-8")
def tearDown(self):
if os.environ.get('INTEGRATION_TESTS', False):
self.driver.quit()
if hasattr(self, 'vdisplay'):
self.vdisplay.stop()
super(BaseTestCase, self).tearDown()
class TestCase(BaseTestCase):
TEST_USER_NAME = BaseTestCase.CONFIG.identity.username
TEST_PASSWORD = BaseTestCase.CONFIG.identity.password
def setUp(self):
super(TestCase, self).setUp()
self.login_pg = loginpage.LoginPage(self.driver, self.CONFIG)
self.login_pg.go_to_login_page()
self.home_pg = self.login_pg.login(self.TEST_USER_NAME,
self.TEST_PASSWORD)
def tearDown(self):
try:
if self.home_pg.is_logged_in:
self.home_pg.go_to_home_page()
self.home_pg.log_out()
finally:
super(TestCase, self).tearDown()
class AdminTestCase(TestCase):
TEST_USER_NAME = TestCase.CONFIG.identity.admin_username
TEST_PASSWORD = TestCase.CONFIG.identity.admin_password
|