repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
Kast0rTr0y/ansible
refs/heads/devel
lib/ansible/utils/module_docs_fragments/openswitch.py
166
# # (c) 2015, Peter Sprygada <psprygada@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. class ModuleDocFragment(object): # Standard files documentation fragment DOCUMENTATION = """ options: host: description: - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport. Note this argument does not affect the SSH argument. required: true port: description: - Specifies the port to use when building the connection to the remote device. This value applies to either I(cli) or I(rest). The port value will default to the appropriate transport common port if none is provided in the task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport. required: false default: 0 (use common port) username: description: - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate either the CLI login or the eAPI authentication depending on which transport is used. Note this argument does not affect the SSH transport. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. required: false password: description: - Specifies the password to use to authenticate the connection to the remote device. 
This is a common argument used for either I(cli) or I(rest) transports. Note this argument does not affect the SSH transport. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. required: false default: null timeout: description: - Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands. If the timeout is exceeded before the operation is completed, the module will error. require: false default: 10 ssh_keyfile: description: - Specifies the SSH key to use to authenticate the connection to the remote device. This argument is only used for the I(cli) transports. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. required: false transport: description: - Configures the transport connection to use when connecting to the remote device. The transport argument supports connectivity to the device over ssh, cli or REST. required: true default: ssh choices: ['ssh', 'cli', 'rest'] use_ssl: description: - Configures the I(transport) to use SSL if set to true only when the I(transport) argument is configured as rest. If the transport argument is not I(rest), this value is ignored. required: false default: yes choices: ['yes', 'no'] provider: description: - Convenience method that allows all I(openswitch) arguments to be passed as a dict object. All constraints (required, choices, etc) must be met either by individual arguments or values in this dict. required: false default: null """
alec-heif/MIT-Thesis
refs/heads/master
spark-bin/examples/src/main/python/mllib/standard_scaler_example.py
95
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function from pyspark import SparkContext # $example on$ from pyspark.mllib.feature import StandardScaler, StandardScalerModel from pyspark.mllib.linalg import Vectors from pyspark.mllib.util import MLUtils # $example off$ if __name__ == "__main__": sc = SparkContext(appName="StandardScalerExample") # SparkContext # $example on$ data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt") label = data.map(lambda x: x.label) features = data.map(lambda x: x.features) scaler1 = StandardScaler().fit(features) scaler2 = StandardScaler(withMean=True, withStd=True).fit(features) # data1 will be unit variance. data1 = label.zip(scaler1.transform(features)) # data2 will be unit variance and zero mean. data2 = label.zip(scaler2.transform(features.map(lambda x: Vectors.dense(x.toArray())))) # $example off$ print("data1:") for each in data1.collect(): print(each) print("data2:") for each in data2.collect(): print(each) sc.stop()
Jannes123/django-oscar
refs/heads/master
src/oscar/apps/dashboard/promotions/forms.py
22
from django import forms from django.conf import settings from django.forms.models import inlineformset_factory from django.utils.translation import ugettext_lazy as _ from oscar.apps.promotions.conf import PROMOTION_CLASSES from oscar.core.loading import get_class, get_classes from oscar.forms.fields import ExtendedURLField HandPickedProductList, RawHTML, SingleProduct, PagePromotion, OrderedProduct \ = get_classes('promotions.models', ['HandPickedProductList', 'RawHTML', 'SingleProduct', 'PagePromotion', 'OrderedProduct']) ProductSelect = get_class('dashboard.catalogue.widgets', 'ProductSelect') class PromotionTypeSelectForm(forms.Form): choices = [] for klass in PROMOTION_CLASSES: choices.append((klass.classname(), klass._meta.verbose_name)) promotion_type = forms.ChoiceField(choices=tuple(choices), label=_("Promotion type")) class RawHTMLForm(forms.ModelForm): class Meta: model = RawHTML fields = ['name', 'body'] def __init__(self, *args, **kwargs): super(RawHTMLForm, self).__init__(*args, **kwargs) self.fields['body'].widget.attrs['class'] = "no-widget-init" class SingleProductForm(forms.ModelForm): class Meta: model = SingleProduct fields = ['name', 'product', 'description'] widgets = {'product': ProductSelect} def __init__(self, *args, **kwargs): super(SingleProductForm, self).__init__(*args, **kwargs) self.fields['product'].widget.attrs['class'] = "select2 input-xlarge" class HandPickedProductListForm(forms.ModelForm): class Meta: model = HandPickedProductList fields = ['name', 'description', 'link_url', 'link_text'] class OrderedProductForm(forms.ModelForm): class Meta: model = OrderedProduct fields = ['list', 'product', 'display_order'] widgets = { 'product': ProductSelect, } def __init__(self, *args, **kwargs): super(OrderedProductForm, self).__init__(*args, **kwargs) self.fields['product'].widget.attrs['class'] = "select2 input-xlarge" OrderedProductFormSet = inlineformset_factory( HandPickedProductList, OrderedProduct, form=OrderedProductForm, extra=2) 
class PagePromotionForm(forms.ModelForm): page_url = ExtendedURLField(label=_("URL"), verify_exists=True) position = forms.CharField( widget=forms.Select(choices=settings.OSCAR_PROMOTION_POSITIONS), label=_("Position"), help_text=_("Where in the page this content block will appear")) class Meta: model = PagePromotion fields = ['position', 'page_url'] def clean_page_url(self): page_url = self.cleaned_data.get('page_url') if not page_url: return page_url if page_url.startswith('http'): raise forms.ValidationError( _("Content blocks can only be linked to internal URLs")) if page_url.startswith('/') and not page_url.endswith('/'): page_url += '/' return page_url
probablytom/tomwallis.net
refs/heads/master
venv/lib/python2.7/site-packages/django/contrib/gis/geoip/tests.py
57
# -*- coding: utf-8 -*- from __future__ import unicode_literals import os import unittest from unittest import skipUnless from django.conf import settings from django.contrib.gis.geos import HAS_GEOS from django.contrib.gis.geoip import HAS_GEOIP from django.utils import six if HAS_GEOIP: from . import GeoIP, GeoIPException if HAS_GEOS: from ..geos import GEOSGeometry # Note: Requires use of both the GeoIP country and city datasets. # The GEOIP_DATA path should be the only setting set (the directory # should contain links or the actual database files 'GeoIP.dat' and # 'GeoLiteCity.dat'. @skipUnless(HAS_GEOIP and getattr(settings, "GEOIP_PATH", None), "GeoIP is required along with the GEOIP_PATH setting.") class GeoIPTest(unittest.TestCase): def test01_init(self): "Testing GeoIP initialization." g1 = GeoIP() # Everything inferred from GeoIP path path = settings.GEOIP_PATH g2 = GeoIP(path, 0) # Passing in data path explicitly. g3 = GeoIP.open(path, 0) # MaxMind Python API syntax. for g in (g1, g2, g3): self.assertEqual(True, bool(g._country)) self.assertEqual(True, bool(g._city)) # Only passing in the location of one database. city = os.path.join(path, 'GeoLiteCity.dat') cntry = os.path.join(path, 'GeoIP.dat') g4 = GeoIP(city, country='') self.assertEqual(None, g4._country) g5 = GeoIP(cntry, city='') self.assertEqual(None, g5._city) # Improper parameters. bad_params = (23, 'foo', 15.23) for bad in bad_params: self.assertRaises(GeoIPException, GeoIP, cache=bad) if isinstance(bad, six.string_types): e = GeoIPException else: e = TypeError self.assertRaises(e, GeoIP, bad, 0) def test02_bad_query(self): "Testing GeoIP query parameter checking." cntry_g = GeoIP(city='<foo>') # No city database available, these calls should fail. 
self.assertRaises(GeoIPException, cntry_g.city, 'google.com') self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com') # Non-string query should raise TypeError self.assertRaises(TypeError, cntry_g.country_code, 17) self.assertRaises(TypeError, cntry_g.country_name, GeoIP) def test03_country(self): "Testing GeoIP country querying methods." g = GeoIP(city='<foo>') fqdn = 'www.google.com' addr = '12.215.42.19' for query in (fqdn, addr): for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name): self.assertEqual('US', func(query)) for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name): self.assertEqual('United States', func(query)) self.assertEqual({'country_code': 'US', 'country_name': 'United States'}, g.country(query)) @skipUnless(HAS_GEOS, "Geos is required") def test04_city(self): "Testing GeoIP city querying methods." g = GeoIP(country='<foo>') addr = '128.249.1.1' fqdn = 'tmc.edu' for query in (fqdn, addr): # Country queries should still work. for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name): self.assertEqual('US', func(query)) for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name): self.assertEqual('United States', func(query)) self.assertEqual({'country_code': 'US', 'country_name': 'United States'}, g.country(query)) # City information dictionary. d = g.city(query) self.assertEqual('USA', d['country_code3']) self.assertEqual('Houston', d['city']) self.assertEqual('TX', d['region']) self.assertEqual(713, d['area_code']) geom = g.geos(query) self.assertTrue(isinstance(geom, GEOSGeometry)) lon, lat = (-95.4010, 29.7079) lat_lon = g.lat_lon(query) lat_lon = (lat_lon[1], lat_lon[0]) for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon): self.assertAlmostEqual(lon, tup[0], 4) self.assertAlmostEqual(lat, tup[1], 4) def test05_unicode_response(self): "Testing that GeoIP strings are properly encoded, see #16553." 
g = GeoIP() d = g.city("www.osnabrueck.de") self.assertEqual('Osnabrück', d['city']) d = g.country('200.7.49.81') self.assertEqual('Curaçao', d['country_name'])
nhippenmeyer/django
refs/heads/master
tests/template_tests/filter_tests/test_timeuntil.py
207
from __future__ import unicode_literals from datetime import datetime, timedelta from django.template.defaultfilters import timeuntil_filter from django.test import SimpleTestCase from django.test.utils import requires_tz_support from ..utils import setup from .timezone_utils import TimezoneTestCase class TimeuntilTests(TimezoneTestCase): # Default compare with datetime.now() @setup({'timeuntil01': '{{ a|timeuntil }}'}) def test_timeuntil01(self): output = self.engine.render_to_string('timeuntil01', {'a': datetime.now() + timedelta(minutes=2, seconds=10)}) self.assertEqual(output, '2\xa0minutes') @setup({'timeuntil02': '{{ a|timeuntil }}'}) def test_timeuntil02(self): output = self.engine.render_to_string('timeuntil02', {'a': (datetime.now() + timedelta(days=1, seconds=10))}) self.assertEqual(output, '1\xa0day') @setup({'timeuntil03': '{{ a|timeuntil }}'}) def test_timeuntil03(self): output = self.engine.render_to_string('timeuntil03', {'a': (datetime.now() + timedelta(hours=8, minutes=10, seconds=10))}) self.assertEqual(output, '8\xa0hours, 10\xa0minutes') # Compare to a given parameter @setup({'timeuntil04': '{{ a|timeuntil:b }}'}) def test_timeuntil04(self): output = self.engine.render_to_string( 'timeuntil04', {'a': self.now - timedelta(days=1), 'b': self.now - timedelta(days=2)}, ) self.assertEqual(output, '1\xa0day') @setup({'timeuntil05': '{{ a|timeuntil:b }}'}) def test_timeuntil05(self): output = self.engine.render_to_string( 'timeuntil05', {'a': self.now - timedelta(days=2), 'b': self.now - timedelta(days=2, minutes=1)}, ) self.assertEqual(output, '1\xa0minute') # Regression for #7443 @setup({'timeuntil06': '{{ earlier|timeuntil }}'}) def test_timeuntil06(self): output = self.engine.render_to_string('timeuntil06', {'earlier': self.now - timedelta(days=7)}) self.assertEqual(output, '0\xa0minutes') @setup({'timeuntil07': '{{ earlier|timeuntil:now }}'}) def test_timeuntil07(self): output = self.engine.render_to_string('timeuntil07', {'now': self.now, 
'earlier': self.now - timedelta(days=7)}) self.assertEqual(output, '0\xa0minutes') @setup({'timeuntil08': '{{ later|timeuntil }}'}) def test_timeuntil08(self): output = self.engine.render_to_string('timeuntil08', {'later': self.now + timedelta(days=7, hours=1)}) self.assertEqual(output, '1\xa0week') @setup({'timeuntil09': '{{ later|timeuntil:now }}'}) def test_timeuntil09(self): output = self.engine.render_to_string('timeuntil09', {'now': self.now, 'later': self.now + timedelta(days=7)}) self.assertEqual(output, '1\xa0week') # Ensures that differing timezones are calculated correctly. @requires_tz_support @setup({'timeuntil10': '{{ a|timeuntil }}'}) def test_timeuntil10(self): output = self.engine.render_to_string('timeuntil10', {'a': self.now_tz}) self.assertEqual(output, '0\xa0minutes') @requires_tz_support @setup({'timeuntil11': '{{ a|timeuntil }}'}) def test_timeuntil11(self): output = self.engine.render_to_string('timeuntil11', {'a': self.now_tz_i}) self.assertEqual(output, '0\xa0minutes') @setup({'timeuntil12': '{{ a|timeuntil:b }}'}) def test_timeuntil12(self): output = self.engine.render_to_string('timeuntil12', {'a': self.now_tz_i, 'b': self.now_tz}) self.assertEqual(output, '0\xa0minutes') # Regression for #9065 (two date objects). @setup({'timeuntil13': '{{ a|timeuntil:b }}'}) def test_timeuntil13(self): output = self.engine.render_to_string('timeuntil13', {'a': self.today, 'b': self.today}) self.assertEqual(output, '0\xa0minutes') @setup({'timeuntil14': '{{ a|timeuntil:b }}'}) def test_timeuntil14(self): output = self.engine.render_to_string('timeuntil14', {'a': self.today, 'b': self.today - timedelta(hours=24)}) self.assertEqual(output, '1\xa0day') class FunctionTests(SimpleTestCase): def test_until_now(self): self.assertEqual(timeuntil_filter(datetime.now() + timedelta(1, 1)), '1\xa0day') def test_explicit_date(self): self.assertEqual(timeuntil_filter(datetime(2005, 12, 30), datetime(2005, 12, 29)), '1\xa0day')
Slezhuk/ansible
refs/heads/devel
lib/ansible/utils/module_docs_fragments/ios.py
101
# # (c) 2015, Peter Sprygada <psprygada@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. class ModuleDocFragment(object): # Standard files documentation fragment DOCUMENTATION = """ options: authorize: description: - Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be used instead. default: no choices: ['yes', 'no'] auth_pass: description: - Specifies the password to use if required to enter privileged mode on the remote device. If I(authorize) is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead. default: none provider: description: - A dict object containing connection details. default: null suboptions: host: description: - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport. required: true port: description: - Specifies the port to use when building the connection to the remote device. 
default: 22 username: description: - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate the SSH session. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. password: description: - Specifies the password to use to authenticate the connection to the remote device. This value is used to authenticate the SSH session. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. default: null timeout: description: - Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands. If the timeout is exceeded before the operation is completed, the module will error. default: 10 ssh_keyfile: description: - Specifies the SSH key to use to authenticate the connection to the remote device. This value is the path to the key used to authenticate the SSH session. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. authorize: description: - Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be used instead. default: no choices: ['yes', 'no'] auth_pass: description: - Specifies the password to use if required to enter privileged mode on the remote device. If I(authorize) is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead. default: none """
ktnyt/chainer
refs/heads/master
chainer/functions/math/fft.py
2
from chainer import backend from chainer import function_node from chainer.utils import type_check class FFT(function_node.FunctionNode): """Fast Fourier transform.""" def __init__(self, method): self._method = method def check_type_forward(self, in_types): type_check._argname(in_types, ('real', 'imag')) r_type, i_type = in_types type_check.expect( r_type.dtype.kind == 'f', r_type.ndim > 0, r_type.shape == i_type.shape, r_type.dtype == i_type.dtype, ) def forward(self, inputs): xp = backend.get_array_module(*inputs) real, imag = inputs x = real + imag * 1j y = getattr(xp.fft, self._method)(x) real_y = y.real.astype(real.dtype, copy=False) imag_y = y.imag.astype(imag.dtype, copy=False) return real_y, imag_y def backward(self, inputs, grads): gr, gi = grads xp = backend.get_array_module(*grads) if gr is None: gr = xp.zeros_like(gi.data) if gi is None: gi = xp.zeros_like(gr.data) gxi, gxr = FFT(self._method).apply((gi, gr)) return gxr, gxi def fft(x): """Fast Fourier transform. Args: x (tuple): ``(real, imag)`` where ``real`` is a :class:`~chainer.Variable` storing the real part and ``imag`` is a :class:`~chainer.Variable` storing the imaginary part. Returns: tuple: Returns ``(ry, ri)`` where ``ry`` is the real part of the result and ``ri`` is the imaginary part of the result. .. note:: Currently this function supports a tuple as input. It will support a complex numbers directly in the future. """ real, imag = x return FFT('fft').apply((real, imag)) def ifft(x): """Inverse fast Fourier transform. Args: x (tuple): ``(real, imag)`` where ``real`` is a :class:`~chainer.Variable` storing the real part and ``imag`` is a :class:`~chainer.Variable` storing the imaginary part. Returns: tuple: Returns ``(ry, ri)`` where ``ry`` is the real part of the result and ``ri`` is the imaginary part of the result. .. note:: Currently this function supports a tuple as input. It will support a complex numbers directly in the future. """ real, imag = x return FFT('ifft').apply((real, imag))
midma101/AndIWasJustGoingToBed
refs/heads/master
.venv/lib/python2.7/site-packages/pip/commands/install.py
187
from __future__ import absolute_import import logging import operator import os import tempfile import shutil import warnings try: import wheel except ImportError: wheel = None from pip.req import RequirementSet from pip.basecommand import RequirementCommand from pip.locations import virtualenv_no_global, distutils_scheme from pip.index import PackageFinder from pip.exceptions import ( InstallationError, CommandError, PreviousBuildDirError, ) from pip import cmdoptions from pip.utils import ensure_dir from pip.utils.build import BuildDirectory from pip.utils.deprecation import RemovedInPip8Warning from pip.utils.filesystem import check_path_owner from pip.wheel import WheelCache, WheelBuilder logger = logging.getLogger(__name__) class InstallCommand(RequirementCommand): """ Install packages from: - PyPI (and other indexes) using requirement specifiers. - VCS project urls. - Local project directories. - Local or remote source archives. pip also supports installing from "requirements files", which provide an easy way to specify a whole environment to be installed. """ name = 'install' usage = """ %prog [options] <requirement specifier> [package-index-options] ... %prog [options] -r <requirements file> [package-index-options] ... %prog [options] [-e] <vcs project url> ... %prog [options] [-e] <local project path> ... %prog [options] <archive url/path> ...""" summary = 'Install packages.' def __init__(self, *args, **kw): super(InstallCommand, self).__init__(*args, **kw) cmd_opts = self.cmd_opts cmd_opts.add_option(cmdoptions.constraints()) cmd_opts.add_option(cmdoptions.editable()) cmd_opts.add_option(cmdoptions.requirements()) cmd_opts.add_option(cmdoptions.build_dir()) cmd_opts.add_option( '-t', '--target', dest='target_dir', metavar='dir', default=None, help='Install packages into <dir>. ' 'By default this will not replace existing files/folders in ' '<dir>. Use --upgrade to replace existing packages in <dir> ' 'with new versions.' 
) cmd_opts.add_option( '-d', '--download', '--download-dir', '--download-directory', dest='download_dir', metavar='dir', default=None, help=("Download packages into <dir> instead of installing them, " "regardless of what's already installed."), ) cmd_opts.add_option(cmdoptions.download_cache()) cmd_opts.add_option(cmdoptions.src()) cmd_opts.add_option( '-U', '--upgrade', dest='upgrade', action='store_true', help='Upgrade all specified packages to the newest available ' 'version. This process is recursive regardless of whether ' 'a dependency is already satisfied.' ) cmd_opts.add_option( '--force-reinstall', dest='force_reinstall', action='store_true', help='When upgrading, reinstall all packages even if they are ' 'already up-to-date.') cmd_opts.add_option( '-I', '--ignore-installed', dest='ignore_installed', action='store_true', help='Ignore the installed packages (reinstalling instead).') cmd_opts.add_option(cmdoptions.no_deps()) cmd_opts.add_option(cmdoptions.install_options()) cmd_opts.add_option(cmdoptions.global_options()) cmd_opts.add_option( '--user', dest='use_user_site', action='store_true', help="Install to the Python user install directory for your " "platform. Typically ~/.local/, or %APPDATA%\Python on " "Windows. (See the Python documentation for site.USER_BASE " "for full details.)") cmd_opts.add_option( '--egg', dest='as_egg', action='store_true', help="Install packages as eggs, not 'flat', like pip normally " "does. This option is not about installing *from* eggs. 
" "(WARNING: Because this option overrides pip's normal install" " logic, requirements files may not behave as expected.)") cmd_opts.add_option( '--root', dest='root_path', metavar='dir', default=None, help="Install everything relative to this alternate root " "directory.") cmd_opts.add_option( "--compile", action="store_true", dest="compile", default=True, help="Compile py files to pyc", ) cmd_opts.add_option( "--no-compile", action="store_false", dest="compile", help="Do not compile py files to pyc", ) cmd_opts.add_option(cmdoptions.use_wheel()) cmd_opts.add_option(cmdoptions.no_use_wheel()) cmd_opts.add_option(cmdoptions.no_binary()) cmd_opts.add_option(cmdoptions.only_binary()) cmd_opts.add_option( '--pre', action='store_true', default=False, help="Include pre-release and development versions. By default, " "pip only finds stable versions.") cmd_opts.add_option(cmdoptions.no_clean()) index_opts = cmdoptions.make_option_group( cmdoptions.index_group, self.parser, ) self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, cmd_opts) def _build_package_finder(self, options, index_urls, session): """ Create a package finder appropriate to this install command. This method is meant to be overridden by subclasses, not called directly. 
""" return PackageFinder( find_links=options.find_links, format_control=options.format_control, index_urls=index_urls, allow_external=options.allow_external, allow_unverified=options.allow_unverified, allow_all_external=options.allow_all_external, trusted_hosts=options.trusted_hosts, allow_all_prereleases=options.pre, process_dependency_links=options.process_dependency_links, session=session, ) def run(self, options, args): cmdoptions.resolve_wheel_no_use_binary(options) cmdoptions.check_install_build_global(options) if options.download_dir: options.ignore_installed = True if options.build_dir: options.build_dir = os.path.abspath(options.build_dir) options.src_dir = os.path.abspath(options.src_dir) install_options = options.install_options or [] if options.use_user_site: if virtualenv_no_global(): raise InstallationError( "Can not perform a '--user' install. User site-packages " "are not visible in this virtualenv." ) install_options.append('--user') install_options.append('--prefix=') temp_target_dir = None if options.target_dir: options.ignore_installed = True temp_target_dir = tempfile.mkdtemp() options.target_dir = os.path.abspath(options.target_dir) if (os.path.exists(options.target_dir) and not os.path.isdir(options.target_dir)): raise CommandError( "Target path exists but is not a directory, will not " "continue." ) install_options.append('--home=' + temp_target_dir) global_options = options.global_options or [] index_urls = [options.index_url] + options.extra_index_urls if options.no_index: logger.info('Ignoring indexes: %s', ','.join(index_urls)) index_urls = [] if options.download_cache: warnings.warn( "--download-cache has been deprecated and will be removed in " "the future. 
Pip now automatically uses and configures its " "cache.", RemovedInPip8Warning, ) with self._build_session(options) as session: finder = self._build_package_finder(options, index_urls, session) build_delete = (not (options.no_clean or options.build_dir)) wheel_cache = WheelCache(options.cache_dir, options.format_control) if options.cache_dir and not check_path_owner(options.cache_dir): logger.warning( "The directory '%s' or its parent directory is not owned " "by the current user and caching wheels has been " "disabled. check the permissions and owner of that " "directory. If executing pip with sudo, you may want " "sudo's -H flag.", options.cache_dir, ) options.cache_dir = None with BuildDirectory(options.build_dir, delete=build_delete) as build_dir: requirement_set = RequirementSet( build_dir=build_dir, src_dir=options.src_dir, download_dir=options.download_dir, upgrade=options.upgrade, as_egg=options.as_egg, ignore_installed=options.ignore_installed, ignore_dependencies=options.ignore_dependencies, force_reinstall=options.force_reinstall, use_user_site=options.use_user_site, target_dir=temp_target_dir, session=session, pycompile=options.compile, isolated=options.isolated_mode, wheel_cache=wheel_cache, ) self.populate_requirement_set( requirement_set, args, options, finder, session, self.name, wheel_cache ) if not requirement_set.has_requirements: return try: if (options.download_dir or not wheel or not options.cache_dir): # on -d don't do complex things like building # wheels, and don't try to build wheels when wheel is # not installed. requirement_set.prepare_files(finder) else: # build wheels before install. wb = WheelBuilder( requirement_set, finder, build_options=[], global_options=[], ) # Ignore the result: a failed wheel will be # installed from the sdist/vcs whatever. 
wb.build(autobuilding=True) if not options.download_dir: requirement_set.install( install_options, global_options, root=options.root_path, ) reqs = sorted( requirement_set.successfully_installed, key=operator.attrgetter('name')) items = [] for req in reqs: item = req.name try: if hasattr(req, 'installed_version'): if req.installed_version: item += '-' + req.installed_version except Exception: pass items.append(item) installed = ' '.join(items) if installed: logger.info('Successfully installed %s', installed) else: downloaded = ' '.join([ req.name for req in requirement_set.successfully_downloaded ]) if downloaded: logger.info( 'Successfully downloaded %s', downloaded ) except PreviousBuildDirError: options.no_clean = True raise finally: # Clean up if not options.no_clean: requirement_set.cleanup_files() if options.target_dir: ensure_dir(options.target_dir) lib_dir = distutils_scheme('', home=temp_target_dir)['purelib'] for item in os.listdir(lib_dir): target_item_dir = os.path.join(options.target_dir, item) if os.path.exists(target_item_dir): if not options.upgrade: logger.warning( 'Target directory %s already exists. Specify ' '--upgrade to force replacement.', target_item_dir ) continue if os.path.islink(target_item_dir): logger.warning( 'Target directory %s already exists and is ' 'a link. Pip will not automatically replace ' 'links, please remove if replacement is ' 'desired.', target_item_dir ) continue if os.path.isdir(target_item_dir): shutil.rmtree(target_item_dir) else: os.remove(target_item_dir) shutil.move( os.path.join(lib_dir, item), target_item_dir ) shutil.rmtree(temp_target_dir) return requirement_set
brian-l/django-1.4.10
refs/heads/master
tests/regressiontests/csrf_tests/tests.py
10
# -*- coding: utf-8 -*- from __future__ import with_statement from django.conf import settings from django.core.context_processors import csrf from django.http import HttpRequest, HttpResponse from django.middleware.csrf import CsrfViewMiddleware, CSRF_KEY_LENGTH from django.template import RequestContext, Template from django.test import TestCase from django.test.utils import override_settings from django.views.decorators.csrf import csrf_exempt, requires_csrf_token, ensure_csrf_cookie # Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests def post_form_response(): resp = HttpResponse(content=u""" <html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html> """, mimetype="text/html") return resp def post_form_view(request): """A view that returns a POST form (without a token)""" return post_form_response() # Response/views used for template tag tests def token_view(request): """A view that uses {% csrf_token %}""" context = RequestContext(request, processors=[csrf]) template = Template("{% csrf_token %}") return HttpResponse(template.render(context)) def non_token_view_using_request_processor(request): """ A view that doesn't use the token, but does use the csrf view processor. """ context = RequestContext(request, processors=[csrf]) template = Template("") return HttpResponse(template.render(context)) class TestingHttpRequest(HttpRequest): """ A version of HttpRequest that allows us to change some things more easily """ def is_secure(self): return getattr(self, '_is_secure_override', False) class CsrfViewMiddlewareTest(TestCase): # The csrf token is potentially from an untrusted source, so could have # characters that need dealing with. 
_csrf_id_cookie = "<1>\xc2\xa1" _csrf_id = "1" def _get_GET_no_csrf_cookie_request(self): return TestingHttpRequest() def _get_GET_csrf_cookie_request(self): req = TestingHttpRequest() req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie return req def _get_POST_csrf_cookie_request(self): req = self._get_GET_csrf_cookie_request() req.method = "POST" return req def _get_POST_no_csrf_cookie_request(self): req = self._get_GET_no_csrf_cookie_request() req.method = "POST" return req def _get_POST_request_with_token(self): req = self._get_POST_csrf_cookie_request() req.POST['csrfmiddlewaretoken'] = self._csrf_id return req def _check_token_present(self, response, csrf_id=None): self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id)) def test_process_view_token_too_long(self): """ Check that if the token is longer than expected, it is ignored and a new token is created. """ req = self._get_GET_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000 CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False) self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH) def test_process_response_get_token_used(self): """ When get_token is used, check that the cookie is created and headers patched. 
""" req = self._get_GET_no_csrf_cookie_request() # Put tests for CSRF_COOKIE_* settings here with self.settings(CSRF_COOKIE_NAME='myname', CSRF_COOKIE_DOMAIN='.example.com', CSRF_COOKIE_PATH='/test/', CSRF_COOKIE_SECURE=True): # token_view calls get_token() indirectly CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get('myname', False) self.assertNotEqual(csrf_cookie, False) self.assertEqual(csrf_cookie['domain'], '.example.com') self.assertEqual(csrf_cookie['secure'], True) self.assertEqual(csrf_cookie['path'], '/test/') self.assertTrue('Cookie' in resp2.get('Vary','')) def test_process_response_get_token_not_used(self): """ Check that if get_token() is not called, the view middleware does not add a cookie. """ # This is important to make pages cacheable. Pages which do call # get_token(), assuming they use the token, are not cacheable because # the token is specific to the user req = self._get_GET_no_csrf_cookie_request() # non_token_view_using_request_processor does not call get_token(), but # does use the csrf request processor. By using this, we are testing # that the view processor is properly lazy and doesn't call get_token() # until needed. CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {}) resp = non_token_view_using_request_processor(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False) self.assertEqual(csrf_cookie, False) # Check the request processing def test_process_request_no_csrf_cookie(self): """ Check that if no CSRF cookies is present, the middleware rejects the incoming request. This will stop login CSRF. 
""" req = self._get_POST_no_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) def test_process_request_csrf_cookie_no_token(self): """ Check that if a CSRF cookie is present but no token, the middleware rejects the incoming request. """ req = self._get_POST_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) def test_process_request_csrf_cookie_and_token(self): """ Check that if both a cookie and a token is present, the middleware lets it through. """ req = self._get_POST_request_with_token() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(None, req2) def test_process_request_csrf_cookie_no_token_exempt_view(self): """ Check that if a CSRF cookie is present and no token, but the csrf_exempt decorator has been applied to the view, the middleware lets it through """ req = self._get_POST_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {}) self.assertEqual(None, req2) def test_csrf_token_in_header(self): """ Check that we can pass in the token in a header instead of in the form """ req = self._get_POST_csrf_cookie_request() req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(None, req2) def test_put_and_delete_rejected(self): """ Tests that HTTP PUT and DELETE methods have protection """ req = TestingHttpRequest() req.method = 'PUT' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) req = TestingHttpRequest() req.method = 'DELETE' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) def test_put_and_delete_allowed(self): """ Tests that HTTP PUT and DELETE methods can get through with X-CSRFToken and a cookie """ req = 
self._get_GET_csrf_cookie_request() req.method = 'PUT' req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(None, req2) req = self._get_GET_csrf_cookie_request() req.method = 'DELETE' req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(None, req2) # Tests for the template tag method def test_token_node_no_csrf_cookie(self): """ Check that CsrfTokenNode works when no CSRF cookie is set """ req = self._get_GET_no_csrf_cookie_request() resp = token_view(req) self.assertEqual(u"", resp.content) def test_token_node_empty_csrf_cookie(self): """ Check that we get a new token if the csrf_cookie is the empty string """ req = self._get_GET_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = "" CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) self.assertNotEqual(u"", resp.content) def test_token_node_with_csrf_cookie(self): """ Check that CsrfTokenNode works when a CSRF cookie is set """ req = self._get_GET_csrf_cookie_request() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) self._check_token_present(resp) def test_get_token_for_exempt_view(self): """ Check that get_token still works for a view decorated with 'csrf_exempt'. 
""" req = self._get_GET_csrf_cookie_request() CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {}) resp = token_view(req) self._check_token_present(resp) def test_get_token_for_requires_csrf_token_view(self): """ Check that get_token works for a view decorated solely with requires_csrf_token """ req = self._get_GET_csrf_cookie_request() resp = requires_csrf_token(token_view)(req) self._check_token_present(resp) def test_token_node_with_new_csrf_cookie(self): """ Check that CsrfTokenNode works when a CSRF cookie is created by the middleware (when one was not already present) """ req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME] self._check_token_present(resp, csrf_id=csrf_cookie.value) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_bad_referer(self): """ Test that a POST HTTPS request with a bad referer is rejected """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertNotEqual(None, req2) self.assertEqual(403, req2.status_code) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_good_referer(self): """ Test that a POST HTTPS request with a good referer is accepted """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.example.com/somepage' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(None, req2) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_good_referer_2(self): """ Test that a POST HTTPS request with a good referer is accepted where the referer contains no 
trailing slash """ # See ticket #15617 req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.example.com' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(None, req2) def test_ensures_csrf_cookie_no_middleware(self): """ Tests that ensures_csrf_cookie decorator fulfils its promise with no middleware """ @ensure_csrf_cookie def view(request): # Doesn't insert a token or anything return HttpResponse(content="") req = self._get_GET_no_csrf_cookie_request() resp = view(req) self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False)) self.assertTrue('Cookie' in resp.get('Vary','')) def test_ensures_csrf_cookie_with_middleware(self): """ Tests that ensures_csrf_cookie decorator fulfils its promise with the middleware enabled. """ @ensure_csrf_cookie def view(request): # Doesn't insert a token or anything return HttpResponse(content="") req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, view, (), {}) resp = view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)) self.assertTrue('Cookie' in resp2.get('Vary',''))
hassoon3/odoo
refs/heads/8.0
addons/l10n_es/migrations/8.0.5.0/pre-migration.py
52
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2015 Serv. Tecnol. Avanz. (<http://www.serviciosbaeza.com>) # Pedro M. Baeza <pedro.baeza@serviciosbaeza.com> # FactorLibre (<http://factorlibre.com>) # Hugo santos <hugo.santos@factorlibre.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## __name__ = u"Renombrar impuestos, códigos de impuestos y posiciones fiscales" def rename_fiscal_positions(cr): cr.execute(""" UPDATE account_fiscal_position SET name='Régimen Extracomunitario / Canarias, Ceuta y Melilla' WHERE name='Régimen Extracomunitario' """) def rename_tax_codes(cr): tax_code_mapping = [ # IVA devengado. Base {'previous_code': '--', 'previous_name': 'IVA devengado. Base imponible', 'code': 'IDBI'}, {'previous_code': '--', 'previous_name': 'IVA Devengado Base Imponible', 'code': 'IDBI'}, {'previous_code': '[01]', 'code': 'RGIDBI4'}, {'previous_code': '[04]', 'previous_name': 'Régimen general IVA devengado. Base imponible 10%', 'code': 'RGIDBI10'}, {'previous_code': '[04]', 'previous_name': 'Régimen general IVA Devengado. Base Imponible 10%', 'code': 'RGIDBI10'}, {'previous_code': '[07]', 'previous_name': 'Régimen general IVA devengado. 
Base imponible 21%', 'code': 'RGIDBI21'}, {'previous_code': '[07]', 'previous_name': 'Régimen general IVA Devengado. Base Imponible 21%', 'code': 'RGIDBI21'}, # IVA devengado. Cuota {'previous_code': '[21]', 'code': 'ITDC'}, {'previous_code': '[03]', 'code': 'RGIDC4'}, {'previous_code': '[06]', 'previous_name': 'Régimen general IVA devengado. Cuota 10%', 'code': 'RGIDC10'}, {'previous_code': '[06]', 'previous_name': 'Régimen general IVA Devengado. Cuota 10%', 'code': 'RGIDC10'}, {'previous_code': '[09]', 'previous_name': 'Régimen general IVA devengado. Cuota 21%', 'code': 'RGIDC21'}, {'previous_code': '[09]', 'previous_name': 'Régimen general IVA Devengado. Cuota 21%', 'code': 'RGIDC21'}, # Adquisiciones intracomunitarias {'previous_code': '[19]', 'code': 'AIDBYSBI'}, {'previous_code': '[20]', 'code': 'AIDBYSC'}, # IVA deducible. Base Imponible {'previous_code': '--', 'previous_name': 'IVA deducible. Base imponible', 'code': 'ISBI'}, {'previous_code': '--', 'previous_name': 'IVA Deducible Base Imponible', 'code': 'ISBI'}, {'previous_code': '--', 'previous_name': 'Base de compensaciones Régimen Especial A., G. y' ' P. 
12%', 'code': 'CREAGYPBI12'}, # Base operaciones interiores corrientes {'previous_code': '[22]', 'code': 'OICBI'}, {'previous_code': '--', 'previous_name': 'Base operaciones interiores corrientes (4%)', 'code': 'OICBI4'}, {'previous_code': '--', 'previous_name': 'Base operaciones interiores corrientes (10%)', 'code': 'OICBI10'}, {'previous_code': '--', 'previous_name': 'Base operaciones interiores corrientes (21%)', 'code': 'OICBI21'}, # Base operaciones interiores bienes de inversión {'previous_code': '[24]', 'code': 'OIBIBI'}, {'previous_code': '--', 'previous_name': 'Base operaciones interiores bienes inversión (4%)', 'code': 'OIBIBI4'}, {'previous_code': '--', 'previous_name': 'Base operaciones interiores bienes inversión (10%)', 'code': 'OIBIBI10'}, {'previous_code': '--', 'previous_name': 'Base operaciones interiores bienes inversión (21%)', 'code': 'OIBIBI21'}, # Base importaciones de bienes corrientes {'previous_code': '[26]', 'code': 'IBCBI'}, {'previous_code': '--', 'previous_name': 'Base importaciones bienes y servicios corrientes' ' (4%)', 'code': 'IBYSCBI4'}, {'previous_code': '--', 'previous_name': 'Base importaciones bienes y servicios corrientes' ' (10%)', 'code': 'IBYSCBI10'}, {'previous_code': '--', 'previous_name': 'Base importaciones bienes y servicios corrientes' ' (21%)', 'code': 'IBYSCBI21'}, # Base importaciones de bienes de inversión {'previous_code': '[28]', 'code': 'IBIBI'}, {'previous_code': '--', 'previous_name': 'Base importaciones bienes inversión (4%)', 'code': 'IBIBI4'}, {'previous_code': '--', 'previous_name': 'Base importaciones bienes inversión (10%)', 'code': 'IBIBI10'}, {'previous_code': '--', 'previous_name': 'Base importaciones bienes inversión (21%)', 'code': 'IBIBI21'}, # Adquisiciones intracomunitarias de bienes corrientes {'previous_code': '[30]', 'code': 'AIBYSCBI'}, {'previous_code': '--', 'previous_name': 'Base adquisiciones intracomunitarias bienes y' ' serv. corr. 
(4%)', 'code': 'AIBYSCBI4'}, {'previous_code': '--', 'previous_name': 'Base adquisiciones intracomunitarias bienes y' ' serv. corr. (10%)', 'code': 'AIBYSCBI10'}, {'previous_code': '--', 'previous_name': 'Base adquisiciones intracomunitarias bienes y' ' serv. corr. (21%)', 'code': 'AIBYSCBI21'}, # Adquisiciones intracomunitarias de bienes de inversión {'previous_code': '[32]', 'code': 'AIBIBI'}, {'previous_code': '--', 'previous_name': 'Base adquisiciones intracomunitarias bienes' ' inversión (4%)', 'code': 'AIBIBI4'}, {'previous_code': '--', 'previous_name': 'Base adquisiciones intracomunitarias bienes' ' inversión (10%)', 'code': 'AIBIBI10'}, {'previous_code': '--', 'previous_name': 'Base adquisiciones intracomunitarias bienes' ' inversión (21%)', 'code': 'AIBIBI21'}, # Base recargo de equivalencia {'previous_code': '--', 'previous_name': 'Recargo equivalencia ded. Base imponible 0.5%', 'code': 'REDBI05'}, {'previous_code': '--', 'previous_name': 'Recargo equivalencia ded. Base imponible 1.4%', 'code': 'REDBI014'}, {'previous_code': '--', 'previous_name': 'Recargo equivalencia ded. 
Base imponible 5.2%', 'code': 'REDBI52'}, # Iva deducible cuotas {'previous_code': '[37]', 'code': 'ITADC'}, {'previous_code': '34', 'code': 'CREAGYP12'}, # Cuotas operaciones interiores corrientes {'previous_code': '[23]', 'code': 'SOICC'}, {'previous_code': '--', 'previous_name': 'Cuotas soportadas operaciones interiores corrientes' ' (4%)', 'code': 'SOICC4'}, {'previous_code': '--', 'previous_name': 'Cuotas soportadas operaciones interiores corrientes' ' (10%)', 'code': 'SOICC10'}, {'previous_code': '--', 'previous_name': 'Cuotas soportadas operaciones interiores corrientes' ' (21%)', 'code': 'SOICC21'}, # Cuotas operaciones interiores con bienes de inversión {'previous_code': '[25]', 'code': 'SOIBIC'}, {'previous_code': '--', 'previous_name': 'Cuotas soportadas operaciones interiores bienes' ' inversión (4%)', 'code': 'SOIBIC4'}, {'previous_code': '--', 'previous_name': 'Cuotas soportadas operaciones interiores bienes' ' inversión (10%)', 'code': 'SOIBIC10'}, {'previous_code': '--', 'previous_name': 'Cuotas soportadas operaciones interiores bienes' ' inversión (21%)', 'code': 'SOIBIC21'}, # Cuotas devengadas en importaciones de bienes y serv. corr. {'previous_code': '[27]', 'code': 'DIBCC'}, {'previous_code': '--', 'previous_name': 'Cuotas devengadas importaciones bienes y serv.' ' corr. (4%)', 'code': 'DIBYSCC4'}, {'previous_code': '--', 'previous_name': 'Cuotas devengadas importaciones bienes y serv.' ' corr. (10%)', 'code': 'DIBYSCC10'}, {'previous_code': '--', 'previous_name': 'Cuotas devengadas importaciones bienes y serv.' ' corr. 
(21%)', 'code': 'DIBYSCC21'}, # Cuotas devengadas en importaciones de bienes de inversión {'previous_code': '[29]', 'code': 'DIBIC'}, {'previous_code': '--', 'previous_name': 'Cuotas devengadas importaciones bienes inversión' ' (4%)', 'code': 'DIBIC4'}, {'previous_code': '--', 'previous_name': 'Cuotas devengadas importaciones bienes inversión' ' (10%)', 'code': 'DIBIC10'}, {'previous_code': '--', 'previous_name': 'Cuotas devengadas importaciones bienes inversión' ' (21%)', 'code': 'DIBIC21'}, # Adquisiciones intracomunitarias de bienes corrientes - Cuota {'previous_code': '[31]', 'code': 'AIBYSCC'}, {'previous_code': '--', 'previous_name': 'En adquisiciones intracomunitarias bienes y serv.' ' corr. (4%)', 'code': 'AIBYSCC4'}, {'previous_code': '--', 'previous_name': 'En adquisiciones intracomunitarias bienes y serv.' ' corr. (10%)', 'code': 'AIBYSCC10'}, {'previous_code': '--', 'previous_name': 'En adquisiciones intracomunitarias bienes y serv.' ' corr. (21%)', 'code': 'AIBYSCC21'}, # Adquisiciones intracomunitarias bienes de inversión - Cuota {'previous_code': '[33]', 'code': 'AIBIC'}, {'previous_code': '--', 'previous_name': 'En adquisiciones intracomunitarias bienes inversión' ' (4%)', 'code': 'AIBIC4'}, {'previous_code': '--', 'previous_name': 'En adquisiciones intracomunitarias bienes inversión' ' (10%)', 'code': 'AIBIC10'}, {'previous_code': '--', 'previous_name': 'En adquisiciones intracomunitarias bienes inversión' ' (21%)', 'code': 'AIBIC21'}, # Otros códigos de impuestos {'previous_code': '[42]', 'code': 'EIDBYS'}, {'previous_code': '[43]', 'code': 'EYOA'}, # Recargo equivalencia Cuota {'previous_code': '[12]', 'previous_name': 'Recargo equivalencia. Cuota 0.5%', 'code': 'REC05'}, {'previous_code': '[15]', 'previous_name': 'Recargo equivalencia. Cuota 1.4%', 'code': 'REC014'}, {'previous_code': '[18]', 'previous_name': 'Recargo equivalencia. Cuota 5.2%', 'code': 'REC52'}, # Recargo equivalencia ded. 
Cuota {'previous_code': '[12]', 'previous_name': 'Recargo equivalencia ded. Cuota 0.5%', 'code': 'REDC05'}, {'previous_code': '[15]', 'previous_name': 'Recargo equivalencia ded. Cuota 1.4%', 'code': 'REDC014'}, {'previous_code': '[18]', 'previous_name': 'Recargo equivalencia ded. Cuota 5.2%', 'code': 'REDC52'}, # Recargo equivalencia base imponible {'previous_code': '[10]', 'code': 'REBI05'}, {'previous_code': '[13]', 'code': 'REBI014'}, {'previous_code': '[16]', 'code': 'REBI52'}, # IRPF Retenciones a cuenta {'previous_code': 'B.IRPF AC', 'code': 'IRACBI'}, {'previous_code': 'B.IRPF1 AC', 'code': 'IRACBI1'}, {'previous_code': 'B.IRPF2 AC', 'code': 'IRACBI2'}, {'previous_code': 'B.IRPF7 AC', 'code': 'IRACBI7'}, {'previous_code': 'B.IRPF9 AC', 'code': 'IRACBI9'}, {'previous_code': 'B.IRPF15 AC', 'code': 'IRACBI15'}, {'previous_code': 'B.IRPF20 AC', 'code': 'IRACBI20'}, {'previous_code': 'B.IRPF21 AC', 'code': 'IRACBI21'}, # IRPF total retenciones a cuenta {'previous_code': 'IRPF AC', 'code': 'ITRACC'}, {'previous_code': 'IRPF1 AC', 'code': 'IRACC1'}, {'previous_code': 'IRPF2 AC', 'code': 'IRACC2'}, {'previous_code': 'IRPF7 AC', 'code': 'IRACC7'}, {'previous_code': 'IRPF9 AC', 'code': 'IRACC9'}, {'previous_code': 'IRPF15 AC', 'code': 'IRACC15'}, {'previous_code': 'IRPF20 AC', 'code': 'IRACC20'}, {'previous_code': 'IRPF21 AC', 'code': 'IRACC21'}, # IRPF retenciones practicadas. base imponible {'previous_code': 'B.IRPF', 'code': 'IRPBI'}, {'previous_code': 'B.IRPF1', 'code': 'IRPBI1'}, {'previous_code': 'B.IRPF2', 'code': 'IRPBI2'}, {'previous_code': 'B.IRPF7', 'code': 'IRPBI7'}, {'previous_code': 'B.IRPF9', 'code': 'IRPBI9'}, {'previous_code': 'B.IRPF15', 'code': 'IRPBI15'}, {'previous_code': 'B.IRPF20', 'code': 'IRPBI20'}, {'previous_code': 'B.IRPF21', 'code': 'IRPBI21'}, # IRPF retenciones practicadas. 
total cuota {'previous_code': 'IRPF', 'code': 'ITRPC'}, {'previous_code': 'IRPF1', 'code': 'IRPC1'}, {'previous_code': 'IRPF2', 'code': 'IRPC2'}, {'previous_code': 'IRPF7', 'code': 'IRPC7'}, {'previous_code': 'IRPF9', 'code': 'IRPC9'}, {'previous_code': 'IRPF15', 'code': 'IRPC15'}, {'previous_code': 'IRPF20', 'code': 'IRPC20'}, {'previous_code': 'IRPF21', 'code': 'IRPC21'}, # IVA exento {'previous_code': '--', 'previous_name': 'Base adquisiciones exentas', 'code': 'AEBI'}, {'previous_code': '--', 'previous_name': 'Base ventas exentas', 'code': 'OESDAD'}, ] for mapping in tax_code_mapping: sql = """ UPDATE account_tax_code SET code=%s WHERE code=%s""" if mapping.get('previous_name'): sql += " AND name=%s" cr.execute(sql, (mapping['code'], mapping['previous_code'], mapping['previous_name'])) else: cr.execute(sql, (mapping['code'], mapping['previous_code'])) def rename_taxes(cr): tax_mapping = { 'S_IVA4': 'S_IVA4B', 'S_IVA10': 'S_IVA10B', 'S_IVA21': 'S_IVA21B', 'P_IVA21_IC_SV': 'P_IVA21_SP_IN', 'P_IVA21_IC_SV_1': 'P_IVA21_SP_IN_1', 'P_IVA21_IC_SV_2': 'P_IVA21_SP_IN_2', } for old_description, new_description in tax_mapping.iteritems(): sql = """ UPDATE account_tax SET description=%s WHERE description=%s""" cr.execute(sql, (new_description, old_description)) def change_refunds_tax_codes(cr): """Cambia los códigos de impuestos de los abonos posteriores a 2014 para que vayan a la parte de modificación de bases/cuotas en lugar de minorar las bases/cuotas normales. 
""" refund_tax_codes = { # IVA repercutido 'RGIDBI4': 'MBYCRBI', 'RGIDBI10': 'MBYCRBI', 'RGIDBI21': 'MBYCRBI', 'RGIDC4': 'MBYCRC', 'RGIDC10': 'MBYCRC', 'RGIDC21': 'MBYCRC', # Recargo equivalencia compras 'REDBI05': 'RDDSBI', 'REDBI014': 'RDDSBI', 'REDBI52': 'RDDSBI', 'REDC05': 'RDDSC', 'REDC014': 'RDDSC', 'REDC52': 'RDDSC', # Recargo equivalencia ventas 'REBI05': 'MBYCDRDERBI', 'REBI014': 'MBYCDRDERBI', 'REBI52': 'MBYCDRDERBI', 'REC05': 'MBYCDRDERC', 'REC014': 'MBYCDRDERC', 'REC52': 'MBYCDRDERC', # IVA soportado 'OICBI4': 'RDDSBI', 'OIBIBI4': 'RDDSBI', 'OICBI10': 'RDDSBI', 'OIBIBI10': 'RDDSBI', 'OICBI21': 'RDDSBI', 'OIBIBI21': 'RDDSBI', 'SOICC4': 'RDDSC', 'SOIBIC4': 'RDDSC', 'SOICC10': 'RDDSC', 'SOIBIC10': 'RDDSC', 'SOICC21': 'RDDSC', 'SOIBIC21': 'RDDSC', # Importaciones 'IBYSCBI4': 'RDDSBI', 'IBYSCBI10': 'RDDSBI', 'IBYSCBI21': 'RDDSBI', 'IBIBI4': 'RDDSBI', 'IBIBI10': 'RDDSBI', 'IBIBI21': 'RDDSBI', 'DIBYSCC4': 'RDDSC', 'DIBYSCC10': 'RDDSC', 'DIBYSCC21': 'RDDSC', 'DIBIC4': 'RDDSC', 'DIBIC10': 'RDDSC', 'DIBIC21': 'RDDSC', # Intracomunitario 'AIBYSCBI4': 'RDDSBI', 'AIBYSCBI10': 'RDDSBI', 'AIBYSCBI21': 'RDDSBI', 'AISCBI4': 'RDDSBI', 'AISCBI10': 'RDDSBI', 'AISCBI21': 'RDDSBI', 'AIBIBI4': 'RDDSBI', 'AIBIBI10': 'RDDSBI', 'AIBIBI21': 'RDDSBI', 'AIBYSCC4': 'RDDSC', 'AIBYSCC10': 'RDDSC', 'AIBYSCC21': 'RDDSC', 'AISCC4': 'RDDSC', 'AISCC10': 'RDDSC', 'AISCC21': 'RDDSC', 'AIBIC4': 'RDDSC', 'AIBIC10': 'RDDSC', 'AIBIC21': 'RDDSC', 'AIDBYSBI': 'MBYCRBI', 'AIBBI': 'MBYCRBI', 'AIBIBIA': 'MBYCRBI', 'OCIDSPEAIBI': 'MBYCRBI', 'AISBI': 'MBYCRBI', 'AIDBYSC': 'MBYCRC', 'AIBC': 'MBYCRC', 'AIBICA': 'MBYCRC', 'OOCIDSPEAIC': 'MBYCRC', 'AISC': 'MBYCRC', } cr.execute("SELECT id FROM res_company") for record in cr.fetchall(): company_id = record[0] for old_tax_code, new_tax_code in refund_tax_codes.iteritems(): cr.execute( "SELECT id FROM account_tax_code WHERE code=%s", (new_tax_code, )) new_tax_code_id = cr.fetchone() if not new_tax_code_id: # Create fake tax code cr.execute( """ INSERT INTO 
account_tax_code (code, name, sign, company_id) VALUES (%s, %s, %s, %s) RETURNING id """, (new_tax_code, new_tax_code, 1.0, company_id)) new_tax_code_id = cr.fetchone()[0] cr.execute( """ UPDATE account_move_line aml SET tax_code_id=%s FROM account_tax_code atc WHERE aml.tax_code_id=atc.id AND atc.code=%s AND aml.tax_amount < 0 AND aml.date>='2014-01-01' AND aml.company_id=%s """, (new_tax_code_id, old_tax_code, company_id)) def migrate(cr, version): if not version: return rename_fiscal_positions(cr) rename_tax_codes(cr) rename_taxes(cr) change_refunds_tax_codes(cr)
spock1104/vm696-kernel
refs/heads/master
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
886
# Util.py - Python extension for perf trace, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

# Number of nanoseconds in one second, used to split/join timestamps.
NSECS_PER_SEC = 1000000000


def avg(total, n):
    """Return the arithmetic mean of a running total over n samples."""
    return total / n


def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into a single nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs


def nsecs_secs(nsecs):
    """Return the whole-seconds part of a nanosecond timestamp.

    Floor division keeps the result an integer on Python 3 as well
    (plain ``/`` would yield a float there, breaking callers that
    format it with ``%u``/``%d`` semantics in mind).
    """
    return nsecs // NSECS_PER_SEC


def nsecs_nsecs(nsecs):
    """Return the sub-second (nanosecond) remainder of a timestamp."""
    return nsecs % NSECS_PER_SEC


def nsecs_str(nsecs):
    """Format a nanosecond timestamp as 'SSSSS.NNNNNNNNN'.

    Bug fix: the original ended the assignment with a trailing comma,
    which made the function return a 1-tuple containing the string
    instead of the string itself.  It also bound the result to a local
    named ``str``, shadowing the builtin; both issues are fixed here.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))


def clear_term():
    """Clear the terminal via ANSI escape codes (cursor home + erase)."""
    print("\x1b[H\x1b[2J")
DavidMikeSimon/ansible
refs/heads/devel
lib/ansible/vars/__init__.py
31
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from collections import defaultdict from collections import MutableMapping from jinja2.exceptions import UndefinedError try: from hashlib import sha1 except ImportError: from sha import sha as sha1 from ansible import constants as C from ansible.cli import CLI from ansible.errors import * from ansible.parsing import DataLoader from ansible.plugins.cache import FactCache from ansible.template import Templar from ansible.utils.debug import debug from ansible.vars.hostvars import HostVars CACHED_VARS = dict() class VariableManager: def __init__(self): self._fact_cache = FactCache() self._vars_cache = defaultdict(dict) self._extra_vars = defaultdict(dict) self._host_vars_files = defaultdict(dict) self._group_vars_files = defaultdict(dict) self._inventory = None self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest() def _get_cache_entry(self, play=None, host=None, task=None): play_id = "NONE" if play: play_id = play._uuid host_id = "NONE" if host: host_id = host.get_name() task_id = "NONE" if task: task_id = task._uuid return "PLAY:%s;HOST:%s;TASK:%s" % (play_id, host_id, task_id) @property def extra_vars(self): ''' 
ensures a clean copy of the extra_vars are made ''' return self._extra_vars.copy() @extra_vars.setter def extra_vars(self, value): ''' ensures a clean copy of the extra_vars are used to set the value ''' assert isinstance(value, MutableMapping) self._extra_vars = value.copy() def set_inventory(self, inventory): self._inventory = inventory def _validate_both_dicts(self, a, b): ''' Validates that both arguments are dictionaries, or an error is raised. ''' if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)): raise AnsibleError("failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)) def _combine_vars(self, a, b): ''' Combines dictionaries of variables, based on the hash behavior ''' self._validate_both_dicts(a, b) if C.DEFAULT_HASH_BEHAVIOUR == "merge": return self._merge_dicts(a, b) else: return dict(a.items() + b.items()) def _merge_dicts(self, a, b): ''' Recursively merges dict b into a, so that keys from b take precedence over keys from a. ''' result = dict() self._validate_both_dicts(a, b) for dicts in a, b: # next, iterate over b keys and values for k, v in dicts.iteritems(): # if there's already such key in a # and that key contains dict if k in result and isinstance(result[k], dict): # merge those dicts recursively result[k] = self._merge_dicts(a[k], v) else: # otherwise, just copy a value from b to a result[k] = v return result def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): ''' Returns the variables, with optional "context" given via the parameters for the play, host, and task (which could possibly result in different sets of variables being returned due to the additional context). 
The order of precedence is: - play->roles->get_default_vars (if there is a play context) - group_vars_files[host] (if there is a host context) - host_vars_files[host] (if there is a host context) - host->get_vars (if there is a host context) - fact_cache[host] (if there is a host context) - vars_cache[host] (if there is a host context) - play vars (if there is a play context) - play vars_files (if there's no host context, ignore file names that cannot be templated) - task->get_vars (if there is a task context) - extra vars ''' debug("in VariableManager get_vars()") cache_entry = self._get_cache_entry(play=play, host=host, task=task) if cache_entry in CACHED_VARS and use_cache: debug("vars are cached, returning them now") return CACHED_VARS[cache_entry] all_vars = defaultdict(dict) if play: # first we compile any vars specified in defaults/main.yml # for all roles within the specified play for role in play.get_roles(): all_vars = self._combine_vars(all_vars, role.get_default_vars()) if host: # next, if a host is specified, we load any vars from group_vars # files and then any vars from host_vars files which may apply to # this host or the groups it belongs to # we merge in the special 'all' group_vars first, if they exist if 'all' in self._group_vars_files: all_vars = self._combine_vars(all_vars, self._group_vars_files['all']) for group in host.get_groups(): all_vars = self._combine_vars(all_vars, group.get_vars()) if group.name in self._group_vars_files and group.name != 'all': all_vars = self._combine_vars(all_vars, self._group_vars_files[group.name]) host_name = host.get_name() if host_name in self._host_vars_files: all_vars = self._combine_vars(all_vars, self._host_vars_files[host_name]) # then we merge in vars specified for this host all_vars = self._combine_vars(all_vars, host.get_vars()) # next comes the facts cache and the vars cache, respectively try: all_vars = self._combine_vars(all_vars, self._fact_cache.get(host.name, dict())) except KeyError: pass if 
play: all_vars = self._combine_vars(all_vars, play.get_vars()) for vars_file_item in play.get_vars_files(): try: # create a set of temporary vars here, which incorporate the # extra vars so we can properly template the vars_files entries temp_vars = self._combine_vars(all_vars, self._extra_vars) templar = Templar(loader=loader, variables=temp_vars) # we assume each item in the list is itself a list, as we # support "conditional includes" for vars_files, which mimics # the with_first_found mechanism. vars_file_list = templar.template(vars_file_item) if not isinstance(vars_file_list, list): vars_file_list = [ vars_file_list ] # now we iterate through the (potential) files, and break out # as soon as we read one from the list. If none are found, we # raise an error, which is silently ignored at this point. for vars_file in vars_file_list: data = loader.load_from_file(vars_file) if data is not None: all_vars = self._combine_vars(all_vars, data) break else: raise AnsibleError("vars file %s was not found" % vars_file_item) except AnsibleError, e: # FIXME: get_vars should probably be taking a flag to determine # whether or not vars files errors should be fatal at this # stage, or just base it on whether a host was specified? 
pass except UndefinedError, e: continue if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): all_vars = self._combine_vars(all_vars, role.get_vars()) if host: all_vars = self._combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict())) if task: if task._role: all_vars = self._combine_vars(all_vars, task._role.get_vars()) all_vars = self._combine_vars(all_vars, task.get_vars()) all_vars = self._combine_vars(all_vars, self._extra_vars) # FIXME: make sure all special vars are here # Finally, we create special vars all_vars['playbook_dir'] = loader.get_basedir() if host: all_vars['groups'] = [group.name for group in host.get_groups()] if self._inventory is not None: hostvars = HostVars(vars_manager=self, play=play, inventory=self._inventory, loader=loader) all_vars['hostvars'] = hostvars all_vars['groups'] = self._inventory.groups_list() if task: if task._role: all_vars['role_path'] = task._role._role_path if self._inventory is not None: all_vars['inventory_dir'] = self._inventory.basedir() if play: # add the list of hosts in the play, as adjusted for limit/filters # FIXME: play_hosts should be deprecated in favor of ansible_play_hosts, # however this would take work in the templating engine, so for now # we'll add both so we can give users something transitional to use host_list = [x.name for x in self._inventory.get_hosts()] all_vars['play_hosts'] = host_list all_vars['ansible_play_hosts'] = host_list # the 'omit' value alows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token all_vars['ansible_version'] = CLI.version_info(gitinfo=False) # make vars self referential, so people can do things like 'vars[var_name]' copied_vars = all_vars.copy() if 'hostvars' in copied_vars: del copied_vars['hostvars'] all_vars['vars'] = all_vars.copy() #CACHED_VARS[cache_entry] = all_vars debug("done with get_vars()") return all_vars def _get_inventory_basename(self, path): ''' Returns the bsaename minus the 
extension of the given path, so the bare filename can be matched against host/group names later ''' (name, ext) = os.path.splitext(os.path.basename(path)) if ext not in ('.yml', '.yaml'): return os.path.basename(path) else: return name def _load_inventory_file(self, path, loader): ''' helper function, which loads the file and gets the basename of the file without the extension ''' if loader.is_directory(path): data = dict() try: names = loader.list_directory(path) except os.error as err: raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror)) # evaluate files in a stable order rather than whatever # order the filesystem lists them. names.sort() # do not parse hidden files or dirs, e.g. .svn/ paths = [os.path.join(path, name) for name in names if not name.startswith('.')] for p in paths: _found, results = self._load_inventory_file(path=p, loader=loader) if results is not None: data = self._combine_vars(data, results) else: file_name, ext = os.path.splitext(path) data = None if not ext: for ext in C.YAML_FILENAME_EXTENSIONS: new_path = path + ext if loader.path_exists(new_path): data = loader.load_from_file(new_path) break else: if loader.path_exists(path): data = loader.load_from_file(path) name = self._get_inventory_basename(path) return (name, data) def add_host_vars_file(self, path, loader): ''' Loads and caches a host_vars file in the _host_vars_files dict, where the key to that dictionary is the basename of the file, minus the extension, for matching against a given inventory host name ''' (name, data) = self._load_inventory_file(path, loader) if data: self._host_vars_files[name] = data return data else: return dict() def add_group_vars_file(self, path, loader): ''' Loads and caches a host_vars file in the _host_vars_files dict, where the key to that dictionary is the basename of the file, minus the extension, for matching against a given inventory host name ''' (name, data) = self._load_inventory_file(path, loader) if data: 
self._group_vars_files[name] = data return data else: return dict() def set_host_facts(self, host, facts): ''' Sets or updates the given facts for a host in the fact cache. ''' assert isinstance(facts, dict) if host.name not in self._fact_cache: self._fact_cache[host.name] = facts else: try: self._fact_cache[host.name].update(facts) except KeyError: self._fact_cache[host.name] = facts def set_host_variable(self, host, varname, value): ''' Sets a value in the vars_cache for a host. ''' host_name = host.get_name() if host_name not in self._vars_cache: self._vars_cache[host_name] = dict() self._vars_cache[host_name][varname] = value
rd37/horizon
refs/heads/master
openstack_dashboard/dashboards/admin/flavors/extras/tests.py
8
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django import http from mox import IsA # noqa from openstack_dashboard import api from openstack_dashboard.test import helpers as test class FlavorExtrasTests(test.BaseAdminViewTests): @test.create_stubs({api.nova: ('flavor_get_extras', 'flavor_get'), }) def test_list_extras_when_none_exists(self): flavor = self.flavors.first() extras = [api.nova.FlavorExtraSpec(flavor.id, 'k1', 'v1')] # GET -- to determine correctness of output api.nova.flavor_get(IsA(http.HttpRequest), flavor.id).AndReturn(flavor) api.nova.flavor_get_extras(IsA(http.HttpRequest), flavor.id).AndReturn(extras) self.mox.ReplayAll() url = reverse('horizon:admin:flavors:extras:index', args=[flavor.id]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertTemplateUsed(resp, "admin/flavors/extras/index.html") @test.create_stubs({api.nova: ('flavor_extra_set', ), }) def _generic_extra_create_post(self, key_name): flavor = self.flavors.first() create_url = reverse('horizon:admin:flavors:extras:create', args=[flavor.id]) index_url = reverse('horizon:admin:flavors:extras:index', args=[flavor.id]) # GET to display the flavor_name api.nova.flavor_extra_set(IsA(http.HttpRequest), flavor.id, {key_name: 'v1'}) self.mox.ReplayAll() data = {'flavor_id': flavor.id, 'keys': 'custom', 'key': key_name, 'value': 'v1'} resp = self.client.post(create_url, data) self.assertNoFormErrors(resp) 
self.assertRedirectsNoFollow(resp, index_url) self.mox.UnsetStubs() @test.create_stubs({api.nova: ('flavor_extra_set', ), }) def test_extra_create_with_template(self): flavor = self.flavors.first() create_url = reverse('horizon:admin:flavors:extras:create', args=[flavor.id]) index_url = reverse('horizon:admin:flavors:extras:index', args=[flavor.id]) # GET to display the flavor_name api.nova.flavor_extra_set(IsA(http.HttpRequest), flavor.id, {'quota:read_bytes_sec': '1000'}) self.mox.ReplayAll() data = {'flavor_id': flavor.id, 'keys': 'quota:read_bytes_sec', 'value': '1000'} resp = self.client.post(create_url, data) self.assertNoFormErrors(resp) self.assertRedirectsNoFollow(resp, index_url) @test.create_stubs({api.nova: ('flavor_get', ), }) def test_extra_create_get(self): flavor = self.flavors.first() create_url = reverse('horizon:admin:flavors:extras:create', args=[flavor.id]) api.nova.flavor_get(IsA(http.HttpRequest), flavor.id).AndReturn(flavor) self.mox.ReplayAll() resp = self.client.get(create_url) self.assertEqual(resp.status_code, 200) self.assertTemplateUsed(resp, 'admin/flavors/extras/create.html') @test.create_stubs({api.nova: ('flavor_get', ), }) def _generic_extra_create_names_format_fail(self, key_name): flavor = self.flavors.first() create_url = reverse('horizon:admin:flavors:extras:create', args=[flavor.id]) api.nova.flavor_get(IsA(http.HttpRequest), flavor.id).AndReturn(flavor) self.mox.ReplayAll() data = {'flavor_id': flavor.id, 'keys': 'custom', 'key': key_name, 'value': 'v1'} resp = self.client.post(create_url, data) msg = ('Name may only contain letters, numbers, underscores, periods, ' 'colons, spaces and hyphens.') self.assertFormErrors(resp, 1, msg) self.mox.UnsetStubs() def test_create_extra_key_names_valid_formats(self): valid_keys = ("key1", "month.price", "I-Am:AK-ey. 
22-") for x in valid_keys: self._generic_extra_create_post(key_name=x) def test_create_extra_key_names_invalid_formats(self): invalid_keys = ("key1/", "<key>", "$$akey$", "!akey") for x in invalid_keys: self._generic_extra_create_names_format_fail(key_name=x)
byndcivilization/toy-infrastructure
refs/heads/master
flask-app/venv/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py
2918
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel


class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP.

    Combines a byte-level coding state machine (legal byte sequences)
    with two statistical analyzers: character distribution and
    two-character context.  Confidence is the max of the two analyzers.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # State machine validating EUC-JP byte sequences.
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        # Frequency-of-characters analyzer.
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        # Character-pair (context) analyzer, specific to Japanese.
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        """Reset prober state; the base class resets the SM and distribution."""
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "EUC-JP"

    def feed(self, aBuf):
        """Feed a chunk of bytes; return the detection state afterwards.

        Walks the buffer byte-by-byte through the coding state machine.
        On an illegal sequence the prober rules itself out (eNotMe); on a
        definitive match it locks in (eFoundIt).  Each time the SM
        completes a character (eStart), the just-finished character is
        handed to both analyzers.  ``_mLastChar`` carries the final byte
        across feed() calls so characters split over chunk boundaries
        are still analyzed.
        """
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A full character just ended at byte i.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character straddles the previous chunk: stitch its
                    # first byte (saved last time) to this chunk's byte 0.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the last byte for the cross-chunk stitch above.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: lock in early once context analysis has enough
            # data and confidence clears the threshold.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        """Return the higher of the context and distribution confidences."""
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
hamzehd/edx-platform
refs/heads/master
lms/djangoapps/licenses/__init__.py
12133432
guykr-stratoscale/powerline-shell
refs/heads/master
segments/ssh.py
22
import os


def add_ssh_segment():
    # Append an SSH indicator segment when this shell session arrived over
    # SSH: sshd sets $SSH_CLIENT for such sessions, so its presence is the
    # detection signal.
    # NOTE(review): `powerline` and `Color` are not defined in this file.
    # Presumably powerline-shell exec()s segment scripts in a namespace
    # that provides them (and `powerline.network` looks like a glyph for
    # the indicator) -- this module is not importable standalone; confirm
    # against the host script.
    if os.getenv('SSH_CLIENT'):
        powerline.append(' %s ' % powerline.network,
                         Color.SSH_FG, Color.SSH_BG)


# Segment scripts run their side effect at import/exec time.
add_ssh_segment()
almarklein/flexx
refs/heads/master
flexx/pyscript/functions.py
21
import os import sys import types import inspect import subprocess import hashlib from . import Parser if sys.version_info[0] >= 3: string_types = str, text_type = str else: # pragma: no cover string_types = basestring, text_type = unicode class JSString(text_type): """ A subclass of string, so we can add attributes to JS string objects. """ pass def py2js(ob, new_name=None, **parser_options): """ Convert Python to JavaScript. Parameters: ob (str, function, class): The code, function or class to transpile. new_name (str, optional): If given, renames the function or class. This argument is ignored if ob is a string. parser_options: Additional options for the parser. See Parser class for details. Returns: jscode (str): The JavaScript code. Also has a ``pycode`` attribute. Notes: The Python source code for a class is acquired by name. Therefore one should avoid decorating classes in modules where multiple classes with the same name are defined. This is a consequence of classes not having a corresponding code object (in contrast to functions). 
""" if isinstance(ob, string_types): thetype = 'str' pycode = ob elif isinstance(ob, type) or isinstance(ob, (types.FunctionType, types.MethodType)): thetype = 'class' if isinstance(ob, type) else 'def' # Get code try: lines, linenr = inspect.getsourcelines(ob) except Exception as err: raise ValueError('Could not get source code for object %r: %s' % (ob, err)) # Normalize indentation indent = len(lines[0]) - len(lines[0].lstrip()) lines = [line[indent:] for line in lines] # Skip any decorators while not lines[0].lstrip().startswith(thetype): lines.pop(0) # join lines and rename pycode = ''.join(lines) else: raise ValueError('py2js() only accepts classes and real functions.') # Get hash, in case we ever want to cache JS accross sessions h = hashlib.sha256('pyscript version 1'.encode()) h.update(pycode.encode()) hash = h.digest() # Get JS code p = Parser(pycode, **parser_options) jscode = p.dump() if new_name and thetype in ('class', 'def'): jscode = js_rename(jscode, ob.__name__, new_name) # Wrap in JSString jscode = JSString(jscode) jscode.pycode = pycode jscode.pyhash = hash return jscode def js_rename(jscode, cur_name, new_name): """ Rename a function or class in a JavaScript code string. Parameters: jscode (str): the JavaScript source code cur_name (str): the current name new_name (str): the name to replace the current name with Returns: jscode (str): the modified JavaScript source code """ jscode = jscode.replace('%s = function' % cur_name, '%s = function' % (new_name), 1) jscode = jscode.replace('%s.prototype' % cur_name, '%s.prototype' % new_name) if '.' in new_name: jscode = jscode.replace('var %s;\n' % cur_name, '', 1) else: jscode = jscode.replace('var %s;\n' % cur_name, 'var %s;\n' % new_name, 1) return jscode NODE_EXE = None def get_node_exe(): """ Small utility that provides the node exe. The first time this is called both 'nodejs' and 'node' are tried. To override the executable path, set the ``FLEXX_NODE_EXE`` environment variable. 
""" # This makes things work on Ubuntu's nodejs as well as other node # implementations, and allows users to set the node exe if necessary global NODE_EXE NODE_EXE = os.getenv('FLEXX_NODE_EXE') or NODE_EXE if NODE_EXE is None: NODE_EXE = 'nodejs' try: subprocess.check_output([NODE_EXE, '-v']) except Exception: # pragma: no cover NODE_EXE = 'node' return NODE_EXE def evaljs(jscode, whitespace=True): """ Evaluate JavaScript code in Node.js. parameters: jscode (str): the JavaScript code to evaluate. whitespace (bool): if whitespace is False, the whitespace is removed from the result. returns: result (str): the last result as a string. """ res = subprocess.check_output([get_node_exe(), '-p', '-e', jscode]) res = res.decode().rstrip() if res.endswith('undefined'): res = res[:-9].rstrip() if not whitespace: res = res.replace('\n', '').replace('\t', '').replace(' ', '') return res def evalpy(pycode, whitespace=True): """ Evaluate PyScript code in Node.js (after translating to JS). parameters ---------- pycode : str the PyScript code to evaluate. whitespace : bool if whitespace is False, the whitespace is removed from the result. returns ------- result : str the last result as a string. """ # delibirate numpy doc style to see if napoleon handles it the same return evaljs(py2js(pycode), whitespace) def script2js(filename, namespace=None, target=None): """ Export a .py file to a .js file. Parameters: filename (str): the filename of the .py file to transpile. namespace (str): the namespace for this module. (optional) target (str): the filename of the resulting .js file. If not given or None, will use the ``filename``, but with a ``.js`` extension. 
""" # Import assert filename.endswith('.py') pycode = open(filename, 'rt').read() # Convert jscode = Parser(pycode, namespace).dump() jscode = '/* Do not edit, autogenerated by flexx.pyscript */\n\n' + jscode # Export if target is None: dirname, fname = os.path.split(filename) filename2 = os.path.join(dirname, fname[:-3] + '.js') else: filename2 = target open(filename2, 'wt').write(jscode) def clean_code(code): """ Remove duplicate declarations of std function definitions. Use this if you build a JS source file from multiple snippets and want to get rid of the function declarations like ``_truthy`` and ``sum``. Parameters: code (str): the complete source code. Returns: new_code (str): the code with duplicate definitions filtered out. """ known_funcs = {} lines = [] for line in code.splitlines(): line2 = line.lstrip() indent = len(line) - len(line2) if line2.startswith('var ') and ' = function ' in line2: prev_indent = known_funcs.get(line2, 999) if prev_indent == 0 : continue if indent == 0: known_funcs[line2] = indent lines.append(line) return '\n'.join(lines)
Santinell/ansible-modules-core
refs/heads/devel
cloud/amazon/ec2_asg.py
33
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = """ --- module: ec2_asg short_description: Create or delete AWS Autoscaling Groups description: - Can create or delete AWS Autoscaling Groups - Works with the ec2_lc module to manage Launch Configurations version_added: "1.6" author: "Gareth Rushgrove (@garethr)" options: state: description: - register or deregister the instance required: true choices: ['present', 'absent'] name: description: - Unique name for group to be created or deleted required: true load_balancers: description: - List of ELB names to use for the group required: false availability_zones: description: - List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set. required: false launch_config_name: description: - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. required: true min_size: description: - Minimum number of instances in group, if unspecified then the current group value will be used. required: false max_size: description: - Maximum number of instances in group, if unspecified then the current group value will be used. required: false desired_capacity: description: - Desired number of instances in group, if unspecified then the current group value will be used. 
required: false replace_all_instances: description: - In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuration. required: false version_added: "1.8" default: False replace_batch_size: description: - Number of instances you'd like to replace at a time. Used with replace_all_instances. required: false version_added: "1.8" default: 1 replace_instances: description: - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration. required: false version_added: "1.8" default: None lc_check: description: - Check to make sure instances that are being replaced with replace_instances do not aready have the current launch_config. required: false version_added: "1.8" default: True vpc_zone_identifier: description: - List of VPC subnets to use required: false default: None tags: description: - A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true. required: false default: None version_added: "1.7" health_check_period: description: - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. required: false default: 500 seconds version_added: "1.7" health_check_type: description: - The service you want the health status from, Amazon EC2 or Elastic Load Balancer. required: false default: EC2 version_added: "1.7" choices: ['EC2', 'ELB'] default_cooldown: description: - The number of seconds after a scaling activity completes before another can begin. required: false default: 300 seconds version_added: "2.0" wait_timeout: description: - how long before wait instances to become viable when replaced. Used in concjunction with instance_ids option. default: 300 version_added: "1.8" wait_for_instances: description: - Wait for the ASG instances to be in a ready state before exiting. 
If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy". version_added: "1.9" default: yes required: False termination_policies: description: - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existent autoscaling group, the current termination policies are mantained required: false default: Default choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] version_added: "2.0" extends_documentation_fragment: - aws - ec2 """ EXAMPLES = ''' # Basic configuration - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production propagate_at_launch: no # Rolling ASG Updates Below is an example of how to assign a new launch config to an ASG and terminate old instances. All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in a rolling fashion with instances using the current launch configuration, "my_new_lc". This could also be considered a rolling deploy of a pre-baked AMI. If this is a newly created group, the instances will not be replaced since all instances will have the current launch configuration. 
- name: create launch config ec2_lc: name: my_new_lc image_id: ami-lkajsf key_name: mykey region: us-east-1 security_groups: sg-23423 instance_type: m1.small assign_public_ip: yes - ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 health_check_type: ELB replace_all_instances: yes min_size: 5 max_size: 5 desired_capacity: 5 region: us-east-1 To only replace a couple of instances instead of all of them, supply a list to "replace_instances": - ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 health_check_type: ELB replace_instances: - i-b345231 - i-24c2931 min_size: 5 max_size: 5 desired_capacity: 5 region: us-east-1 ''' import time import logging as log from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * log.getLogger('boto').setLevel(log.CRITICAL) #log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') try: import boto.ec2.autoscale from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity', 'health_check_period', 'health_check_type', 'launch_config_name', 'load_balancers', 'max_size', 'min_size', 'name', 'placement_group', 'termination_policies', 'vpc_zone_identifier') INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') def enforce_required_arguments(module): ''' As many arguments are not required for autoscale group deletion they cannot be mandatory arguments for the module, so we enforce them here ''' missing_args = [] for arg in ('min_size', 'max_size', 'launch_config_name'): if module.params[arg] is None: missing_args.append(arg) if missing_args: module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args)) def 
get_properties(autoscaling_group): properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES) # Ugly hack to make this JSON-serializable. We take a list of boto Tag # objects and replace them with a dict-representation. Needed because the # tags are included in ansible's return value (which is jsonified) if 'tags' in properties and isinstance(properties['tags'], list): serializable_tags = {} for tag in properties['tags']: serializable_tags[tag.key] = [tag.value, tag.propagate_at_launch] properties['tags'] = serializable_tags properties['healthy_instances'] = 0 properties['in_service_instances'] = 0 properties['unhealthy_instances'] = 0 properties['pending_instances'] = 0 properties['viable_instances'] = 0 properties['terminating_instances'] = 0 instance_facts = {} if autoscaling_group.instances: properties['instances'] = [i.instance_id for i in autoscaling_group.instances] for i in autoscaling_group.instances: instance_facts[i.instance_id] = {'health_status': i.health_status, 'lifecycle_state': i.lifecycle_state, 'launch_config_name': i.launch_config_name } if i.health_status == 'Healthy' and i.lifecycle_state == 'InService': properties['viable_instances'] += 1 if i.health_status == 'Healthy': properties['healthy_instances'] += 1 else: properties['unhealthy_instances'] += 1 if i.lifecycle_state == 'InService': properties['in_service_instances'] += 1 if i.lifecycle_state == 'Terminating': properties['terminating_instances'] += 1 if i.lifecycle_state == 'Pending': properties['pending_instances'] += 1 properties['instance_facts'] = instance_facts properties['load_balancers'] = autoscaling_group.load_balancers if getattr(autoscaling_group, "tags", None): properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) return properties def elb_dreg(asg_connection, module, group_name, instance_id): region, ec2_url, aws_connect_params = get_aws_connection_info(module) as_group = asg_connection.get_all_groups(names=[group_name])[0] 
wait_timeout = module.params.get('wait_timeout') props = get_properties(as_group) count = 1 if as_group.load_balancers and as_group.health_check_type == 'ELB': try: elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) else: return for lb in as_group.load_balancers: elb_connection.deregister_instances(lb, instance_id) log.debug("De-registering {0} from ELB {1}".format(instance_id, lb)) wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: count = 0 for lb in as_group.load_balancers: lb_instances = elb_connection.describe_instance_health(lb) for i in lb_instances: if i.instance_id == instance_id and i.state == "InService": count += 1 log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description)) time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime())) def elb_healthy(asg_connection, elb_connection, module, group_name): healthy_instances = [] as_group = asg_connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) # get healthy, inservice instances from ASG instances = [] for instance, settings in props['instance_facts'].items(): if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': instances.append(instance) log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances)) log.debug("ELB instance status:") for lb in as_group.load_balancers: # we catch a race condition that sometimes happens if the instance exists in the ASG # but has not yet show up in the ELB try: lb_instances = elb_connection.describe_instance_health(lb, instances=instances) except boto.exception.InvalidInstance: pass for i in lb_instances: if i.state == "InService": healthy_instances.append(i.instance_id) log.debug("{0}: {1}".format(i.instance_id, i.state)) 
return len(healthy_instances) def wait_for_elb(asg_connection, module, group_name): region, ec2_url, aws_connect_params = get_aws_connection_info(module) wait_timeout = module.params.get('wait_timeout') # if the health_check_type is ELB, we want to query the ELBs directly for instance # status as to avoid health_check_grace period that is awarded to ASG instances as_group = asg_connection.get_all_groups(names=[group_name])[0] if as_group.load_balancers and as_group.health_check_type == 'ELB': log.debug("Waiting for ELB to consider intances healthy.") try: elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) wait_timeout = time.time() + wait_timeout healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name) while healthy_instances < as_group.min_size and wait_timeout > time.time(): healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name) log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances)) time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime()) log.debug("Waiting complete. 
ELB thinks {0} instances are healthy.".format(healthy_instances)) def create_autoscaling_group(connection, module): group_name = module.params.get('name') load_balancers = module.params['load_balancers'] availability_zones = module.params['availability_zones'] launch_config_name = module.params.get('launch_config_name') min_size = module.params['min_size'] max_size = module.params['max_size'] desired_capacity = module.params.get('desired_capacity') vpc_zone_identifier = module.params.get('vpc_zone_identifier') set_tags = module.params.get('tags') health_check_period = module.params.get('health_check_period') health_check_type = module.params.get('health_check_type') default_cooldown = module.params.get('default_cooldown') wait_for_instances = module.params.get('wait_for_instances') as_groups = connection.get_all_groups(names=[group_name]) wait_timeout = module.params.get('wait_timeout') termination_policies = module.params.get('termination_policies') if not vpc_zone_identifier and not availability_zones: region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) elif vpc_zone_identifier: vpc_zone_identifier = ','.join(vpc_zone_identifier) asg_tags = [] for tag in set_tags: for k,v in tag.iteritems(): if k !='propagate_at_launch': asg_tags.append(Tag(key=k, value=v, propagate_at_launch=bool(tag.get('propagate_at_launch', True)), resource_id=group_name)) if not as_groups: if not vpc_zone_identifier and not availability_zones: availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()] enforce_required_arguments(module) launch_configs = connection.get_all_launch_configurations(names=[launch_config_name]) ag = AutoScalingGroup( group_name=group_name, load_balancers=load_balancers, availability_zones=availability_zones, 
launch_config=launch_configs[0], min_size=min_size, max_size=max_size, desired_capacity=desired_capacity, vpc_zone_identifier=vpc_zone_identifier, connection=connection, tags=asg_tags, health_check_period=health_check_period, health_check_type=health_check_type, default_cooldown=default_cooldown, termination_policies=termination_policies) try: connection.create_auto_scaling_group(ag) if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) changed = True return(changed, asg_properties) except BotoServerError, e: module.fail_json(msg=str(e)) else: as_group = as_groups[0] changed = False for attr in ASG_ATTRIBUTES: if module.params.get(attr, None) is not None: module_attr = module.params.get(attr) if attr == 'vpc_zone_identifier': module_attr = ','.join(module_attr) group_attr = getattr(as_group, attr) # we do this because AWS and the module may return the same list # sorted differently try: module_attr.sort() except: pass try: group_attr.sort() except: pass if group_attr != module_attr: changed = True setattr(as_group, attr, module_attr) if len(set_tags) > 0: have_tags = {} want_tags = {} for tag in asg_tags: want_tags[tag.key] = [tag.value, tag.propagate_at_launch] dead_tags = [] for tag in as_group.tags: have_tags[tag.key] = [tag.value, tag.propagate_at_launch] if tag.key not in want_tags: changed = True dead_tags.append(tag) if dead_tags != []: connection.delete_tags(dead_tags) if have_tags != want_tags: changed = True connection.create_or_update_tags(asg_tags) # handle loadbalancers separately because None != [] load_balancers = module.params.get('load_balancers') or [] if load_balancers and as_group.load_balancers != load_balancers: changed = True as_group.load_balancers = module.params.get('load_balancers') if changed: try: as_group.update() except 
BotoServerError, e: module.fail_json(msg=str(e)) if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) try: as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) except BotoServerError, e: module.fail_json(msg=str(e)) return(changed, asg_properties) def delete_autoscaling_group(connection, module): group_name = module.params.get('name') groups = connection.get_all_groups(names=[group_name]) if groups: group = groups[0] group.max_size = 0 group.min_size = 0 group.desired_capacity = 0 group.update() instances = True while instances: tmp_groups = connection.get_all_groups(names=[group_name]) if tmp_groups: tmp_group = tmp_groups[0] if not tmp_group.instances: instances = False time.sleep(10) group.delete() while len(connection.get_all_groups(names=[group_name])): time.sleep(5) changed=True return changed else: changed=False return changed def get_chunks(l, n): for i in xrange(0, len(l), n): yield l[i:i+n] def update_size(group, max_size, min_size, dc): log.debug("setting ASG sizes") log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size )) group.max_size = max_size group.min_size = min_size group.desired_capacity = dc group.update() def replace(connection, module): batch_size = module.params.get('replace_batch_size') wait_timeout = module.params.get('wait_timeout') group_name = module.params.get('name') max_size = module.params.get('max_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') lc_check = module.params.get('lc_check') replace_instances = module.params.get('replace_instances') as_group = connection.get_all_groups(names=[group_name])[0] wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') props = get_properties(as_group) instances = props['instances'] if 
replace_instances: instances = replace_instances # check to see if instances are replaceable if checking launch configs new_instances, old_instances = get_instances_by_lc(props, lc_check, instances) num_new_inst_needed = desired_capacity - len(new_instances) if lc_check: if num_new_inst_needed == 0 and old_instances: log.debug("No new instances needed, but old instances are present. Removing old instances") terminate_batch(connection, module, old_instances, instances, True) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) changed = True return(changed, props) # we don't want to spin up extra instances if not necessary if num_new_inst_needed < batch_size: log.debug("Overriding batch size to {0}".format(num_new_inst_needed)) batch_size = num_new_inst_needed if not old_instances: changed = False return(changed, props) #check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: min_size = as_group.min_size if max_size is None: max_size = as_group.max_size if desired_capacity is None: desired_capacity = as_group.desired_capacity # set temporary settings and wait for them to be reached # This should get overriden if the number of instances left is less than the batch size. 
as_group = connection.get_all_groups(names=[group_name])[0] update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) instances = props['instances'] if replace_instances: instances = replace_instances log.debug("beginning main loop") for i in get_chunks(instances, batch_size): # break out of this loop if we have enough new instances break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False) wait_for_term_inst(connection, module, term_instances) wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] if break_early: log.debug("breaking loop") break update_size(as_group, max_size, min_size, desired_capacity) as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) log.debug("Rolling update complete.") changed=True return(changed, asg_properties) def get_instances_by_lc(props, lc_check, initial_instances): new_instances = [] old_instances = [] # old instances are those that have the old launch config if lc_check: for i in props['instances']: if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']: new_instances.append(i) else: old_instances.append(i) else: log.debug("Comparing initial instances with current: {0}".format(initial_instances)) for i in props['instances']: if i not in initial_instances: new_instances.append(i) else: old_instances.append(i) log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances)) log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances)) return new_instances, 
old_instances def list_purgeable_instances(props, lc_check, replace_instances, initial_instances): instances_to_terminate = [] instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances']) # check to make sure instances given are actually in the given ASG # and they have a non-current launch config if lc_check: for i in instances: if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: instances_to_terminate.append(i) else: for i in instances: if i in initial_instances: instances_to_terminate.append(i) return instances_to_terminate def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False): batch_size = module.params.get('replace_batch_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') group_name = module.params.get('name') wait_timeout = int(module.params.get('wait_timeout')) lc_check = module.params.get('lc_check') decrement_capacity = False break_loop = False as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) desired_size = as_group.min_size new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances) num_new_inst_needed = desired_capacity - len(new_instances) # check to make sure instances given are actually in the given ASG # and they have a non-current launch config instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances) log.debug("new instances needed: {0}".format(num_new_inst_needed)) log.debug("new instances: {0}".format(new_instances)) log.debug("old instances: {0}".format(old_instances)) log.debug("batch instances: {0}".format(",".join(instances_to_terminate))) if num_new_inst_needed == 0: decrement_capacity = True if as_group.min_size != min_size: as_group.min_size = min_size as_group.update() log.debug("Updating minimum size back to original of {0}".format(min_size)) #if are some leftover old 
instances, but we are already at capacity with new ones # we don't want to decrement capacity if leftovers: decrement_capacity = False break_loop = True instances_to_terminate = old_instances desired_size = min_size log.debug("No new instances needed") if num_new_inst_needed < batch_size and num_new_inst_needed !=0 : instances_to_terminate = instances_to_terminate[:num_new_inst_needed] decrement_capacity = False break_loop = False log.debug("{0} new instances needed".format(num_new_inst_needed)) log.debug("decrementing capacity: {0}".format(decrement_capacity)) for instance_id in instances_to_terminate: elb_dreg(connection, module, group_name, instance_id) log.debug("terminating instance: {0}".format(instance_id)) connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity) # we wait to make sure the machines we marked as Unhealthy are # no longer in the list return break_loop, desired_size, instances_to_terminate def wait_for_term_inst(connection, module, term_instances): batch_size = module.params.get('replace_batch_size') wait_timeout = module.params.get('wait_timeout') group_name = module.params.get('name') lc_check = module.params.get('lc_check') as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) count = 1 wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: log.debug("waiting for instances to terminate") count = 0 as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) instance_facts = props['instance_facts'] instances = ( i for i in instance_facts if i in term_instances) for i in instances: lifecycle = instance_facts[i]['lifecycle_state'] health = instance_facts[i]['health_status'] log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health )) if lifecycle == 'Terminating' or healthy == 'Unhealthy': count += 1 time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited 
too long for old instances to terminate. %s" % time.asctime()) def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop): # make sure we have the latest stats after that last loop. as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop])) # now we make sure that we have enough instances in a viable state wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and desired_size > props[prop]: log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop])) time.sleep(10) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime()) log.debug("Reached {0}: {1}".format(prop, desired_size)) return props def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( name=dict(required=True, type='str'), load_balancers=dict(type='list'), availability_zones=dict(type='list'), launch_config_name=dict(type='str'), min_size=dict(type='int'), max_size=dict(type='int'), desired_capacity=dict(type='int'), vpc_zone_identifier=dict(type='list'), replace_batch_size=dict(type='int', default=1), replace_all_instances=dict(type='bool', default=False), replace_instances=dict(type='list', default=[]), lc_check=dict(type='bool', default=True), wait_timeout=dict(type='int', default=300), state=dict(default='present', choices=['present', 'absent']), tags=dict(type='list', default=[]), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), wait_for_instances=dict(type='bool', default=True), termination_policies=dict(type='list', default='Default') ), ) module = AnsibleModule( 
argument_spec=argument_spec, mutually_exclusive = [['replace_all_instances', 'replace_instances']] ) if not HAS_BOTO: module.fail_json(msg='boto required for this module') state = module.params.get('state') replace_instances = module.params.get('replace_instances') replace_all_instances = module.params.get('replace_all_instances') region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) if not connection: module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) changed = create_changed = replace_changed = False if state == 'present': create_changed, asg_properties=create_autoscaling_group(connection, module) elif state == 'absent': changed = delete_autoscaling_group(connection, module) module.exit_json( changed = changed ) if replace_all_instances or replace_instances: replace_changed, asg_properties=replace(connection, module) if create_changed or replace_changed: changed = True module.exit_json( changed = changed, **asg_properties ) if __name__ == '__main__': main()
AurelienRoy/ardupilot
refs/heads/master
Tools/autotest/pysim/testwind.py
246
#!/usr/bin/env python
"""Simple test of the wind generation code.

Drives util.Wind with the specification string '7,90,0.1' (presumably
speed, direction, turbulence -- confirm against util.Wind) and prints 60
simulated seconds of the drag magnitude for a stationary vehicle, one
"<time> <drag>" line per 10 ms time step.
"""

# NOTE: fix applied -- the original assigned `t0 = time.time()` and never
# used it; that dead timing code has been removed.  File-level imports are
# kept (split one-per-line per PEP 8) even where currently unused, since
# callers/tools may rely on them.
import util
import time
import random

from rotmat import Vector3

wind = util.Wind('7,90,0.1')

# Vehicle at rest: all drag comes from the wind itself.
velocity = Vector3(0, 0, 0)
t = 0
deltat = 0.01

while t < 60:
    print("%.4f %f" % (t, wind.drag(velocity, deltat=deltat).length()))
    t += deltat
darjeeling/django
refs/heads/master
tests/migrations/test_migrations_squashed_ref_squashed/app1/4_auto.py
133
from django.db import migrations


class Migration(migrations.Migration):
    """Placeholder migration carrying only an ordering dependency.

    Declares that this app's migration history continues from the
    squashed migration ``2_squashed_3`` of ``app1``; it performs no
    schema operations of its own.
    """

    dependencies = [
        ("app1", "2_squashed_3"),
    ]
ivh/VAMDC-VALD
refs/heads/master
nodes/starkb/node/admin.py
59
from django.contrib import admin #import your models here like this: #from DjNodeExample.node.models import * # add your models here to have them in the admin interface # for example if you have a model class called "Transition": #admin.site.register(Transition)
dya2/python-for-android
refs/heads/master
python3-alpha/extra_modules/pyxmpp2/simple.py
46
#
# (C) Copyright 2005-2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Simple API for simple things like sending messages.

The simplest way to send a message:

>>> from pyxmpp2.simple import send_message
>>> send_message("bob@example.org", "bob's password", "alice@example.org",
...                                        "Hello Alice")

Please note, though:
  - this is inefficient for anything more than sending a single message,
    as a new connection is established and closed for each `send_message`
    call.
  - the default TLS settings are insecure (no peer certificate validation)
"""

__docformat__ = "restructuredtext en"

import sys

from .interfaces import EventHandler, event_handler, QUIT
from .client import Client
from .jid import JID
from .streamevents import AuthorizedEvent, DisconnectedEvent
from .message import Message
from .settings import XMPPSettings


class FireAndForget(EventHandler):
    """A minimal XMPP client that just connects to a server and runs a
    single function.

    :Ivariables:
        - `action`: the function to run after the stream is authorized
        - `client`: a `Client` instance to do the rest of the job
    :Types:
        - `action`: a callable accepting a single 'client' argument
        - `client`: `pyxmpp2.client.Client`
    """
    def __init__(self, local_jid, action, settings):
        self.action = action
        self.client = Client(local_jid, [self], settings)

    def run(self):
        """Request client connection and start the main loop."""
        self.client.connect()
        self.client.run()

    def disconnect(self):
        """Request disconnection and let the main loop run for 2 more
        seconds for graceful disconnection."""
        self.client.disconnect()
        self.client.run(timeout = 2)

    @event_handler(AuthorizedEvent)
    def handle_authorized(self, event):
        """Run the deferred action once the stream is authorized, then
        disconnect."""
        # pylint: disable=W0613
        self.action(self.client)
        self.client.disconnect()

    @event_handler(DisconnectedEvent)
    def handle_disconnected(self, event):
        """Quit the main loop upon disconnection."""
        # pylint: disable=W0613,R0201
        return QUIT


def send_message(source_jid, password, target_jid, body, subject = None,
                        message_type = "chat", message_thread = None,
                                                        settings = None):
    """Start an XMPP session and send a message, then exit.

    :Parameters:
        - `source_jid`: sender JID
        - `password`: sender password
        - `target_jid`: recipient JID
        - `body`: message body
        - `subject`: message subject
        - `message_type`: message type
        - `message_thread`: message thread id
        - `settings`: other settings
    :Types:
        - `source_jid`: `pyxmpp2.jid.JID` or `basestring`
        - `password`: `basestring`
        - `target_jid`: `pyxmpp.jid.JID` or `basestring`
        - `body`: `basestring`
        - `subject`: `basestring`
        - `message_type`: `basestring`
        - `settings`: `pyxmpp2.settings.XMPPSettings`
    """
    # pylint: disable=R0913,R0912
    if sys.version_info.major < 3:
        # Python 2: decode byte-string arguments to unicode using the
        # locale's preferred encoding before handing them to pyxmpp2.
        # pylint: disable-msg=W0404
        from locale import getpreferredencoding
        encoding = getpreferredencoding()
        if isinstance(source_jid, str):
            source_jid = source_jid.decode(encoding)
        if isinstance(password, str):
            password = password.decode(encoding)
        if isinstance(target_jid, str):
            target_jid = target_jid.decode(encoding)
        if isinstance(body, str):
            body = body.decode(encoding)
        if isinstance(message_type, str):
            message_type = message_type.decode(encoding)
        if isinstance(message_thread, str):
            message_thread = message_thread.decode(encoding)

    if not isinstance(source_jid, JID):
        source_jid = JID(source_jid)
    if not isinstance(target_jid, JID):
        target_jid = JID(target_jid)

    # BUG FIX: `message_thread` was accepted (and decoded above) but never
    # attached to the stanza, silently dropping the thread id; pass it to
    # the Message constructor.
    msg = Message(to_jid = target_jid, body = body, subject = subject,
                            stanza_type = message_type,
                            thread = message_thread)

    def action(client):
        """Send the message `msg` via a client."""
        client.stream.send(msg)

    if settings is None:
        # Insecure default: TLS enabled but peer certificate NOT verified.
        settings = XMPPSettings({"starttls": True, "tls_verify_peer": False})

    if password is not None:
        settings["password"] = password

    handler = FireAndForget(source_jid, action, settings)
    try:
        handler.run()
    except KeyboardInterrupt:
        handler.disconnect()
        raise

# vi: sts=4 et sw=4
apavlo/h-store
refs/heads/master
graphs/draw.py
4
#!/usr/bin/env python
# Figure-generation helpers for paper plots: each function renders one chart
# with matplotlib and (usually) saves it to the given output path.
# NOTE: this script uses the Python 2 `print` statement and must run under
# Python 2.
import sys
import matplotlib.pyplot as plt
import numpy as np


def time_pred(in_path, out_path):
    # Line chart: prediction accuracy vs. date tolerance for four methods.
    # `in_path` is a tab-separated file with four numeric columns, one row
    # per tolerance step; `out_path` is the figure file to write.
    fig = plt.figure()
    fig.set_size_inches(8, 4.8)
    ax = fig.add_subplot(111)
    res1 = []
    res2 = []
    res3 = []
    res4 = []
    for line in open(in_path):
        fields = line.rstrip().split('\t')
        fields = [float(x) for x in fields]
        res1.append(fields[0])
        res2.append(fields[1])
        res3.append(fields[2])
        res4.append(fields[3])
    # x has 11 points (0.0 .. 1.0); assumes the input file has exactly
    # 11 rows -- TODO confirm against the data file.
    x = np.arange(0, 1.1, 0.1)
    print x
    ax.plot(x, res1, color='#FF6600', linestyle='-', marker='^', label='COLD')
    ax.plot(x, res2, color='#99CC00', linestyle='-.', marker='o', label='COLD,nolink')
    ax.plot(x, res3, color='#CC99FF', linestyle='-', marker='s', label='EUTB')
    ax.plot(x, res4, color='#99CCFF', linestyle='-.', marker='o', label='Pipeline')
    ax.legend(loc=4)
    ax.set_xlabel("Date Tolerance (% of #time-slice)", fontsize=16)
    ax.set_ylabel("Prediction Accuracy", fontsize=16)
    #plt.show()
    plt.savefig(out_path)


def tuple_num(out_path):
    # Grouped bar chart: tuples-per-100M for aLRU vs. timestamps over three
    # hard-coded workload measurements (YCSB, VOTER, TPC-C).
    fig = plt.figure()
    #fig.set_size_inches(8,4.8)
    ax = fig.add_subplot(111)
    res1 = [931.671451471, 12906.279152, 4123.76367542]
    res2 = [922.39838699, 13726.6611146, 4350.42673235]
    # \#topic ($K$) & 50 & 100 & 150 \\ \hline %\hline
    # TI & 0.7476 & 0.7505 & 0.7349 \\ \hline%\cline{2-4}
    # WTM & \multicolumn{3}{c}{0.7705} \\ \hline%\cline{2-4}
    # COLD(C=100) & 0.8283 & {\bf 0.8397} & 0.8254 \\
    # \hline
    x = [0.5, 1.0, 1.5]
    ax.bar([i-0.1 for i in x], res1, width=0.1, label='aLRU', hatch='\\', color='y')
    ax.bar([i for i in x], res2, width=0.1, label='timestamps', hatch='/', color='c')
    ax.set_ylabel("Tuples per 100M", fontsize=16, weight='bold')
    #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=3)
    ax.set_xlim([0.2, 1.8])
    ax.set_ylim([0, 15000])
    ax.set_xticklabels(['YCSB', 'VOTER', 'TPC-C'], fontsize=16)
    ax.set_xlabel("Workload", fontsize=16, weight='bold')
    ax.set_xticks([0.5, 1, 1.5])
    #plt.show()
    plt.savefig(out_path)


def anti_cache(out_path):
    # Two-bar chart: H-Store vs. anti-caching throughput (hard-coded values).
    fig = plt.figure()
    #fig.set_size_inches(8,4.8)
    ax = fig.add_subplot(111)
    res1 = [45000]
    res2 = [32000]
    # \#topic ($K$) & 50 & 100 & 150 \\ \hline %\hline
    # TI & 0.7476 & 0.7505 & 0.7349 \\ \hline%\cline{2-4}
    # WTM & \multicolumn{3}{c}{0.7705} \\ \hline%\cline{2-4}
    # COLD(C=100) & 0.8283 & {\bf 0.8397} & 0.8254 \\
    # \hline
    x = [0.5, 1.0, 1.5]   # NOTE: unused; bar positions are given literally below
    ax.bar([0.45], res1, width=0.1, label='H-Store', hatch='\\', color='#228B22')
    ax.bar([0.95], res2, width=0.1, label='anti-caching', hatch='/', color='#FF6600')
    ax.set_ylabel("Transactions per second", fontsize=16, weight='bold')
    #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=3)
    ax.set_xlim([0.2, 1.3])
    ax.set_ylim([0, 50000])
    ax.set_xticklabels(['data=memory', 'data=2Xmemory'], fontsize=16)
    #ax.set_xlabel("Workload",fontsize=16, weight='bold')
    ax.set_xticks([0.5, 1])
    #plt.show()
    plt.savefig(out_path)


def ext_perplexity_perf(out_path):
    # Four-series bar chart: read/write KBytes per skew factor for
    # lru vs. timestamps eviction (hard-coded measurements).
    fig = plt.figure()
    #fig.set_size_inches(8,4.8)
    ax = fig.add_subplot(111)
    res1 = [644455, 2036, 1962511, 4273859]
    res2 = [1492659, 613451, 4050569, 4575004]
    res3 = [1226661, 2035, 2521207, 4837716]
    res4 = [2078536, 571241, 4633451, 5204411]
    # \#topic ($K$) & 50 & 100 & 150 \\ \hline %\hline
    # PMTLM & 9889.48 & 8966.57 & 8483.49 \\ %\hline
    # EUTB & 4932.97 & 4778.50 & 4619.07 \\ %\hline
    # COLD(C=100) & 5200.46 & {\bf 4350.95} & 4394.46 \\
    x = [0.5, 1, 1.5, 2]
    ax.bar([i-0.2 for i in x], res1, width=0.1, label='lru-read', hatch='\\', color='b')
    ax.bar([i-0.1 for i in x], res2, width=0.1, label='ts-read', hatch='/', color='r')
    ax.bar([i+0.00 for i in x], res3, width=0.1, label='lru-write', hatch='|', color='g')
    ax.bar([i+0.1 for i in x], res4, width=0.1, label='ts-write', hatch='|', color='m')
    ax.set_ylabel("KBytes", fontsize=10)
    #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=4)
    ax.set_xlim([0.2, 2.3])
    ax.set_ylim([0, 6000000])
    ax.set_xticklabels(['S0.8', 'S1.0', 'S1.1', 'S1.2'], fontsize=16)
    ax.set_xlabel("Skew factor", fontsize=10)
    ax.set_xticks([0.5, 1, 1.5, 2])
    #plt.show()
    plt.savefig(out_path)


def link_predict_perf(out_path):
    # Bar chart: link-prediction AUC for MMSB / PMTLM / COLD across
    # community counts (hard-coded results).
    fig = plt.figure()
    #fig.set_size_inches(8,4.8)
    ax = fig.add_subplot(111)
    res1 = [0.6105, 0.7098, 0.6840]
    res2 = [0.8398, 0.8469, 0.8425]
    res3 = [0.8507, 0.8647, 0.8485]
    # \#community ($C$) & 50 & 100 & 150 \\ \hline %\hline
    # MMSB & 0.6105 & 0.7098 & 0.6840 \\ %\hline
    # PMTLM & 0.8398 & 0.8469 & 0.8425 \\ %\hline
    # COLD(K=100) & 0.8507 & {\bf 0.8647} & 0.8485 \\
    x = [0.5, 1, 1.5]
    ax.bar([i-0.15 for i in x], res1, width=0.1, label='MMSB', hatch='\\', color='#FF6600')
    ax.bar([i-0.05 for i in x], res2, width=0.1, label='PMTLM', hatch='/', color='#99CC00')
    ax.bar([i+0.05 for i in x], res3, width=0.1, label='COLD(K=100)', hatch='|', color='#CC99FF')
    ax.set_ylabel("Link Prediction (AUC)", fontsize=16)
    #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=3)
    ax.set_xlim([0.2, 1.7])
    ax.set_ylim([0.5, 0.9])
    ax.set_yticklabels(['0.5', '', '0.6', '', '0.7', '', '0.8', '', '0.9'], fontsize=16)
    # NOTE: this set_xticks call is overwritten by the one three lines below.
    ax.set_xticks([0.5, 0.6, 0.7, 0.8, 0.9])
    ax.set_xticklabels(['50', '100', '150'], fontsize=16)
    ax.set_xlabel("#Community C", fontsize=16)
    ax.set_xticks([0.5, 1, 1.5])
    #plt.show()
    plt.savefig(out_path)


def skew4(out_path):
    # Bar chart: throughput for lru vs. timestamps across four skew factors.
    fig = plt.figure()
    #fig.set_size_inches(8,4.8)
    ax = fig.add_subplot(111)
    res1 = [2618.45, 17978.96, 11843.0155556, 25602.2333333]
    res2 = [6123.74, 28654.0766667, 13244.77, 32371.2922222]
    x = [0.5, 1, 1.5, 2]
    ax.bar([i-0.1 for i in x], res1, width=0.1, label='lru', hatch='/', color='#99CC00')
    ax.bar([i+0.00 for i in x], res2, width=0.1, label='timestamps', hatch='|', color='#CC99FF')
    #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=2)
    ax.set_xlim([0.2, 2.3])
    ax.set_ylim([0, 35000])
    ax.set_xticklabels(['S0.8', 'S1.0', 'S1.1', 'S1.2'], fontsize=16)
    ax.set_xticks([0.5, 1, 1.5, 2])
    ax.set_xlabel("Skew Factor", fontsize=16)
    ax.set_ylabel("Transactions / second", fontsize=16)
    #plt.show()
    plt.savefig(out_path)


def training_time(out_path):
    # Horizontal bar chart: training time per method (hard-coded values).
    fig = plt.figure()
    #fig.set_size_inches(8,4.8)
    ax = fig.add_subplot(111)
    res1 = [42.38, 39.12, 167.25, 97.8525, 13.29608]
    methods = ('PMTLM', 'EUTB', 'WTM', 'COLD', 'COLD(8)')
    y_pos = [1, 2, 3, 4, 5]
    # Methods & PMTLM & EUTB & WTM & COLD & COLD (8) \\ \hline %\hline
    # Time (h) & 42.38 & 39.12 & 167.25 & 129.84 & 11.29
    ax.barh([i-0.25 for i in y_pos], res1, height=0.5, hatch='/', color='#99CC00')
    #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.set_yticklabels(methods, fontsize=16)
    ax.set_yticks(y_pos)
    ax.set_xlabel("Time(s)", fontsize=16)
    ax.set_ylabel("Methods", fontsize=16)
    #plt.show()
    plt.savefig(out_path)


def param_perplexity(in_path, out_path):
    # Bar chart: perplexity per (C, K) parameter combination, read from a
    # tab-separated file with four columns (one per K value).
    fig = plt.figure()
    fig.set_size_inches(8, 4.5)
    ax = fig.add_subplot(111)
    res1 = []
    res2 = []
    res3 = []
    res4 = []
    for line in open(in_path):
        fields = line.rstrip().split('\t')
        fields = [float(x) for x in fields]
        res1.append(fields[0])
        res2.append(fields[1])
        res3.append(fields[2])
        res4.append(fields[3])
    x = [0.5, 1, 1.5, 2]
    ax.bar([i-0.2 for i in x], res1, width=0.1, label='K=20', hatch='\\', color='#FF6600')
    ax.bar([i-0.1 for i in x], res2, width=0.1, label='K=50', hatch='/', color='#99CC00')
    ax.bar([i for i in x], res3, width=0.1, label='K=100', hatch='|', color='#CC99FF')
    ax.bar([i+0.1 for i in x], res4, width=0.1, label='K=150', hatch='-', color='#99CCFF')
    ax.set_ylabel("Perplexity", fontsize=16)
    #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=4)
    ax.set_xlim([0.2, 2.3])
    ax.set_ylim([0, 8600])
    ax.set_xticklabels(['C=20', 'C=50', 'C=100', 'C=150'], fontsize=16)
    ax.set_xticks([0.5, 1, 1.5, 2])
    #ax.bar(res3)
    #ax.bar(res4)
    #plt.show()
    plt.savefig(out_path)


def param_auc(in_path, out_path):
    # Bar chart: AUC per (K, C) combination.
    # NOTE: `in_path` is currently unused -- the values are hard-coded and
    # the file-reading loop is commented out below.
    fig = plt.figure()
    fig.set_size_inches(8, 4.5)
    ax = fig.add_subplot(111)
    res1 = [0.804299893, 0.842444446, 0.849645276, 0.8349456]
    res2 = [0.837819382, 0.848671287, 0.859869915, 0.84696871]
    res3 = [0.849837227, 0.859731227, 0.864741202, 0.856049581]
    res4 = [0.845544536, 0.838487469, 0.844473523, 0.830551247]
    # for line in open(in_path):
    #     fields = line.rstrip().split('\t')
    #     fields = [float(x) for x in fields]
    #     res1.append(fields[0])
    #     res2.append(fields[1])
    #     res3.append(fields[2])
    #     res4.append(fields[3])
    x = [0.5, 1, 1.5, 2]
    ax.bar([i-0.2 for i in x], res1, width=0.1, label='C=20', hatch='\\', color='#FF6600')
    ax.bar([i-0.1 for i in x], res2, width=0.1, label='C=50', hatch='/', color='#99CC00')
    ax.bar([i for i in x], res3, width=0.1, label='C=100', hatch='|', color='#CC99FF')
    ax.bar([i+0.1 for i in x], res4, width=0.1, label='C=150', hatch='-', color='#99CCFF')
    ax.set_ylabel("AUC", fontsize=16)
    #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=4)
    ax.set_xlim([0.2, 2.3])
    ax.set_ylim([0.7, 0.9])
    ax.set_xticklabels(['K=20', 'K=50', 'K=100', 'K=150'], fontsize=16)
    ax.set_xticks([0.5, 1, 1.5, 2])
    #ax.bar(res3)
    #ax.bar(res4)
    #plt.show()
    plt.savefig(out_path)


def param_rt_auc(out_path):
    # Bar chart: retweet-network AUC per (K, C) combination (hard-coded).
    fig = plt.figure()
    fig.set_size_inches(8, 4.5)
    ax = fig.add_subplot(111)
    res1 = [0.816569569, 0.818395692, 0.816361342, 0.785220436]
    res2 = [0.817803569, 0.820770394, 0.827362933, 0.805384206]
    res3 = [0.821360782, 0.828361933, 0.839770432, 0.825420544]
    res4 = [0.80472904, 0.822333338, 0.825420544, 0.809383506]
    x = [0.5, 1, 1.5, 2]
    ax.bar([i-0.2 for i in x], res1, width=0.1, label='K=20', hatch='\\', color='#FF6600')
    ax.bar([i-0.1 for i in x], res2, width=0.1, label='K=50', hatch='/', color='#99CC00')
    ax.bar([i for i in x], res3, width=0.1, label='K=100', hatch='|', color='#CC99FF')
    ax.bar([i+0.1 for i in x], res4, width=0.1, label='K=150', hatch='-', color='#99CCFF')
    ax.set_ylabel("AUC", fontsize=16)
    #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=4)
    ax.set_xlim([0.2, 2.3])
    ax.set_ylim([0.7, 0.9])
    ax.set_xticklabels(['C=20', 'C=50', 'C=100', 'C=150'], fontsize=16)
    ax.set_xticks([0.5, 1, 1.5, 2])
    #ax.bar(res3)
    #ax.bar(res4)
    #plt.show()
    plt.savefig(out_path)


def time_lag(in_path, out_path):
    # Line chart: popularity vs. time-relative-to-peak for two community
    # groups. Input file: row 0 = x values, rows 1-2 = the two series.
    fig = plt.figure()
    fig.set_size_inches(8, 4.8)
    ax = fig.add_subplot(111)
    x = []
    res1 = []
    res2 = []
    # NOTE(review): `input` shadows the builtin and the file handle is never
    # closed (fine for a one-shot plotting script).
    input = open(in_path)
    lines = input.readlines()
    x = [int(i) for i in lines[0].split('\t')]
    res1 = [float(i) for i in lines[1].split('\t')]
    res2 = [float(i) for i in lines[2].split('\t')]
    ax.plot(x, res1, color='#FF6600', linestyle='-', marker='o', label='High-interested Comm')
    ax.plot(x, res2, color='#3366FF', linestyle='-', marker='^', label='Medium-interested Comm')
    #ax.legend(loc='upper right')
    ax.set_yticks([0, 0.002, 0.004, 0.006, 0.008, 0.01, 0.012])
    ax.set_yticklabels(['0.000', '0.002', '0.004', '0.006', '0.008', '0.010', '0.012'])
    #ax.legend(loc='upper left')
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=2)
    ax.set_xlabel("Time Relative to Peak(Hour)", fontsize=16)
    ax.set_ylabel("Popularity", fontsize=16)
    #plt.show()
    plt.savefig(out_path)


def correlate(in_path, out_fig):
    # Scatter plot: temporal variance vs. log10(topic interest), read from a
    # two-column tab-separated file.
    fig = plt.figure()
    #fig.set_size_inches(8,5)
    ax = fig.add_subplot(111)
    x = []
    res = []
    import math
    for line in open(in_path):
        fields = line.rstrip().split('\t')
        x.append(math.log(float(fields[0]), 10))
        res.append(float(fields[1]))
    ax.scatter(x, res, color='#FF6600', marker='+')
    ax.set_xlabel("Community level Topic Interest", fontsize=18)
    ax.set_ylabel("Temporal Variance($10^{-5}$)", fontsize=18)
    ax.set_yticks([0, 5e-6, 10e-6, 15e-6, 20e-6, 25e-6, 30e-6])
    ax.set_yticklabels(['0', '0.5', '1', '1.5', '2', '2.5', '3'], fontsize=18)
    ax.set_ylim([0, 2e-5])
    ax.set_xlim([-6, 0])
    ax.set_xticks([-6, -5, -4, -3, -2, -1, 0])
    ax.set_xticklabels(['$10^{-6}$', '$10^{-5}$', '$10^{-4}$', '$10^{-3}$', '$10^{-2}$', '$10^{-1}$', '1'], fontsize=18)
    #plt.show()
    plt.savefig(out_fig)


def correlate_cdf(in_path, out_fig):
    # Same scatter as correlate() plus a cumulative-distribution overlay on a
    # twin y-axis. NOTE: shows the figure interactively; savefig is disabled,
    # so `out_fig` is currently unused.
    fig = plt.figure()
    #fig.set_size_inches(8,5)
    ax = fig.add_subplot(111)
    x = []
    res = []
    import math
    for line in open(in_path):
        fields = line.rstrip().split('\t')
        x.append(math.log(float(fields[0]), 10))
        res.append(float(fields[1]))
    ax.scatter(x, res, color='#FF6600', marker='+')
    y = res
    res_sum = sum(res)
    y = [i/res_sum for i in res]
    cy = np.cumsum(y)
    ax.set_xlabel("Community level Topic Interest", fontsize=16)
    ax.set_ylabel("Temporal Variance ($10^{-5}$)", fontsize=16)
    ax.set_yticks([0, 5e-6, 10e-6, 15e-6, 20e-6, 25e-6, 30e-6])
    ax.set_yticklabels(['0', '0.5', '1', '1.5', '2', '2.5', '3'], fontsize=16)
    #ax.set_ylim([0,2e-5])
    ax.set_ylim([0, 2e-5])
    ax.set_xlim([-6, 0])
    ax.set_xticks([-6, -5, -4, -3, -2, -1, 0])
    ax.set_xticklabels(['$10^{-6}$', '$10^{-5}$', '$10^{-4}$', '$10^{-3}$', '$10^{-2}$', '$10^{-1}$', '1'], fontsize=18)
    ax2 = ax.twinx()
    ax2.plot(x, cy)
    ax2.set_ylim([0, 1])
    ax2.set_ylabel("Cumulative Distribution Function", fontsize=16)
    plt.show()
    #plt.savefig(out_fig)


def speedup():
    # Bar chart: raw (single-node) training time vs. data size; output path
    # is fixed to ./speedup_raw.pdf.
    fig = plt.figure()
    fig.set_size_inches(7, 7)
    ax = fig.add_subplot(111)
    raw = [7.4795, 17.9182, 25.8796]
    ax.bar([i-0.2 for i in [1, 2, 3]], raw, width=0.4, hatch='\\', color='#FF6600')
    ax.set_xlabel("Data Size (#user)", fontsize=20)
    ax.set_ylabel("Time Cost (Hour)", fontsize=20)
    ax.set_xticks([1, 2, 3, ])
    ax.set_yticks([0, 5, 10, 15, 20, 25, 30])
    ax.set_yticklabels(['0', '5', '10', '15', '20', '25', '30'], fontsize=20)
    ax.set_xticklabels(['0.18M', '0.35M', '0.52M'], fontsize=20)
    #plt.show()
    plt.savefig("./speedup_raw.pdf")


def speedup2():
    # Bar chart: training time vs. number of GraphLab nodes; output path is
    # fixed to ./speedup_pal.pdf.
    fig = plt.figure()
    fig.set_size_inches(7, 7)
    ax = fig.add_subplot(111)
    raw = [97.8525, 51.16725, 25.8796, 13.29608]
    ax.bar([i-0.2 for i in [1, 2, 3, 4]], raw, width=0.4, hatch='/', color='#99CC00')
    ax.set_xlabel("GraphLab Nodes", fontsize=20)
    ax.set_ylabel("Time Cost (Hour)", fontsize=20)
    ax.set_xticks([1, 2, 3, 4])
    ax.set_xticklabels(['1', '2', '4', '8'], fontsize=20)
    ax.set_yticks([0, 40, 80, 120])
    ax.set_yticklabels(['0', '40', '80', '120'], fontsize=20)
    plt.savefig("./speedup_pal.pdf")


if __name__ == "__main__":
    # Only the anti-cache figure is generated by default; the other calls are
    # kept commented out for re-use.
    #in_path = sys.argv[1]
    #out_fig = sys.argv[2]
    #time_pred("./time-predict.txt","./time-predict.pdf")
    #param_perplexity('./param_per.txt','./param_per.pdf')
    #param_auc('./param_auc.txt','./ycsb-IO.pdf')
    #param_rt_auc('./param_rt_auc_rtnetwork.pdf')
    #rt_pred_time("./rt_pred_time_new.txt","./rt-pred-time.pdf")
    #diff_pred_perf("./ycsb-throughput.pdf")
    #training_time("./training_time.pdf")
    #ext_perplexity_perf("./ycsb-IO-new.pdf")
    #skew4("ycsb-new.pdf")
    #tuple_num("tuples.pdf")
    anti_cache("anti-cache.pdf")
    #link_predict_perf("./link_predict_perf.pdf")
    #time_lag("./time_lag.txt","./time_lag.pdf")
    #time_lag_fit("./time_lag.txt","./time_lag_fit.pdf")
    #correlate("./corr.txt","./corr.pdf")
    #correlate_cdf("./corr.txt","./corr_cdf.pdf")
    #speedup()
    #speedup2()
onceuponatimeforever/oh-mainline
refs/heads/master
vendor/packages/Django/tests/modeltests/raw_query/__init__.py
12133432
olympian94/gstudio
refs/heads/mongokit
gnowsys-ndf/gnowsys_ndf/mobwrite/migrations/__init__.py
12133432
houzhenggang/hiwifi-openwrt-HC5661-HC5761
refs/heads/master
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/lib-tk/test/test_tkinter/__init__.py
12133432
Rucknar/rancher
refs/heads/master
tests/server/ranchertests/__init__.py
12133432
Duke-NSOE/GeoHAT
refs/heads/master
GeoHat_V10/Scripts/networkx/generators/intersection.py
44
# -*- coding: utf-8 -*-
"""
Generators for random intersection graphs.
"""
#    Copyright (C) 2011 by
#    Aric Hagberg <hagberg@lanl.gov>
#    Dan Schult <dschult@colgate.edu>
#    Pieter Swart <swart@lanl.gov>
#    All rights reserved.
#    BSD license.
import random
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)'])
__all__ = ['uniform_random_intersection_graph',
           'k_random_intersection_graph',
           'general_random_intersection_graph',
           ]

def uniform_random_intersection_graph(n, m, p, seed=None):
    """Return a uniform random intersection graph.

    Parameters
    ----------
    n : int
        The number of nodes in the first bipartite set (nodes)
    m : int
        The number of nodes in the second bipartite set (attributes)
    p : float
        Probability of connecting nodes between bipartite sets
    seed : int, optional
        Seed for random number generator (default=None).

    See Also
    --------
    gnp_random_graph

    References
    ----------
    .. [1] K.B. Singer-Cohen, Random Intersection Graphs, 1995,
       PhD thesis, Johns Hopkins University
    .. [2] Fill, J. A., Scheinerman, E. R., and Singer-Cohen, K. B.,
       Random intersection graphs when m = !(n):
       An equivalence theorem relating the evolution of the g(n, m, p)
       and g(n, p) models. Random Struct. Algorithms 16, 2 (2000), 156-176.
    """
    # Build the random bipartite node/attribute graph, then project onto
    # the node set: two nodes are adjacent iff they share an attribute.
    G = nx.bipartite_random_graph(n, m, p, seed=seed)
    return nx.projected_graph(G, range(n))

def k_random_intersection_graph(n, m, k, seed=None):
    """Return a intersection graph with randomly chosen attribute sets for
    each node that are of equal size (k).

    Parameters
    ----------
    n : int
        The number of nodes in the first bipartite set (nodes)
    m : int
        The number of nodes in the second bipartite set (attributes)
    k : int
        Size of attribute set to assign to each node.
    seed : int, optional
        Seed for random number generator (default=None).

    See Also
    --------
    gnp_random_graph, uniform_random_intersection_graph

    References
    ----------
    .. [1] Godehardt, E., and Jaworski, J.
       Two models of random intersection graphs and their applications.
       Electronic Notes in Discrete Mathematics 10 (2001), 129--132.
    """
    # FIX: `seed` was documented but not accepted; it is now honored by
    # seeding the module-level RNG, consistent with the uniform generator.
    if seed is not None:
        random.seed(seed)
    G = nx.empty_graph(n + m)
    mset = range(n, n + m)
    # Assign each node a uniformly random k-subset of the attribute nodes.
    for v in range(n):
        targets = random.sample(mset, k)
        G.add_edges_from(zip([v] * len(targets), targets))
    return nx.projected_graph(G, range(n))

def general_random_intersection_graph(n, m, p, seed=None):
    """Return a random intersection graph with independent probabilities
    for connections between node and attribute sets.

    Parameters
    ----------
    n : int
        The number of nodes in the first bipartite set (nodes)
    m : int
        The number of nodes in the second bipartite set (attributes)
    p : list of floats of length m
        Probabilities for connecting nodes to each attribute
    seed : int, optional
        Seed for random number generator (default=None).

    See Also
    --------
    gnp_random_graph, uniform_random_intersection_graph

    References
    ----------
    .. [1] Nikoletseas, S. E., Raptopoulos, C., and Spirakis, P. G.
       The existence and efficient construction of large independent sets in
       general random intersection graphs. In ICALP (2004), J. D´ıaz,
       J. Karhum¨aki, A. Lepist¨o, and D. Sannella, Eds., vol. 3142
       of Lecture Notes in Computer Science, Springer, pp. 1029–1040.
    """
    if len(p) != m:
        raise ValueError("Probability list p must have m elements.")
    # FIX: `seed` was documented but not accepted; honor it here too.
    if seed is not None:
        random.seed(seed)
    G = nx.empty_graph(n + m)
    mset = range(n, n + m)
    # Connect node u to attribute v independently with probability p[v].
    for u in range(n):
        for v, q in zip(mset, p):
            if random.random() < q:
                G.add_edge(u, v)
    return nx.projected_graph(G, range(n))
rht/zulip
refs/heads/master
zerver/webhooks/reviewboard/tests.py
3
# -*- coding: utf-8 -*-
from zerver.lib.test_classes import WebhookTestCase


class ReviewBoardHookTests(WebhookTestCase):
    # Integration tests for the Review Board webhook: each test replays a
    # stored JSON fixture and asserts the exact topic and Markdown message
    # posted to the stream.
    STREAM_NAME = 'reviewboard'
    URL_TEMPLATE = '/api/v1/external/reviewboard?&api_key={api_key}&stream={stream}'
    FIXTURE_DIR_NAME = 'reviewboard'

    def test_review_request_published(self) -> None:
        # A newly published review request with a single reviewer.
        expected_topic = 'Scheduler'
        expected_message = '**eeshangarg** opened [#2: Initial commit](https://rbcommons.com/s/zulip/r/2/):\n\n``` quote\n**Description**: Initial commit\n**Status**: pending\n**Target people**: **drsbgarg**\n**Branch**: master\n```'

        self.send_and_test_stream_message(
            'review_request_published',
            expected_topic,
            expected_message)

    def test_review_request_published_with_multiple_target_people(self) -> None:
        # Multiple reviewers are joined with commas and a final "and".
        expected_topic = 'Scheduler'
        expected_message = '**eeshangarg** opened [#2: Initial commit](https://rbcommons.com/s/zulip/r/2/):\n\n``` quote\n**Description**: Initial commit\n**Status**: pending\n**Target people**: **drsbgarg**, **johndoe**, and **janedoe**\n**Branch**: master\n```'

        self.send_and_test_stream_message(
            'review_request_published__with_multiple_target_people',
            expected_topic,
            expected_message)

    def test_review_request_reopened(self) -> None:
        # Reopening a closed review request.
        expected_topic = 'Scheduler'
        expected_message = '**eeshangarg** reopened [#1: Initial commit (first iteration)](https://rbcommons.com/s/zulip/r/1/):\n\n``` quote\n**Description**: Initial commit (first iteration)\n**Status**: pending\n**Target people**: **drsbgarg**\n**Branch**: master\n```'

        self.send_and_test_stream_message(
            'review_request_reopened',
            expected_topic,
            expected_message)

    def test_review_request_closed(self) -> None:
        # Closing a review request includes the close type in the message.
        expected_topic = 'Scheduler'
        expected_message = '**eeshangarg** closed [#1: Initial commit (first iteration)](https://rbcommons.com/s/zulip/r/1/):\n\n``` quote\n**Description**: Initial commit (first iteration)\n**Status**: submitted\n**Target people**: **drsbgarg**\n**Close type**: submitted\n**Branch**: master\n```'

        self.send_and_test_stream_message(
            'review_request_closed',
            expected_topic,
            expected_message)

    def test_review_published(self) -> None:
        # A review (not a request) links to the review and quotes its body.
        expected_topic = 'Scheduler'
        expected_message = '**eeshangarg** [reviewed](https://rbcommons.com/s/zulip/r/1/#review651728) [#1: Initial commit (first iteration)](https://rbcommons.com/s/zulip/r/1/):\n\n**Review**:\n``` quote\nLeft some minor comments, thanks!\n```'

        self.send_and_test_stream_message(
            'review_published',
            expected_topic,
            expected_message)

    def test_reply_published(self) -> None:
        # A reply to a review; note the fixture carries an empty reply body.
        expected_topic = 'Scheduler'
        expected_message = '**drsbgarg** [replied](https://rbcommons.com/s/zulip/api/review-requests/1/reviews/651728/replies/651732/) to [#1: Initial commit (first iteration)](https://rbcommons.com/s/zulip/api/review-requests/1/):\n\n**Reply**:\n``` quote\n\n```'

        self.send_and_test_stream_message(
            'reply_published',
            expected_topic,
            expected_message)
InfiniaPress/Passenger-Pigeon
refs/heads/master
assets/bower_components/bower_components/forge/tests/forge_ssl/forge/__init__.py
12133432
Azure/azure-sdk-for-python
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
sdk/keyvault/azure-keyvault-certificates/azure/keyvault/certificates/_generated/v7_2/aio/__init__.py
17
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from ._key_vault_client import KeyVaultClient __all__ = ['KeyVaultClient']
darkleons/lama
refs/heads/master
addons/mrp/tests/test_multicompany.py
374
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.tests import common


class TestMrpMulticompany(common.TransactionCase):
    # Python 2 / OpenERP test: verifies that default stock locations are
    # readable by a user whose company differs from the main one.

    def setUp(self):
        # Create a stock-manager user attached to a second company so that
        # the test below runs under multicompany record rules.
        super(TestMrpMulticompany, self).setUp()
        cr, uid = self.cr, self.uid

        # Usefull models
        self.ir_model_data = self.registry('ir.model.data')
        self.res_users = self.registry('res.users')
        self.stock_location = self.registry('stock.location')

        group_user_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'base.group_user')
        group_stock_manager_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.group_stock_manager')
        company_2_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.res_company_1')
        self.multicompany_user_id = self.res_users.create(cr, uid,
            {'name': 'multicomp', 'login': 'multicomp',
             'groups_id': [(6, 0, [group_user_id, group_stock_manager_id])],
             'company_id': company_2_id, 'company_ids': [(6,0,[company_2_id])]})

    def test_00_multicompany_user(self):
        """check no error on getting default mrp.production values in multicompany setting"""
        cr, uid, context = self.cr, self.multicompany_user_id, {}
        fields = ['location_src_id', 'location_dest_id']
        # NOTE(review): default_get is asked for 'location_id', not
        # 'location_src_id', so defaults.get('location_src_id') can never be
        # set and that branch is effectively skipped -- confirm whether the
        # two field lists were meant to match.
        defaults = self.stock_location.default_get(cr, uid, ['location_id', 'location_dest_id', 'type'], context)
        for field in fields:
            if defaults.get(field):
                try:
                    self.stock_location.check_access_rule(cr, uid, [defaults[field]], 'read', context)
                except Exception, exc:
                    assert False, "unreadable location %s: %s" % (field, exc)
LigninTools/Ligpy-Cantera
refs/heads/master
Ligscipy/gen_ODE_scipy.py
1
#!/usr/bin/python
# FIX: shebang was "#!/usr/bin/pythons" (typo), here and in the generated
# script header below.
# -*- coding: utf-8 -*-
"""Generate standalone scipy.integrate.odeint solver scripts for the lignin
pyrolysis kinetic model (heating ramp, plus a stub for the isothermal stage)."""

import sciligpy_utils as lig
import constants as const


def heating(species_IC='Pseudotsuga_menziesii', heating_rate=2.7, T0=25,
            Tmax=500):
    """Write ``solve_ODEs_heating_<species>.py``: a self-contained script
    that integrates the kinetic ODE system while temperature ramps linearly
    from `T0` to `Tmax` (Celsius) at `heating_rate`.

    `species_IC` selects the initial lignin composition from the
    composition list.  The generated script writes its solution to
    ``sol_heating_<species>.dat``.
    """
    # Gather the model definition (species, rate constants, symbolic dy/dt).
    reactionlist, rateconstantlist, compositionlist, module_dir = lig.set_paths()
    y_list = lig.get_specieslist(reactionlist)
    speciesindices, indices_to_species = lig.get_speciesindices(y_list)
    kmatrix = lig.build_k_matrix(rateconstantlist)
    rate_list = lig.build_rates_list_kexp(rateconstantlist, reactionlist,
                                          speciesindices)
    species_rxns = lig.build_species_rxns_dict(reactionlist)
    dydt_expressions = lig.build_dydt_list(rate_list, y_list, species_rxns)
    PLIGC_0, PLIGH_0, PLIGO_0 = lig.define_initial_composition(compositionlist,
                                                               species_IC)

    with open(module_dir+'ODE_scipy/solve_ODEs_heating_%s.py' % species_IC,
              'w') as f:
        beginning = "#!/usr/bin/python\n" \
                    "# -*- coding: utf-8 -*-\n\n" \
                    "from scipy.integrate import odeint\n" \
                    "import time\n" \
                    "import numpy as np\n\n\n" \
                    "def ODEs(y, t, p):\n"
        f.write(beginning)
        # define y in function (temperature T plus every model species)
        y = '\tT'
        for spec in y_list:
            y += (', ' + spec)
        y += ' = y'
        f.write(y + '\n')
        # define parameters in function (heating rate, gas constant, and one
        # Arrhenius triple A/n/E per reaction)
        parameters = '\talpha, R, A0, n0, E0'
        for i in range(1, len(kmatrix)):
            parameters += ', A%s, n%s, E%s' % (i, i, i)
        parameters += ' = p\n'
        f.write(parameters)
        # define ODEs in function; dT/dt is the heating rate alpha
        dydt = '\tdydt = [alpha'
        for ODE in dydt_expressions:
            dydt += (', \n\t\t\t' + ODE.split('=')[1][1:-2])
        dydt += ']'
        f.write(dydt + '\n')
        f.write('\treturn dydt\n\n')
        f.write('def run():\n\tstart = time.time()\n')
        # define values of parameters
        f.write('\talpha = %s\n\tR = %s\n' % (heating_rate, const.GAS_CONST))
        f.write('\t# A, n, E values\n')
        for i in range(len(kmatrix)):
            f.write('\tA%s = %s\n\tn%s = %s\n\tE%s = %s\n' %
                    (i, kmatrix[i][0], i, kmatrix[i][1], i, kmatrix[i][2]))
        # define initial condition (T in Kelvin; all species except the
        # three lignin pseudo-components start at zero)
        f.write('\n\t# Initial conditions\n')
        f.write('\tT0 = %s\n' % (T0+273.15))
        f.write('\tPLIGC = %s\n\tPLIGH = %s\n\tPLIGO = %s\n' %
                (PLIGC_0, PLIGH_0, PLIGO_0))
        IC = ''
        for i in y_list:
            if i == 'PLIGC' or i == 'PLIGH' or i == 'PLIGO':
                continue
            else:
                IC += ('%s = ' % i)
        IC += '0'
        f.write('\t' + IC + '\n\n')
        # define ODE solver parameters
        f.write('\t# ODE solver parameters\n')
        f.write('\tabserr = %s\n' % const.ABSOLUTE_TOLERANCE)
        f.write('\trelerr = %s\n' % const.RELATIVE_TOLERANCE)
        stoptime = (Tmax-T0) / heating_rate
        f.write('\tstoptime = %s\n' % stoptime)
        f.write('\tnumpoints = %s\n\n' % (round(stoptime/0.1)))
        f.write('\tt = [stoptime * float(i) / (numpoints - 1) for i in range(numpoints)]\n')
        y0 = '\ty0 = [T0'
        for spec in y_list:
            y0 += ', %s' % spec
        y0 += ']\n'
        f.write(y0)
        p = '\tp = [alpha, R'
        for i in range(len(kmatrix)):
            p += ', A%s, n%s, E%s' % (i, i, i)
        p += ']\n'
        f.write(p)
        # (a disabled stepwise-integration variant that lived here as a no-op
        # string literal was removed as dead code)
        f.write('\n\tysol = odeint(ODEs, y0, t, args=(p,), atol=abserr, rtol=relerr, mxstep=5000)\n\n')
        f.write("\twith open('sol_heating_%s.dat', 'w') as f:\n" % species_IC)
        f.write("\t\tdata_format = '{:15.10f}' * %s\n" % (len(y_list)+2))
        f.write('\t\tfor tt, yy in zip(t, ysol):\n')
        f.write('\t\t\tprint(data_format.format(tt, *yy), file=f)\n')
        f.write('\t\tend = time.time()\n\t\trun_time = end - start\n\t\tprint(run_time, file=f)\n\n\n')
        f.write("if __name__ == '__main__':\n\trun()")


def isothermal(species_IC='Pseudotsuga_menziesii', continue_simu=True, T=500):
    """Prepare inputs for an isothermal solver script at temperature `T`.

    NOTE: the script-writing part of this function was disabled (it was one
    large no-op string literal in the original and has been removed as dead
    code); only the model setup and initial-condition vector are built.
    """
    reactionlist, rateconstantlist, compositionlist, module_dir = lig.set_paths()
    y_list = lig.get_specieslist(reactionlist)
    speciesindices, indices_to_species = lig.get_speciesindices(y_list)
    kmatrix = lig.build_k_matrix(rateconstantlist)
    rate_list = lig.build_rates_list_kexp(rateconstantlist, reactionlist,
                                          speciesindices)
    species_rxns = lig.build_species_rxns_dict(reactionlist)
    dydt_expressions = lig.build_dydt_list(rate_list, y_list, species_rxns)
    PLIGC_0, PLIGH_0, PLIGO_0 = lig.define_initial_composition(compositionlist,
                                                               species_IC)
    stoptime = 2000
    # Every species starts at zero except the three lignin pseudo-components.
    IC = [0] * len(y_list)
    IC[speciesindices['PLIGC']] = PLIGC_0
    IC[speciesindices['PLIGH']] = PLIGH_0
    IC[speciesindices['PLIGO']] = PLIGO_0


if __name__ == "__main__":
    heating()
michalliu/OpenWrt-Firefly-Libraries
refs/heads/master
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/_threading_local.py
923
"""Thread-local objects. (Note that this module provides a Python version of the threading.local class. Depending on the version of Python you're using, there may be a faster one available. You should always import the `local` class from `threading`.) Thread-local objects support the management of thread-local data. If you have data that you want to be local to a thread, simply create a thread-local object and use its attributes: >>> mydata = local() >>> mydata.number = 42 >>> mydata.number 42 You can also access the local-object's dictionary: >>> mydata.__dict__ {'number': 42} >>> mydata.__dict__.setdefault('widgets', []) [] >>> mydata.widgets [] What's important about thread-local objects is that their data are local to a thread. If we access the data in a different thread: >>> log = [] >>> def f(): ... items = sorted(mydata.__dict__.items()) ... log.append(items) ... mydata.number = 11 ... log.append(mydata.number) >>> import threading >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[], 11] we get different data. Furthermore, changes made in the other thread don't affect data seen in this thread: >>> mydata.number 42 Of course, values you get from a local object, including a __dict__ attribute, are for whatever thread was current at the time the attribute was read. For that reason, you generally don't want to save these values across threads, as they apply only to the thread they came from. You can create custom local objects by subclassing the local class: >>> class MyLocal(local): ... number = 2 ... initialized = False ... def __init__(self, **kw): ... if self.initialized: ... raise SystemError('__init__ called too many times') ... self.initialized = True ... self.__dict__.update(kw) ... def squared(self): ... return self.number ** 2 This can be useful to support default values, methods and initialization. Note that if you define an __init__ method, it will be called each time the local object is used in a separate thread. 
This is necessary to initialize each thread's dictionary. Now if we create a local object: >>> mydata = MyLocal(color='red') Now we have a default number: >>> mydata.number 2 an initial color: >>> mydata.color 'red' >>> del mydata.color And a method that operates on the data: >>> mydata.squared() 4 As before, we can access the data in a separate thread: >>> log = [] >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[('color', 'red'), ('initialized', True)], 11] without affecting this thread's data: >>> mydata.number 2 >>> mydata.color Traceback (most recent call last): ... AttributeError: 'MyLocal' object has no attribute 'color' Note that subclasses can define slots, but they are not thread local. They are shared across threads: >>> class MyLocal(local): ... __slots__ = 'number' >>> mydata = MyLocal() >>> mydata.number = 42 >>> mydata.color = 'red' So, the separate thread: >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() affects what we see: >>> mydata.number 11 >>> del mydata """ from weakref import ref from contextlib import contextmanager __all__ = ["local"] # We need to use objects from the threading module, but the threading # module may also want to use our `local` class, if support for locals # isn't compiled in to the `thread` module. This creates potential problems # with circular imports. For that reason, we don't import `threading` # until the bottom of this file (a hack sufficient to worm around the # potential problems). Note that all platforms on CPython do have support # for locals in the `thread` module, and there is no circular import problem # then, so problems introduced by fiddling the order of imports here won't # manifest. class _localimpl: """A class managing thread-local dicts""" __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__' def __init__(self): # The key used in the Thread objects' attribute dicts. 
# We keep it a string for speed but make it unlikely to clash with # a "real" attribute. self.key = '_threading_local._localimpl.' + str(id(self)) # { id(Thread) -> (ref(Thread), thread-local dict) } self.dicts = {} def get_dict(self): """Return the dict for the current thread. Raises KeyError if none defined.""" thread = current_thread() return self.dicts[id(thread)][1] def create_dict(self): """Create a new dict for the current thread, and return it.""" localdict = {} key = self.key thread = current_thread() idt = id(thread) def local_deleted(_, key=key): # When the localimpl is deleted, remove the thread attribute. thread = wrthread() if thread is not None: del thread.__dict__[key] def thread_deleted(_, idt=idt): # When the thread is deleted, remove the local dict. # Note that this is suboptimal if the thread object gets # caught in a reference loop. We would like to be called # as soon as the OS-level thread ends instead. local = wrlocal() if local is not None: dct = local.dicts.pop(idt) wrlocal = ref(self, local_deleted) wrthread = ref(thread, thread_deleted) thread.__dict__[key] = wrlocal self.dicts[idt] = wrthread, localdict return localdict @contextmanager def _patch(self): impl = object.__getattribute__(self, '_local__impl') try: dct = impl.get_dict() except KeyError: dct = impl.create_dict() args, kw = impl.localargs self.__init__(*args, **kw) with impl.locallock: object.__setattr__(self, '__dict__', dct) yield class local: __slots__ = '_local__impl', '__dict__' def __new__(cls, *args, **kw): if (args or kw) and (cls.__init__ is object.__init__): raise TypeError("Initialization arguments are not supported") self = object.__new__(cls) impl = _localimpl() impl.localargs = (args, kw) impl.locallock = RLock() object.__setattr__(self, '_local__impl', impl) # We need to create the thread dict in anticipation of # __init__ being called, to make sure we don't call it # again ourselves. 
impl.create_dict() return self def __getattribute__(self, name): with _patch(self): return object.__getattribute__(self, name) def __setattr__(self, name, value): if name == '__dict__': raise AttributeError( "%r object attribute '__dict__' is read-only" % self.__class__.__name__) with _patch(self): return object.__setattr__(self, name, value) def __delattr__(self, name): if name == '__dict__': raise AttributeError( "%r object attribute '__dict__' is read-only" % self.__class__.__name__) with _patch(self): return object.__delattr__(self, name) from threading import current_thread, RLock
CityGrid/arsenal
refs/heads/master
server/arsenalweb/views/user.py
1
'''Arsenal user UI.'''
#  Copyright 2015 CityGrid Media, LLC
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from passlib.hash import sha512_crypt
from arsenalweb.views import (
    get_authenticated_user,
    site_layout,
    )
from arsenalweb.models.common import (
    DBSession,
    User,
    )

LOG = logging.getLogger(__name__)


@view_config(route_name='user', permission='view', renderer='arsenalweb:templates/user.pt')
def view_user(request):
    '''Handle requests for user UI route.

    On GET, render the current user's profile page.  On POST
    ('user.submitted'), update the authenticated user's first/last name
    and, if a new password was supplied, re-hash and store it, then
    redirect to /logout so the user re-authenticates.
    '''
    auth_user = get_authenticated_user(request)
    page_title_type = 'user/'
    page_title_name = 'User Data'
    change_pw = False
    perpage = 1
    offset = 1
    total = 1

    if 'user.submitted' in request.POST:
        email_address = request.POST['email_address']
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        password = request.POST['password']

        # Users may only edit their own record; reject mismatched logins.
        # TODO(review): consider returning an explicit error response here
        # rather than silently falling through to the page render.
        if email_address != auth_user['login']:
            LOG.warning('User %s attempted to modify account %s',
                        auth_user['login'], email_address)
        else:
            # Update.  Never log the actual password value.
            LOG.info('UPDATE: email_address=%s,first_name=%s,last_name=%s,password=%s',
                     email_address, first_name, last_name, 'pass')
            try:
                user = DBSession.query(User).filter(User.user_name == email_address).one()
                user.first_name = first_name
                user.last_name = last_name

                if password:
                    LOG.info('Changing password for: %s', email_address)
                    # genconfig() returns a $6$<salt>$ config string;
                    # chars 17:33 extract the 16-char salt portion.
                    salt = sha512_crypt.genconfig()[17:33]
                    encrypted_password = sha512_crypt.encrypt(password, salt=salt)
                    user.salt = salt
                    user.password = encrypted_password
                    DBSession.flush()
                    # Force re-login after a password change.
                    return_url = '/logout?message=Your password has been changed successfully. Log in again.'
                    return HTTPFound(return_url)

                DBSession.flush()
            except Exception as ex:
                # Best-effort update: log the failure and still render the
                # page rather than returning a 500 to the user.
                LOG.exception('Failed to update user %s (%s)', email_address, ex)

    # Get the changes
    auth_user = get_authenticated_user(request)
    subtitle = auth_user['first_last']
    column_selectors = []

    return {
        'au': auth_user,
        'change_pw': change_pw,
        'column_selectors': column_selectors,
        'layout': site_layout('max'),
        'offset': offset,
        'page_title_name': page_title_name,
        'page_title_type': page_title_type,
        'perpage': perpage,
        'subtitle': subtitle,
        'total': total,
    }
darjeeling/django
refs/heads/master
tests/delete_regress/models.py
325
# Models exercising deletion edge cases: generic relations, M2M through
# tables, multi-table and proxy inheritance, to_field FKs, and SET_NULL.
from django.contrib.contenttypes.fields import (
    GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models


class Award(models.Model):
    # Generic FK target: points at any model via (content_type, object_id).
    name = models.CharField(max_length=25)
    object_id = models.PositiveIntegerField()
    content_type = models.ForeignKey(ContentType, models.CASCADE)
    content_object = GenericForeignKey()


class AwardNote(models.Model):
    award = models.ForeignKey(Award, models.CASCADE)
    note = models.CharField(max_length=100)


class Person(models.Model):
    name = models.CharField(max_length=25)
    # Reverse side of Award's generic FK.
    awards = GenericRelation(Award)


class Book(models.Model):
    pagecount = models.IntegerField()


class Toy(models.Model):
    name = models.CharField(max_length=50)


class Child(models.Model):
    name = models.CharField(max_length=50)
    # M2M with an explicit through model (PlayedWith below).
    toys = models.ManyToManyField(Toy, through='PlayedWith')


class PlayedWith(models.Model):
    child = models.ForeignKey(Child, models.CASCADE)
    toy = models.ForeignKey(Toy, models.CASCADE)
    date = models.DateField(db_column='date_col')


class PlayedWithNote(models.Model):
    played = models.ForeignKey(PlayedWith, models.CASCADE)
    note = models.TextField()


class Contact(models.Model):
    label = models.CharField(max_length=100)


class Email(Contact):
    # Multi-table (concrete) inheritance from Contact.
    email_address = models.EmailField(max_length=100)


class Researcher(models.Model):
    contacts = models.ManyToManyField(Contact, related_name="research_contacts")


class Food(models.Model):
    name = models.CharField(max_length=20, unique=True)


class Eaten(models.Model):
    # FK that references a non-PK unique column on Food.
    food = models.ForeignKey(Food, models.CASCADE, to_field="name")
    meal = models.CharField(max_length=20)


# Models for #15776
class Policy(models.Model):
    policy_number = models.CharField(max_length=10)


class Version(models.Model):
    policy = models.ForeignKey(Policy, models.CASCADE)


class Location(models.Model):
    # Nullable FK so deleting a Version can SET_NULL here.
    version = models.ForeignKey(Version, models.SET_NULL, blank=True, null=True)


class Item(models.Model):
    version = models.ForeignKey(Version, models.CASCADE)
    location = models.ForeignKey(Location,
                                 models.SET_NULL, blank=True, null=True)


# Models for #16128
class File(models.Model):
    pass


class Image(File):
    # Proxy chain: Image and Photo share File's table.
    class Meta:
        proxy = True


class Photo(Image):
    class Meta:
        proxy = True


class FooImage(models.Model):
    my_image = models.ForeignKey(Image, models.CASCADE)


class FooFile(models.Model):
    my_file = models.ForeignKey(File, models.CASCADE)


class FooPhoto(models.Model):
    my_photo = models.ForeignKey(Photo, models.CASCADE)


class FooFileProxy(FooFile):
    class Meta:
        proxy = True


class OrgUnit(models.Model):
    name = models.CharField(max_length=64, unique=True)


class Login(models.Model):
    description = models.CharField(max_length=32)
    orgunit = models.ForeignKey(OrgUnit, models.CASCADE)


class House(models.Model):
    address = models.CharField(max_length=32)


class OrderedPerson(models.Model):
    name = models.CharField(max_length=32)
    lives_in = models.ForeignKey(House, models.CASCADE)

    # Default ordering applies to querysets, including deletion collection.
    class Meta:
        ordering = ['name']
jsvelasquezv/agroind-mobile
refs/heads/master
platforms/browser/www/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py
960
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Notes: # # This generates makefiles suitable for inclusion into the Android build system # via an Android.mk file. It is based on make.py, the standard makefile # generator. # # The code below generates a separate .mk file for each target, but # all are sourced by the top-level GypAndroid.mk. This means that all # variables in .mk-files clobber one another, and furthermore that any # variables set potentially clash with other Android build system variables. # Try to avoid setting global variables where possible. import gyp import gyp.common import gyp.generator.make as make # Reuse global functions from make backend. import os import re import subprocess generator_default_variables = { 'OS': 'android', 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'SHARED_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_SUFFIX': '.so', 'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)', 'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)', 'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)', 'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)', 'LIB_DIR': '$(obj).$(TOOLSET)', 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python. 'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python. 'RULE_INPUT_PATH': '$(RULE_SOURCES)', 'RULE_INPUT_EXT': '$(suffix $<)', 'RULE_INPUT_NAME': '$(notdir $<)', 'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)', } # Make supports multiple toolsets generator_supports_multiple_toolsets = True # Generator-specific gyp specs. generator_additional_non_configuration_keys = [ # Boolean to declare that this target does not want its name mangled. 'android_unmangled_name', # Map of android build system variables to set. 
'aosp_build_settings', ] generator_additional_path_sections = [] generator_extra_sources_for_rules = [] ALL_MODULES_FOOTER = """\ # "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from # all the included sub-makefiles. This is just here to clarify. gyp_all_modules: """ header = """\ # This file is generated by gyp; do not edit. """ # Map gyp target types to Android module classes. MODULE_CLASSES = { 'static_library': 'STATIC_LIBRARIES', 'shared_library': 'SHARED_LIBRARIES', 'executable': 'EXECUTABLES', } def IsCPPExtension(ext): return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx' def Sourceify(path): """Convert a path to its source directory form. The Android backend does not support options.generator_output, so this function is a noop.""" return path # Map from qualified target to path to output. # For Android, the target of these maps is a tuple ('static', 'modulename'), # ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string, # since we link by module. target_outputs = {} # Map from qualified target to any linkable output. A subset # of target_outputs. E.g. when mybinary depends on liba, we want to # include liba in the linker line; when otherbinary depends on # mybinary, we just want to build mybinary first. target_link_deps = {} class AndroidMkWriter(object): """AndroidMkWriter packages up the writing of one target-specific Android.mk. Its only real entry point is Write(), and is mostly used for namespacing. """ def __init__(self, android_top_dir): self.android_top_dir = android_top_dir def Write(self, qualified_target, relative_target, base_path, output_filename, spec, configs, part_of_all, write_alias_target, sdk_version): """The main entry point: writes a .mk file for a single target. 
Arguments: qualified_target: target we're generating relative_target: qualified target name relative to the root base_path: path relative to source root we're building in, used to resolve target-relative paths output_filename: output .mk file name to write spec, configs: gyp info part_of_all: flag indicating this target is part of 'all' write_alias_target: flag indicating whether to create short aliases for this target sdk_version: what to emit for LOCAL_SDK_VERSION in output """ gyp.common.EnsureDirExists(output_filename) self.fp = open(output_filename, 'w') self.fp.write(header) self.qualified_target = qualified_target self.relative_target = relative_target self.path = base_path self.target = spec['target_name'] self.type = spec['type'] self.toolset = spec['toolset'] deps, link_deps = self.ComputeDeps(spec) # Some of the generation below can add extra output, sources, or # link dependencies. All of the out params of the functions that # follow use names like extra_foo. extra_outputs = [] extra_sources = [] self.android_class = MODULE_CLASSES.get(self.type, 'GYP') self.android_module = self.ComputeAndroidModule(spec) (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec) self.output = self.output_binary = self.ComputeOutput(spec) # Standard header. self.WriteLn('include $(CLEAR_VARS)\n') # Module class and name. self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class) self.WriteLn('LOCAL_MODULE := ' + self.android_module) # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE. # The library module classes fail if the stem is set. ComputeOutputParts # makes sure that stem == modulename in these cases. 
if self.android_stem != self.android_module: self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem) self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix) if self.toolset == 'host': self.WriteLn('LOCAL_IS_HOST_MODULE := true') self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)') elif sdk_version > 0: self.WriteLn('LOCAL_MODULE_TARGET_ARCH := ' '$(TARGET_$(GYP_VAR_PREFIX)ARCH)') self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version) # Grab output directories; needed for Actions and Rules. if self.toolset == 'host': self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))') else: self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))') self.WriteLn('gyp_shared_intermediate_dir := ' '$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))') self.WriteLn() # List files this target depends on so that actions/rules/copies/sources # can depend on the list. # TODO: doesn't pull in things through transitive link deps; needed? target_dependencies = [x[1] for x in deps if x[0] == 'path'] self.WriteLn('# Make sure our deps are built first.') self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES', local_pathify=True) # Actions must come first, since they can generate more OBJs for use below. if 'actions' in spec: self.WriteActions(spec['actions'], extra_sources, extra_outputs) # Rules must be early like actions. if 'rules' in spec: self.WriteRules(spec['rules'], extra_sources, extra_outputs) if 'copies' in spec: self.WriteCopies(spec['copies'], extra_outputs) # GYP generated outputs. self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True) # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend # on both our dependency targets and our generated files. 
self.WriteLn('# Make sure our deps and generated files are built first.') self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) ' '$(GYP_GENERATED_OUTPUTS)') self.WriteLn() # Sources. if spec.get('sources', []) or extra_sources: self.WriteSources(spec, configs, extra_sources) self.WriteTarget(spec, configs, deps, link_deps, part_of_all, write_alias_target) # Update global list of target outputs, used in dependency tracking. target_outputs[qualified_target] = ('path', self.output_binary) # Update global list of link dependencies. if self.type == 'static_library': target_link_deps[qualified_target] = ('static', self.android_module) elif self.type == 'shared_library': target_link_deps[qualified_target] = ('shared', self.android_module) self.fp.close() return self.android_module def WriteActions(self, actions, extra_sources, extra_outputs): """Write Makefile code for any 'actions' from the gyp input. extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these actions (used to make other pieces dependent on these actions) """ for action in actions: name = make.StringToMakefileVariable('%s_%s' % (self.relative_target, action['action_name'])) self.WriteLn('### Rules for action "%s":' % action['action_name']) inputs = action['inputs'] outputs = action['outputs'] # Build up a list of outputs. # Collect the output dirs we'll need. dirs = set() for out in outputs: if not out.startswith('$'): print ('WARNING: Action for target "%s" writes output to local path ' '"%s".' % (self.target, out)) dir = os.path.split(out)[0] if dir: dirs.add(dir) if int(action.get('process_outputs_as_sources', False)): extra_sources += outputs # Prepare the actual command. 
command = gyp.common.EncodePOSIXShellList(action['action']) if 'message' in action: quiet_cmd = 'Gyp action: %s ($@)' % action['message'] else: quiet_cmd = 'Gyp action: %s ($@)' % name if len(dirs) > 0: command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command cd_action = 'cd $(gyp_local_path)/%s; ' % self.path command = cd_action + command # The makefile rules are all relative to the top dir, but the gyp actions # are defined relative to their containing dir. This replaces the gyp_* # variables for the action rule with an absolute version so that the # output goes in the right place. # Only write the gyp_* rules for the "primary" output (:1); # it's superfluous for the "extra outputs", and this avoids accidentally # writing duplicate dummy rules for those outputs. main_output = make.QuoteSpaces(self.LocalPathify(outputs[0])) self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output) self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output) self.WriteLn('%s: gyp_intermediate_dir := ' '$(abspath $(gyp_intermediate_dir))' % main_output) self.WriteLn('%s: gyp_shared_intermediate_dir := ' '$(abspath $(gyp_shared_intermediate_dir))' % main_output) # Android's envsetup.sh adds a number of directories to the path including # the built host binary directory. This causes actions/rules invoked by # gyp to sometimes use these instead of system versions, e.g. bison. # The built host binaries may not be suitable, and can cause errors. # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable # set by envsetup. self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output) # Don't allow spaces in input/output filenames, but make an exception for # filenames which start with '$(' since it's okay for there to be spaces # inside of make function/macro invocations. 
for input in inputs: if not input.startswith('$(') and ' ' in input: raise gyp.common.GypError( 'Action input filename "%s" in target %s contains a space' % (input, self.target)) for output in outputs: if not output.startswith('$(') and ' ' in output: raise gyp.common.GypError( 'Action output filename "%s" in target %s contains a space' % (output, self.target)) self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' % (main_output, ' '.join(map(self.LocalPathify, inputs)))) self.WriteLn('\t@echo "%s"' % quiet_cmd) self.WriteLn('\t$(hide)%s\n' % command) for output in outputs[1:]: # Make each output depend on the main output, with an empty command # to force make to notice that the mtime has changed. self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output)) extra_outputs += outputs self.WriteLn() self.WriteLn() def WriteRules(self, rules, extra_sources, extra_outputs): """Write Makefile code for any 'rules' from the gyp input. extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these rules (used to make other pieces dependent on these rules) """ if len(rules) == 0: return for rule in rules: if len(rule.get('rule_sources', [])) == 0: continue name = make.StringToMakefileVariable('%s_%s' % (self.relative_target, rule['rule_name'])) self.WriteLn('\n### Generated for rule "%s":' % name) self.WriteLn('# "%s":' % rule) inputs = rule.get('inputs') for rule_source in rule.get('rule_sources', []): (rule_source_dirname, rule_source_basename) = os.path.split(rule_source) (rule_source_root, rule_source_ext) = \ os.path.splitext(rule_source_basename) outputs = [self.ExpandInputRoot(out, rule_source_root, rule_source_dirname) for out in rule['outputs']] dirs = set() for out in outputs: if not out.startswith('$'): print ('WARNING: Rule for target %s writes output to local path %s' % (self.target, out)) dir = os.path.dirname(out) if dir: dirs.add(dir) extra_outputs += outputs if 
int(rule.get('process_outputs_as_sources', False)): extra_sources.extend(outputs) components = [] for component in rule['action']: component = self.ExpandInputRoot(component, rule_source_root, rule_source_dirname) if '$(RULE_SOURCES)' in component: component = component.replace('$(RULE_SOURCES)', rule_source) components.append(component) command = gyp.common.EncodePOSIXShellList(components) cd_action = 'cd $(gyp_local_path)/%s; ' % self.path command = cd_action + command if dirs: command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command # We set up a rule to build the first output, and then set up # a rule for each additional output to depend on the first. outputs = map(self.LocalPathify, outputs) main_output = outputs[0] self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output) self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output) self.WriteLn('%s: gyp_intermediate_dir := ' '$(abspath $(gyp_intermediate_dir))' % main_output) self.WriteLn('%s: gyp_shared_intermediate_dir := ' '$(abspath $(gyp_shared_intermediate_dir))' % main_output) # See explanation in WriteActions. self.WriteLn('%s: export PATH := ' '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output) main_output_deps = self.LocalPathify(rule_source) if inputs: main_output_deps += ' ' main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs]) self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' % (main_output, main_output_deps)) self.WriteLn('\t%s\n' % command) for output in outputs[1:]: # Make each output depend on the main output, with an empty command # to force make to notice that the mtime has changed. self.WriteLn('%s: %s ;' % (output, main_output)) self.WriteLn() self.WriteLn() def WriteCopies(self, copies, extra_outputs): """Write Makefile code for any 'copies' from the gyp input. 
extra_outputs: a list that will be filled in with any outputs of this action (used to make other pieces dependent on this action) """ self.WriteLn('### Generated for copy rule.') variable = make.StringToMakefileVariable(self.relative_target + '_copies') outputs = [] for copy in copies: for path in copy['files']: # The Android build system does not allow generation of files into the # source tree. The destination should start with a variable, which will # typically be $(gyp_intermediate_dir) or # $(gyp_shared_intermediate_dir). Note that we can't use an assertion # because some of the gyp tests depend on this. if not copy['destination'].startswith('$'): print ('WARNING: Copy rule for target %s writes output to ' 'local path %s' % (self.target, copy['destination'])) # LocalPathify() calls normpath, stripping trailing slashes. path = Sourceify(self.LocalPathify(path)) filename = os.path.split(path)[1] output = Sourceify(self.LocalPathify(os.path.join(copy['destination'], filename))) self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' % (output, path)) self.WriteLn('\t@echo Copying: $@') self.WriteLn('\t$(hide) mkdir -p $(dir $@)') self.WriteLn('\t$(hide) $(ACP) -rpf $< $@') self.WriteLn() outputs.append(output) self.WriteLn('%s = %s' % (variable, ' '.join(map(make.QuoteSpaces, outputs)))) extra_outputs.append('$(%s)' % variable) self.WriteLn() def WriteSourceFlags(self, spec, configs): """Write out the flags and include paths used to compile source files for the current target. Args: spec, configs: input from gyp. 
""" for configname, config in sorted(configs.iteritems()): extracted_includes = [] self.WriteLn('\n# Flags passed to both C and C++ files.') cflags, includes_from_cflags = self.ExtractIncludesFromCFlags( config.get('cflags', []) + config.get('cflags_c', [])) extracted_includes.extend(includes_from_cflags) self.WriteList(cflags, 'MY_CFLAGS_%s' % configname) self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname, prefix='-D', quoter=make.EscapeCppDefine) self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS') includes = list(config.get('include_dirs', [])) includes.extend(extracted_includes) includes = map(Sourceify, map(self.LocalPathify, includes)) includes = self.NormalizeIncludePaths(includes) self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname) self.WriteLn('\n# Flags passed to only C++ (and not C) files.') self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname) self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) ' '$(MY_DEFS_$(GYP_CONFIGURATION))') # Undefine ANDROID for host modules # TODO: the source code should not use macro ANDROID to tell if it's host # or target module. if self.toolset == 'host': self.WriteLn('# Undefine ANDROID for host modules') self.WriteLn('LOCAL_CFLAGS += -UANDROID') self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) ' '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))') self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))') # Android uses separate flags for assembly file invocations, but gyp expects # the same CFLAGS to be applied: self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)') def WriteSources(self, spec, configs, extra_sources): """Write Makefile code for any 'sources' from the gyp input. These are source files necessary to build the current target. We need to handle shared_intermediate directory source files as a special case by copying them to the intermediate directory and treating them as a genereated sources. 
Otherwise the Android build rules won't pick them up. Args: spec, configs: input from gyp. extra_sources: Sources generated from Actions or Rules. """ sources = filter(make.Compilable, spec.get('sources', [])) generated_not_sources = [x for x in extra_sources if not make.Compilable(x)] extra_sources = filter(make.Compilable, extra_sources) # Determine and output the C++ extension used by these sources. # We simply find the first C++ file and use that extension. all_sources = sources + extra_sources local_cpp_extension = '.cpp' for source in all_sources: (root, ext) = os.path.splitext(source) if IsCPPExtension(ext): local_cpp_extension = ext break if local_cpp_extension != '.cpp': self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension) # We need to move any non-generated sources that are coming from the # shared intermediate directory out of LOCAL_SRC_FILES and put them # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files # that don't match our local_cpp_extension, since Android will only # generate Makefile rules for a single LOCAL_CPP_EXTENSION. local_files = [] for source in sources: (root, ext) = os.path.splitext(source) if '$(gyp_shared_intermediate_dir)' in source: extra_sources.append(source) elif '$(gyp_intermediate_dir)' in source: extra_sources.append(source) elif IsCPPExtension(ext) and ext != local_cpp_extension: extra_sources.append(source) else: local_files.append(os.path.normpath(os.path.join(self.path, source))) # For any generated source, if it is coming from the shared intermediate # directory then we add a Make rule to copy them to the local intermediate # directory first. This is because the Android LOCAL_GENERATED_SOURCES # must be in the local module intermediate directory for the compile rules # to work properly. If the file has the wrong C++ extension, then we add # a rule to copy that to intermediates and use the new version. 
    # NOTE(review): this statement is the tail of a method whose `def` line is
    # above the visible window (it emits copy rules for generated sources).
    final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
    # directory as header search path, for GCC searches headers in the
    # directory that contains the source file by default.
    origin_src_dirs = []
    for source in extra_sources:
      local_file = source
      if not '$(gyp_intermediate_dir)/' in local_file:
        basename = os.path.basename(local_file)
        local_file = '$(gyp_intermediate_dir)/' + basename
      (root, ext) = os.path.splitext(local_file)
      if IsCPPExtension(ext) and ext != local_cpp_extension:
        local_file = root + local_cpp_extension
      if local_file != source:
        self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
        self.WriteLn('\tmkdir -p $(@D); cp $< $@')
        origin_src_dirs.append(os.path.dirname(source))
      final_generated_sources.append(local_file)

    # We add back in all of the non-compilable stuff to make sure that the
    # make rules have dependencies on them.
    final_generated_sources.extend(generated_not_sources)
    self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

    origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
    origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
    self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

    self.WriteList(local_files, 'LOCAL_SRC_FILES')

    # Write out the flags used to compile the source; this must be done last
    # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
    self.WriteSourceFlags(spec, configs)

  def ComputeAndroidModule(self, spec):
    """Return the Android module name used for a gyp spec.

    We use the complete qualified target name to avoid collisions between
    duplicate targets in different directories. We also add a suffix to
    distinguish gyp-generated module names.
    """
    # An explicitly requested unmangled name bypasses all decoration below.
    if int(spec.get('android_unmangled_name', 0)):
      assert self.type != 'shared_library' or self.target.startswith('lib')
      return self.target

    if self.type == 'shared_library':
      # For reasons of convention, the Android build system requires that all
      # shared library modules are named 'libfoo' when generating -l flags.
      prefix = 'lib_'
    else:
      prefix = ''
    if spec['toolset'] == 'host':
      suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
    else:
      suffix = '_gyp'
    if self.path:
      middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
    else:
      middle = make.StringToMakefileVariable(self.target)
    return ''.join([prefix, middle, suffix])

  def ComputeOutputParts(self, spec):
    """Return the 'output basename' of a gyp spec, split into filename + ext.

    Android libraries must be named the same thing as their module name,
    otherwise the linker can't find them, so product_name and so on must be
    ignored if we are building a library, and the "lib" prepending is
    not done for Android.
    """
    assert self.type != 'loadable_module' # TODO: not supported?

    target = spec['target_name']
    target_prefix = ''
    target_ext = ''
    if self.type == 'static_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.a'
    elif self.type == 'shared_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.so'
    elif self.type == 'none':
      target_ext = '.stamp'
    elif self.type != 'executable':
      print ("ERROR: What output file should be generated?",
             "type", self.type, "target", target)

    # Only non-library targets honor product_prefix/product_name overrides.
    if self.type != 'static_library' and self.type != 'shared_library':
      target_prefix = spec.get('product_prefix', target_prefix)
      target = spec.get('product_name', target)
      product_ext = spec.get('product_extension')
      if product_ext:
        target_ext = '.' + product_ext

    target_stem = target_prefix + target
    return (target_stem, target_ext)

  def ComputeOutputBasename(self, spec):
    """Return the 'output basename' of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
      'libfoobar.so'
    """
    return ''.join(self.ComputeOutputParts(spec))

  def ComputeOutput(self, spec):
    """Return the 'output' (full output path) of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
      '$(obj)/baz/libfoobar.so'
    """
    if self.type == 'executable':
      # We install host executables into shared_intermediate_dir so they can be
      # run by gyp rules that refer to PRODUCT_DIR.
      path = '$(gyp_shared_intermediate_dir)'
    elif self.type == 'shared_library':
      if self.toolset == 'host':
        path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
      else:
        path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
    else:
      # Other targets just get built into their intermediate dir.
      if self.toolset == 'host':
        path = ('$(call intermediates-dir-for,%s,%s,true,,'
                '$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
                                             self.android_module))
      else:
        path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
                % (self.android_class, self.android_module))

    assert spec.get('product_dir') is None # TODO: not supported?
    return os.path.join(path, self.ComputeOutputBasename(spec))

  def NormalizeIncludePaths(self, include_paths):
    """ Normalize include_paths.
    Convert absolute paths to relative to the Android top directory.

    Args:
      include_paths: A list of unprocessed include paths.
    Returns:
      A list of normalized include paths.
    """
    normalized = []
    for path in include_paths:
      if path[0] == '/':
        path = gyp.common.RelativePath(path, self.android_top_dir)
      normalized.append(path)
    return normalized

  def ExtractIncludesFromCFlags(self, cflags):
    """Extract includes "-I..." out from cflags

    Args:
      cflags: A list of compiler flags, which may be mixed with "-I.."
    Returns:
      A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
    """
    clean_cflags = []
    include_paths = []
    for flag in cflags:
      if flag.startswith('-I'):
        include_paths.append(flag[2:])
      else:
        clean_cflags.append(flag)

    return (clean_cflags, include_paths)

  def FilterLibraries(self, libraries):
    """Filter the 'libraries' key to separate things that shouldn't be ldflags.

    Library entries that look like filenames should be converted to android
    module names instead of being passed to the linker as flags.

    Args:
      libraries: the value of spec.get('libraries')
    Returns:
      A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
    """
    static_lib_modules = []
    dynamic_lib_modules = []
    ldflags = []
    for libs in libraries:
      # Libs can have multiple words.
      for lib in libs.split():
        # Filter the system libraries, which are added by default by the
        # Android build system.
        if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
            lib.endswith('libgcc.a')):
          continue
        # A '.a' path becomes a static module name; '.so' a shared one.
        match = re.search(r'([^/]+)\.a$', lib)
        if match:
          static_lib_modules.append(match.group(1))
          continue
        match = re.search(r'([^/]+)\.so$', lib)
        if match:
          dynamic_lib_modules.append(match.group(1))
          continue
        if lib.startswith('-l'):
          ldflags.append(lib)
    return (static_lib_modules, dynamic_lib_modules, ldflags)

  def ComputeDeps(self, spec):
    """Compute the dependencies of a gyp spec.

    Returns a tuple (deps, link_deps), where each is a list of
    filenames that will need to be put in front of make for either
    building (deps) or linking (link_deps).
    """
    deps = []
    link_deps = []
    if 'dependencies' in spec:
      deps.extend([target_outputs[dep] for dep in spec['dependencies']
                   if target_outputs[dep]])
      for dep in spec['dependencies']:
        if dep in target_link_deps:
          link_deps.append(target_link_deps[dep])
      deps.extend(link_deps)
    return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))

  def WriteTargetFlags(self, spec, configs, link_deps):
    """Write Makefile code to specify the link flags and library dependencies.

    spec, configs: input from gyp.
    link_deps: link dependency list; see ComputeDeps()
    """
    # Libraries (i.e. -lfoo)
    # These must be included even for static libraries as some of them provide
    # implicit include paths through the build system.
    libraries = gyp.common.uniquer(spec.get('libraries', []))
    static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)

    if self.type != 'static_library':
      for configname, config in sorted(configs.iteritems()):
        ldflags = list(config.get('ldflags', []))
        self.WriteLn('')
        self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
      self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
      self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
                   '$(LOCAL_GYP_LIBS)')

    # Link dependencies (i.e. other gyp targets this target depends on)
    # These need not be included for static libraries as within the gyp build
    # we do not use the implicit include path mechanism.
    if self.type != 'static_library':
      static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
      shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
    else:
      static_link_deps = []
      shared_link_deps = []

    # Only write the lists if they are non-empty.
    if static_libs or static_link_deps:
      self.WriteLn('')
      self.WriteList(static_libs + static_link_deps,
                     'LOCAL_STATIC_LIBRARIES')
      self.WriteLn('# Enable grouping to fix circular references')
      self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
    if dynamic_libs or shared_link_deps:
      self.WriteLn('')
      self.WriteList(dynamic_libs + shared_link_deps,
                     'LOCAL_SHARED_LIBRARIES')

  def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
                  write_alias_target):
    """Write Makefile code to produce the final target of the gyp spec.

    spec, configs: input from gyp.
    deps, link_deps: dependency lists; see ComputeDeps()
    part_of_all: flag indicating this target is part of 'all'
    write_alias_target: flag indicating whether to create short aliases for
                        this target
    """
    self.WriteLn('### Rules for final target.')

    if self.type != 'none':
      self.WriteTargetFlags(spec, configs, link_deps)

    settings = spec.get('aosp_build_settings', {})
    if settings:
      self.WriteLn('### Set directly by aosp_build_settings.')
      for k, v in settings.iteritems():
        if isinstance(v, list):
          self.WriteList(v, k)
        else:
          self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
      self.WriteLn('')

    # Add to the set of targets which represent the gyp 'all' target. We use the
    # name 'gyp_all_modules' as the Android build system doesn't allow the use
    # of the Make target 'all' and because 'all_modules' is the equivalent of
    # the Make target 'all' on Android.
    if part_of_all and write_alias_target:
      self.WriteLn('# Add target alias to "gyp_all_modules" target.')
      self.WriteLn('.PHONY: gyp_all_modules')
      self.WriteLn('gyp_all_modules: %s' % self.android_module)
      self.WriteLn('')

    # Add an alias from the gyp target name to the Android module name. This
    # simplifies manual builds of the target, and is required by the test
    # framework.
    if self.target != self.android_module and write_alias_target:
      self.WriteLn('# Alias gyp target name.')
      self.WriteLn('.PHONY: %s' % self.target)
      self.WriteLn('%s: %s' % (self.target, self.android_module))
      self.WriteLn('')

    # Add the command to trigger build of the target type depending
    # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
    # NOTE: This has to come last!
    modifier = ''
    if self.toolset == 'host':
      modifier = 'HOST_'
    if self.type == 'static_library':
      self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
    elif self.type == 'shared_library':
      self.WriteLn('LOCAL_PRELINK_MODULE := false')
      self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
    elif self.type == 'executable':
      self.WriteLn('LOCAL_CXX_STL := libc++_static')
      # Executables are for build and test purposes only, so they're installed
      # to a directory that doesn't get included in the system image.
      self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
      self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
    else:
      # 'none' targets: emit a hand-written stamp rule instead of including
      # one of the standard build fragments.
      self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
      if self.toolset == 'target':
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
      else:
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
      self.WriteLn()
      self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
      self.WriteLn()
      self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
      self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) touch $@')
      self.WriteLn()
      self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')

  def WriteList(self, value_list, variable=None, prefix='',
                quoter=make.QuoteIfNecessary, local_pathify=False):
    """Write a variable definition that is a list of values.

    E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
         foo = blaha blahb
    but in a pretty-printed style.
    """
    values = ''
    if value_list:
      value_list = [quoter(prefix + l) for l in value_list]
      if local_pathify:
        value_list = [self.LocalPathify(l) for l in value_list]
      values = ' \\\n\t' + ' \\\n\t'.join(value_list)
    self.fp.write('%s :=%s\n\n' % (variable, values))

  def WriteLn(self, text=''):
    """Write a single line (plus newline) to the output makefile."""
    self.fp.write(text + '\n')

  def LocalPathify(self, path):
    """Convert a subdirectory-relative path into a normalized path which starts
    with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
    Absolute paths, or paths that contain variables, are just normalized."""
    if '$(' in path or os.path.isabs(path):
      # path is not a file in the project tree in this case, but calling
      # normpath is still important for trimming trailing slashes.
      return os.path.normpath(path)
    local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
    local_path = os.path.normpath(local_path)
    # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
    # - i.e. that the resulting path is still inside the project tree. The
    # path may legitimately have ended up containing just $(LOCAL_PATH), though,
    # so we don't look for a slash.
    assert local_path.startswith('$(LOCAL_PATH)'), (
           'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
    return local_path

  def ExpandInputRoot(self, template, expansion, dirname):
    """Substitute %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders, if any."""
    if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
      return template
    path = template % {
        'INPUT_ROOT': expansion,
        'INPUT_DIRNAME': dirname,
        }
    return os.path.normpath(path)


def PerformBuild(data, configurations, params):
  """Invoke the Android build via make on the generated GypAndroid.mk."""
  # The android backend only supports the default configuration.
  options = params['options']
  makefile = os.path.abspath(os.path.join(options.toplevel_dir,
                                          'GypAndroid.mk'))
  env = dict(os.environ)
  env['ONE_SHOT_MAKEFILE'] = makefile
  arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
  print 'Building: %s' % arguments
  subprocess.check_call(arguments, env=env)


def GenerateOutput(target_list, target_dicts, data, params):
  """Write one .mk per target plus a root GypAndroid.mk that includes them."""
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  write_alias_targets = generator_flags.get('write_alias_targets', True)
  sdk_version = generator_flags.get('aosp_sdk_version', 0)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  # NOTE(review): `toolsets`, `builddir_name` and `srcdir` appear unused in
  # the visible span — candidates for removal, to be confirmed upstream.
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'GypAndroid' + options.suffix + '.mk'
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')

  root_makefile.write(header)

  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  android_modules = {}
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    relative_build_file = gyp.common.RelativePath(build_file,
                                                  options.toplevel_dir)
    build_files.add(relative_build_file)
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    part_of_all = qualified_target in needed_targets
    if limit_to_target_all and not part_of_all:
      continue

    relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
                                                 toolset)
    writer = AndroidMkWriter(android_top_dir)
    android_module = writer.Write(qualified_target, relative_target, base_path,
                                  output_file, spec, configs,
                                  part_of_all=part_of_all,
                                  write_alias_target=write_alias_targets,
                                  sdk_version=sdk_version)
    if android_module in android_modules:
      print ('ERROR: Android module names must be unique. The following '
             'targets both generate Android module name %s.\n %s\n %s' %
             (android_module, android_modules[android_module],
              qualified_target))
      return
    android_modules[android_module] = qualified_target

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)
  root_makefile.write('GYP_VAR_PREFIX ?=\n')
  root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n')
  root_makefile.write('GYP_HOST_MULTILIB ?= first\n')

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
  root_makefile.write('\n')

  if write_alias_targets:
    root_makefile.write(ALL_MODULES_FOOTER)

  root_makefile.close()
MarcDufresne/SysWatch
refs/heads/master
ServerMonitorClient.py
1
from simplejson import dumps as json_dumps
from flask import Flask, Response
from flask.templating import render_template

from system_apis import api
import config

app = Flask(__name__)

# Overwritten at startup (in __main__) from the config file.
# NOTE(review): declared as tuples here but reassigned below — PROCESSES
# becomes a comma-separated *string* and DISKS a *list* of strings. The
# handlers below rely on the post-startup types; confirm against config.
DISKS = ()
PROCESSES = ()


@app.route('/')
def home():
    """Render the dashboard page with a snapshot of all monitored metrics."""
    cpu_info = api.get_cpu_info()
    ram_info = api.get_ram_info()
    network = api.get_network_info()

    # PROCESSES is a comma-separated string once loaded from config.
    if PROCESSES:
        processes = api.get_filtered_processes(PROCESSES.split(','))
    else:
        processes = []

    # Collect per-disk info, skipping disks the API returns nothing for.
    disks_data = {}
    for disk in DISKS:
        data = api.get_disk_info(disk)
        if data:
            disks_data[disk] = data

    uptime_response = api.get_uptime()
    if uptime_response:
        uptime = uptime_response.get('uptime')
    else:
        uptime = None

    ctx = {
        'cpu': cpu_info,
        'ram': ram_info,
        'uptime': uptime,
        'network': network,
        'disks': disks_data,
        'processes': processes,
    }
    return render_template('home/home.html', ctx=ctx)


# API URLs — each endpoint returns the raw system_apis result as JSON.
@app.route('/api/cpu/')
def api_get_cpu_info():
    ctx = api.get_cpu_info()
    return Response(response=json_dumps(ctx), mimetype="application/json")


@app.route('/api/cpu/usage/')
def api_get_cpu_usage():
    ctx = api.get_cpu_usage()
    return Response(response=json_dumps(ctx), mimetype="application/json")


@app.route('/api/ram/')
def api_get_ram_info():
    ctx = api.get_ram_info()
    return Response(response=json_dumps(ctx), mimetype="application/json")


@app.route('/api/ram/usage/')
def api_get_ram_usage():
    ctx = api.get_ram_usage()
    return Response(response=json_dumps(ctx), mimetype="application/json")


@app.route('/api/uptime/')
def api_get_uptime():
    ctx = api.get_uptime()
    return Response(response=json_dumps(ctx), mimetype="application/json")


@app.route('/api/network/')
def api_get_network_info():
    ctx = api.get_network_info()
    return Response(response=json_dumps(ctx), mimetype="application/json")


@app.route('/api/disks/')
def api_get_all_disks_info():
    # Same skip-empty logic as the dashboard view.
    disks_data = {}
    for disk in DISKS:
        data = api.get_disk_info(disk)
        if data:
            disks_data[disk] = data
    return Response(response=json_dumps(disks_data),
                    mimetype="application/json")


@app.route('/api/disk/', defaults={'disk': '/'})
@app.route('/api/disk/<path:disk>')
def api_get_disk_info(disk='/'):
    # Flask's <path:> converter strips the leading slash; restore it.
    if not disk.startswith('/'):
        disk = '/' + disk
    ctx = api.get_disk_info(disk=disk)
    return Response(response=json_dumps(ctx), mimetype="application/json")


# NOTE(review): `defaults={'process_filter': PROCESSES}` binds the *import
# time* value of PROCESSES (the empty tuple), not the value loaded in
# __main__. The handler's falsy check below papers over this, but
# `defaults={'process_filter': None}` would express the intent directly.
@app.route('/api/process/', defaults={'process_filter': PROCESSES})
@app.route('/api/process/<process_filter>')
def api_get_filtered_processes(process_filter=None):
    # Fall back to the configured process list when no filter is given.
    if not process_filter:
        process_filter = PROCESSES if PROCESSES else ''
    process_filter = process_filter.split(',')
    ctx = api.get_filtered_processes(process_filter)
    return Response(response=json_dumps(ctx), mimetype="application/json")


if __name__ == '__main__':
    config.check_config_file()
    app_config = config.load_config_file()
    PROCESSES = app_config.get('processes')
    DISKS = app_config.get('disks', '').split(',')
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug debugger to the network — disable debug for deployment.
    app.run(threaded=True, host='0.0.0.0', debug=True)
dulems/hue
refs/heads/master
desktop/core/ext-py/Django-1.6.10/tests/aggregation/models.py
114
# coding: utf-8 from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Author(models.Model): name = models.CharField(max_length=100) age = models.IntegerField() friends = models.ManyToManyField('self', blank=True) def __str__(self): return self.name @python_2_unicode_compatible class Publisher(models.Model): name = models.CharField(max_length=255) num_awards = models.IntegerField() def __str__(self): return self.name @python_2_unicode_compatible class Book(models.Model): isbn = models.CharField(max_length=9) name = models.CharField(max_length=255) pages = models.IntegerField() rating = models.FloatField() price = models.DecimalField(decimal_places=2, max_digits=6) authors = models.ManyToManyField(Author) contact = models.ForeignKey(Author, related_name='book_contact_set') publisher = models.ForeignKey(Publisher) pubdate = models.DateField() def __str__(self): return self.name @python_2_unicode_compatible class Store(models.Model): name = models.CharField(max_length=255) books = models.ManyToManyField(Book) original_opening = models.DateTimeField() friday_night_closing = models.TimeField() def __str__(self): return self.name
mkschreder/openwrt
refs/heads/master
scripts/flashing/jungo-image.py
761
#!/usr/bin/env python
#
# Copyright 2008, 2009 (C) Jose Vasconcellos <jvasco@verizon.net>
#
# A script that can communicate with jungo-based routers
# (such as MI424-WR, USR8200 and WRV54G) to backup the installed
# firmware and replace the boot loader.
#
# Tested with Python 2.5 on Linux and Windows
#
"""Usage: %s [options] <IP_address> [image.bin | url]
Valid options:
\t-h | --help: usage statement
\t-d | --dump: create a flash dump
\t-f | --file: use <filename> to store dump contents
\t-u | --user: provide username (default admin)
\t-p | --pass: provide password (default password1)
\t     --port: set port for http (default 8080)
\t-q | --quiet: don't display unnecessary information
\t-r | --reboot: reboot target on successful transfer
\t-V | --version: display version information

If no image (or url) is given, a flash dump is created.
A built-in http server is used when an image file is provided.
"""

import os
import sys
import getopt
import getpass
import telnetlib
import string
import binascii
import socket
import thread
import SocketServer
import SimpleHTTPServer

# Defaults; several of these are overridden by command-line options below.
reboot = 0
HOST = "192.168.1.1"
PORT = 8080
user = "admin"
#password = getpass.getpass()
password = "password1"
proto = "http"
url = ""
imagefile = ""
dumpfile = ""
verbose = 1
do_dump = 0
dumplen = 0x10000
flashsize=4*1024*1024
#device="br0"
device="ixp0"

####################

def start_server(server):
    """Serve the current directory over HTTP in a background thread so the
    router can fetch the image from us."""
    httpd = SocketServer.TCPServer((server,PORT),SimpleHTTPServer.SimpleHTTPRequestHandler)
    thread.start_new_thread(httpd.serve_forever,())

####################

def get_flash_size():
    """Query the router (via the global telnet session `tn`) for its flash
    size in bytes; exits the script on failure or unsupported hardware."""
    # make sure we don't have an A0 stepping
    tn.write("cat /proc/cpuinfo\n")
    buf = tn.read_until("Returned 0", 3)
    if not buf:
        print "Unable to obtain CPU information; make sure to not use A0 stepping!"
    elif buf.find('rev 0') > 0:
        print "Warning: IXP42x stepping A0 detected!"
        if imagefile or url:
            print "Error: No linux support for A0 stepping!"
            sys.exit(2)

    # now get flash size
    tn.write("cat /proc/mtd\n")
    buf = tn.read_until("Returned 0", 3)
    if buf:
        i = buf.find('mtd0:')
        if i > 0:
            return int(buf[i+6:].split()[0],16)
        # use different command
        tn.write("flash_layout\n")
        buf = tn.read_until("Returned 0", 3)
        i = buf.rfind('Range ')
        if i > 0:
            return int(buf[i+17:].split()[0],16)
        print "Can't determine flash size!"
    else:
        print "Unable to obtain flash size!"
    sys.exit(2)

def image_dump(tn, dumpfile):
    """Dump the router's entire flash over telnet into `dumpfile`.

    When no filename is given one is derived from the platform name and the
    device MAC address (e.g. 'MI424-WR-001122334455.bin').
    """
    if not dumpfile:
        tn.write("ver\n");
        buf = tn.read_until("Returned 0",2)
        i = buf.find("Platform:")
        if i < 0:
            platform="jungo"
        else:
            line=buf[i+9:]
            i=line.find('\n')
            platform=line[:i].split()[-1]

        tn.write("rg_conf_print /dev/%s/mac\n" % device);
        buf = tn.read_until("Returned 0",3)
        i = buf.find("mac(")
        if i > 0:
            i += 4
        else:
            print "No MAC address found! (use -f option)"
            sys.exit(1)
        dumpfile = "%s-%s.bin" % (platform, buf[i:i+17].replace(':',''))
    else:
        tn.write("\n")

    print "Dumping flash contents (%dMB) to %s" % (flashsize/1048576, dumpfile)
    f = open(dumpfile, "wb")
    t=flashsize/dumplen

    for addr in range(t):
        if verbose:
            # Simple in-place percentage progress indicator.
            sys.stdout.write('\r%d%%'%(100*addr/t))
            sys.stdout.flush()

        tn.write("flash_dump -r 0x%x -l %d -4\n" % (addr*dumplen, dumplen))
        tn.read_until("\n")

        # Parse lines of the form "ADDR: 11223344 ..." and verify each
        # address is contiguous before appending the decoded bytes.
        count = addr*dumplen
        while 1:
            buf = tn.read_until("\n")
            if buf.strip() == "Returned 0":
                break
            s = buf.split()
            if s and s[0][-1] == ':':
                a=int(s[0][:-1],16)
                if a != count:
                    print "Format error: %x != %x"%(a,count)
                    sys.exit(2)
                count += 16
                f.write(binascii.a2b_hex(string.join(s[1:],'')))
        tn.read_until(">",1)
    f.close()
    if verbose:
        print ""

def telnet_option(sock,cmd,option):
    """Minimal telnet option negotiation: mirror DO/WILL requests.

    NOTE(review): `c` is unbound if cmd is anything other than DO/WILL
    (e.g. DONT/WONT), which would raise NameError in sendall below.
    """
    #print "Option: %d %d" % (ord(cmd), ord(option))
    if cmd == telnetlib.DO:
        c=telnetlib.WILL
    elif cmd == telnetlib.WILL:
        c=telnetlib.DO
    sock.sendall(telnetlib.IAC + c + option)

def telnet_timeout():
    """Abort the script after a telnet read timed out."""
    print "Fatal error: telnet timeout!"
    sys.exit(1)

def usage():
    """Print the module docstring as a usage message."""
    print __doc__ % os.path.basename(sys.argv[0])

####################

# NOTE(review): the short-option string lacks 'u:' although '-u' is both
# documented and handled below, so `-u admin` raises GetoptError. 'P:' is
# accepted but never handled. "quiet=" wrongly declares --quiet as taking
# an argument.
try:
    opts, args = getopt.getopt(sys.argv[1:], "hdf:qp:P:rvV", \
            ["help", "dump", "file=", "user=", "pass=", "port=",
             "quiet=", "reboot", "verbose", "version"])
except getopt.GetoptError:
    # print help information and exit:
    usage()
    sys.exit(1)

for o, a in opts:
    if o in ("-h", "--help"):
        usage()
        sys.exit(1)
    elif o in ("-V", "--version"):
        print "%s: 0.11" % sys.argv[0]
        sys.exit(1)
    # NOTE(review): the registered long option is "dump", not "no-dump",
    # so `--dump` parses but silently does nothing here.
    elif o in ("-d", "--no-dump"):
        do_dump = 1
    elif o in ("-f", "--file"):
        dumpfile = a
    elif o in ("-u", "--user"):
        user = a
    elif o in ("-p", "--pass"):
        password = a
    elif o == "--port":
        PORT = int(a)
    elif o in ("-q", "--quiet"):
        verbose = 0
    elif o in ("-r", "--reboot"):
        reboot = 1
    elif o in ("-v", "--verbose"):
        verbose = 1

# make sure we have enough arguments
if len(args) > 0:
    HOST = args[0]

if len(args) == 2:
    # Second argument is either a remote URL (scheme prefix) or a local file.
    if args[1].split(':')[0] in ("tftp", "http", "ftp"):
        url = args[1]
    else:
        imagefile = args[1]
else:
    do_dump = 1;

####################
# create a telnet session to the router
try:
    tn = telnetlib.Telnet(HOST)
except socket.error, msg:
    print "Unable to establish telnet session to %s: %s" % (HOST, msg)
    sys.exit(1)

tn.set_option_negotiation_callback(telnet_option)

buf = tn.read_until("Username: ", 3)
if not buf:
    telnet_timeout()
tn.write(user+"\n")
if password:
    buf = tn.read_until("Password: ", 3)
    if not buf:
        telnet_timeout()
    tn.write(password+"\n")

# wait for prompt
buf = tn.read_until("> ", 3)
if not buf:
    telnet_timeout()

flashsize = get_flash_size()

if do_dump:
    image_dump(tn, dumpfile)

if imagefile or url:
    splitpath = os.path.split(imagefile)

    # create load command
    if url:
        cmd = "load -u %s -r 0\n" % (url)
    else:
        # Serve the local image file ourselves over HTTP; the router pulls
        # it from our address on PORT.
        server = tn.get_socket().getsockname()[0]
        cmd = "load -u http://%s:%d/%s -r 0\n" % (server, PORT, splitpath[1])

        if not os.access(imagefile, os.R_OK):
            print "File access error: %s" % (imagefile)
            sys.exit(3)

        # make sure we're in the directory where the image is located
        if splitpath[0]:
            os.chdir(splitpath[0])

        start_server(server)

    if verbose:
        print "Unlocking flash..."
    tn.write("unlock 0 0x%x\n" % flashsize)
    buf = tn.read_until("Returned 0",5)

    if verbose:
        print "Writing new image..."
    print cmd,
    tn.write(cmd)
    buf = tn.read_until("Returned 0",10)

    # wait till the transfer completed
    buf = tn.read_until("Download completed successfully",20)
    if buf:
        print "Flash update complete!"
        if reboot:
            tn.write("reboot\n")
            print "Rebooting..."

tn.write("exit\n")
tn.close()
ryanmt/mitro
refs/heads/master
mitro-core/tools/dog.py
25
#!/usr/bin/python
"""Search Mitro RPC log files for records matching field/value patterns.

Log files are length-prefixed records (big-endian framing) read via
RecordReader; an optional '.pointers' side file maps record numbers to
byte offsets so --gobackpct can seek without scanning.
"""

from collections import defaultdict
import argparse
import json
import re
import struct
import sys
import time
import os

POINTER_FILE_SUFFIX = '.pointers'

parser = argparse.ArgumentParser(description='search logs for stuff. Usage: ./dog.py --fields=identity,endpoint /tmp/logfile')
parser.add_argument('files', nargs='?', help="log files")
parser.add_argument('--fields', help="fields to recursively show, comma-separated")
parser.add_argument('--dir', help="dir to look for files if not specfied", default='/mnt/rpclogs')
parser.add_argument('--matches', help="fields to recursively match, comma-separated", default='')
parser.add_argument('--showencrypted', help="show encrypted things", type=bool, default=False)
parser.add_argument('--tail', help="keep waiting for new data", action='store_true', default=False)
parser.add_argument('--gobackpct', type=float, help="pct of file to go back (between 0 and 1, double)", default=None)

# Struct codes for big-endian reads/writes keyed by byte width.
SIZE_MAP = {4 : 'i', 8 : 'q'}

def read_be(fd, size):
    """Read a big-endian integer of `size` bytes; None at end of file."""
    buf = fd.read(size)
    assert size in SIZE_MAP.keys(), 'invalid size'
    if len(buf) == 0:
        return None
    return struct.unpack('>' + SIZE_MAP[size], buf)[0]

def write_be(fd, size, value):
    """Write `value` as a big-endian integer of `size` bytes."""
    assert size in SIZE_MAP.keys(), 'invalid size'
    fd.write(struct.pack('>' + SIZE_MAP[size], value))

def get_or_create_pointers(reader):
    """Return a PointerRecord for `reader`, building the side file if absent.

    The pointer file holds (record number, byte offset) pairs written every
    64 KiB of input; it is built via a temp file then atomically renamed.
    """
    # try to find pointer file.
    pointer_file_path = reader._filename + '.pointers'
    if not os.path.isfile(pointer_file_path):
        temp_pointer_path = reader._filename + '.tmp_pointer_file'
        print >> sys.stderr, 'warning: writing missing pointers file'
        ptr_fd = open(temp_pointer_path, 'wb')
        last_write = 0
        recno = 0
        WRITE_EVERY = 1 << 16
        while reader.read():
            recno += 1
            offset = reader.get_offset()
            if offset - last_write >= WRITE_EVERY:
                sys.stderr.write('.')
                write_be(ptr_fd, 8, recno)
                write_be(ptr_fd, 8, offset)
                last_write = offset
        ptr_fd.close()
        reader.seek_to_start()
        sys.stderr.write('DONE\n')
        os.rename(temp_pointer_path, pointer_file_path)
    return PointerRecord(pointer_file_path)

class PointerRecord(object):
    """In-memory list of (record number, byte offset) pairs from a pointer file."""

    def __init__(self, pointer_file_path):
        self._pointers = []
        pointer_fd = open(pointer_file_path, 'rb')
        while True:
            key = read_be(pointer_fd, 8)
            if key is None:
                break
            value = read_be(pointer_fd, 8)
            self._pointers.append((key, value))
        pointer_fd.close()

    def get_back_offset_percent(self, go_back_percent):
        """Byte offset roughly `go_back_percent` of the way back from the end."""
        go_back = len(self._pointers) * go_back_percent
        key, offset = self._pointers[-int(go_back) - 1]  # negative indexing requires subtracting 1
        return offset

class RecordReader(object):
    """Sequential reader for the framed log format.

    Framing: a 4-byte header version (must be 0), then records of
    [4-byte type][4-byte length][payload]; type 2 is uncompressed data.
    """

    def __init__(self, filename, go_back_percent=None):
        self._filename = filename
        self._fd = open(filename, 'rb')
        self._pointer_record = None
        # read the header
        header_version = read_be(self._fd, 4)
        assert header_version == 0
        if go_back_percent is not None:
            self._pointer_record = get_or_create_pointers(self)
            # calculate offset we want to go to
            offset = self._pointer_record.get_back_offset_percent(go_back_percent)
            print >> sys.stderr, 'seeking to %ld' % offset
            self._fd.seek(offset, 0)

    def read(self):
        """Return the next record as a unicode string, or None at EOF/footer."""
        FILE_FOOTER = 0
        REVERSE_POINTER = 1
        DATA_UNCOMPRESSED = 2
        DATA_COMPRESSED = 3
        record_type = read_be(self._fd, 4)
        if record_type != DATA_UNCOMPRESSED:
            # Only the footer (or clean EOF) is expected here; other types abort.
            assert record_type == FILE_FOOTER or record_type is None
            return None
        else:
            length = read_be(self._fd, 4)
            buf = u''
            if length:
                buf = self._fd.read(length)
                buf = buf.decode('utf-8')
            return buf

    def seek_to_start(self):
        # Skip the 4-byte header version.
        self._fd.seek(4, 0)

    def get_offset(self):
        return self._fd.tell()

def iso_time_string(epoch_ms):
    '''Returns epoch_ms in log4j/Java's ISO 8601 format.'''
    # ISO 8601 output
    ms = epoch_ms % 1000
    seconds = epoch_ms / 1000
    tuple_time = time.gmtime(seconds)
    timezone_suffix = 'Z'
    time_string = time.strftime('%Y-%m-%d %H:%M:%S', tuple_time) + ',%03d' % (ms) + timezone_suffix
    return time_string

def printj(obj):
    """Pretty-print `obj` as sorted JSON, adding a human-readable timestamp."""
    # reformat timestamps to match log4j (ISO 8061)
    if 'metadata' in obj and 'timestamp' in obj['metadata']:
        obj['metadata']['timestampString'] = iso_time_string(obj['metadata']['timestamp'])
    print json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))

def findall(obj, field_values, matching_fields=None):
    """Recursively collect values whose key matches a (field_re, value_re) pair.

    Returns a dict mapping matched key -> list of distinct matched values.
    """
    if matching_fields == None:
        matching_fields = defaultdict(list)
    for k, v in obj.iteritems():
        for fieldname, fieldvalue in field_values:
            if re.match(fieldname, k):
                if v in matching_fields[k]:
                    continue
                if type(v) == unicode or type(v) == str:
                    # String values are additionally filtered by the value regex.
                    if fieldvalue:
                        if re.match(fieldvalue, v):
                            matching_fields[k].append(v)
                        else:
                            pass
                    else:
                        matching_fields[k].append(v)
                else:
                    matching_fields[k].append(v)
            elif type(v) == dict:
                findall(v, field_values, matching_fields)
    return matching_fields

def removecrypt(obj):
    """Recursively replace encrypted/key/signature fields with a placeholder.

    Mutates `obj` in place and also returns it (for the recursive calls).
    """
    if not type(obj) == dict:
        return obj
    for k, v in obj.iteritems():
        if (('encrypted') in k.lower()) or 'publickey' in k.lower() or k == 'signature':
            obj[k] = 'use --showencrypted to display'
        elif type(v) == dict:
            obj[k] = removecrypt(v)
        elif type(v) == list:
            obj[k] = map(removecrypt, v)
    return obj

def make(fields):
    """Turn 'name' or 'name=value' strings into (field_re, value_re) pairs."""
    field_values = None
    if fields:
        field_values = []
        for f in fields:
            parts = f.split('=')
            if len(parts) == 1:
                field_values.append((parts[0], None))
            else:
                field_values.append(parts)
    return field_values

def execute_matches(record, match_values):
    """True if every (field_re, value_re) pair matches somewhere in `record`."""
    assert len(match_values) > 0
    match_count = 0
    for key, value in record.iteritems():
        if type(value) == dict:
            if execute_matches(value, match_values):
                return True
        else:
            for field_re, value_re in match_values:
                if re.match(field_re, key):
                    if not re.match(value_re, value):
                        # this doesn't match but there might be a matching value somewhere else in the structure
                        break
                    match_count += 1
                    if match_count == len(match_values):
                        return True
    return False

def main(files, fields, matches, showEncrypted):
    """Scan each log file, printing records (or extracted fields) that match.

    NOTE(review): the `matches` parameter is ignored — this function reads
    the global `args` (args.matches, args.tail, args.gobackpct) directly.
    It also returns None, so __main__'s sys.exit(exit_code) always exits 0.
    """
    field_values = make(fields)

    # Convert matches string argument to list of tuples
    match_values = []
    if args.matches:
        for match in args.matches.split(','):
            field_re, value_re = match.split('=')
            match_values.append((field_re, value_re))

    # Tailing implies starting at the end of the file.
    if args.tail and not args.gobackpct:
        args.gobackpct = 0.0

    for fn in files:
        rr = RecordReader(fn, args.gobackpct)
        buf = 1
        while True:
            buf = rr.read()
            if not buf:
                if args.tail:
                    # Poll for new data instead of stopping at EOF.
                    time.sleep(0.25)
                    continue
                else:
                    break
            log = json.loads(buf)
            data = log['payload']
            # The request payload is itself JSON-encoded inside the record.
            data['request'] = json.loads(data['request'])
            if not data['request']:
                data['request'] = {}
            if not showEncrypted:
                removecrypt(log)
            # Check if the match values match this record
            if len(match_values) > 0 and not execute_matches(log, match_values):
                continue
            if not fields:
                print
                print '*' * 80
                print
                printj(log)
            else:
                dat = findall(log, field_values)
                if dat:
                    print
                    print '*' * 80
                    print
                    printj(dat)

if __name__ == '__main__':
    args = parser.parse_args()
    if not args.files:
        # No file given: pick the most recently modified log in --dir.
        possible_files = []
        for f in os.listdir(args.dir):
            if (f.startswith('rpclog.') or f.startswith('mitro_default_rpc_log.')) and not f.endswith(POINTER_FILE_SUFFIX):
                possible_files.append(os.path.join(args.dir, f))
        assert possible_files, "no files specified and could not find file in %s" % args.dir
        f = max(possible_files, key=os.path.getmtime)
        print >> sys.stderr, "no files specified, using %s" % f
        args.files = f
    exit_code = main(
        args.files.split(' '),
        None if not args.fields else set(args.fields.split(',')),
        args.matches,
        args.showencrypted,
    )
    sys.exit(exit_code)
feliperfranca/FelipeRFranca_site
refs/heads/master
django/contrib/gis/geos/libgeos.py
154
""" This module houses the ctypes initialization procedures, as well as the notice and error handler function callbacks (get called when an error occurs in GEOS). This module also houses GEOS Pointer utilities, including get_pointer_arr(), and GEOM_PTR. """ import os, re, sys from ctypes import c_char_p, Structure, CDLL, CFUNCTYPE, POINTER from ctypes.util import find_library from django.contrib.gis.geos.error import GEOSException # Custom library path set? try: from django.conf import settings lib_path = settings.GEOS_LIBRARY_PATH except (AttributeError, EnvironmentError, ImportError): lib_path = None # Setting the appropriate names for the GEOS-C library. if lib_path: lib_names = None elif os.name == 'nt': # Windows NT libraries lib_names = ['geos_c', 'libgeos_c-1'] elif os.name == 'posix': # *NIX libraries lib_names = ['geos_c', 'GEOS'] else: raise ImportError('Unsupported OS "%s"' % os.name) # Using the ctypes `find_library` utility to find the path to the GEOS # shared library. This is better than manually specifiying each library name # and extension (e.g., libgeos_c.[so|so.1|dylib].). if lib_names: for lib_name in lib_names: lib_path = find_library(lib_name) if not lib_path is None: break # No GEOS library could be found. if lib_path is None: raise ImportError('Could not find the GEOS library (tried "%s"). ' 'Try setting GEOS_LIBRARY_PATH in your settings.' % '", "'.join(lib_names)) # Getting the GEOS C library. The C interface (CDLL) is used for # both *NIX and Windows. # See the GEOS C API source code for more details on the library function calls: # http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html lgeos = CDLL(lib_path) # The notice and error handler C function callback definitions. 
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
#  typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst, output_h=sys.stdout):
    """GEOS notice callback: format the message and write it to output_h."""
    try:
        warn_msg = fmt % lst
    except Exception:
        # Only formatting failures (e.g. TypeError from a bad format string)
        # should fall back to the raw format string; a bare `except:` would
        # also swallow KeyboardInterrupt/SystemExit.
        warn_msg = fmt
    output_h.write('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)

ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst, output_h=sys.stderr):
    """GEOS error callback: format the message and write it to output_h."""
    try:
        err_msg = fmt % lst
    except Exception:
        # Same rationale as in notice_h: catch only real exceptions.
        err_msg = fmt
    output_h.write('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)

#### GEOS Geometry C data structures, and utility functions. ####

# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
    pass

class GEOSPrepGeom_t(Structure):
    pass

class GEOSCoordSeq_t(Structure):
    pass

class GEOSContextHandle_t(Structure):
    pass

# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)

# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines.
def get_pointer_arr(n):
    "Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
    GeomArr = GEOM_PTR * n
    return GeomArr()

# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
# Prototype for GEOSversion(): no arguments, returns a C string.
geos_version = lgeos.GEOSversion
geos_version.argtypes = None
geos_version.restype = c_char_p

# Regular expression should be able to parse version strings such as
#  '3.0.0rc4-CAPI-1.3.3', or '3.0.0-CAPI-1.4.1'
version_regex = re.compile(r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))(rc(?P<release_candidate>\d+))?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)$')

def geos_version_info():
    """
    Returns a dictionary containing the various version metadata parsed from
    the GEOS version string, including the version number, whether the version
    is a release candidate (and what number release candidate), and the C API
    version.

    Raises GEOSException if the library's version string does not match the
    expected layout (see version_regex above).
    """
    ver = geos_version()
    m = version_regex.match(ver)
    if not m:
        raise GEOSException('Could not parse version info string "%s"' % ver)
    # 'release_candidate' may be None for final releases.
    return dict((key, m.group(key)) for key in (
        'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor'))

# Version numbers and whether or not prepared geometry support is available.
_verinfo = geos_version_info()
GEOS_MAJOR_VERSION = int(_verinfo['major'])
GEOS_MINOR_VERSION = int(_verinfo['minor'])
GEOS_SUBMINOR_VERSION = int(_verinfo['subminor'])
del _verinfo
GEOS_VERSION = (GEOS_MAJOR_VERSION, GEOS_MINOR_VERSION, GEOS_SUBMINOR_VERSION)
# Prepared-geometry support (and the thread-safe *_r API) requires GEOS >= 3.1.
GEOS_PREPARE = GEOS_VERSION >= (3, 1, 0)

if GEOS_PREPARE:
    # Here we set up the prototypes for the initGEOS_r and finishGEOS_r
    # routines.  These functions aren't actually called until they are
    # attached to a GEOS context handle -- this actually occurs in
    # geos/prototypes/threadsafe.py.
    lgeos.initGEOS_r.restype = CONTEXT_PTR
    lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
else:
    # When thread-safety isn't available, the initGEOS routine must be called
    # first.  This function takes the notice and error functions, defined
    # as Python callbacks above, as parameters. Here is the C code that is
    # wrapped:
    #  extern void GEOS_DLL initGEOS(GEOSMessageHandler notice_function, GEOSMessageHandler error_function);
    lgeos.initGEOS(notice_h, error_h)
    # Calling finishGEOS() upon exit of the interpreter.
    import atexit
    atexit.register(lgeos.finishGEOS)
denys-duchier/django
refs/heads/master
tests/sessions_tests/tests.py
16
import base64 import os import shutil import string import sys import tempfile import unittest from datetime import timedelta from http import cookies from django.conf import settings from django.contrib.sessions.backends.base import UpdateError from django.contrib.sessions.backends.cache import SessionStore as CacheSession from django.contrib.sessions.backends.cached_db import \ SessionStore as CacheDBSession from django.contrib.sessions.backends.db import SessionStore as DatabaseSession from django.contrib.sessions.backends.file import SessionStore as FileSession from django.contrib.sessions.backends.signed_cookies import \ SessionStore as CookieSession from django.contrib.sessions.exceptions import InvalidSessionKey from django.contrib.sessions.middleware import SessionMiddleware from django.contrib.sessions.models import Session from django.contrib.sessions.serializers import ( JSONSerializer, PickleSerializer, ) from django.core import management from django.core.cache import caches from django.core.cache.backends.base import InvalidCacheBackendError from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation from django.http import HttpResponse from django.test import ( RequestFactory, TestCase, ignore_warnings, override_settings, ) from django.test.utils import patch_logger from django.utils import timezone from .models import SessionStore as CustomDatabaseSession class SessionTestsMixin: # This does not inherit from TestCase to avoid any tests being run with this # class, which wouldn't work, and to allow different TestCase subclasses to # be used. 
backend = None # subclasses must specify def setUp(self): self.session = self.backend() def tearDown(self): # NB: be careful to delete any sessions created; stale sessions fill up # the /tmp (with some backends) and eventually overwhelm it after lots # of runs (think buildbots) self.session.delete() def test_new_session(self): self.assertFalse(self.session.modified) self.assertFalse(self.session.accessed) def test_get_empty(self): self.assertIsNone(self.session.get('cat')) def test_store(self): self.session['cat'] = "dog" self.assertTrue(self.session.modified) self.assertEqual(self.session.pop('cat'), 'dog') def test_pop(self): self.session['some key'] = 'exists' # Need to reset these to pretend we haven't accessed it: self.accessed = False self.modified = False self.assertEqual(self.session.pop('some key'), 'exists') self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) self.assertIsNone(self.session.get('some key')) def test_pop_default(self): self.assertEqual(self.session.pop('some key', 'does not exist'), 'does not exist') self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_pop_default_named_argument(self): self.assertEqual(self.session.pop('some key', default='does not exist'), 'does not exist') self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_pop_no_default_keyerror_raised(self): with self.assertRaises(KeyError): self.session.pop('some key') def test_setdefault(self): self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar') self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar') self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) def test_update(self): self.session.update({'update key': 1}) self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) self.assertEqual(self.session.get('update key', None), 1) def test_has_key(self): self.session['some key'] = 1 self.session.modified = False 
self.session.accessed = False self.assertIn('some key', self.session) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_values(self): self.assertEqual(list(self.session.values()), []) self.assertTrue(self.session.accessed) self.session['some key'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.values()), [1]) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_keys(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.keys()), ['x']) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_items(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.items()), [('x', 1)]) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_clear(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.items()), [('x', 1)]) self.session.clear() self.assertEqual(list(self.session.items()), []) self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) def test_save(self): self.session.save() self.assertTrue(self.session.exists(self.session.session_key)) def test_delete(self): self.session.save() self.session.delete(self.session.session_key) self.assertFalse(self.session.exists(self.session.session_key)) def test_flush(self): self.session['foo'] = 'bar' self.session.save() prev_key = self.session.session_key self.session.flush() self.assertFalse(self.session.exists(prev_key)) self.assertNotEqual(self.session.session_key, prev_key) self.assertIsNone(self.session.session_key) self.assertTrue(self.session.modified) self.assertTrue(self.session.accessed) def test_cycle(self): self.session['a'], self.session['b'] = 'c', 'd' self.session.save() prev_key = self.session.session_key 
prev_data = list(self.session.items()) self.session.cycle_key() self.assertFalse(self.session.exists(prev_key)) self.assertNotEqual(self.session.session_key, prev_key) self.assertEqual(list(self.session.items()), prev_data) def test_cycle_with_no_session_cache(self): self.assertFalse(hasattr(self.session, '_session_cache')) self.session.cycle_key() def test_save_doesnt_clear_data(self): self.session['a'] = 'b' self.session.save() self.assertEqual(self.session['a'], 'b') def test_invalid_key(self): # Submitting an invalid session key (either by guessing, or if the db has # removed the key) results in a new key being generated. try: session = self.backend('1') session.save() self.assertNotEqual(session.session_key, '1') self.assertIsNone(session.get('cat')) session.delete() finally: # Some backends leave a stale cache entry for the invalid # session key; make sure that entry is manually deleted session.delete('1') def test_session_key_empty_string_invalid(self): """Falsey values (Such as an empty string) are rejected.""" self.session._session_key = '' self.assertIsNone(self.session.session_key) def test_session_key_too_short_invalid(self): """Strings shorter than 8 characters are rejected.""" self.session._session_key = '1234567' self.assertIsNone(self.session.session_key) def test_session_key_valid_string_saved(self): """Strings of length 8 and up are accepted and stored.""" self.session._session_key = '12345678' self.assertEqual(self.session.session_key, '12345678') def test_session_key_is_read_only(self): def set_session_key(session): session.session_key = session._get_new_session_key() with self.assertRaises(AttributeError): set_session_key(self.session) # Custom session expiry def test_default_expiry(self): # A normal session has a max age equal to settings self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) # So does a custom session with an idle expiration time of 0 (but it'll # expire at browser close) self.session.set_expiry(0) 
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_custom_expiry_seconds(self): modification = timezone.now() self.session.set_expiry(10) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_timedelta(self): modification = timezone.now() # Mock timezone.now, because set_expiry calls it on this code path. original_now = timezone.now try: timezone.now = lambda: modification self.session.set_expiry(timedelta(seconds=10)) finally: timezone.now = original_now date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_datetime(self): modification = timezone.now() self.session.set_expiry(modification + timedelta(seconds=10)) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_reset(self): self.session.set_expiry(None) self.session.set_expiry(10) self.session.set_expiry(None) self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_get_expire_at_browser_close(self): # Tests get_expire_at_browser_close with different settings and different # set_expiry calls with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False): self.session.set_expiry(10) self.assertFalse(self.session.get_expire_at_browser_close()) self.session.set_expiry(0) self.assertTrue(self.session.get_expire_at_browser_close()) self.session.set_expiry(None) self.assertFalse(self.session.get_expire_at_browser_close()) with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True): self.session.set_expiry(10) 
self.assertFalse(self.session.get_expire_at_browser_close()) self.session.set_expiry(0) self.assertTrue(self.session.get_expire_at_browser_close()) self.session.set_expiry(None) self.assertTrue(self.session.get_expire_at_browser_close()) def test_decode(self): # Ensure we can decode what we encode data = {'a test key': 'a test value'} encoded = self.session.encode(data) self.assertEqual(self.session.decode(encoded), data) def test_decode_failure_logged_to_security(self): bad_encode = base64.b64encode(b'flaskdj:alkdjf') with patch_logger('django.security.SuspiciousSession', 'warning') as calls: self.assertEqual({}, self.session.decode(bad_encode)) # check that the failed decode is logged self.assertEqual(len(calls), 1) self.assertIn('corrupted', calls[0]) def test_actual_expiry(self): # this doesn't work with JSONSerializer (serializing timedelta) with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'): self.session = self.backend() # reinitialize after overriding settings # Regression test for #19200 old_session_key = None new_session_key = None try: self.session['foo'] = 'bar' self.session.set_expiry(-timedelta(seconds=10)) self.session.save() old_session_key = self.session.session_key # With an expiry date in the past, the session expires instantly. new_session = self.backend(self.session.session_key) new_session_key = new_session.session_key self.assertNotIn('foo', new_session) finally: self.session.delete(old_session_key) self.session.delete(new_session_key) def test_session_load_does_not_create_record(self): """ Loading an unknown session key does not create a session record. Creating session records on load is a DOS vulnerability. 
""" session = self.backend('someunknownkey') session.load() self.assertFalse(session.exists(session.session_key)) # provided unknown key was cycled, not reused self.assertNotEqual(session.session_key, 'someunknownkey') def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self): """ Sessions shouldn't be resurrected by a concurrent request. """ # Create new session. s1 = self.backend() s1['test_data'] = 'value1' s1.save(must_create=True) # Logout in another context. s2 = self.backend(s1.session_key) s2.delete() # Modify session in first context. s1['test_data'] = 'value2' with self.assertRaises(UpdateError): # This should throw an exception as the session is deleted, not # resurrect the session. s1.save() self.assertEqual(s1.load(), {}) class DatabaseSessionTests(SessionTestsMixin, TestCase): backend = DatabaseSession session_engine = 'django.contrib.sessions.backends.db' @property def model(self): return self.backend.get_model_class() def test_session_str(self): "Session repr should be the session key." self.session['x'] = 1 self.session.save() session_key = self.session.session_key s = self.model.objects.get(session_key=session_key) self.assertEqual(str(s), session_key) def test_session_get_decoded(self): """ Test we can use Session.get_decoded to retrieve data stored in normal way """ self.session['x'] = 1 self.session.save() s = self.model.objects.get(session_key=self.session.session_key) self.assertEqual(s.get_decoded(), {'x': 1}) def test_sessionmanager_save(self): """ Test SessionManager.save method """ # Create a session self.session['y'] = 1 self.session.save() s = self.model.objects.get(session_key=self.session.session_key) # Change it self.model.objects.save(s.session_key, {'y': 2}, s.expire_date) # Clear cache, so that it will be retrieved from DB del self.session._session_cache self.assertEqual(self.session['y'], 2) def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. 
""" self.assertEqual(0, self.model.objects.count()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # Two sessions are in the database before clearsessions... self.assertEqual(2, self.model.objects.count()) with override_settings(SESSION_ENGINE=self.session_engine): management.call_command('clearsessions') # ... and one is deleted. self.assertEqual(1, self.model.objects.count()) @override_settings(USE_TZ=True) class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests): pass class CustomDatabaseSessionTests(DatabaseSessionTests): backend = CustomDatabaseSession session_engine = 'sessions_tests.models' def test_extra_session_field(self): # Set the account ID to be picked up by a custom session storage # and saved to a custom session model database column. self.session['_auth_user_id'] = 42 self.session.save() # Make sure that the customized create_model_instance() was called. s = self.model.objects.get(session_key=self.session.session_key) self.assertEqual(s.account_id, 42) # Make the session "anonymous". self.session.pop('_auth_user_id') self.session.save() # Make sure that save() on an existing session did the right job. 
class CacheDBSessionTests(SessionTestsMixin, TestCase):
    # Exercises the cached_db backend, which writes session data to both the
    # cache and the database.
    backend = CacheDBSession

    def test_exists_searches_cache_first(self):
        # exists() should be answered from the cache without any DB query.
        self.session.save()
        with self.assertNumQueries(0):
            self.assertTrue(self.session.exists(self.session.session_key))

    # Some backends might issue a warning
    @ignore_warnings(module="django.core.cache.backends.base")
    def test_load_overlong_key(self):
        # A session key far beyond the cache backend's key length limit must
        # not crash; the session is simply treated as empty.
        self.session._session_key = (string.ascii_letters + string.digits) * 20
        self.assertEqual(self.session.load(), {})

    @override_settings(SESSION_CACHE_ALIAS='sessions')
    def test_non_default_cache(self):
        # 21000 - CacheDB backend should respect SESSION_CACHE_ALIAS.
        with self.assertRaises(InvalidCacheBackendError):
            self.backend()


@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
    # Re-runs all cached_db tests with timezone-aware datetimes enabled.
    pass
# This is tested directly on _key_to_file, as load() will swallow # a SuspiciousOperation in the same way as an IOError - by creating # a new session, making it unclear whether the slashes were detected. with self.assertRaises(InvalidSessionKey): self.backend()._key_to_file("a\\b\\c") def test_invalid_key_forwardslash(self): # Ensure we don't allow directory-traversal with self.assertRaises(InvalidSessionKey): self.backend()._key_to_file("a/b/c") @override_settings( SESSION_ENGINE="django.contrib.sessions.backends.file", SESSION_COOKIE_AGE=0, ) def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. """ storage_path = self.backend._get_storage_path() file_prefix = settings.SESSION_COOKIE_NAME def count_sessions(): return len([ session_file for session_file in os.listdir(storage_path) if session_file.startswith(file_prefix) ]) self.assertEqual(0, count_sessions()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # One object in the present without an expiry (should be deleted since # its modification time + SESSION_COOKIE_AGE will be in the past when # clearsessions runs). other_session2 = self.backend() other_session2['foo'] = 'bar' other_session2.save() # Three sessions are in the filesystem before clearsessions... self.assertEqual(3, count_sessions()) management.call_command('clearsessions') # ... and two are deleted. 
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
    # Exercises the purely cache-backed session store.
    backend = CacheSession

    # Some backends might issue a warning
    @ignore_warnings(module="django.core.cache.backends.base")
    def test_load_overlong_key(self):
        # A session key far beyond the cache backend's key length limit must
        # not crash; the session is simply treated as empty.
        self.session._session_key = (string.ascii_letters + string.digits) * 20
        self.assertEqual(self.session.load(), {})

    def test_default_cache(self):
        # Without SESSION_CACHE_ALIAS, data lands in the 'default' cache.
        self.session.save()
        self.assertIsNotNone(caches['default'].get(self.session.cache_key))

    @override_settings(CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        },
        'sessions': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'session',
        },
    }, SESSION_CACHE_ALIAS='sessions')
    def test_non_default_cache(self):
        # Re-initialize the session backend to make use of overridden settings.
        self.session = self.backend()

        self.session.save()
        # Data must go to the aliased cache, never to 'default'.
        self.assertIsNone(caches['default'].get(self.session.cache_key))
        self.assertIsNotNone(caches['sessions'].get(self.session.cache_key))

    def test_create_and_save(self):
        # create() followed by save() must persist under the freshly
        # generated session key.
        self.session = self.backend()
        self.session.create()
        self.session.save()
        self.assertIsNotNone(caches['default'].get(self.session.cache_key))
request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertTrue( response.cookies[settings.SESSION_COOKIE_NAME]['httponly']) self.assertIn( cookies.Morsel._reserved['httponly'], str(response.cookies[settings.SESSION_COOKIE_NAME]) ) @override_settings(SESSION_COOKIE_HTTPONLY=False) def test_no_httponly_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly']) self.assertNotIn(cookies.Morsel._reserved['httponly'], str(response.cookies[settings.SESSION_COOKIE_NAME])) def test_session_save_on_500(self): request = RequestFactory().get('/') response = HttpResponse('Horrible error') response.status_code = 500 middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) # The value wasn't saved above. self.assertNotIn('hello', request.session.load()) def test_session_update_error_redirect(self): path = '/foo/' request = RequestFactory().get(path) response = HttpResponse() middleware = SessionMiddleware() request.session = DatabaseSession() request.session.save(must_create=True) request.session.delete() msg = ( "The request's session was deleted before the request completed. " "The user may have logged out in a concurrent request, for example." ) with self.assertRaisesMessage(SuspiciousOperation, msg): # Handle the response through the middleware. 
It will try to save # the deleted session which will cause an UpdateError that's caught # and raised as a SuspiciousOperation. middleware.process_response(request, response) def test_session_delete_on_end(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Before deleting, there has to be an existing cookie request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc' # Simulate a request that ends the session middleware.process_request(request) request.session.flush() # Handle the response through the middleware response = middleware.process_response(request, response) # The cookie was deleted, not recreated. # A deleted cookie header looks like: # Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/ self.assertEqual( 'Set-Cookie: {}={}; expires=Thu, 01-Jan-1970 00:00:00 GMT; ' 'Max-Age=0; Path=/'.format( settings.SESSION_COOKIE_NAME, '""' if sys.version_info >= (3, 5) else '', ), str(response.cookies[settings.SESSION_COOKIE_NAME]) ) @override_settings(SESSION_COOKIE_DOMAIN='.example.local', SESSION_COOKIE_PATH='/example/') def test_session_delete_on_end_with_custom_domain_and_path(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Before deleting, there has to be an existing cookie request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc' # Simulate a request that ends the session middleware.process_request(request) request.session.flush() # Handle the response through the middleware response = middleware.process_response(request, response) # The cookie was deleted, not recreated. 
# A deleted cookie header with a custom domain and path looks like: # Set-Cookie: sessionid=; Domain=.example.local; # expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; # Path=/example/ self.assertEqual( 'Set-Cookie: {}={}; Domain=.example.local; expires=Thu, ' '01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/example/'.format( settings.SESSION_COOKIE_NAME, '""' if sys.version_info >= (3, 5) else '', ), str(response.cookies[settings.SESSION_COOKIE_NAME]) ) def test_flush_empty_without_session_cookie_doesnt_set_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request that ends the session middleware.process_request(request) request.session.flush() # Handle the response through the middleware response = middleware.process_response(request, response) # A cookie should not be set. self.assertEqual(response.cookies, {}) # The session is accessed so "Vary: Cookie" should be set. self.assertEqual(response['Vary'], 'Cookie') def test_empty_session_saved(self): """ If a session is emptied of data but still has a key, it should still be updated. """ request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Set a session key and some data. middleware.process_request(request) request.session['foo'] = 'bar' # Handle the response through the middleware. response = middleware.process_response(request, response) self.assertEqual(tuple(request.session.items()), (('foo', 'bar'),)) # A cookie should be set, along with Vary: Cookie. self.assertIn( 'Set-Cookie: sessionid=%s' % request.session.session_key, str(response.cookies) ) self.assertEqual(response['Vary'], 'Cookie') # Empty the session data. del request.session['foo'] # Handle the response through the middleware. 
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
    # Exercises the signed_cookies backend, which stores session data
    # client-side in a signed cookie rather than in any server-side store.
    backend = CookieSession

    def test_save(self):
        """
        This test tested exists() in the other session backends, but
        that doesn't make sense for us.
        """
        pass

    def test_cycle(self):
        """
        This test tested cycle_key() which would create a new session
        key for the same session data. But we can't invalidate previously
        signed cookies (other than letting them expire naturally) so
        testing for this behavior is meaningless.
        """
        pass

    @unittest.expectedFailure
    def test_actual_expiry(self):
        # The cookie backend doesn't handle non-default expiry dates, see #19201
        super().test_actual_expiry()

    def test_unpickling_exception(self):
        # signed_cookies backend should handle unpickle exceptions gracefully
        # by creating a new session
        self.assertEqual(self.session.serializer, JSONSerializer)
        self.session.save()

        # Switching the serializer after save makes load() hit a
        # deserialization error, which must yield a fresh session, not raise.
        self.session.serializer = PickleSerializer
        self.session.load()

    @unittest.skip("Cookie backend doesn't have an external store to create records in.")
    def test_session_load_does_not_create_record(self):
        pass

    @unittest.skip("CookieSession is stored in the client and there is no way to query it.")
    def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
        pass
aidanlister/django
refs/heads/master
django/middleware/common.py
145
import logging import re from django import http from django.conf import settings from django.core import urlresolvers from django.core.exceptions import PermissionDenied from django.core.mail import mail_managers from django.utils.cache import get_conditional_response, set_response_etag from django.utils.encoding import force_text logger = logging.getLogger('django.request') class CommonMiddleware(object): """ "Common" middleware for taking care of some basic operations: - Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS - URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings, this middleware appends missing slashes and/or prepends missing "www."s. - If APPEND_SLASH is set and the initial URL doesn't end with a slash, and it is not found in urlpatterns, a new URL is formed by appending a slash at the end. If this new URL is found in urlpatterns, then an HTTP-redirect is returned to this new URL; otherwise the initial URL is processed as usual. This behavior can be customized by subclassing CommonMiddleware and overriding the response_redirect_class attribute. - ETags: If the USE_ETAGS setting is set, ETags will be calculated from the entire page content and Not Modified responses will be returned appropriately. """ response_redirect_class = http.HttpResponsePermanentRedirect def process_request(self, request): """ Check for denied User-Agents and rewrite the URL based on settings.APPEND_SLASH and settings.PREPEND_WWW """ # Check for denied User-Agents if 'HTTP_USER_AGENT' in request.META: for user_agent_regex in settings.DISALLOWED_USER_AGENTS: if user_agent_regex.search(request.META['HTTP_USER_AGENT']): raise PermissionDenied('Forbidden user agent') # Check for a redirect based on settings.PREPEND_WWW host = request.get_host() if settings.PREPEND_WWW and host and not host.startswith('www.'): host = 'www.' + host # Check if we also need to append a slash so we can do it all # with a single redirect. 
if self.should_redirect_with_slash(request): path = self.get_full_path_with_slash(request) else: path = request.get_full_path() return self.response_redirect_class('%s://%s%s' % (request.scheme, host, path)) def should_redirect_with_slash(self, request): """ Return True if settings.APPEND_SLASH is True and appending a slash to the request path turns an invalid path into a valid one. """ if settings.APPEND_SLASH and not request.get_full_path().endswith('/'): urlconf = getattr(request, 'urlconf', None) return ( not urlresolvers.is_valid_path(request.path_info, urlconf) and urlresolvers.is_valid_path('%s/' % request.path_info, urlconf) ) return False def get_full_path_with_slash(self, request): """ Return the full path of the request with a trailing slash appended. Raise a RuntimeError if settings.DEBUG is True and request.method is GET, PUT, or PATCH. """ new_path = request.get_full_path(force_append_slash=True) if settings.DEBUG and request.method in ('POST', 'PUT', 'PATCH'): raise RuntimeError( "You called this URL via %(method)s, but the URL doesn't end " "in a slash and you have APPEND_SLASH set. Django can't " "redirect to the slash URL while maintaining %(method)s data. " "Change your form to point to %(url)s (note the trailing " "slash), or set APPEND_SLASH=False in your Django settings." % { 'method': request.method, 'url': request.get_host() + new_path, } ) return new_path def process_response(self, request, response): """ Calculate the ETag, if needed. When the status code of the response is 404, it may redirect to a path with an appended slash if should_redirect_with_slash() returns True. """ # If the given URL is "Not Found", then check if we should redirect to # a path with a slash appended. 
if response.status_code == 404: if self.should_redirect_with_slash(request): return self.response_redirect_class(self.get_full_path_with_slash(request)) if settings.USE_ETAGS: if not response.has_header('ETag'): set_response_etag(response) if response.has_header('ETag'): return get_conditional_response( request, etag=response['ETag'], response=response, ) return response class BrokenLinkEmailsMiddleware(object): def process_response(self, request, response): """ Send broken link emails for relevant 404 NOT FOUND responses. """ if response.status_code == 404 and not settings.DEBUG: domain = request.get_host() path = request.get_full_path() referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace') if not self.is_ignorable_request(request, path, domain, referer): ua = force_text(request.META.get('HTTP_USER_AGENT', '<none>'), errors='replace') ip = request.META.get('REMOTE_ADDR', '<none>') mail_managers( "Broken %slink on %s" % ( ('INTERNAL ' if self.is_internal_request(domain, referer) else ''), domain ), "Referrer: %s\nRequested URL: %s\nUser agent: %s\n" "IP address: %s\n" % (referer, path, ua, ip), fail_silently=True) return response def is_internal_request(self, domain, referer): """ Returns True if the referring URL is the same domain as the current request. """ # Different subdomains are treated as different domains. return bool(re.match("^https?://%s/" % re.escape(domain), referer)) def is_ignorable_request(self, request, uri, domain, referer): """ Return True if the given request *shouldn't* notify the site managers according to project settings or in three specific situations: - If the referer is empty. - If a '?' in referer is identified as a search engine source. - If the referer is equal to the current URL (assumed to be a malicious bot). """ full_url = "%s://%s/%s" % (request.scheme, domain, uri.lstrip('/')) if (not referer or (not self.is_internal_request(domain, referer) and '?' 
in referer) or (referer == uri or referer == full_url)): return True return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
tianzhihen/SublimeJEDI
refs/heads/master
jedi/parser/pgen2/pgen.py
49
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Modifications: # Copyright 2014 David Halter. Integration into Jedi. # Modifications are dual-licensed: MIT and PSF. # Pgen imports from . import grammar from jedi.parser import token from jedi.parser import tokenize class ParserGenerator(object): def __init__(self, filename, stream=None): close_stream = None if stream is None: stream = open(filename) close_stream = stream.close self.filename = filename self.stream = stream self.generator = tokenize.generate_tokens(stream.readline) self.gettoken() # Initialize lookahead self.dfas, self.startsymbol = self.parse() if close_stream is not None: close_stream() self.first = {} # map from symbol name to set of tokens self.addfirstsets() def make_grammar(self): c = grammar.Grammar() names = list(self.dfas.keys()) names.sort() names.remove(self.startsymbol) names.insert(0, self.startsymbol) for name in names: i = 256 + len(c.symbol2number) c.symbol2number[name] = i c.number2symbol[i] = name for name in names: dfa = self.dfas[name] states = [] for state in dfa: arcs = [] for label, next in state.arcs.items(): arcs.append((self.make_label(c, label), dfa.index(next))) if state.isfinal: arcs.append((0, dfa.index(state))) states.append(arcs) c.states.append(states) c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) c.start = c.symbol2number[self.startsymbol] return c def make_first(self, c, name): rawfirst = self.first[name] first = {} for label in rawfirst: ilabel = self.make_label(c, label) ##assert ilabel not in first # XXX failed on <> ... != first[ilabel] = 1 return first def make_label(self, c, label): # XXX Maybe this should be a method on a subclass of converter? 
ilabel = len(c.labels) if label[0].isalpha(): # Either a symbol name or a named token if label in c.symbol2number: # A symbol name (a non-terminal) if label in c.symbol2label: return c.symbol2label[label] else: c.labels.append((c.symbol2number[label], None)) c.symbol2label[label] = ilabel return ilabel else: # A named token (NAME, NUMBER, STRING) itoken = getattr(token, label, None) assert isinstance(itoken, int), label assert itoken in token.tok_name, label if itoken in c.tokens: return c.tokens[itoken] else: c.labels.append((itoken, None)) c.tokens[itoken] = ilabel return ilabel else: # Either a keyword or an operator assert label[0] in ('"', "'"), label value = eval(label) if value[0].isalpha(): # A keyword if value in c.keywords: return c.keywords[value] else: c.labels.append((token.NAME, value)) c.keywords[value] = ilabel return ilabel else: # An operator (any non-numeric token) itoken = token.opmap[value] # Fails if unknown token if itoken in c.tokens: return c.tokens[itoken] else: c.labels.append((itoken, None)) c.tokens[itoken] = ilabel return ilabel def addfirstsets(self): names = list(self.dfas.keys()) names.sort() for name in names: if name not in self.first: self.calcfirst(name) #print name, self.first[name].keys() def calcfirst(self, name): dfa = self.dfas[name] self.first[name] = None # dummy to detect left recursion state = dfa[0] totalset = {} overlapcheck = {} for label, next in state.arcs.items(): if label in self.dfas: if label in self.first: fset = self.first[label] if fset is None: raise ValueError("recursion for rule %r" % name) else: self.calcfirst(label) fset = self.first[label] totalset.update(fset) overlapcheck[label] = fset else: totalset[label] = 1 overlapcheck[label] = {label: 1} inverse = {} for label, itsfirst in overlapcheck.items(): for symbol in itsfirst: if symbol in inverse: raise ValueError("rule %s is ambiguous; %s is in the" " first sets of %s as well as %s" % (name, symbol, label, inverse[symbol])) inverse[symbol] = label 
self.first[name] = totalset def parse(self): dfas = {} startsymbol = None # MSTART: (NEWLINE | RULE)* ENDMARKER while self.type != token.ENDMARKER: while self.type == token.NEWLINE: self.gettoken() # RULE: NAME ':' RHS NEWLINE name = self.expect(token.NAME) self.expect(token.OP, ":") a, z = self.parse_rhs() self.expect(token.NEWLINE) #self.dump_nfa(name, a, z) dfa = self.make_dfa(a, z) #self.dump_dfa(name, dfa) # oldlen = len(dfa) self.simplify_dfa(dfa) # newlen = len(dfa) dfas[name] = dfa #print name, oldlen, newlen if startsymbol is None: startsymbol = name return dfas, startsymbol def make_dfa(self, start, finish): # To turn an NFA into a DFA, we define the states of the DFA # to correspond to *sets* of states of the NFA. Then do some # state reduction. Let's represent sets as dicts with 1 for # values. assert isinstance(start, NFAState) assert isinstance(finish, NFAState) def closure(state): base = {} addclosure(state, base) return base def addclosure(state, base): assert isinstance(state, NFAState) if state in base: return base[state] = 1 for label, next in state.arcs: if label is None: addclosure(next, base) states = [DFAState(closure(start), finish)] for state in states: # NB states grows while we're iterating arcs = {} for nfastate in state.nfaset: for label, next in nfastate.arcs: if label is not None: addclosure(next, arcs.setdefault(label, {})) for label, nfaset in arcs.items(): for st in states: if st.nfaset == nfaset: break else: st = DFAState(nfaset, finish) states.append(st) state.addarc(st, label) return states # List of DFAState instances; first one is start def dump_nfa(self, name, start, finish): print("Dump of NFA for", name) todo = [start] for i, state in enumerate(todo): print(" State", i, state is finish and "(final)" or "") for label, next in state.arcs: if next in todo: j = todo.index(next) else: j = len(todo) todo.append(next) if label is None: print(" -> %d" % j) else: print(" %s -> %d" % (label, j)) def dump_dfa(self, name, dfa): 
print("Dump of DFA for", name) for i, state in enumerate(dfa): print(" State", i, state.isfinal and "(final)" or "") for label, next in state.arcs.items(): print(" %s -> %d" % (label, dfa.index(next))) def simplify_dfa(self, dfa): # This is not theoretically optimal, but works well enough. # Algorithm: repeatedly look for two states that have the same # set of arcs (same labels pointing to the same nodes) and # unify them, until things stop changing. # dfa is a list of DFAState instances changes = True while changes: changes = False for i, state_i in enumerate(dfa): for j in range(i + 1, len(dfa)): state_j = dfa[j] if state_i == state_j: #print " unify", i, j del dfa[j] for state in dfa: state.unifystate(state_j, state_i) changes = True break def parse_rhs(self): # RHS: ALT ('|' ALT)* a, z = self.parse_alt() if self.value != "|": return a, z else: aa = NFAState() zz = NFAState() aa.addarc(a) z.addarc(zz) while self.value == "|": self.gettoken() a, z = self.parse_alt() aa.addarc(a) z.addarc(zz) return aa, zz def parse_alt(self): # ALT: ITEM+ a, b = self.parse_item() while (self.value in ("(", "[") or self.type in (token.NAME, token.STRING)): c, d = self.parse_item() b.addarc(c) b = d return a, b def parse_item(self): # ITEM: '[' RHS ']' | ATOM ['+' | '*'] if self.value == "[": self.gettoken() a, z = self.parse_rhs() self.expect(token.OP, "]") a.addarc(z) return a, z else: a, z = self.parse_atom() value = self.value if value not in ("+", "*"): return a, z self.gettoken() z.addarc(a) if value == "+": return a, z else: return a, a def parse_atom(self): # ATOM: '(' RHS ')' | NAME | STRING if self.value == "(": self.gettoken() a, z = self.parse_rhs() self.expect(token.OP, ")") return a, z elif self.type in (token.NAME, token.STRING): a = NFAState() z = NFAState() a.addarc(z, self.value) self.gettoken() return a, z else: self.raise_error("expected (...) 
or NAME or STRING, got %s/%s", self.type, self.value) def expect(self, type, value=None): if self.type != type or (value is not None and self.value != value): self.raise_error("expected %s/%s, got %s/%s", type, value, self.type, self.value) value = self.value self.gettoken() return value def gettoken(self): tup = next(self.generator) while tup[0] in (token.COMMENT, token.NL): tup = next(self.generator) self.type, self.value, self.begin, prefix = tup #print tokenize.tok_name[self.type], repr(self.value) def raise_error(self, msg, *args): if args: try: msg = msg % args except: msg = " ".join([msg] + list(map(str, args))) line = open(self.filename).readlines()[self.begin[0]] raise SyntaxError(msg, (self.filename, self.begin[0], self.begin[1], line)) class NFAState(object): def __init__(self): self.arcs = [] # list of (label, NFAState) pairs def addarc(self, next, label=None): assert label is None or isinstance(label, str) assert isinstance(next, NFAState) self.arcs.append((label, next)) class DFAState(object): def __init__(self, nfaset, final): assert isinstance(nfaset, dict) assert isinstance(next(iter(nfaset)), NFAState) assert isinstance(final, NFAState) self.nfaset = nfaset self.isfinal = final in nfaset self.arcs = {} # map from label to DFAState def addarc(self, next, label): assert isinstance(label, str) assert label not in self.arcs assert isinstance(next, DFAState) self.arcs[label] = next def unifystate(self, old, new): for label, next in self.arcs.items(): if next is old: self.arcs[label] = new def __eq__(self, other): # Equality test -- ignore the nfaset instance variable assert isinstance(other, DFAState) if self.isfinal != other.isfinal: return False # Can't just return self.arcs == other.arcs, because that # would invoke this method recursively, with cycles... if len(self.arcs) != len(other.arcs): return False for label, next in self.arcs.items(): if next is not other.arcs.get(label): return False return True __hash__ = None # For Py3 compatibility. 
def generate_grammar(filename="Grammar.txt"): p = ParserGenerator(filename) return p.make_grammar()
mcardillo55/goxta
refs/heads/master
goxta.py
1
"""goxta -- realtime technical analysis on mtGox trading""" from indicators import * from goxapi import * import strategy import numpy as np import argparse import time import math import sys import urllib2 import thread import random BTCCHARTS_URL = "http://api.bitcoincharts.com/v1/trades.csv?symbol=mtgoxUSD" class IntervalList: """Main data structure containing all Interval objects If trading indicators are desired, they should be passed to the 'indicators' argument as a list of objects during the IntervalList instantiation call e.g. newList = IntervalList(indicators=(RSI())) """ def __init__(self, intervalPeriod=1, indicators=()): self.intervalPeriod = intervalPeriod * 60 # interval period in secs self.intList = [] self.indicators = indicators def addInterval(self, newInterval): self.intList.append(newInterval) def empty(self): if self.intList == []: return True else: return False def closings(self): """Returns a numpy array of all interval closing prices Used by several indicators in indicators.py """ closeList = [] for interval in self.intList: closeList.append(interval.close) return np.array(closeList) def getIntervalPeriod(self): return self.intervalPeriod def printIntervalAt(self, n): self.intList[n].printInterval() self.printIndicators() def printFullList(self): for interval in self.intList: print "INTERVAL ID: %d" % interval.intervalID interval.printTrades() def printIndicators(self): closings = self.closings() indStr = "" for curInd in self.indicators: indStr = indStr + curInd.asStr(closings) + "\t" print indStr class Interval: """Basic trade period unit Contains essential trade prices, as well as all Trade objects that took place during the interval interval empty is passed True if we are creating an interval with no trading activity open is either the opening trade, or the previous Interval if there is no activity """ def __init__(self, open, empty=False): if (empty): self.intervalID = open.intervalID + 1 self.open = open.close self.close = open.close 
self.trades = [] self.high = open.close self.low = open.close self.volume = 0 else: self.intervalID = open.intervalID self.open = open.price self.close = open.price self.trades = [open] self.high = open.price self.low = open.price self.volume = open.volume def addTrade(self, trade): self.trades.append(trade) self.volume = self.volume + trade.volume self.close = trade.price if trade.price > self.high: self.high = trade.price if trade.price < self.low: self.low = trade.price def printInterval(self): print ("ID: %d\tOpen: %.6g\tClose: %.6g\tHigh: %.6g\t" "Low: %.6g\tVol: %.6g") % (self.intervalID, self.open, self.close, self.high, self.low, self.volume) def printTrades(self): for trade in self.trades: trade.printTrade() class Trade: """Object to contain all essential data about a specific trade""" def __init__(self, (tradeData), intervalPeriod): # time is unix epoch time self.time = int(tradeData[0]) self.price = float(tradeData[1]) self.volume = float(tradeData[2]) self.intervalID = int(math.floor(self.time / intervalPeriod)) def printTrade(self): print "Time: %s\tPrice: %f\tVolume: %f" % (time.ctime(self.time), self.price, self.volume) parser = argparse.ArgumentParser(description="Analyze and trade Bitcoin" "on mtGox") parser.add_argument("--hist", dest="history", action="store_true", help="Grab recent bitcoincharts.com trade history") parser.add_argument("--test", dest="testMode", action="store_true", help="Create consistant fake trade data for testing") args = parser.parse_args() # creating main interval list object intList = IntervalList(intervalPeriod=15, indicators=(SMA(40), EMA(20), RSI(), MACD())) if (args.history): print "Fetching history from bitcoincharts.com..." 
start_data = urllib2.urlopen(BTCCHARTS_URL) for line in reversed(start_data.readlines()): curTrade = Trade(tuple(line.split(",")), intList.getIntervalPeriod()) #curTrade.printTrade() if intList.empty() or curTrade.intervalID != curInterval.intervalID: curInterval = Interval(curTrade) if not intList.empty(): intList.printIntervalAt(-1) idDiff = (curInterval.intervalID - intList.intList[-1].intervalID) if (idDiff != 1): for x in range(1, idDiff): intList.addInterval(Interval(intList.intList[-1], True)) intList.printIntervalAt(-1) intList.addInterval(curInterval) else: curInterval.addTrade(curTrade) print "Complete!" mtgox = GoxAPI() mtgox.connect() mtgoxThread = GoxAPI() mtgoxThread.connect() tradeStrategy = strategy.Strategy(intList) thread.start_new_thread(tradeStrategy.run, (mtgoxThread, )) while True: """Main program loop Will keep recieving trade data until process dies """ if (args.testMode): time.sleep(5) mtTrade = {'date': time.time(), 'price': random.uniform(88, 98), 'amount': random.uniform(0.01, 3.0)} else: try: mtdata = mtgox.getTrades() except KeyboardInterrupt: print "Received Ctrl+C. Closing goxta..." 
sys.exit(0) except: continue try: # this is the 'trades' channel identifier, according to API docs if (mtdata['channel'] == "dbf1dee9-4f2e-4a08-8cb7-748919a71b21" and mtdata['trade']['price_currency'] == "USD"): mtTrade = mtdata['trade'] else: mtTrade = None except: continue if (mtTrade): curTrade = Trade((mtTrade['date'], mtTrade['price'], mtTrade['amount']), intList.getIntervalPeriod()) # checking if we should create the first Interval in the list or # if the trade time has passed the the interval end time, in # which we will need to need to create a new Interval if (intList.empty() or curTrade.intervalID != curInterval.intervalID): curInterval = Interval(curTrade) # checking for the case in which we need to create the # first Interval for the list if not intList.empty(): intList.printIntervalAt(-1) idDiff = (curInterval.intervalID - intList.intList[-1].intervalID) if (idDiff != 1): for x in range(1, idDiff): intList.addInterval(Interval(intList.intList[-1], True)) intList.printIntervalAt(-1) intList.addInterval(curInterval) else: curInterval.addTrade(curTrade) curTrade.printTrade()
vnsofthe/odoo-dev
refs/heads/master
addons/payment_paypal/models/__init__.py
419
# -*- coding: utf-8 -*- import paypal import res_company
ForgottenKahz/CloudOPC
refs/heads/master
venv/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py
80
""" Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. """ from __future__ import absolute_import import sys import os import io import time import re import imp import zipfile import zipimport import warnings import stat import functools import pkgutil import token import symbol import operator import platform import collections import plistlib import email.parser import tempfile from pkgutil import get_importer PY3 = sys.version_info > (3,) PY2 = not PY3 if PY3: from urllib.parse import urlparse, urlunparse if PY2: from urlparse import urlparse, urlunparse if PY3: string_types = str, else: string_types = str, eval('unicode') # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split # Avoid try/except due to potential problems with delayed import mechanisms. if sys.version_info >= (3, 3) and sys.implementation.name == "cpython": import importlib._bootstrap as importlib_bootstrap else: importlib_bootstrap = None try: import parser except ImportError: pass import pip._vendor.packaging.version import pip._vendor.packaging.specifiers packaging = pip._vendor.packaging class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with PEP 440. 
""" class _SetuptoolsVersionMixin(object): def __hash__(self): return super(_SetuptoolsVersionMixin, self).__hash__() def __lt__(self, other): if isinstance(other, tuple): return tuple(self) < other else: return super(_SetuptoolsVersionMixin, self).__lt__(other) def __le__(self, other): if isinstance(other, tuple): return tuple(self) <= other else: return super(_SetuptoolsVersionMixin, self).__le__(other) def __eq__(self, other): if isinstance(other, tuple): return tuple(self) == other else: return super(_SetuptoolsVersionMixin, self).__eq__(other) def __ge__(self, other): if isinstance(other, tuple): return tuple(self) >= other else: return super(_SetuptoolsVersionMixin, self).__ge__(other) def __gt__(self, other): if isinstance(other, tuple): return tuple(self) > other else: return super(_SetuptoolsVersionMixin, self).__gt__(other) def __ne__(self, other): if isinstance(other, tuple): return tuple(self) != other else: return super(_SetuptoolsVersionMixin, self).__ne__(other) def __getitem__(self, key): return tuple(self)[key] def __iter__(self): component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) replace = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', }.get def _parse_version_parts(s): for part in component_re.split(s): part = replace(part, part) if not part or part == '.': continue if part[:1] in '0123456789': # pad for numeric comparison yield part.zfill(8) else: yield '*'+part # ensure that alpha/beta/candidate are before final yield '*final' def old_parse_version(s): parts = [] for part in _parse_version_parts(s.lower()): if part.startswith('*'): # remove '-' before a prerelease tag if part < '*final': while parts and parts[-1] == '*final-': parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == '00000000': parts.pop() parts.append(part) return tuple(parts) # Warn for use of this function warnings.warn( "You have iterated over the result of " "pkg_resources.parse_version. 
This is a legacy behavior which is " "inconsistent with the new version class introduced in setuptools " "8.0. That class should be used directly instead of attempting to " "iterate over the result.", RuntimeWarning, stacklevel=1, ) for part in old_parse_version(str(self)): yield part class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version): pass class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin, packaging.version.LegacyVersion): pass def parse_version(v): try: return SetuptoolsVersion(v) except packaging.version.InvalidVersion: return SetuptoolsLegacyVersion(v) _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_'+v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_'+_state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. 
""" plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except ValueError: # not Mac OS X pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra', 'ExtractionError', # Warnings 'PEP440Warning', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return 
self.__class__.__name__+repr(self.args) class VersionConflict(ResolutionError): """An already-installed version conflicts with the requested version""" class DistributionNotFound(ResolutionError): """A requested distribution was not found""" class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = sys.version[:3] EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. """ _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macosx_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts if version == '': plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macosx_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS 
X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform

    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat

macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform

def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided==required:
        # easy case
        return True

    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)

        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                # NOTE(review): lexicographic string comparison -- adequate
                # only for the specific 10.3/10.4 values checked here, not a
                # general version compare.
                if dversion == 7 and macosversion >= "10.3" or \
                    dversion == 8 and macosversion >= "10.4":
                    return True
            # egg isn't macosx or legacy darwin
            return False

        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
            provMac.group(3) != reqMac.group(3):
            return False

        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False

        return True

    # XXX Linux and other platforms' special cases should go here
    return False

def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # The script runs in the *caller's* global namespace: clear it of
    # everything except __name__ so the script behaves like a __main__.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)

# backward compatibility
run_main = run_script

def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Accept a string, a Requirement, or a Distribution; coerce step by step.
    if isinstance(dist, string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist

def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)

def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)

def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)

class IMetadataProvider:
    # Interface-only class: methods document the metadata-provider protocol
    # and are intentionally bodyless (and written without `self`).

    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""

    def get_metadata(name):
        """The named metadata resource as a string"""

    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines

        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""

    def metadata_isdir(name):
        """Is the named metadata a directory?
(like ``os.path.isdir()``)"""

    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""

    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""

class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # Interface-only class, like IMetadataProvider: methods are bodyless
    # documentation of the protocol.

    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""

    def has_resource(resource_name):
        """Does the package contain the named resource?"""

    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""

    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""

class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""

    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # path entries, parallel to sys.path
        self.entries = []
        # path entry -> list of project keys found on that entry
        self.entry_keys = {}
        # project key -> active Distribution
        self.by_key = {}
        # callbacks registered via subscribe(), invoked on each added dist
        self.callbacks = []

        if entries is None:
            entries = sys.path

        for entry in entries:
            self.add_entry(entry)

    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws

        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            # sys.path as-is cannot satisfy __requires__; rebuild from the
            # requirement spec instead (rewrites sys.path).
            return cls._build_from_requirements(__requires__)

        return ws

    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
""" # try it without defaults already on sys.path # by starting with an empty path ws = cls([]) reqs = parse_requirements(req_spec) dists = ws.resolve(reqs, Environment()) for dist in dists: ws.add(dist) # add any missing entries from sys.path for entry in sys.path: if entry not in ws.entries: ws.add_entry(entry) # then copy back to sys.path sys.path[:] = ws.entries return ws def add_entry(self, entry): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) """ self.entry_keys.setdefault(entry, []) self.entries.append(entry) for dist in find_distributions(entry, True): self.add(dist, entry, False) def __contains__(self, dist): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. """ dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) else: return dist def iter_entry_points(self, group, name=None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). 
""" for dist in self: entries = dist.get_entry_map(group) if name is None: for ep in entries.values(): yield ep elif name in entries: yield entries[name] def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns) def __iter__(self): """Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. """ seen = {} for item in self.entries: if item not in self.entry_keys: # workaround a cache issue continue for key in self.entry_keys[item]: if key not in seen: seen[key]=1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. """ if insert: dist.insert_on(self.entries, entry) if entry is None: entry = dist.location keys = self.entry_keys.setdefault(entry,[]) keys2 = self.entry_keys.setdefault(dist.location,[]) if not replace and dist.key in self.by_key: # ignore hidden distros return self.by_key[dist.key] = dist if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) def resolve(self, requirements, env=None, installer=None, replace_conflicting=False): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. 
`env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. `installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``. Unless `replace_conflicting=True`, raises a VersionConflict exception if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. """ # set up the stack requirements = list(requirements)[::-1] # set of processed requirements processed = {} # key -> dist best = {} to_activate = [] # Mapping of requirement to set of distributions that required it; # useful for reporting info about conflicts. required_by = collections.defaultdict(set) while requirements: # process dependencies breadth-first req = requirements.pop(0) if req in processed: # Ignore cyclic or redundant dependencies continue dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map dist = self.by_key.get(req.key) if dist is None or (dist not in req and replace_conflicting): ws = self if env is None: if dist is None: env = Environment(self.entries) else: # Use an empty environment and workingset to avoid # any further conflicts with the conflicting # distribution env = Environment([]) ws = WorkingSet([]) dist = best[req.key] = env.best_match(req, ws, installer) if dist is None: #msg = ("The '%s' distribution was not found on this " # "system, and is required by this application.") #raise DistributionNotFound(msg % req) # unfortunately, zc.buildout uses a str(err) # to get the name of the distribution here.. 
raise DistributionNotFound(req) to_activate.append(dist) if dist not in req: # Oops, the "best" so far conflicts with a dependency tmpl = "%s is installed but %s is required by %s" args = dist, req, list(required_by.get(req, [])) raise VersionConflict(tmpl % args) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] requirements.extend(new_requirements) # Register the new requirements needed by req for new_requirement in new_requirements: required_by[new_requirement].add(req.project_name) processed[req] = True # return list of distros to activate return to_activate def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: distributions, errors = working_set.find_plugins( Environment(plugin_dirlist) ) # add plugins+libs to sys.path map(working_set.add, distributions) # display errors print('Could not load', errors) The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project's "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions. `installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved. This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. 
`error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance. """ plugin_projects = list(plugin_env) # scan project names in alphabetic order plugin_projects.sort() error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) # put all our entries in shadow_set list(map(shadow_set.add, self)) for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError: v = sys.exc_info()[1] # save error info error_info[dist] = v if fallback: # try the next older version of project continue else: # give up on this project, keep going break else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. 
""" needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback): """Invoke `callback` for all distributions (including existing ones)""" if callback in self.callbacks: return self.callbacks.append(callback) for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.callbacks[:] ) def __setstate__(self, e_k_b_c): entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.callbacks = callbacks[:] class Environment(object): """Searchable snapshot of distributions on a search path""" def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. """ self._distmap = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. 
""" return (self.python is None or dist.py_version is None or dist.py_version==self.python) \ and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self, project_name): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. """ distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added """ if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) def best_match(self, req, working_set, installer=None): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. 
If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ dist = working_set.find(req) if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist # try to download/install return self.obtain(req, installer) def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) elif isinstance(other, Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new # XXX backward compatibility AvailableDistributions = Environment class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" 
extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir( resource_name ) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir( resource_name ) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() err = ExtractionError("""Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: %s The Python egg cache directory is currently set to: %s Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. 
""" % (old_exc, cache_path) ) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. `archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name+'-tmp', *names) try: _bypass_ensure_directory(target_path) except: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ("%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." 
% path) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) """ if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. 
This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """Determine the default cache location This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the "Application Data" directory. On all other systems, it's "~/.python-eggs". """ try: return os.environ['PYTHON_EGG_CACHE'] except KeyError: pass if os.name!='nt': return os.path.expanduser('~/.python-eggs') # XXX this may be locale-specific! app_data = 'Application Data' app_homes = [ # best option, should be locale-safe (('APPDATA',), None), (('USERPROFILE',), app_data), (('HOMEDRIVE','HOMEPATH'), app_data), (('HOMEPATH',), app_data), (('HOME',), None), # 95/98/ME (('WINDIR',), app_data), ] for keys, subdir in app_homes: dirname = '' for key in keys: if key in os.environ: dirname = os.path.join(dirname, os.environ[key]) else: break else: if subdir: dirname = os.path.join(dirname, subdir) return os.path.join(dirname, 'Python-Eggs') else: raise RuntimeError( "Please set the PYTHON_EGG_CACHE enviroment variable" ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. 
    """
    return re.sub('[^A-Za-z0-9.]+', '-', name)

def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # normalize the version
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # Legacy fallback: not PEP 440 parseable.  Turn spaces into dots,
        # then sanitize the rest the same way safe_name() does.
        version = version.replace(' ','.')
        return re.sub('[^A-Za-z0-9.]+', '-', version)

def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    # NOTE(review): the pattern also lets '.' through untouched, which the
    # docstring does not mention -- confirm against upstream setuptools.
    return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()

def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    return name.replace('-','_')

class MarkerEvaluation(object):
    # Lazily-evaluated environment marker variables, keyed by the marker
    # names defined in PEP 426.
    values = {
        'os_name': lambda: os.name,
        'sys_platform': lambda: sys.platform,
        'python_full_version': platform.python_version,
        'python_version': lambda: platform.python_version()[:3],
        'platform_version': platform.version,
        'platform_machine': platform.machine,
        'python_implementation': platform.python_implementation,
    }

    @classmethod
    def is_invalid_marker(cls, text):
        """
        Validate text as a PEP 426 environment marker; return an exception
        if invalid or False otherwise.
        """
        try:
            cls.evaluate_marker(text)
        except SyntaxError:
            return cls.normalize_exception(sys.exc_info()[1])
        return False

    @staticmethod
    def normalize_exception(exc):
        """
        Given a SyntaxError from a marker evaluation, normalize the error
        message:
            - Remove indications of filename and line number.
            - Replace platform-specific error messages with standard error
              messages.
        """
        subs = {
            'unexpected EOF while parsing': 'invalid syntax',
            'parenthesis is never closed': 'invalid syntax',
        }
        exc.filename = None
        exc.lineno = None
        exc.msg = subs.get(exc.msg, exc.msg)
        return exc

    @classmethod
    def and_test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        items = [
            cls.interpret(nodelist[i])
            for i in range(1, len(nodelist), 2)
        ]
        return functools.reduce(operator.and_, items)

    @classmethod
    def test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        items = [
            cls.interpret(nodelist[i])
            for i in range(1, len(nodelist), 2)
        ]
        return functools.reduce(operator.or_, items)

    @classmethod
    def atom(cls, nodelist):
        # Handle a parenthesized subexpression; anything else is unsupported
        # inside an environment marker.
        t = nodelist[1][0]
        if t == token.LPAR:
            if nodelist[2][0] == token.RPAR:
                raise SyntaxError("Empty parentheses")
            return cls.interpret(nodelist[2])
        msg = "Language feature not supported in environment markers"
        raise SyntaxError(msg)

    @classmethod
    def comparison(cls, nodelist):
        if len(nodelist) > 4:
            msg = "Chained comparison not allowed in environment markers"
            raise SyntaxError(msg)
        comp = nodelist[2][1]
        cop = comp[1]
        if comp[0] == token.NAME:
            # 'not in' / 'is not' arrive from the parser as two-token nodes;
            # reassemble them into the operator-string keys used by get_op().
            if len(nodelist[2]) == 3:
                if cop == 'not':
                    cop = 'not in'
                else:
                    cop = 'is not'
        try:
            cop = cls.get_op(cop)
        except KeyError:
            msg = repr(cop) + " operator not allowed in environment markers"
            raise SyntaxError(msg)
        return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))

    @classmethod
    def get_op(cls, op):
        # Dispatch table mapping parse-tree symbol ids and comparison
        # operator strings to their handlers; raises KeyError on anything
        # not allowed in a marker.
        ops = {
            symbol.test: cls.test,
            symbol.and_test: cls.and_test,
            symbol.atom: cls.atom,
            symbol.comparison: cls.comparison,
            'not in': lambda x, y: x not in y,
            'in': lambda x, y: x in y,
            '==': operator.eq,
            '!=': operator.ne,
        }
        # or_test only exists in some grammar versions
        if hasattr(symbol, 'or_test'):
            ops[symbol.or_test] = cls.test
        return ops[op]

    @classmethod
    def evaluate_marker(cls, text, extra=None):
        """
        Evaluate a PEP 426 environment marker on CPython 2.4+.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.

        This implementation uses the 'parser' module, which is not implemented
        on Jython and has been superseded by the 'ast' module in Python 2.6
        and later.
        """
        return cls.interpret(parser.expr(text).totuple(1)[1])

    @classmethod
    def _markerlib_evaluate(cls, text):
        """
        Evaluate a PEP 426 environment marker using markerlib.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        """
        from pip._vendor import _markerlib
        # markerlib implements Metadata 1.2 (PEP 345) environment markers.
        # Translate the variables to Metadata 2.0 (PEP 426).
        env = _markerlib.default_environment()
        for key in env.keys():
            new_key = key.replace('.', '_')
            env[new_key] = env.pop(key)
        try:
            result = _markerlib.interpret(text, env)
        except NameError:
            e = sys.exc_info()[1]
            raise SyntaxError(e.args[0])
        return result

    if 'parser' not in globals():
        # Fall back to less-complete _markerlib implementation if 'parser' module
        # is not available.
        evaluate_marker = _markerlib_evaluate

    @classmethod
    def interpret(cls, nodelist):
        # Collapse single-child parse-tree nodes, then dispatch on the operator.
        while len(nodelist) == 2:
            nodelist = nodelist[1]
        try:
            op = cls.get_op(nodelist[0])
        except KeyError:
            raise SyntaxError("Comparison or logical expression expected")
        return op(nodelist)

    @classmethod
    def evaluate(cls, nodelist):
        # Evaluate a terminal parse node: a known NAME or a plain string literal.
        while len(nodelist) == 2:
            nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        if kind == token.NAME:
            try:
                op = cls.values[name]
            except KeyError:
                raise SyntaxError("Unknown name %r" % name)
            return op()
        if kind == token.STRING:
            s = nodelist[1]
            if not cls._safe_string(s):
                raise SyntaxError(
                    "Only plain strings allowed in environment markers")
            return s[1:-1]
        msg = "Language feature not supported in environment markers"
        raise SyntaxError(msg)

    @staticmethod
    def _safe_string(cand):
        # Accept only simple single- or double-quoted strings: no triple
        # quotes and no backslash escapes.
        return (
            cand[:1] in "'\"" and
            not cand.startswith('"""') and
            not cand.startswith("'''") and
            '\\' not in cand
        )

invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker


class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        return io.BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info, name))

    # Metadata is stored as bytes; on Python 3 it must be decoded to text.
    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name)).decode("utf-8")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))

    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))

    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []

    def run_script(self, script_name, namespace):
        # Execute an installed script (from the EGG-INFO/scripts dir) in
        # `namespace`, preferring the on-disk copy when one exists.
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # No real file (e.g. zipped egg): seed linecache so tracebacks
            # can still show the script source.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)

    # The _has/_isdir/_listdir/_get primitives are supplied by subclasses
    # registered for concrete loader types; the base class cannot implement
    # them for an arbitrary loader.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        # Join a '/'-separated resource name onto `base` using OS separators.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )

register_loader_type(object, NullProvider)


class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        while path != old:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)


class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        with open(path, 'rb') as stream:
            return stream.read()

register_loader_type(type(None), DefaultProvider)

if importlib_bootstrap is not None:
    register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)


class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    # All primitives are stubbed out to report "nothing here".
    _isdir = _has = lambda self, path: False
    _get = lambda self, path: ''
    _listdir = lambda self, path: []
    module_path = None

    def __init__(self):
        pass

empty_provider = EmptyProvider()


class ZipManifests(dict):
    """
    zip manifest builder
    """

    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with ContextualZipFile(path) as zfile:
            items = (
                (
                    name.replace('/', os.sep),
                    zfile.getinfo(name),
                )
                for name in zfile.namelist()
            )
            return dict(items)

    load = build


class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')

    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime

        # Rebuild the manifest only when the archive's mtime has changed.
        if path not in self or self[path].mtime != mtime:
            manifest = self.build(path)
            self[path] = self.manifest_mod(manifest, mtime)

        return self[path].manifest


class ContextualZipFile(zipfile.ZipFile):
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __new__(cls, *args, **kwargs):
        """
        Construct a ZipFile or ContextualZipFile as appropriate
        """
        # On modern Pythons ZipFile is already a context manager; use it
        # directly so behavior matches the stdlib.
        if hasattr(zipfile.ZipFile, '__exit__'):
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)


class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    eagers = None
    _zip_manifests = MemoizedZipManifests()

    def __init__(self, module):
        EggProvider.__init__(self, module)
        self.zip_pre = self.loader.archive+os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )

    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre+zip_path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )

    @property
    def zipinfo(self):
        return self._zip_manifests.load(self.loader.archive)

    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size

    def _extract_resource(self, manager, zip_path):
        # Directories are extracted by recursing into their entries.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)

        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])

        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )

            if self._is_current(real_path, zip_path):
                return real_path

            # Write to a temp file then rename into place so a partially
            # written cache entry is never observed.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)

            try:
                rename(tmpnam, real_path)

            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name=='nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise

        except os.error:
            # report a user-friendly error
            manager.extraction_error()

        return real_path

    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents

    def _get_eager_resources(self):
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        # Lazily build and cache a directory index: parent subpath -> children.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))

    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))

register_loader_type(zipimport.zipimporter, ZipProvider)


class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        return name == 'PKG-INFO'

    def get_metadata(self, name):
        if name == 'PKG-INFO':
            with open(self.path, 'rU') as f:
                metadata = f.read()
            return metadata
        raise KeyError("No metadata except PKG-INFO is available")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))


class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        self.module_path = path
        self.egg_info = egg_info


class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""

        self.zip_pre = importer.archive+os.sep
        self.loader = importer
        if importer.prefix:
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()

_declare_state('dict', _distribution_finders = {})


def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item.  See ``pkg_resources.find_on_path`` for an example."""
    _distribution_finders[importer_type] = distribution_finder


def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    importer = get_importer(path_item)
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)


def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if subitem.endswith('.egg'):
            subpath = os.path.join(path_item, subitem)
            for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
                yield dist

register_finder(zipimport.zipimporter, find_eggs_in_zip)


def find_nothing(importer, path_item, only=False):
    return ()

register_finder(object, find_nothing)


def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)

    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item, 'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item, entry, metadata, precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    dists = find_distributions(os.path.join(path_item, entry))
                    for dist in dists:
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # An .egg-link points at a directory elsewhere; only the
                    # first non-blank line is used.
                    with open(os.path.join(path_item, entry)) as entry_file:
                        entry_lines = entry_file.readlines()
                    for line in entry_lines:
                        if not line.strip():
                            continue
                        path = os.path.join(path_item, line.rstrip())
                        dists = find_distributions(path)
                        for item in dists:
                            yield item
                        break

register_finder(pkgutil.ImpImporter, find_on_path)

if importlib_bootstrap is not None:
    register_finder(importlib_bootstrap.FileFinder, find_on_path)

_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})


def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path
    item handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler


def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""

    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # Create the namespace module on first sight and link it into its
        # parent package.
        module = sys.modules[packageName] = imp.new_module(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        # Re-merge: loading may have replaced __path__; keep every entry.
        for path_item in path:
            if path_item not in module.__path__:
                module.__path__.append(path_item)
    return subpath


def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""

    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return

        path, parent = sys.path, None
        if '.' in packageName:
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)

        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])

        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)

    finally:
        imp.release_lock()


def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    imp.acquire_lock()
    try:
        for package in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        imp.release_lock()


def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""

    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for item in module.__path__:
        if _normalize_cached(item) == normalized:
            break
    else:
        # Only return the path if it's not already there
        return subpath

register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)

if importlib_bootstrap is not None:
    register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)


def null_ns_handler(importer, path_item, packageName, module):
    return None

register_namespace_handler(object, null_ns_handler)


def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    return os.path.normcase(os.path.realpath(filename))


def _normalize_cached(filename, _cache={}):
    # NOTE: the mutable default is deliberate here: it is the process-wide
    # memoization cache for normalize_path().
    try:
        return _cache[filename]
    except KeyError:
        _cache[filename] = result = normalize_path(filename)
        return result
def _set_parent_ns(packageName):
    # Bind the freshly-created namespace module as an attribute of its parent.
    parts = packageName.split('.')
    name = parts.pop()
    if parts:
        parent = '.'.join(parts)
        setattr(sys.modules[parent], name, sys.modules[packageName])


def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, string_types):
        for s in strs.splitlines():
            s = s.strip()
            # skip blank lines/comments
            if s and not s.startswith('#'):
                yield s
    else:
        # Recurse into nested iterables of strings.
        for ss in strs:
            for s in yield_lines(ss):
                yield s

# Regexes used by parse_requirements(); each is bound to its .match method.
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match

EGG_NAME = re.compile(
    r"(?P<name>[^-]+)"
    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
    re.VERBOSE | re.IGNORECASE
).match


class EntryPoint(object):
    """Object representing an advertised importable object"""

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Validate/normalize extras by round-tripping through a Requirement.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist

    def __str__(self):
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, env=None, installer=None):
        if require:
            self.require(env, installer)
        else:
            warnings.warn(
                "`require` parameter is deprecated. Use "
                "EntryPoint._load instead.",
                DeprecationWarning,
            )
        return self._load()

    def _load(self):
        # Import the module, then walk the dotted attrs to the target object.
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))

    def require(self, env=None, installer=None):
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer)
        list(map(working_set.add, items))

    # Matches: name = module:attr.attr [extra1, extra2]
    pattern = re.compile(
        r'\s*'
        r'(?P<name>[+\w. -]+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )

    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1, extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)

    @classmethod
    def _parse_extras(cls, extras_spec):
        # Parse "[a,b]" via a dummy requirement; version specs are not allowed.
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps


def _remove_md5_fragment(location):
    # Strip a trailing "#md5=..." fragment from a URL, if present.
    if not location:
        return ''
    parsed = urlparse(location)
    if parsed[-1].startswith('md5='):
        return urlunparse(parsed[:-1] + ('',))
    return location


class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    PKG_INFO = 'PKG-INFO'

    def __init__(self, location=None, metadata=None, project_name=None,
            version=None, py_version=PY_MAJOR, platform=None,
            precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        self._provider = metadata or empty_provider

    @classmethod
    def from_location(cls, location, basename, metadata=None, **kw):
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            # .dist-info gets much metadata differently
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name', 'ver', 'pyver', 'plat'
                )
            cls = _distributionImpl[ext.lower()]
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )

    @property
    def hashcmp(self):
        # Tuple used for hashing and all ordering comparisons.
        return (
            self.parsed_version,
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version,
            self.platform,
        )

    def __hash__(self):
        return hash(self.hashcmp)

    def __lt__(self, other):
        return self.hashcmp < other.hashcmp

    def __le__(self, other):
        return self.hashcmp <= other.hashcmp

    def __gt__(self, other):
        return self.hashcmp > other.hashcmp

    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp

    def __ne__(self, other):
        return not self == other

    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed.  (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)

    @property
    def key(self):
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key

    @property
    def parsed_version(self):
        if not hasattr(self, "_parsed_version"):
            self._parsed_version = parse_version(self.version)
            if isinstance(
                    self._parsed_version, packaging.version.LegacyVersion):
                # While an empty version is techincally a legacy version and
                # is not a valid PEP 440 version, it's also unlikely to
                # actually come from someone and instead it is more likely that
                # it comes from setuptools attempting to parse a filename and
                # including it in the list. So for that we'll gate this warning
                # on if the version is anything at all or not.
                if self.version:
                    warnings.warn(
                        "'%s (%s)' is being parsed as a legacy, non PEP 440, "
                        "version. You may find odd behavior and sort order. "
                        "In particular it will be sorted as less than 0.0. It "
                        "is recommend to migrate to PEP 440 compatible "
                        "versions." % (
                            self.project_name, self.version,
                        ),
                        PEP440Warning,
                    )

        return self._parsed_version

    @property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # Fall back to scanning the PKG-INFO metadata for a Version header.
            for line in self._get_metadata(self.PKG_INFO):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':', 1)[1].strip())
                    return self._version
            else:
                tmpl = "Missing 'Version:' header and/or %s file"
                raise ValueError(tmpl % self.PKG_INFO, self)

    @property
    def _dep_map(self):
        # Lazily built mapping: extra name (or None) -> list of Requirements.
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra, reqs in split_sections(self._get_metadata(name)):
                    if extra:
                        if ':' in extra:
                            # Section header of the form "extra:marker".
                            extra, marker = extra.split(':', 1)
                            if invalid_marker(marker):
                                # XXX warn
                                reqs = []
                            elif not evaluate_marker(marker):
                                reqs = []
                        extra = safe_extra(extra) or None
                    dm.setdefault(extra, []).extend(parse_requirements(reqs))
            return dm

    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps

    def _get_metadata(self, name):
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line

    def activate(self, path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)

    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )

        if self.platform:
            filename += '-' + self.platform
        return filename

    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)

    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)

    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)

    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )

    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = "%s==%s" % (self.project_name, self.parsed_version)
        else:
            spec = "%s===%s" % (self.project_name, self.parsed_version)

        return Requirement.parse(spec)

    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()

    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group, {})
        return ep_map

    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)

    def insert_on(self, path, loc=None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if not loc:
            return

        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        npath = [(p and _normalize_cached(p) or p) for p in path]

        for p, item in enumerate(npath):
            if item == nloc:
                break
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            if path is sys.path:
                self.check_version_conflict()
            path.append(loc)
            return

        # p is the spot where we found or inserted loc; now remove duplicates
        while True:
            try:
                np = npath.index(nloc, p+1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                # ha!
                p = np

        return

    def check_version_conflict(self):
        if self.key == 'setuptools':
            # ignore the inevitable setuptools self-conflicts :(
            return

        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )

    def has_version(self):
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True

    def clone(self, **kw):
        """Copy this distribution, substituting in any changed keyword args"""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)

    @property
    def extras(self):
        return [dep for dep in self._dep_map if dep]


class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    PKG_INFO = 'METADATA'
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")

    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info

    @property
    def _dep_map(self):
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map

    def _preparse_requirement(self, requires_dist):
        """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')

        Split environment marker, add == prefix to version specifiers as
        necessary, and remove parenthesis.
        """
        parts = requires_dist.split(';', 1) + ['']
        distvers = parts[0].strip()
        mark = parts[1].strip()
        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
        distvers = distvers.replace('(', '').replace(')', '')
        return (distvers, mark)

    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        from pip._vendor._markerlib import compile as compile_marker
        dm = self.__dep_map = {None: []}

        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            distvers, mark = self._preparse_requirement(req)
            parsed = next(parse_requirements(distvers))
            parsed.marker_fn = compile_marker(mark)
            reqs.append(parsed)

        def reqs_for_extra(extra):
            for req in reqs:
                if req.marker_fn(override={'extra': extra}):
                    yield req

        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)

        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            extra = safe_extra(extra.strip())
            dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)

        return dm

_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': Distribution,
    '.dist-info': DistInfoDistribution,
}


def issue_warning(*args, **kw):
    # Emit a warning attributed to the first caller outside this module.
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        pass
    warnings.warn(stacklevel=level + 1, *args, **kw)


def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))

    def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
        # Scan a comma-separated list of ITEM matches until TERMINATOR,
        # following \-continuations onto subsequent lines.
        items = []
        while not TERMINATOR(line, p):
            if CONTINUE(line, p):
                try:
                    line = next(lines)
                    p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )

            match = ITEM(line, p)
            if not match:
                msg = "Expected " + item_name + " in"
                raise ValueError(msg, line, "at", line[p:])

            items.append(match.group(*groups))
            p = match.end()

            match = COMMA(line, p)
            if match:
                # skip the comma
                p = match.end()
            elif not TERMINATOR(line, p):
                msg = "Expected ',' or end-of-list in"
                raise ValueError(msg, line, "at", line[p:])

        match = TERMINATOR(line, p)
        # skip the terminator, if any
        if match:
            p = match.end()
        return line, p, items

    for line in lines:
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        match = OBRACKET(line, p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )

        line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
            "version spec")
        specs = [(op, val) for op, val in specs]
        yield Requirement(project_name, specs, extras)


class Requirement:
    # A single parsed requirement: project name, version specifiers, extras.

    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        self.specifier = packaging.specifiers.SpecifierSet(
            ",".join(["".join([x, y]) for x, y in specs])
        )
        self.specs = specs
        self.extras = tuple(map(safe_extra, extras))
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
        )
        self.__hash = hash(self.hashCmp)

    def __str__(self):
        extras = ','.join(self.extras)
        if extras:
            extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, self.specifier)

    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )

    def __contains__(self, item):
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False

            item = item.version

        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)

    def __hash__(self):
        return self.__hash

    def __repr__(self):
        return "Requirement.parse(%r)" % str(self)

    @staticmethod
    def parse(s):
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs) == 1:
                return reqs[0]
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)


def _get_mro(cls):
    """Get an mro for a type or classic class"""
    if not isinstance(cls, type):
        # Old-style class: mix in `object` to borrow a real __mro__, then
        # drop the synthetic subclass itself.
        class cls(cls, object):
            pass
        return cls.__mro__[1:]
    return cls.__mro__


def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    for t in _get_mro(getattr(ob, '__class__', type(ob))):
        if t in registry:
            return registry[t]


def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)


def _bypass_ensure_directory(path, mode=0o777):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        _bypass_ensure_directory(dirname)
        mkdir(dirname, mode)


def split_sections(s):
    """Split a string or iterable thereof into (section, content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        if line.startswith("["):
            if line.endswith("]"):
                if section or content:
                    yield section, content
                section = line[1:-1].strip()
                content = []
            else:
                raise ValueError("Invalid section heading", line)
        else:
            content.append(line)

    # wrap up last segment
    yield section, content


def _mkstemp(*args, **kw):
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args, **kw)
    finally:
        # and then put it back
        os.open = old_open


# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)

# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()


def _initialize(g):
    # Export the manager's public methods as module-level functions.
    for name in dir(_manager):
        if not name.startswith('_'):
            g[name] = getattr(_manager, name)

_initialize(globals())

# Prepare the master working set and make the ``require()`` API available
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)

require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script

# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
rbramwell/pulp
refs/heads/master
server/pulp/server/webservices/views/serializers/dispatch.py
14
from pulp.server.webservices.views.serializers.link import link_obj


def task_result_href(task):
    """
    Build the REST '_href' link for a task's own resource.

    :param task: dictionary representation of a task; only 'task_id' is read
    :type  task: dict
    :return: {'_href': ...} when the task has an id, otherwise an empty dict
    :rtype:  dict
    """
    if task.get('task_id'):
        return {'_href': '/pulp/api/v2/tasks/%s/' % task['task_id']}
    return {}


def spawned_tasks(task):
    """
    For a given Task dictionary convert the spawned tasks list of ids to a
    list of link objects.

    :param task: The dictionary representation of a task object in the database
    :type  task: dict
    :return: dict with key 'spawned_tasks' mapped to a list of link objects,
             each carrying its 'task_id' (empty list when nothing was spawned)
    :rtype:  dict
    """
    spawned_tasks = []
    spawned = task.get('spawned_tasks')
    if spawned:
        for spawned_task_id in spawned:
            link = link_obj('/pulp/api/v2/tasks/%s/' % spawned_task_id)
            link['task_id'] = spawned_task_id
            spawned_tasks.append(link)
    return {'spawned_tasks': spawned_tasks}


def task_status(task):
    """
    Return a serialized version of the given TaskStatus document.

    :param task: Task status document object (indexed like a mapping here)
    :type  task: pulp.server.db.model.TaskStatus
    :return: serialized task status
    :rtype:  dict
    """
    task_dict = {}
    attributes = ['task_id', 'worker_name', 'tags', 'state', 'error',
                  'spawned_tasks', 'progress_report', 'task_type',
                  'start_time', 'finish_time', 'result', 'exception',
                  'traceback', '_ns']
    for attribute in attributes:
        task_dict[attribute] = task[attribute]
    # This is to preserve backward compatibility for semantic versioning:
    # '_id' keeps the raw id while 'id' is exposed as a string.
    task_dict['_id'] = task['id']
    task_dict['id'] = str(task['id'])
    return task_dict
kuojo/NocMonkey
refs/heads/master
monkey-stable.py
2
#!/usr/bin/python import os import os.path import sys from PyQt4 import QtGui, QtCore #Initalize Stuff clear = lambda: os.system('cls') class Acknowledgements(object): def __init__(self,flatfile): self.flatfile = str(flatfile) self.initilize(self.flatfile) self.clipboard = QtGui.QApplication.clipboard() #acks = self.write_acks() #self.menu(acks) def write_acks(self): acks = [] with open(str(self.flatfile),"r") as f: for line in f: if line == "\n" or line == "": continue acks.append(line.split(',')) return acks def add_to_acks(self, string): f = open(self.flatfile, "a+") f.write(str(string) + ",\n") f.close() def rewrite_acks(self, acks): f = open(self.flatfile, "w+") for i in range(len(acks)): f.write(",".join(acks[i])) f.close() def add_to_clipboard(self, text): #Orginally I used Tk and this was nesscary self.clipboard.setText(str(text).rstrip("\n")) def is_int(self, value): try: value=int(value) return True except ValueError: return False def nope(self): print "\nNot an Option\n" raw_input("Press enter to continue") def menu(self, acks): copied = "" while True: clear() print copied print "Options: Add, Remove, Quit/Exit, Ack Number" for i in range(len(acks)): print i," ", acks[i], response = raw_input("\nAcknowledgement: ") if self.is_int(response) and int(response) <= len(acks)-1: self.add_to_clipboard(acks[int(response)].rstrip('\n')) copied = "'%s' copied to clipboard\n" % (acks[int(response)].rstrip('\n')) elif response.lower() == "add": response = raw_input("Add what?: ") self.add_to_acks(response) acks.append(response + "\n") elif response.lower() == "remove": response = raw_input("Number to remove: ") if self.is_int(response) and int(response) <= len(acks)-1: del acks[int(response)] self.rewrite_acks(acks) else: self.nope() elif response.lower() == "quit" or response.lower() == "exit": break else: self.nope() return 0 def initilize(self, flatfile): foo = os.path.isfile(flatfile) acks = [] if foo == False: acks = [['Looking into it', '\n'], ['Ignoring per 
playbook', '\n'], ['Ignoring per announcement', '\n'], ['Watching for 5 mins', '\n'], ['Watching for 6 mins', '\n'], ['Watching for 10 mins', '\n'], ['Watching for 30 mins', '\n'], ['Watching for 90 mins', '\n'], ['Watching for 2 hours', '\n'], ['^^^ Restarting Tomcat', '\n'], ['^^^ Restarting Tomcat and Hp Document Renderer', '\n'], ['^^^ Restaring Amavisd', '\n'], ['^^^ Running Simple Test', '\n'], ['^^^ Checking Healthchecks', '\n'], ['^^^ Checking Connectivity', '\n'], ['^^^ Running Disk Cleanup', '\n'], ['^^^ Writing Ticket', '\n'], ['### Contacting DevOps', '\n'], ['Expected Trigger. Due to Deployment', '\n'], ['Passed Healthcheck', '\n'], ['DevOps are Aware', '\n']] self.rewrite_acks(acks) class Window(QtGui.QMainWindow): def __init__(self): super(Window, self).__init__() self.acks = Acknowledgements("acks.txt") self.initUI() def initUI(self): self.ack = self.acks.write_acks() self.btns = [] self.ackButtons = [QtGui.QWidget(), QtGui.QWidget()] self.vbox = [QtGui.QVBoxLayout(),QtGui.QVBoxLayout()] self.vbox[0].setSpacing(0) self.vbox[1].setSpacing(0) self.ackButtons[0].setLayout(self.vbox[0]) self.ackButtons[1].setLayout(self.vbox[1]) self.stackWidget = QtGui.QStackedWidget() self.closingButton = QtGui.QPushButton("Ok") self.closingButton.setStatusTip("Accept Changes") self.closingButton.clicked.connect(self.changeLayout) self.closingButton.resize(self.closingButton.sizeHint()) self.title=QtGui.QLabel(self) self.title.setFixedSize(150,20) #self.title.setFrameStyle(QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain) self.vbox[1].addWidget(self.title) try: for i in range(len(self.ack)): self.btns.append([QtGui.QPushButton(str(self.ack[i][0])), QtGui.QCheckBox(str(self.ack[i][0]))]) self.btns[i][0].setToolTip(str(self.ack[i][1]).rstrip('\n')) self.btns[i][0].setStatusTip(str(self.ack[i][1]).rstrip('\n')) self.btns[i][0].clicked.connect(self.buttonClicked) self.btns[i][0].resize(self.btns[i][0].sizeHint()) self.vbox[0].addWidget(self.btns[i][0]) 
self.btns[i][1].stateChanged.connect(self.buttonChecked) self.vbox[1].addWidget(self.btns[i][1]) self.spacer = (QtGui.QWidget(),QtGui.QWidget()) self.vbox[1].addWidget(self.spacer[1]) self.vbox[0].addWidget(self.spacer[0]) self.vbox[1].addWidget(self.closingButton) self.stackWidget.addWidget(self.ackButtons[0]) self.stackWidget.addWidget(self.ackButtons[1]) self.setCentralWidget(self.stackWidget) except IndexError: errMsg = QtGui.QLabel(self) errMsg.setText("Error:\n\nThis happened because the file 'acks.txt' is formatted incorrectly.\n'acks.txt' should never be directly edited.\nYou should delete the file then reopen NocMonkey") errMsg.setMargin(10) errMsg.setFont(QtGui.QFont("Arial", 15, 75, False)) self.setCentralWidget(errMsg) #Dropped the 'n' because of the addAction method, and because I am lazy addActio = QtGui.QAction(QtGui.QIcon("add.png"), '&Add a Trigger', self) addActio.setShortcut("Ctrl+A") addActio.setStatusTip("Add Acknowledgement") addActio.triggered.connect(self.appendButton) removeActio = QtGui.QAction(QtGui.QIcon("remove.png"), "&Remove Button", self) removeActio.setShortcut("Ctrl+R") removeActio.setStatusTip("Remove Acknowledgement") removeActio.triggered.connect(self.removeButton) self.rmButton = False hintActio = QtGui.QAction(QtGui.QIcon("tooltip.png"), "&Add Button Hint", self) hintActio.setShortcut("Ctrl+H") hintActio.setStatusTip("Add a hint to the trigger") hintActio.triggered.connect(self.addTooltip) self.addHint = False editActio = QtGui.QAction("&Edit Buttons", self) editActio.setShortcut("Ctrl+E") editActio.setStatusTip("Change the text on the buttons") editActio.triggered.connect(self.editButton) self.viButton = False #This action doesn't work, don't try it topActio = QtGui.QAction(QtGui.QIcon("ontop.png"), "&Top", self) topActio.setShortcut("Ctrl+T") topActio.setStatusTip("Place application always on top") topActio.triggered.connect(self.alwaysOnTop) self.onTop = False exitActio = QtGui.QAction(QtGui.QIcon("need_to_add.png"), 
'&Exit', self) exitActio.setShortcut("Ctrl+Q") exitActio.setStatusTip("Exit application") exitActio.triggered.connect(self.close) self.statusBar() menubar = self.menuBar() filemenu = menubar.addMenu('&File') filemenu.addAction(exitActio) editmenu = menubar.addMenu('&Edit') editmenu.addAction(addActio) editmenu.addAction(editActio) editmenu.addAction(hintActio) editmenu.addAction(removeActio) self.setWindowTitle('Noc Monkey') self.setWindowIcon(QtGui.QIcon('monkey-head.png')) #self.acks.clipboard.connect(self.acks.clipboard, QtCore.SIGNAL("dataChanged()"), self.clipStats) self.acks.clipboard.dataChanged.connect(self.clipStatus) self.show() def buttonClicked(self): sender = self.sender() self.acks.add_to_clipboard(sender.text()) self.statusBar().showMessage("'" + sender.text() +"' was copied") def appendButton(self): if self.stackWidget.currentIndex() != 0: self.changeLayout() text, ok = QtGui.QInputDialog.getText(self, 'Add Acknowledgement', 'Enter new acknowledgement:') if ok: self.acks.add_to_acks(text) self.ack.append([str(text), "\n"]) self.btns.append([QtGui.QPushButton(str(text).rstrip('\n')),QtGui.QCheckBox(str(text))]) i=len(self.btns)-1 self.btns[i][0].clicked.connect(self.buttonClicked) self.btns[i][1].stateChanged.connect(self.buttonChecked) self.btns[i][0].resize(self.btns[i][0].sizeHint()) self.vbox[1].removeWidget(self.spacer[1]) self.vbox[0].removeWidget(self.spacer[0]) self.vbox[1].removeWidget(self.closingButton) self.vbox[0].addWidget(self.btns[i][0]) self.vbox[1].addWidget(self.btns[i][1]) self.vbox[1].addWidget(self.spacer[1]) self.vbox[0].addWidget(self.spacer[0]) self.vbox[1].addWidget(self.closingButton) else: pass def removeButton(self): self.title.setText("Remove Acknowledgement") self.rmButton = True self.addHint = False self.viButton = False self.stackWidget.setCurrentIndex(1) def addTooltip(self): self.title.setText("Add a Hint to a Trigger") self.rmButton = False self.addHint = True self.viButton = False 
self.stackWidget.setCurrentIndex(1) def editButton(self): self.title.setText("Change Button text") self.rmButton = False self.addHint = False self.viButton = True self.stackWidget.setCurrentIndex(1) def clipStatus(self): if self.acks.clipboard.mimeData().hasText(): self.statusBar().showMessage(self.acks.clipboard.text()) else: pass def alwaysOnTop(self): #This function doesn't work. Causes app to disappear. Not sure why. Sorry :( if self.onTop == False: self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) self.onTop = True else: self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowStaysOnTopHint) self.onTop = False def changeLayout(self): self.rmButton = False self.addHint = False self.viButton = False self.title.setText("") self.stackWidget.setCurrentIndex(0) def buttonChecked(self): sender = self.sender() if self.rmButton == True: for i in range(len(self.ack)): try: self.ack[i].index(str(sender.text())) del self.ack[i] self.vbox[0].removeWidget(self.btns[i][0]) self.vbox[1].removeWidget(self.btns[i][1]) self.btns[i][0].deleteLater() self.btns[i][1].deleteLater() del self.btns[i] self.acks.rewrite_acks(self.ack) except ValueError: pass if self.addHint == True: for i in range(len(self.ack)): try: self.ack[i].index(str(sender.text())) text, ok = QtGui.QInputDialog.getText(self, 'Add ToolTip', 'Whats the tooltip:', QtGui.QLineEdit.Normal, str(self.btns[i][0].statusTip())) if ok: self.ack[i][1] = str(text) + "\n" self.btns[i][0].setToolTip(str(text)) self.btns[i][0].setStatusTip(str(text)) self.btns[i][1].setToolTip(str(text)) self.btns[i][1].setStatusTip(str(text)) self.btns[i][1].toggle() self.acks.rewrite_acks(self.ack) else: self.btns[i][1].toggle() except ValueError: pass if self.viButton == True: for i in range(len(self.ack)): try: self.ack[i].index(str(sender.text())) text, ok = QtGui.QInputDialog.getText(self, 'Change Button text', 'Edit Button Text:', QtGui.QLineEdit.Normal, str(self.btns[i][0].text())) if ok: self.ack[i][0] = str(text) 
self.btns[i][0].setText(str(text)) self.btns[i][1].setText(str(text)) self.btns[i][1].toggle() self.acks.rewrite_acks(self.ack) else: self.btns[i][1].toggle() except ValueError: pass def main(): app = QtGui.QApplication(sys.argv) window = Window() sys.exit(app.exec_()) if __name__ == '__main__': main()
SnabbCo/snabbswitch
refs/heads/master
src/program/lwaftr/tests/data/add-dot1q.py
14
#! /usr/bin/env python2
"""Add an 802.1Q (dot1q) VLAN tag to every Ethernet frame in a pcap file.

Usage: add-dot1q.py input.pcap output.pcap [vlan-id]

The VLAN identifier defaults to 42 when not supplied on the command line.
"""

import sys

from scapy.all import rdpcap, wrpcap, NoPayload, Ether, Dot1Q

if len(sys.argv) not in (3, 4):
    raise SystemExit("Usage: " + sys.argv[0] +
                     " input.pcap output.pcap [vlan-id]")

VLAN_ID = 42
if len(sys.argv) == 4:
    try:
        VLAN_ID = int(sys.argv[3])
    except ValueError:
        # Narrowed from a bare "except:" which also swallowed
        # KeyboardInterrupt/SystemExit; int() on a str raises ValueError.
        raise SystemExit("'" + sys.argv[3] + "' is not a valid VLAN identifier")

packets = []
for packet in rdpcap(sys.argv[1]):
    # Walk the layer chain of each packet looking for the Ethernet header.
    layer = packet.firstlayer()
    while not isinstance(layer, NoPayload):
        if type(layer) is Ether:
            # 0x8100 is the 802.1Q TPID: mark the frame as VLAN-tagged.
            layer.type = 0x8100
            # Splice a Dot1Q layer between Ether and its former payload.
            dot1q = Dot1Q(vlan=VLAN_ID)
            dot1q.add_payload(layer.payload)
            layer.remove_payload()
            layer.add_payload(dot1q)
            layer = dot1q
        layer = layer.payload
    packets.append(packet)

wrpcap(sys.argv[2], packets)
umrashrf/scrapy
refs/heads/master
tests/test_utils_spider.py
147
import unittest

from scrapy.http import Request
from scrapy.item import BaseItem
from scrapy.utils.spider import iterate_spider_output, iter_spider_classes

from scrapy.spiders import CrawlSpider


class MyBaseSpider(CrawlSpider):
    # abstract spider: defines no ``name``, so iter_spider_classes
    # is expected to skip it below.
    pass


class MySpider1(MyBaseSpider):
    name = 'myspider1'


class MySpider2(MyBaseSpider):
    name = 'myspider2'


class UtilsSpidersTestCase(unittest.TestCase):
    def test_iterate_spider_output(self):
        # iterate_spider_output must wrap single objects (items, requests,
        # arbitrary objects) in a one-element iterable and pass real
        # iterables through unchanged.
        i = BaseItem()
        r = Request('http://scrapytest.org')
        o = object()
        self.assertEqual(list(iterate_spider_output(i)), [i])
        self.assertEqual(list(iterate_spider_output(r)), [r])
        self.assertEqual(list(iterate_spider_output(o)), [o])
        self.assertEqual(list(iterate_spider_output([r, i, o])), [r, i, o])

    def test_iter_spider_classes(self):
        # Import this very module and check that only the concrete,
        # named spiders are yielded (the abstract base is excluded).
        import tests.test_utils_spider
        it = iter_spider_classes(tests.test_utils_spider)
        self.assertEqual(set(it), {MySpider1, MySpider2})


if __name__ == "__main__":
    unittest.main()
fillycheezstake/MissionPlanner
refs/heads/master
Lib/distutils/command/__init__.py
59
"""distutils.command Package containing implementation of all the standard Distutils commands.""" __revision__ = "$Id$" __all__ = ['build', 'build_py', 'build_ext', 'build_clib', 'build_scripts', 'clean', 'install', 'install_lib', 'install_headers', 'install_scripts', 'install_data', 'sdist', 'register', 'bdist', 'bdist_dumb', 'bdist_rpm', 'bdist_wininst', 'upload', 'check', # These two are reserved for future use: #'bdist_sdux', #'bdist_pkgtool', # Note: # bdist_packager is not included because it only provides # an abstract base class ]
rest-of/the-deck
refs/heads/master
lambda/lib/python2.7/site-packages/wheel/pkginfo.py
565
"""Tools for reading and writing PKG-INFO / METADATA without caring about the encoding.""" from email.parser import Parser try: unicode _PY3 = False except NameError: _PY3 = True if not _PY3: from email.generator import Generator def read_pkg_info_bytes(bytestr): return Parser().parsestr(bytestr) def read_pkg_info(path): with open(path, "r") as headers: message = Parser().parse(headers) return message def write_pkg_info(path, message): with open(path, 'w') as metadata: Generator(metadata, maxheaderlen=0).flatten(message) else: from email.generator import BytesGenerator def read_pkg_info_bytes(bytestr): headers = bytestr.decode(encoding="ascii", errors="surrogateescape") message = Parser().parsestr(headers) return message def read_pkg_info(path): with open(path, "r", encoding="ascii", errors="surrogateescape") as headers: message = Parser().parse(headers) return message def write_pkg_info(path, message): with open(path, "wb") as out: BytesGenerator(out, maxheaderlen=0).flatten(message)
kenorb-contrib/BitTorrent
refs/heads/master
twisted/plugins/twisted_telnet.py
20
# Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. from twisted.scripts.mktap import _tapHelper TwistedTelnet = _tapHelper( "Twisted Telnet Shell Server", "twisted.tap.telnet", "A simple, telnet-based remote debugging service.", "telnet")
apparena/docs
refs/heads/master
readthedocs/projects/views/__init__.py
12133432
zanderle/django
refs/heads/master
django/conf/locale/el/__init__.py
12133432
bnprk/django-oscar
refs/heads/master
sites/demo/apps/__init__.py
12133432
wong2/sentry
refs/heads/master
tests/sentry/web/__init__.py
12133432
mujiansu/arangodb
refs/heads/devel
3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/scripts/ce/pysynch.py
38
# Simple CE synchronisation utility with Python features. import wincerapi import win32api import win32file import getopt import sys import os import string import win32con import fnmatch class InvalidUsage(Exception): pass def print_error(api_exc, msg): hr, fn, errmsg = api_exc print "%s - %s(%d)" % (msg, errmsg, hr) def GetFileAttributes(file, local=1): if local: return win32api.GetFileAttributes(file) else: return wincerapi.CeGetFileAttributes(file) def FindFiles(spec, local=1): if local: return win32api.FindFiles(spec) else: return wincerapi.CeFindFiles(spec) def isdir(name, local=1): try: attr = GetFileAttributes(name, local) return attr & win32con.FILE_ATTRIBUTE_DIRECTORY except win32api.error: return 0 def CopyFileToCe(src_name, dest_name, progress = None): sh = win32file.CreateFile(src_name, win32con.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None) bytes=0 try: dh = wincerapi.CeCreateFile(dest_name, win32con.GENERIC_WRITE, 0, None, win32con.OPEN_ALWAYS, 0, None) try: while 1: hr, data = win32file.ReadFile(sh, 2048) if not data: break wincerapi.CeWriteFile(dh, data) bytes = bytes + len(data) if progress is not None: progress(bytes) finally: pass dh.Close() finally: sh.Close() return bytes def BuildFileList(spec, local, recurse, filter, filter_args, recursed_path = ""): files = [] if isdir(spec, local): path = spec raw_spec = "*" else: path, raw_spec = os.path.split(spec) if recurse: # Need full scan, to get sub-direcetories. infos = FindFiles(os.path.join(path, "*"), local) else: infos = FindFiles(os.path.join(path, raw_spec), local) for info in infos: src_name = str(info[8]) full_src_name = os.path.join(path, src_name) if local: # Can't do this for CE! 
full_src_name = win32api.GetFullPathName(full_src_name) if isdir(full_src_name, local) : if recurse and src_name not in ['.','..']: new_spec = os.path.join(full_src_name, raw_spec) files = files + BuildFileList(new_spec, local, 1, filter, filter_args, os.path.join(recursed_path, src_name)) if fnmatch.fnmatch(src_name, raw_spec): rel_name = os.path.join(recursed_path, src_name) filter_data = filter( full_src_name, rel_name, info, local, filter_args ) if filter_data is not None: files.append( (full_src_name, info, filter_data) ) return files def _copyfilter(full_name, rel_name, info, local, bMaintainDir): if isdir(full_name, local): return if bMaintainDir: return rel_name return os.path.split(rel_name)[1] import pywin.dialogs.status, win32ui class FileCopyProgressDialog(pywin.dialogs.status.CStatusProgressDialog): def CopyProgress(self, bytes): self.Set(bytes/1024) def copy( args ): """copy src [src ...], dest Copy files to/from the CE device """ bRecurse = bVerbose = 0 bMaintainDir = 1 try: opts, args = getopt.getopt(args, "rv") except getopt.error, details: raise InvalidUsage(details) for o, v in opts: if o=="-r": bRecuse=1 elif o=='-v': bVerbose=1 if len(args)<2: raise InvalidUsage("Must specify a source and destination") src = args[:-1] dest = args[-1] # See if WCE: leading anywhere indicates a direction. if string.find(src[0], "WCE:")==0: bToDevice = 0 elif string.find(dest, "WCE:")==0: bToDevice = 1 else: # Assume copy to device. 
bToDevice = 1 if not isdir(dest, not bToDevice): print "%s does not indicate a directory" files = [] # List of FQ (from_name, to_name) num_files = 0 num_bytes = 0 dialog = FileCopyProgressDialog("Copying files") dialog.CreateWindow(win32ui.GetMainFrame()) if bToDevice: for spec in src: new = BuildFileList(spec, 1, bRecurse, _copyfilter, bMaintainDir) if not new: print "Warning: '%s' did not match any files" % (spec) files = files + new for full_src, src_info, dest_info in files: dest_name = os.path.join(dest, dest_info) size = src_info[5] print "Size=", size if bVerbose: print full_src, "->", dest_name,"- ", dialog.SetText(dest_name) dialog.Set(0, size/1024) bytes = CopyFileToCe(full_src, dest_name, dialog.CopyProgress) num_bytes = num_bytes + bytes if bVerbose: print bytes, "bytes" num_files = num_files + 1 dialog.Close() print "%d files copied (%d bytes)" % (num_files, num_bytes) def _dirfilter(*args): return args[1] def dir(args): """dir directory_name ... Perform a directory listing on the remote device """ bRecurse = 0 try: opts, args = getopt.getopt(args, "r") except getopt.error, details: raise InvalidUsage(details) for o, v in opts: if o=="-r": bRecurse=1 for arg in args: print "Directory of WCE:%s" % arg files = BuildFileList(arg, 0, bRecurse, _dirfilter, None) total_size=0 for full_name, info, rel_name in files: date_str = info[3].Format("%d-%b-%Y %H:%M") attr_string = " " if info[0] & win32con.FILE_ATTRIBUTE_DIRECTORY: attr_string = "<DIR>" print "%s %s %10d %s" % (date_str, attr_string, info[5], rel_name) total_size = total_size + info[5] print " " * 14 + "%3d files, %10d bytes" % (len(files), total_size) def run(args): """run program [args] Starts the specified program on the remote device. """ prog_args = [] for arg in args: if " " in arg: prog_args.append('"' + arg + '"') else: prog_args.append(arg) prog_args = string.join(prog_args, " ") wincerapi.CeCreateProcess(prog_args, "", None, None, 0, 0, None, "", None) def delete(args): """delete file, ... 
Delete one or more remote files """ for arg in args: try: wincerapi.CeDeleteFile(arg) print "Deleted: %s" % arg except win32api.error, details: print_error(details, "Error deleting '%s'" % arg) def DumpCommands(): print "%-10s - %s" % ("Command", "Description") print "%-10s - %s" % ("-------", "-----------") for name, item in globals().items(): if type(item)==type(DumpCommands): doc = getattr(item, "__doc__", "") if doc: lines = string.split(doc, "\n") print "%-10s - %s" % (name, lines[0]) for line in lines[1:]: if line: print " " * 8, line def main(): if len(sys.argv)<2: print "You must specify a command!" DumpCommands() return command = sys.argv[1] fn = globals().get(command) if fn is None: print "Unknown command:", command DumpCommands() return wincerapi.CeRapiInit() try: verinfo = wincerapi.CeGetVersionEx() print "Connected to device, CE version %d.%d %s" % (verinfo[0], verinfo[1], verinfo[4]) try: fn(sys.argv[2:]) except InvalidUsage, msg: print "Invalid syntax -", msg print fn.__doc__ finally: try: wincerapi.CeRapiUninit() except win32api.error, details: print_error(details, "Error disconnecting") if __name__=='__main__': main()
CYBAI/servo
refs/heads/master
tests/wpt/web-platform-tests/css/css-fonts/support/fonts/makegsubfonts.py
30
from __future__ import print_function import os import textwrap from xml.etree import ElementTree from fontTools.ttLib import TTFont, newTable from fontTools.misc.psCharStrings import T2CharString from fontTools.ttLib.tables.otTables import GSUB,\ ScriptList, ScriptRecord, Script, DefaultLangSys,\ FeatureList, FeatureRecord, Feature,\ LookupList, Lookup, AlternateSubst, SingleSubst # paths directory = os.path.dirname(__file__) shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx") shellTempPath = os.path.join(directory, "gsubtest-shell.otf") featureList = os.path.join(directory, "gsubtest-features.txt") javascriptData = os.path.join(directory, "gsubtest-features.js") outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d") baseCodepoint = 0xe000 # ------- # Features # ------- f = open(featureList, "rb") text = f.read() f.close() mapping = [] for line in text.splitlines(): line = line.strip() if not line: continue if line.startswith("#"): continue # parse values = line.split("\t") tag = values.pop(0) mapping.append(tag); # -------- # Outlines # -------- def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None): charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs) charStringsIndex.append(charString) glyphID = len(topDict.charset) charStrings.charStrings[glyphName] = glyphID topDict.charset.append(glyphName) def makeLookup1(): # make a variation of the shell TTX data f = open(shellSourcePath) ttxData = f.read() f.close() ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1") tempShellSourcePath = shellSourcePath + ".temp" f = open(tempShellSourcePath, "wb") f.write(ttxData) f.close() # compile the shell shell = TTFont(sfntVersion="OTTO") shell.importXML(tempShellSourcePath) shell.save(shellTempPath) os.remove(tempShellSourcePath) # load the shell shell = TTFont(shellTempPath) # grab the PASS and FAIL data hmtx = shell["hmtx"] 
glyphSet = shell.getGlyphSet() failGlyph = glyphSet["F"] failGlyph.decompile() failGlyphProgram = list(failGlyph.program) failGlyphMetrics = hmtx["F"] passGlyph = glyphSet["P"] passGlyph.decompile() passGlyphProgram = list(passGlyph.program) passGlyphMetrics = hmtx["P"] # grab some tables hmtx = shell["hmtx"] cmap = shell["cmap"] # start the glyph order existingGlyphs = [".notdef", "space", "F", "P"] glyphOrder = list(existingGlyphs) # start the CFF cff = shell["CFF "].cff globalSubrs = cff.GlobalSubrs topDict = cff.topDictIndex[0] topDict.charset = existingGlyphs private = topDict.Private charStrings = topDict.CharStrings charStringsIndex = charStrings.charStringsIndex features = sorted(mapping) # build the outline, hmtx and cmap data cp = baseCodepoint for index, tag in enumerate(features): # tag.pass glyphName = "%s.pass" % tag glyphOrder.append(glyphName) addGlyphToCFF( glyphName=glyphName, program=passGlyphProgram, private=private, globalSubrs=globalSubrs, charStringsIndex=charStringsIndex, topDict=topDict, charStrings=charStrings ) hmtx[glyphName] = passGlyphMetrics for table in cmap.tables: if table.format == 4: table.cmap[cp] = glyphName else: raise NotImplementedError("Unsupported cmap table format: %d" % table.format) cp += 1 # tag.fail glyphName = "%s.fail" % tag glyphOrder.append(glyphName) addGlyphToCFF( glyphName=glyphName, program=failGlyphProgram, private=private, globalSubrs=globalSubrs, charStringsIndex=charStringsIndex, topDict=topDict, charStrings=charStrings ) hmtx[glyphName] = failGlyphMetrics for table in cmap.tables: if table.format == 4: table.cmap[cp] = glyphName else: raise NotImplementedError("Unsupported cmap table format: %d" % table.format) # bump this up so that the sequence is the same as the lookup 3 font cp += 3 # set the glyph order shell.setGlyphOrder(glyphOrder) # start the GSUB shell["GSUB"] = newTable("GSUB") gsub = shell["GSUB"].table = GSUB() gsub.Version = 1.0 # make a list of all the features we will make featureCount = 
len(features) # set up the script list scriptList = gsub.ScriptList = ScriptList() scriptList.ScriptCount = 1 scriptList.ScriptRecord = [] scriptRecord = ScriptRecord() scriptList.ScriptRecord.append(scriptRecord) scriptRecord.ScriptTag = "DFLT" script = scriptRecord.Script = Script() defaultLangSys = script.DefaultLangSys = DefaultLangSys() defaultLangSys.FeatureCount = featureCount defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount) defaultLangSys.ReqFeatureIndex = 65535 defaultLangSys.LookupOrder = None script.LangSysCount = 0 script.LangSysRecord = [] # set up the feature list featureList = gsub.FeatureList = FeatureList() featureList.FeatureCount = featureCount featureList.FeatureRecord = [] for index, tag in enumerate(features): # feature record featureRecord = FeatureRecord() featureRecord.FeatureTag = tag feature = featureRecord.Feature = Feature() featureList.FeatureRecord.append(featureRecord) # feature feature.FeatureParams = None feature.LookupCount = 1 feature.LookupListIndex = [index] # write the lookups lookupList = gsub.LookupList = LookupList() lookupList.LookupCount = featureCount lookupList.Lookup = [] for tag in features: # lookup lookup = Lookup() lookup.LookupType = 1 lookup.LookupFlag = 0 lookup.SubTableCount = 1 lookup.SubTable = [] lookupList.Lookup.append(lookup) # subtable subtable = SingleSubst() subtable.Format = 2 subtable.LookupType = 1 subtable.mapping = { "%s.pass" % tag : "%s.fail" % tag, "%s.fail" % tag : "%s.pass" % tag, } lookup.SubTable.append(subtable) path = outputPath % 1 + ".otf" if os.path.exists(path): os.remove(path) shell.save(path) # get rid of the shell if os.path.exists(shellTempPath): os.remove(shellTempPath) def makeLookup3(): # make a variation of the shell TTX data f = open(shellSourcePath) ttxData = f.read() f.close() ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3") tempShellSourcePath = shellSourcePath + ".temp" f = open(tempShellSourcePath, "wb") f.write(ttxData) f.close() # compile 
the shell shell = TTFont(sfntVersion="OTTO") shell.importXML(tempShellSourcePath) shell.save(shellTempPath) os.remove(tempShellSourcePath) # load the shell shell = TTFont(shellTempPath) # grab the PASS and FAIL data hmtx = shell["hmtx"] glyphSet = shell.getGlyphSet() failGlyph = glyphSet["F"] failGlyph.decompile() failGlyphProgram = list(failGlyph.program) failGlyphMetrics = hmtx["F"] passGlyph = glyphSet["P"] passGlyph.decompile() passGlyphProgram = list(passGlyph.program) passGlyphMetrics = hmtx["P"] # grab some tables hmtx = shell["hmtx"] cmap = shell["cmap"] # start the glyph order existingGlyphs = [".notdef", "space", "F", "P"] glyphOrder = list(existingGlyphs) # start the CFF cff = shell["CFF "].cff globalSubrs = cff.GlobalSubrs topDict = cff.topDictIndex[0] topDict.charset = existingGlyphs private = topDict.Private charStrings = topDict.CharStrings charStringsIndex = charStrings.charStringsIndex features = sorted(mapping) # build the outline, hmtx and cmap data cp = baseCodepoint for index, tag in enumerate(features): # tag.pass glyphName = "%s.pass" % tag glyphOrder.append(glyphName) addGlyphToCFF( glyphName=glyphName, program=passGlyphProgram, private=private, globalSubrs=globalSubrs, charStringsIndex=charStringsIndex, topDict=topDict, charStrings=charStrings ) hmtx[glyphName] = passGlyphMetrics # tag.fail glyphName = "%s.fail" % tag glyphOrder.append(glyphName) addGlyphToCFF( glyphName=glyphName, program=failGlyphProgram, private=private, globalSubrs=globalSubrs, charStringsIndex=charStringsIndex, topDict=topDict, charStrings=charStrings ) hmtx[glyphName] = failGlyphMetrics # tag.default glyphName = "%s.default" % tag glyphOrder.append(glyphName) addGlyphToCFF( glyphName=glyphName, program=passGlyphProgram, private=private, globalSubrs=globalSubrs, charStringsIndex=charStringsIndex, topDict=topDict, charStrings=charStrings ) hmtx[glyphName] = passGlyphMetrics for table in cmap.tables: if table.format == 4: table.cmap[cp] = glyphName else: raise 
NotImplementedError("Unsupported cmap table format: %d" % table.format) cp += 1 # tag.alt1,2,3 for i in range(1,4): glyphName = "%s.alt%d" % (tag, i) glyphOrder.append(glyphName) addGlyphToCFF( glyphName=glyphName, program=failGlyphProgram, private=private, globalSubrs=globalSubrs, charStringsIndex=charStringsIndex, topDict=topDict, charStrings=charStrings ) hmtx[glyphName] = failGlyphMetrics for table in cmap.tables: if table.format == 4: table.cmap[cp] = glyphName else: raise NotImplementedError("Unsupported cmap table format: %d" % table.format) cp += 1 # set the glyph order shell.setGlyphOrder(glyphOrder) # start the GSUB shell["GSUB"] = newTable("GSUB") gsub = shell["GSUB"].table = GSUB() gsub.Version = 1.0 # make a list of all the features we will make featureCount = len(features) # set up the script list scriptList = gsub.ScriptList = ScriptList() scriptList.ScriptCount = 1 scriptList.ScriptRecord = [] scriptRecord = ScriptRecord() scriptList.ScriptRecord.append(scriptRecord) scriptRecord.ScriptTag = "DFLT" script = scriptRecord.Script = Script() defaultLangSys = script.DefaultLangSys = DefaultLangSys() defaultLangSys.FeatureCount = featureCount defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount) defaultLangSys.ReqFeatureIndex = 65535 defaultLangSys.LookupOrder = None script.LangSysCount = 0 script.LangSysRecord = [] # set up the feature list featureList = gsub.FeatureList = FeatureList() featureList.FeatureCount = featureCount featureList.FeatureRecord = [] for index, tag in enumerate(features): # feature record featureRecord = FeatureRecord() featureRecord.FeatureTag = tag feature = featureRecord.Feature = Feature() featureList.FeatureRecord.append(featureRecord) # feature feature.FeatureParams = None feature.LookupCount = 1 feature.LookupListIndex = [index] # write the lookups lookupList = gsub.LookupList = LookupList() lookupList.LookupCount = featureCount lookupList.Lookup = [] for tag in features: # lookup lookup = Lookup() 
lookup.LookupType = 3 lookup.LookupFlag = 0 lookup.SubTableCount = 1 lookup.SubTable = [] lookupList.Lookup.append(lookup) # subtable subtable = AlternateSubst() subtable.Format = 1 subtable.LookupType = 3 subtable.alternates = { "%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag], "%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag], "%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag], "%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag] } lookup.SubTable.append(subtable) path = outputPath % 3 + ".otf" if os.path.exists(path): os.remove(path) shell.save(path) # get rid of the shell if os.path.exists(shellTempPath): os.remove(shellTempPath) def makeJavascriptData(): features = sorted(mapping) outStr = [] outStr.append("") outStr.append("/* This file is autogenerated by makegsubfonts.py */") outStr.append("") outStr.append("/* ") outStr.append(" Features defined in gsubtest fonts with associated base") outStr.append(" codepoints for each feature:") outStr.append("") outStr.append(" cp = codepoint for feature featX") outStr.append("") outStr.append(" cp default PASS") outStr.append(" cp featX=1 FAIL") outStr.append(" cp featX=2 FAIL") outStr.append("") outStr.append(" cp+1 default FAIL") outStr.append(" cp+1 featX=1 PASS") outStr.append(" cp+1 featX=2 FAIL") outStr.append("") outStr.append(" cp+2 default FAIL") outStr.append(" cp+2 featX=1 FAIL") outStr.append(" cp+2 featX=2 PASS") outStr.append("") outStr.append("*/") outStr.append("") outStr.append("var gFeatures = {"); cp = baseCodepoint taglist = [] for tag in features: taglist.append("\"%s\": 0x%x" % (tag, cp)) cp += 4 outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" ")) outStr.append("};"); outStr.append(""); if os.path.exists(javascriptData): os.remove(javascriptData) f = open(javascriptData, "wb") f.write("\n".join(outStr)) f.close() # build fonts print("Making lookup type 1 font...") makeLookup1() 
print("Making lookup type 3 font...") makeLookup3() # output javascript data print("Making javascript data file...") makeJavascriptData()
djgagne/scikit-learn
refs/heads/master
sklearn/metrics/cluster/__init__.py
312
""" The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for cluster analysis results. There are two forms of evaluation: - supervised, which uses a ground truth class values for each sample. - unsupervised, which does not and measures the 'quality' of the model itself. """ from .supervised import adjusted_mutual_info_score from .supervised import normalized_mutual_info_score from .supervised import adjusted_rand_score from .supervised import completeness_score from .supervised import contingency_matrix from .supervised import expected_mutual_information from .supervised import homogeneity_completeness_v_measure from .supervised import homogeneity_score from .supervised import mutual_info_score from .supervised import v_measure_score from .supervised import entropy from .unsupervised import silhouette_samples from .unsupervised import silhouette_score from .bicluster import consensus_score __all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score", "adjusted_rand_score", "completeness_score", "contingency_matrix", "expected_mutual_information", "homogeneity_completeness_v_measure", "homogeneity_score", "mutual_info_score", "v_measure_score", "entropy", "silhouette_samples", "silhouette_score", "consensus_score"]
gadomski/cpd
refs/heads/master
vendor/googletest-release-1.10.0/googletest/test/gtest_json_test_utils.py
134
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit test utilities for gtest_json_output."""

import re


def normalize(obj):
    """Recursively normalize a Google Test JSON output object.

    Replaces run-to-run transient values (durations, timestamps, file
    paths and line numbers in failure messages, stack traces) with '*'
    so that two runs of the same test suite compare equal.

    Args:
      obj: Google Test's JSON output object (dict, list, or scalar).

    Returns:
      The normalized object, with no references to transient information.
    """
    def _scrub_field(field, value):
        # Keys with known transient content are scrubbed; everything
        # else is normalized recursively.
        if field == 'time':
            return re.sub(r'^\d+(\.\d+)?s$', '*', value)
        if field == 'timestamp':
            return re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ$', '*', value)
        if field == 'failure':
            # Drop the directory part and line number of the failure
            # location, then collapse the stack trace.
            scrubbed = re.sub(r'^.*[/\\](.*:)\d+\n', '\\1*\n', value)
            return re.sub(r'Stack trace:\n(.|\n)*', 'Stack trace:\n*', scrubbed)
        return normalize(value)

    if isinstance(obj, dict):
        result = {}
        for field, value in obj.items():
            result[field] = _scrub_field(field, value)
        return result
    if isinstance(obj, list):
        return [normalize(element) for element in obj]
    return obj
suncycheng/intellij-community
refs/heads/master
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_0/_pkg1_0_0/_pkg1_0_0_1/_pkg1_0_0_1_0/_mod1_0_0_1_0_0.py
30
name1_0_0_1_0_0_0 = None name1_0_0_1_0_0_1 = None name1_0_0_1_0_0_2 = None name1_0_0_1_0_0_3 = None name1_0_0_1_0_0_4 = None
orezpraw/unnaturalcode
refs/heads/master
unnaturalcode/testdata/makehuman/makehuman/shared/progress.py
2
#!/usr/bin/python2.7 # -*- coding: utf-8 -*- """ Definition of Progress class. **Project Name:** MakeHuman **Product Home Page:** http://www.makehuman.org/ **Code Home Page:** https://bitbucket.org/MakeHuman/makehuman/ **Authors:** Thanasis Papoutsidakis **Copyright(c):** MakeHuman Team 2001-2014 **Licensing:** AGPL3 (http://www.makehuman.org/doc/node/the_makehuman_application.html) This file is part of MakeHuman (www.makehuman.org). This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. **Coding Standards:** See http://www.makehuman.org/node/165 Abstract -------- The Progress module defines the Progress class, which provides an easy interface for handling MH's progress bar. It automatically processes porgress updates in subroutines, so passing progress callbacks as function parameters is needless. *-- Usage --* from progress import Progress # Standard usage. def foo(): progress = Progress() ... # do stuff # progress(0.7) ... # more stuff # progress(1.0) # Usage in steps. def bar(): progress = Progress(42) ... # step 1 # progress.step() ... # step 2 # progress.step() .... .... ... # step 42 # progress.step() # Usage in loops. def baz(items): progress = Progress(len(items)) for item in items: loopprog = Progress() ... # do stuff # loopprog(0.3) ... # more stuff # loopprog(0.6) ... # even more stuff # progress.step() # All together!!! 
def FooBarBaz(): progress = Progress.begin() progress(0, 0.3, "Getting some foo") somefoo = foo() progress(0.3, 0.7, None) prog2 = Progress() (0, 0.5, "Getting a bar") bar1 = bar() prog2(0.5, 1, "Getting another bar") bar2 = bar() progress(0.7, 0.99, "Bazzing them all together") bazzable = [somefoo, bar1, bar2] baz(bazzable) progress(1.0, None, "Foobar bazzed.") ----- - Weighted steps Progress constructor can accept an iterable as the steps parameter. In that case, the weighted step mode is activated, and steps with greater weight in the iterable affect larger area of the progress bar. Example: progress = Progress([7, 3, 6, 6]) - Logging With the logging=True option, Progress will log.debug its progress and description for every change in its progress. (This does not include sub-Progresses that have logging disabled.) A number of dashes is added at the beginning of the log message representing the level of nesting of the current procedure, to help distinguish between the messages logged from the parent and the child Progress. If messaging is enabled too, on a description change, Progress will log.debug only its progress, and will let messaging to log.message the description afterwards. - Timing If logging is enabled, with the timing=True option, Progress will measure and log.debug the time each step took to complete, as well as the total time needed for the whole procedure. - Messaging With the messaging=True option, Progress will log.message its description every time it changes. 
""" global current_Progress_ current_Progress_ = None class Progress(object): class LoggingRequest(object): def __init__(self, text, *args): self.text = text self.args = args self.level = 0 self.withLogger('debug') def withLogger(self, loggerstr): import log self.logger = getattr(log, loggerstr) return self def propagate(self): self.level += 1 def execute(self): text = self.level * '-' + self.text self.logger(text, *self.args) def __init__(self, steps=0, progressCallback=True, logging=False, timing=False, messaging=False): global current_Progress_ self.progress = 0.0 self.nextprog = None self.steps = steps self.stepsdone = 0 self._description = None self.args = [] self.description_changed = False # Weighted steps feature if hasattr(self.steps, '__iter__'): ssum = float(sum(self.steps)) self.stepweights = [s / ssum for s in self.steps] self.steps = len(self.steps) else: self.stepweights = None self.time = None self.totalTime = 0.0 self.logging = logging self.timing = timing self.messaging = messaging self.logging_requests = [] # Push self in the global Progress object stack. self.parent = current_Progress_ current_Progress_ = self # If this is a master Progress, get the callback # that updates MH's progress bar. if self.parent is None: if progressCallback is True: from core import G self.progressCallback = G.app.progress else: # Bypass importing if the user provided us # with a custom progress callback. self.progressCallback = progressCallback # To completely disable updating when this is a # master Progress, pass None as progressCallback. 
def getDescription(self): return self._description def setDescription(self, desc): self._description = desc self.description_changed = True description = property(getDescription, setDescription) def stepWeight(self): '''Internal method that returns the weight of the next step.''' if self.steps == 0: if self.nextprog is None: return 0 else: return self.nextprog - self.progress elif self.stepweights is None: return 1.0 / float(self.steps) else: return self.stepweights[self.stepsdone] def update(self, prog=None, desc=None, args=[], is_childupdate=False): '''Internal method that is responsible for the actual progress bar updating.''' if prog is None: prog = self.progress if desc is None and self.description: desc = self.description args = self.args desc_str = "" if desc is None else desc if not is_childupdate: if self.timing: import time t = time.time() if self.time: deltaT = (t - self.time) self.totalTime += deltaT if self.logging: self.logging_requests.append( self.LoggingRequest(" took %.4f seconds", deltaT)) self.time = t if self.logging: if self.messaging and self.description_changed: self.logging_requests.append(self.LoggingRequest( self.getProgressString())) else: # TODO: Format desc with args self.logging_requests.append(self.LoggingRequest( "%s: %s", self.getProgressString(), desc_str)) if self.messaging and self.description_changed: self.logging_requests.append(self.LoggingRequest( desc_str, *args).withLogger('message')) self.description_changed = False self.propagateRequests() if self.parent is None: for r in self.logging_requests: r.execute() self.logging_requests = [] if self.progressCallback is not None: self.progressCallback(prog, desc_str, *args) if self.steps and self.stepsdone == self.steps or \ self.steps == 0 and self.progress >= 0.999999: # Not using 1.0 for precision safety. 
self.finish() if self.parent: self.parent.childupdate(prog, desc, args) def propagateRequests(self): '''Internal method that recursively passes the logging requests to the master Progress.''' if self.parent is not None: for r in self.logging_requests: r.propagate() self.parent.logging_requests.extend(self.logging_requests) self.logging_requests = [] self.parent.propagateRequests() def childupdate(self, prog, desc, args=[]): '''Internal method that a child Progress calls for doing a progress update by communicating with its parent.''' prog = self.progress + prog * self.stepWeight() self.update(prog, desc, args, is_childupdate=True) def getProgressString(self): if self.steps: return "Step %i/%i" % (self.stepsdone, self.steps) else: return "Progress %.2f%%" % self.progress def finish(self): '''Method to be called when a subroutine has finished, either explicitly (by the user), or implicitly (automatically when progress reaches 1.0).''' global current_Progress_ if self.parent is None and self.logging and self.timing: import log log.debug("Total time taken: %s seconds.", self.totalTime) current_Progress_ = self.parent def __call__(self, progress, end=None, desc=False, *args): '''Basic method for progress updating. It overloads the () operator of the constructed object. Pass None to desc to disable the description; the parent will update it instead in that case.''' global current_Progress_ current_Progress_ = self if not (desc is False): self.description = desc self.args = args self.progress = progress self.nextprog = end self.update() return self def step(self, desc=False, *args): '''Method useful for smaller tasks that take a number of roughly equivalent steps to complete. 
You can use this in a non-stepped Progress to just update the description on the status bar.''' global current_Progress_ current_Progress_ = self if not (desc is False): self.description = desc self.args = args if self.steps: self.progress += self.stepWeight() self.stepsdone += 1 if self.stepsdone == self.steps: self.progress = 1.0 self.update() return self def firststep(self, desc=False, *args): '''Method to be called from Progress routines that work in discrete steps, to update the initial description and initialize the timing.''' global current_Progress_ current_Progress_ = self if not (desc is False): self.description = desc self.args = args self.update() return self @classmethod def begin(cls, *args, **kwargs): '''Class method for directly creating a master Progress object. Resets all progress to zero. Use this for starting a greater MH task. The arguments are forwarded to the Progress constructor. ''' global current_Progress_ current_Progress_ = None return cls(*args, **kwargs) ## Specialized methods follow ## def HighFrequency(self, interval): '''Method that prepares the Progress object to run in a hispeed loop with high number of repetitions, which needs to progress the bar while looping without adding callback overhead. WARNING: ALWAYS test the overhead. Don't use this in extremely fast loops or it might slow them down.''' # Loop number interval between progress updates. self.HFI = interval # Replace original step method with the high frequency step. self.dostep = self.step self.step = self.HFstep return self def HFstep(self): '''Replacement method to be called in a hispeed loop instead of step(). It is replaced internally on HighFrequency() (call step() to use it).''' if self.stepsdone % self.HFI > 0 and self.stepsdone < self.steps - 1: self.stepsdone += 1 else: self.dostep()
j-greffe/mbed-os
refs/heads/master
tools/export/e2studio/__init__.py
23
""" mbed SDK Copyright (c) 2011-2016 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from os.path import splitext, basename from tools.export.exporters import Exporter class E2Studio(Exporter): NAME = 'e2 studio' TOOLCHAIN = 'GCC_ARM' TARGETS = [ 'RZ_A1H', ] def generate(self): libraries = [] for lib in self.resources.libraries: l, _ = splitext(basename(lib)) libraries.append(l[3:]) ctx = { 'name': self.project_name, 'include_paths': self.resources.inc_dirs, 'linker_script': self.resources.linker_script, 'object_files': self.resources.objects, 'libraries': libraries, 'symbols': self.toolchain.get_symbols() } self.gen_file('e2studio/%s_project.tmpl' % self.target.lower(), ctx, '.project') self.gen_file('e2studio/%s_cproject.tmpl' % self.target.lower(), ctx, '.cproject') self.gen_file('e2studio/%s_gdbinit.tmpl' % self.target.lower(), ctx, '.gdbinit') self.gen_file('e2studio/launch.tmpl', ctx, '%s OpenOCD.launch' % self.project_name)
david4096/ga4gh-server
refs/heads/master
ga4gh/server/cli/configtest.py
4
"""
configtest cli
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import logging
import unittest

import ga4gh.server.cli as cli
import ga4gh.server.configtest as configtest

import ga4gh.common.cli as common_cli


class SimplerResult(unittest.TestResult):
    """
    The TestResult class gives formatted tracebacks as error messages,
    which is not what we want. Instead we just want the error message
    from the err param. Hence this subclass.
    """
    def addError(self, test, err):
        # err is an (exc_type, exc_value, traceback) triple; keep only
        # the exception class name and message.
        self.errors.append(
            (test, "{0}: {1}".format(err[0].__name__, err[1])))

    def addFailure(self, test, err):
        # Same compact formatting as addError, for assertion failures.
        self.failures.append(
            (test, "{0}: {1}".format(err[0].__name__, err[1])))


def configtest_main(parser=None):
    """Run the GA4GH server configuration test suite from the command line.

    Parses CLI arguments (or uses the given argparse parser), points the
    configtest.TestConfig class at the requested configuration, runs the
    tests with a SimplerResult collector and logs a summary plus each
    error/failure/skip.
    """
    if parser is None:
        parser = common_cli.createArgumentParser(
            "GA4GH server configuration validator")
    parser.add_argument(
        "--config", "-c", default='DevelopmentConfig', type=str,
        help="The configuration to use")
    parser.add_argument(
        "--config-file", "-f", type=str, default=None,
        help="The configuration file to use")
    cli.addVersionArgument(parser)

    args = parser.parse_args()
    # The tests read their configuration from class attributes rather than
    # arguments, so configure TestConfig before loading the suite.
    configStr = 'ga4gh.serverconfig:{0}'.format(args.config)
    configtest.TestConfig.configStr = configStr
    configtest.TestConfig.configFile = args.config_file
    configtest.TestConfig.configEnv = "GA4GH_CONFIGURATION"
    loader = unittest.TestLoader()
    tests = loader.loadTestsFromModule(configtest)
    results = SimplerResult()
    tests.run(results)

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)
    log.info('{0} Tests run. {1} errors, {2} failures, {3} skipped'.
             format(results.testsRun,
                    len(results.errors),
                    len(results.failures),
                    len(results.skipped)))
    for result in results.errors:
        if result is not None:
            log.critical('Error: {0}: {1}'.format(result[0].id(), result[1]))
    for result in results.failures:
        if result is not None:
            log.critical('Failure: {0}: {1}'.format(result[0].id(), result[1]))
    for result in results.skipped:
        if result is not None:
            log.info('Skipped: {0}: {1}'.format(result[0].id(), result[1]))
vincent99/cattle
refs/heads/master
tests/integration-v1/cattletest/core/test_healthcheck.py
1
from common import * # NOQA from cattle import ApiError import yaml def _get_agent_for_container(context, container): agent = None for map in container.hosts()[0].instanceHostMaps(): try: c = map.instance() except Exception: continue if c.agentId is not None: agent = c.agent() break if agent is None: client = context.client env = client.create_environment(name='env-' + random_str()) svc = client.create_service(name='agentglobal' + random_str(), launchConfig={ 'imageUuid': context.image_uuid, "labels": { 'io.rancher.scheduler.global': 'true', 'io.rancher.container.create_agent': 'true' } }, stackId=env.id) svc = wait_state(client, svc, 'inactive') client.wait_success(svc.activate()) for map in container.hosts()[0].instanceHostMaps(): try: c = map.instance() except Exception: continue if c.agentId is not None: agent = c.agent() break assert agent is not None return agent def _get_agent_client(agent): creds = agent.account().credentials() api_key = [x for x in creds if x.kind == 'agentApiKey'][0] assert len(api_key) return api_client(api_key.publicValue, api_key.secretValue) def test_upgrade_with_health(client, context, super_client): env = client.create_environment(name='env-' + random_str()) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} svc = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config, scale=1) svc = client.wait_success(svc) assert svc.state == "inactive" env.activateservices() svc = client.wait_success(svc, 120) assert svc.state == "active" # upgrade the service and new_launch_config = {"imageUuid": image_uuid, 'healthCheck': { 'port': 80, }} strategy = {"launchConfig": new_launch_config, "intervalMillis": 100} svc.upgrade_action(inServiceStrategy=strategy) def wait_for_map_count(service): m = super_client. \ list_serviceExposeMap(serviceId=service.id, state='active') return len(m) == 2 def wait_for_managed_map_count(service): m = super_client. 
\ list_serviceExposeMap(serviceId=service.id, state='active', upgrade=False) return len(m) == 1 wait_for_condition(client, svc, wait_for_map_count) wait_for_condition(client, svc, wait_for_managed_map_count) m = super_client. \ list_serviceExposeMap(serviceId=svc.id, state='active', upgrade=False) c = super_client.reload(m[0].instance()) wait_for(lambda: super_client.reload(c).state == 'running') c = super_client.reload(m[0].instance()) hci = find_one(c.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c) assert hcihm.healthState == 'initializing' assert c.healthState == 'initializing' # shouldn't become upgraded at this point try: wait_for(lambda: super_client.reload(svc).state == 'upgraded', timeout=10) except Exception: pass _update_healthy(agent, hcihm, c, super_client) wait_for(lambda: super_client.reload(svc).healthState == 'healthy') wait_for(lambda: super_client.reload(svc).state == 'upgraded') super_client.reload(svc).remove() def test_upgrade_start_first_with_health(client, context, super_client): env = client.create_environment(name='env-' + random_str()) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} svc = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config, scale=1) svc = client.wait_success(svc) assert svc.state == "inactive" env.activateservices() svc = client.wait_success(svc, 120) assert svc.state == "active" # upgrade the service and # check that c3 and c2 got the same ip new_launch_config = {"imageUuid": image_uuid, 'healthCheck': { 'port': 80, }} strategy = {"launchConfig": new_launch_config, "intervalMillis": 100, "startFirst": True} svc.upgrade_action(inServiceStrategy=strategy) def wait_for_map_count(service): m = super_client. \ list_serviceExposeMap(serviceId=service.id, state='active') return len(m) == 2 def wait_for_managed_map_count(service): m = super_client. 
\ list_serviceExposeMap(serviceId=service.id, state='active', upgrade=False) return len(m) == 1 wait_for_condition(client, svc, wait_for_map_count) wait_for_condition(client, svc, wait_for_managed_map_count) m = super_client. \ list_serviceExposeMap(serviceId=svc.id, state='active', upgrade=False) c = super_client.reload(m[0].instance()) wait_for(lambda: super_client.reload(c).state == 'running') c = super_client.reload(m[0].instance()) hci = find_one(c.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c) assert hcihm.healthState == 'initializing' assert c.healthState == 'initializing' _update_healthy(agent, hcihm, c, super_client) wait_for(lambda: super_client.reload(svc).healthState == 'healthy') wait_for(lambda: super_client.reload(svc).state == 'upgraded') super_client.reload(svc).remove() def test_health_check_create_instance(super_client, context): c = context.create_container(healthCheck={ 'port': 80, }) assert c.healthCheck.port == 80 c = super_client.reload(c) hci = find_one(c.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c) assert hcihm.healthState == 'initializing' ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) se = super_client.wait_success(se) assert se.state == 'created' assert se.accountId == c.accountId assert se.instanceId == c.id assert se.healthcheckInstanceId == hci.id hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' assert hcihm.externalTimestamp == ts wait_for(lambda: super_client.reload(c).healthState == 'healthy') def _create_svc_w_healthcheck(client, context): env = client.create_environment(name='env-' + random_str()) return _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80, 
} }, stackId=env.id) def test_health_check_create_service(super_client, context, client): service = _create_svc_w_healthcheck(client, context) maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c = super_client.reload(expose_map.instance()) hci = find_one(c.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c) assert hcihm.healthState == 'initializing' assert c.healthState == 'initializing' ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='Something Bad', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'initializing' assert c.healthState == 'initializing' ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c).healthState == 'healthy') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c).healthState == 'healthy') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='INIT', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c).healthState == 'healthy') # restart the instance c = super_client.wait_success(c.restart()) wait_for(lambda: 
super_client.reload(c).state == 'running') wait_for(lambda: super_client.reload(c).healthState == 'reinitializing') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='Something bad', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c).healthState == 'reinitializing') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c).healthState == 'healthy') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='INIT', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c).healthState == 'healthy') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c).healthState == 'healthy') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='Something Bad', healthcheckUuid=hcihm.uuid) se = super_client.wait_success(se) assert se.state == 'created' assert se.accountId == c.accountId assert se.instanceId == c.id assert se.healthcheckInstanceId == hci.id hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 
'unhealthy' assert hcihm.externalTimestamp == ts wait_for(lambda: super_client.reload(c).healthState == 'unhealthy') wait_for(lambda: len(service.serviceExposeMaps()) == 1) remove_service(service) def test_health_check_ip_retain(super_client, context, client): env = client.create_environment(name='env-' + random_str()) service = _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80, } }, stackId=env.id, retainIp=True) maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c1 = super_client.reload(expose_map.instance()) ip1 = c1.primaryIpAddress hci = find_one(c1.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c1) assert hcihm.healthState == 'initializing' assert c1.healthState == 'initializing' ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c1).healthState == 'healthy') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='Something Bad', healthcheckUuid=hcihm.uuid) se = super_client.wait_success(se) assert se.state == 'created' assert se.accountId == c1.accountId assert se.instanceId == c1.id assert se.healthcheckInstanceId == hci.id hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'unhealthy' assert hcihm.externalTimestamp == ts wait_for(lambda: super_client.reload(c1).healthState == 'unhealthy') def check(): s = super_client.reload(service) return s.instanceIds is not None and len(s.instanceIds) == 1 and \ c1.id not in s.instanceIds wait_for(check) for e_map in service.serviceExposeMaps(): if e_map.instance().id == 
c1.id: continue c2 = wait_state(super_client, e_map.instance(), 'running') assert c2.name == c1.name assert c2.primaryIpAddress == ip1 break remove_service(service) def test_health_state_stack(super_client, context, client): c = client env = client.create_environment(name='env-' + random_str()) service = _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80, } }, stackId=env.id) i = 'initializing' wait_for(lambda: super_client.reload(service).healthState == i) wait_for(lambda: client.reload(env).healthState == i) maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c1 = super_client.reload(expose_map.instance()) hci = find_one(c1.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c1) assert hcihm.healthState == 'initializing' assert c1.healthState == 'initializing' ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c1).healthState == 'healthy') wait_for(lambda: super_client.reload(service).healthState == 'healthy') wait_for(lambda: c.reload(env).healthState == 'healthy') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='Something Bad', healthcheckUuid=hcihm.uuid) se = super_client.wait_success(se) assert se.state == 'created' assert se.accountId == c1.accountId assert se.instanceId == c1.id assert se.healthcheckInstanceId == hci.id hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'unhealthy' assert hcihm.externalTimestamp == ts wait_for(lambda: super_client.reload(c1).healthState == 'unhealthy') def 
test_health_state_start_once(super_client, context, client): c = client env = client.create_environment(name='env-' + random_str()) labels = {"io.rancher.container.start_once": "true"} svc = _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80, }, 'labels': labels }, stackId=env.id) wait_for(lambda: super_client.reload(svc).healthState == 'initializing') wait_for(lambda: c.reload(env).healthState == 'initializing') maps = _wait_until_active_map_count(svc, 1, client) expose_map = maps[0] c1 = super_client.reload(expose_map.instance()) hci = find_one(c1.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c1) assert hcihm.healthState == 'initializing' assert c1.healthState == 'initializing' ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c1).healthState == 'healthy') wait_for(lambda: super_client.reload(svc).healthState == 'healthy') wait_for(lambda: c.reload(env).healthState == 'healthy') super_client.wait_success(c1.stop()) wait_for(lambda: super_client.reload(svc).healthState == 'started-once') wait_for(lambda: c.reload(env).healthState == 'started-once') remove_service(svc) def test_health_state_sidekick_start_once(super_client, context, client): env = client.create_environment(name='env-' + random_str()) labels = {"io.rancher.container.start_once": "true"} slc = {'imageUuid': context.image_uuid, 'name': "test1"} svc = _create_and_activate_svc(client, 2, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'labels': labels }, stackId=env.id, secondaryLaunchConfigs=[slc]) wait_for(lambda: super_client.reload(svc).healthState == 'healthy') 
wait_for(lambda: client.reload(env).healthState == 'healthy') remove_service(svc) def test_health_state_selectors(context, client): env = client.create_environment(name='env-' + random_str()) labels = {'foo': "bar"} container1 = client.create_container(imageUuid=context.image_uuid, startOnCreate=True, labels=labels) container1 = client.wait_success(container1) assert container1.state == "running" launch_config = {"imageUuid": "rancher/none"} service = _create_and_activate_svc(client, 1, name=random_str(), stackId=env.id, launchConfig=launch_config, selectorContainer="foo=bar") service = client.wait_success(service) assert service.selectorContainer == "foo=bar" wait_for(lambda: client.reload(service).healthState == 'healthy') wait_for(lambda: client.reload(env).healthState == 'healthy') remove_service(service) def test_svc_health_state(context, client): env = client.create_environment(name='env-' + random_str()) launch_config = {"imageUuid": context.image_uuid} service = _create_and_activate_svc(client, 1, name=random_str(), stackId=env.id, launchConfig=launch_config) wait_for(lambda: client.reload(service).healthState == 'healthy') wait_for(lambda: client.reload(env).healthState == 'healthy') remove_service(service) def test_health_check_init_timeout(super_client, context, client): env = client.create_environment(name='env-' + random_str()) service = _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80, 'initializingTimeout': 2000, } }, stackId=env.id) h_c = service.launchConfig.healthCheck assert h_c.initializingTimeout == 2000 maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c = super_client.reload(expose_map.instance()) hci = find_one(c.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) assert hcihm.healthState == 'initializing' assert c.healthState == 'initializing' # wait for the instance to be removed wait_for_condition(client, c, lambda x: 
x.removed is None) remove_service(service) def test_health_check_reinit_timeout(super_client, context, client): env = client.create_environment(name='env-' + random_str()) service = _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80, 'reinitializingTimeout': 1, } }, stackId=env.id) h_c = service.launchConfig.healthCheck assert h_c.reinitializingTimeout == 1 maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c = super_client.reload(expose_map.instance()) hci = find_one(c.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c) assert hcihm.healthState == 'initializing' assert c.healthState == 'initializing' ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c).healthState == 'healthy') # restart the instance c = super_client.wait_success(c.restart()) wait_for(lambda: super_client.reload(c).state == 'running') wait_for(lambda: super_client.reload(c).healthState == 'reinitializing') # wait for the instance to be removed wait_for_condition(super_client, c, lambda x: x.removed is not None) remove_service(service) def test_health_check_bad_external_timestamp(super_client, context, client): env = client.create_environment(name='env-' + random_str()) service = _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80, } }, stackId=env.id) maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] container = super_client.reload(expose_map.instance()) hci = find_one(container.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = 
_get_agent_for_container(context, container) agent_client = _get_agent_client(agent) assert hcihm.healthState == 'initializing' with pytest.raises(ApiError) as e: agent_client.create_service_event(reportedHealth='Something Bad', healthcheckUuid=hcihm.uuid) assert e.value.error.code == 'MissingRequired' assert e.value.error.fieldName == 'externalTimestamp' remove_service(service) def test_health_check_noop(super_client, context, client): env = client.create_environment(name='env-' + random_str()) svc = _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80, 'strategy': 'none' } }, stackId=env.id) assert svc.launchConfig.healthCheck.strategy == 'none' maps = _wait_until_active_map_count(svc, 1, client) expose_map = maps[0] c = super_client.reload(expose_map.instance()) hci = find_one(c.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c) assert hcihm.healthState == 'initializing' assert c.healthState == 'initializing' hcihm = _update_healthy(agent, hcihm, c, super_client) svc = wait_state(client, svc, 'active') _update_unhealthy(agent, hcihm, c, super_client) svc = wait_state(client, svc, 'active') assert len(svc.serviceExposeMaps()) == 1 c = super_client.wait_success(c) assert c.state == 'running' remove_service(svc) def remove_service(svc): for i in range(1, 3): try: svc.remove() return except Exception: pass time.sleep(1) def test_health_check_default(super_client, context, client): env = client.create_environment(name='env-' + random_str()) svc = _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80 } }, stackId=env.id) expose_maps = svc.serviceExposeMaps() c1 = super_client.reload(expose_maps[0].instance()) hci = find_one(c1.healthcheckInstances) hcihm = find_one(hci.healthcheckInstanceHostMaps) agent = _get_agent_for_container(context, c1) assert 
hcihm.healthState == 'initializing' assert c1.healthState == 'initializing' hcihm = _update_healthy(agent, hcihm, c1, super_client) # update unheatlhy, the container should be removed _update_unhealthy(agent, hcihm, c1, super_client) c1 = super_client.wait_success(c1) wait_for_condition(client, c1, lambda x: x.removed is not None) wait_for(lambda: len(svc.serviceExposeMaps()) == 1) remove_service(svc) def test_health_check_bad_agent(super_client, context, client): # Create another host to get the agent from that host host2 = super_client.reload(register_simulated_host(context)) # register one more host to ensure # there is at least one more host # to schedule healtcheck on register_simulated_host(context) env = client.create_environment(name='env-' + random_str()) s = _create_and_activate_svc(client, 'global', name='test', launchConfig={ 'imageUuid': context.image_uuid, 'healthCheck': { 'port': 80, }, "labels": { 'io.rancher.scheduler.global': 'true' } }, stackId=env.id) maps = _wait_until_active_map_count(s, 3, client) expose_map = maps[0] container = super_client.reload(expose_map.instance()) hci = find_one(container.healthcheckInstances) hcihm = None wait_for(lambda: len(hci.healthcheckInstanceHostMaps()) > 1) for h in hci.healthcheckInstanceHostMaps(): if h.hostId != host2.id: hcihm = h break assert hcihm is not None agent_client = _get_agent_client(host2.agent()) assert hcihm.healthState == 'initializing' ts = int(time.time()) with pytest.raises(ApiError) as e: agent_client.create_service_event(externalTimestamp=ts, reportedHealth='Something Bad', healthcheckUuid=hcihm.uuid) assert e.value.error.code == 'CantVerifyHealthcheck' remove_service(s) def test_health_check_reconcile(super_client, new_context): super_client.reload(register_simulated_host(new_context)) register_simulated_host(new_context) client = new_context.client env = client.create_environment(name='env-' + random_str()) service = _create_and_activate_svc(client, 1, name='test', launchConfig={ 
'imageUuid': new_context.image_uuid, 'healthCheck': { 'port': 80, } }, stackId=env.id) maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c = super_client.reload(expose_map.instance()) initial_len = len(c.healthcheckInstanceHostMaps()) assert initial_len == 2 for h in c.healthcheckInstanceHostMaps(): assert h.healthState == c.healthState hcihm1 = c.healthcheckInstanceHostMaps()[0] hosts = super_client.list_host(uuid=hcihm1.host().uuid) assert len(hosts) == 1 hcihm2 = c.healthcheckInstanceHostMaps()[1] hosts = super_client.list_host(uuid=hcihm2.host().uuid) assert len(hosts) == 1 host2 = hosts[0] agent = _get_agent_for_container(new_context, c) assert hcihm1.healthState == 'initializing' assert hcihm2.healthState == 'initializing' assert c.healthState == 'initializing' ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm1.uuid) super_client.wait_success(se) hcihm1 = super_client.wait_success(super_client.reload(hcihm1)) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm2.uuid) super_client.wait_success(se) super_client.wait_success(super_client.reload(hcihm2)) wait_for(lambda: super_client.reload(c).healthState == 'healthy') ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='Something Bad', healthcheckUuid=hcihm1.uuid) super_client.wait_success(se) super_client.wait_success(super_client.reload(hcihm1)) # still healthy as only one host reported healthy wait_for(lambda: super_client.reload(c).healthState == 'healthy') # remove the host 1 host2 = super_client.wait_success(host2.deactivate()) host2 = super_client.wait_success(super_client.delete(host2)) assert host2.state == 'removed' # should be unhealthy as the only health state reported is unhealthy wait_for(lambda: super_client.reload(c).healthState == 'unhealthy') 
remove_service(service) def test_health_check_all_hosts_removed_reconcile(super_client, new_context): super_client.reload(register_simulated_host(new_context)) client = new_context.client env = client.create_environment(name='env-' + random_str()) service = _create_and_activate_svc(client, 1, name='test', launchConfig={ 'imageUuid': new_context.image_uuid, 'healthCheck': { 'port': 80, } }, stackId=env.id) maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c = super_client.reload(expose_map.instance()) wait_for(lambda: super_client.reload(c).state == 'running') initial_len = len(c.healthcheckInstanceHostMaps()) assert initial_len == 1 for h in c.healthcheckInstanceHostMaps(): assert h.healthState == c.healthState hcihm1 = c.healthcheckInstanceHostMaps()[0] hosts = super_client.list_host(uuid=hcihm1.host().uuid) assert len(hosts) == 1 host1 = hosts[0] agent = _get_agent_for_container(new_context, c) assert hcihm1.healthState == 'initializing' assert c.healthState == 'initializing' ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm1.uuid) super_client.wait_success(se) super_client.wait_success(super_client.reload(hcihm1)) wait_for(lambda: super_client.reload(c).healthState == 'healthy') # remove health checker host host1 = super_client.wait_success(host1.deactivate()) host1 = super_client.wait_success(super_client.delete(host1)) assert host1.state == 'removed' # instance should stay as healthy try: wait_for(lambda: super_client.reload(c).healthState == 'unhealthy', timeout=5) except Exception: pass wait_for(lambda: super_client.reload(c).healthState == 'healthy') remove_service(service) def test_hosts_removed_reconcile_when_init(super_client, new_context): super_client.reload(register_simulated_host(new_context)) client = new_context.client env = client.create_environment(name='env-' + random_str()) service = _create_and_activate_svc(client, 
1, name='test', launchConfig={ 'imageUuid': new_context.image_uuid, 'healthCheck': { 'port': 80, } }, stackId=env.id) maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c = super_client.reload(expose_map.instance()) initial_len = len(c.healthcheckInstanceHostMaps()) assert initial_len == 1 for h in c.healthcheckInstanceHostMaps(): assert h.healthState == c.healthState hcihm1 = c.healthcheckInstanceHostMaps()[0] hosts = super_client.list_host(uuid=hcihm1.host().uuid) assert len(hosts) == 1 host1 = hosts[0] assert hcihm1.healthState == 'initializing' assert c.healthState == 'initializing' # remove the healthchecking host host1 = super_client.wait_success(host1.deactivate()) host1 = super_client.wait_success(super_client.delete(host1)) assert host1.state == 'removed' # instance should remain as healthy as there are no reporters at this point try: wait_for(lambda: super_client.reload(c).healthState == 'unhealthy', timeout=5) except Exception: pass wait_for(lambda: super_client.reload(c).healthState == 'initializing') remove_service(service) @pytest.mark.skipif('True') def test_health_check_host_remove(super_client, new_context): client = new_context.client # create 4 hosts for healtcheck as one of them would be removed later super_client.reload(register_simulated_host(new_context)) super_client.reload(register_simulated_host(new_context)) super_client.reload(register_simulated_host(new_context)) super_client.reload(register_simulated_host(new_context)) env = client.create_environment(name='env-' + random_str()) service = client.create_service(name='test', launchConfig={ 'imageUuid': new_context.image_uuid, 'healthCheck': { 'port': 80, } }, environmentId=env.id) service = client.wait_success(client.wait_success(service).activate()) assert service.state == 'active' maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c = super_client.reload(expose_map.instance()) initial_len = len(c.healthcheckInstanceHostMaps()) assert 
initial_len == 3 for h in c.healthcheckInstanceHostMaps(): assert h.healthState == c.healthState hcihm = c.healthcheckInstanceHostMaps()[0] hosts = super_client.list_host(uuid=hcihm.host().uuid) assert len(hosts) == 1 host = hosts[0] # remove the host host = super_client.wait_success(host.deactivate()) host = super_client.wait_success(super_client.delete(host)) assert host.state == 'removed' # verify that new hostmap was created for the instance wait_for(lambda: len(c.healthcheckInstanceHostMaps()) == initial_len) hcim = None for h in c.healthcheckInstanceHostMaps(): if h.hostId == host.id: if hcihm.state == 'active': hcihm = h break assert hcim is None remove_service(service) def test_healtcheck(new_context, super_client): client = new_context.client image_uuid = new_context.image_uuid stack = client.create_environment(name='env-' + random_str()) host = register_simulated_host(new_context) client.wait_success(host) health_check = {"name": "check1", "responseTimeout": 3, "interval": 4, "healthyThreshold": 5, "unhealthyThreshold": 6, "requestLine": "index.html", "port": 200} launch_config = {"imageUuid": image_uuid, "healthCheck": health_check} service = _create_and_activate_svc(client, 1, name=random_str(), stackId=stack.id, launchConfig=launch_config) maps = _wait_until_active_map_count(service, 1, client) expose_map = maps[0] c = super_client.reload(expose_map.instance()) c_host_id = super_client.reload(c).instanceHostMaps()[0].hostId health_c = super_client. 
\ list_healthcheckInstance(accountId=service.accountId, instanceId=c.id) assert len(health_c) > 0 health_id = health_c[0].id def validate_container_host(host_maps): for host_map in host_maps: assert host_map.hostId != c_host_id host_maps = _wait_health_host_count(super_client, health_id, 3) validate_container_host(host_maps) # restart the instance # verify that its still has less than 3 healthchecks c = client.wait_success(c.stop()) c = client.wait_success(c.start()) host_maps = _wait_health_host_count(super_client, health_id, 3) validate_container_host(host_maps) # stop instance, add 3 more hosts, start instance and verify # that healthcheckers number was completed to 3, excluding # container's host c = client.wait_success(c.stop()) for i in range(0, 3): host = register_simulated_host(new_context) client.wait_success(host) host_maps = _wait_health_host_count(super_client, health_id, 3) validate_container_host(host_maps) remove_service(service) def _wait_health_host_count(super_client, health_id, count): def active_len(): match = super_client. 
\ list_healthcheckInstanceHostMap(healthcheckInstanceId=health_id, state='active') if len(match) <= count: return match return wait_for(active_len) def test_external_svc_healthcheck(client, context): env = client.create_environment(name='env-' + random_str()) # test that external service was set with healtcheck health_check = {"name": "check1", "responseTimeout": 3, "interval": 4, "healthyThreshold": 5, "unhealthyThreshold": 6, "requestLine": "index.html", "port": 200} ips = ["72.22.16.5", '192.168.0.10'] service = client.create_externalService(name=random_str(), environmentId=env.id, externalIpAddresses=ips, healthCheck=health_check) service = client.wait_success(service) assert service.healthCheck.name == "check1" assert service.healthCheck.responseTimeout == 3 assert service.healthCheck.interval == 4 assert service.healthCheck.healthyThreshold == 5 assert service.healthCheck.unhealthyThreshold == 6 assert service.healthCheck.requestLine == "index.html" assert service.healthCheck.port == 200 # test rancher-compose export compose_config = env.exportconfig() assert compose_config is not None document = yaml.load(compose_config.rancherComposeConfig) assert document['services'][service.name]['health_check'] is not None def _update_healthy(agent, hcihm, c, super_client): ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='UP', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState == 'healthy' wait_for(lambda: super_client.reload(c).healthState == 'healthy') return hcihm def _update_unhealthy(agent, hcihm, c, super_client): ts = int(time.time()) client = _get_agent_client(agent) se = client.create_service_event(externalTimestamp=ts, reportedHealth='Something bad', healthcheckUuid=hcihm.uuid) super_client.wait_success(se) hcihm = super_client.wait_success(super_client.reload(hcihm)) assert hcihm.healthState 
== 'unhealthy' wait_for(lambda: super_client.reload(c).healthState == 'unhealthy') def _wait_until_active_map_count(service, count, client): def wait_for_map_count(service): m = client. \ list_serviceExposeMap(serviceId=service.id, state='active') return len(m) == count wait_for_condition(client, service, wait_for_map_count) return client. \ list_serviceExposeMap(serviceId=service.id, state='active') def test_global_service_health(new_context): client = new_context.client host1 = new_context.host host2 = register_simulated_host(new_context) client.wait_success(host1) client.wait_success(host2) # create environment and services env = client.create_environment(name='env-' + random_str()) image_uuid = new_context.image_uuid labels = {"io.rancher.container.start_once": "true"} secondary_lc = {"imageUuid": image_uuid, "name": "secondary", "labels": labels} launch_config = { "imageUuid": image_uuid, "labels": { 'io.rancher.scheduler.global': 'true' } } svc = client.create_service(name=random_str(), environmentId=env.id, launchConfig=launch_config, secondaryLaunchConfigs=[secondary_lc]) svc = client.wait_success(svc) assert svc.state == "inactive" svc = client.wait_success(svc.activate(), 120) wait_for(lambda: client.reload(svc).healthState == 'healthy') # stop instances c1 = _validate_compose_instance_start(client, svc, env, "1", "secondary") c2 = _validate_compose_instance_start(client, svc, env, "2", "secondary") client.wait_success(c1.stop()) client.wait_success(c2.stop()) wait_for(lambda: client.reload(svc).healthState == 'healthy') def _validate_compose_instance_start(client, service, env, number, launch_config_name=None): cn = launch_config_name + "-" if \ launch_config_name is not None else "" name = env.name + "-" + service.name + "-" + cn + number def wait_for_map_count(service): instances = client. 
\ list_container(name=name, state="running") return len(instances) == 1 wait_for(lambda: wait_for_condition(client, service, wait_for_map_count), timeout=5) instances = client. \ list_container(name=name, state="running") return instances[0] def test_stack_health_state(super_client, context, client): c = client env = client.create_environment(name='env-' + random_str()) svc = client.create_service(name='test', launchConfig={ 'imageUuid': context.image_uuid }, environmentId=env.id) svc = client.wait_success(client.wait_success(svc).activate()) assert svc.state == 'active' wait_for(lambda: super_client.reload(svc).healthState == 'healthy') wait_for(lambda: c.reload(env).healthState == 'healthy') remove_service(svc) wait_for(lambda: super_client.reload(svc).state == 'removed') time.sleep(3) wait_for(lambda: c.reload(env).healthState == 'healthy') def _create_and_activate_svc(client, count, **kw): svc = client.create_service(kw) svc = client.wait_success(svc) assert svc.state == "inactive" svc.activate() # wait for service instances to become running if count == 'global': time.sleep(2) wait_for(lambda: len(client.reload(svc).instances()) >= 1) else: wait_for(lambda: len(client.reload(svc).instances()) == count) for i in client.reload(svc).instances(): wait_for(lambda: client.reload(i).state == 'running') return client.reload(svc)
yingzha/blocks-examples
refs/heads/master
parity_problem/__init__.py
6
from __future__ import print_function import random import numpy as np import theano import theano.tensor as T from fuel.streams import DataStream from fuel.datasets import IterableDataset from blocks.bricks import Linear, Logistic from blocks.bricks.recurrent import LSTM from blocks.bricks.cost import BinaryCrossEntropy from blocks.initialization import Constant, IsotropicGaussian from blocks.algorithms import GradientDescent, Adam from blocks.graph import ComputationGraph from blocks.extensions import FinishAfter, Printing, ProgressBar from blocks.extensions.monitoring import (DataStreamMonitoring, TrainingDataMonitoring) from blocks.main_loop import MainLoop def generate_data(max_seq_length, batch_size, num_batches): x = [] y = [] for i in range(num_batches): # it's important to include sequences of different length in # the training data in order the model was able to learn something seq_length = random.randint(1, max_seq_length) # each batch consists of sequences of equal length # x_batch has shape (T, B, F=1) x_batch = np.random.random_integers(0, 1, (seq_length, batch_size, 1)) # y_batch has shape (B, F=1) y_batch = x_batch.sum(axis=(0,)) % 2 x.append(x_batch.astype(theano.config.floatX)) y.append(y_batch.astype(theano.config.floatX)) return {'x': x, 'y': y} def main(max_seq_length, lstm_dim, batch_size, num_batches, num_epochs): dataset_train = IterableDataset(generate_data(max_seq_length, batch_size, num_batches)) dataset_test = IterableDataset(generate_data(max_seq_length, batch_size, 100)) stream_train = DataStream(dataset=dataset_train) stream_test = DataStream(dataset=dataset_test) x = T.tensor3('x') y = T.matrix('y') # we need to provide data for the LSTM layer of size 4 * ltsm_dim, see # LSTM layer documentation for the explanation x_to_h = Linear(1, lstm_dim * 4, name='x_to_h', weights_init=IsotropicGaussian(), biases_init=Constant(0.0)) lstm = LSTM(lstm_dim, name='lstm', weights_init=IsotropicGaussian(), biases_init=Constant(0.0)) h_to_o = 
Linear(lstm_dim, 1, name='h_to_o', weights_init=IsotropicGaussian(), biases_init=Constant(0.0)) x_transform = x_to_h.apply(x) h, c = lstm.apply(x_transform) # only values of hidden units of the last timeframe are used for # the classification y_hat = h_to_o.apply(h[-1]) y_hat = Logistic().apply(y_hat) cost = BinaryCrossEntropy().apply(y, y_hat) cost.name = 'cost' lstm.initialize() x_to_h.initialize() h_to_o.initialize() cg = ComputationGraph(cost) algorithm = GradientDescent(cost=cost, parameters=cg.parameters, step_rule=Adam()) test_monitor = DataStreamMonitoring(variables=[cost], data_stream=stream_test, prefix="test") train_monitor = TrainingDataMonitoring(variables=[cost], prefix="train", after_epoch=True) main_loop = MainLoop(algorithm, stream_train, extensions=[test_monitor, train_monitor, FinishAfter(after_n_epochs=num_epochs), Printing(), ProgressBar()]) main_loop.run() print('Learned weights:') for layer in (x_to_h, lstm, h_to_o): print("Layer '%s':" % layer.name) for param in layer.parameters: print(param.name, ': ', param.get_value()) print() return main_loop
chrisidefix/devide
refs/heads/master
modules/vtk_basic/vtkGenericGlyph3DFilter.py
7
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase import vtk class vtkGenericGlyph3DFilter(SimpleVTKClassModuleBase): def __init__(self, module_manager): SimpleVTKClassModuleBase.__init__( self, module_manager, vtk.vtkGenericGlyph3DFilter(), 'Processing.', ('vtkGenericDataSet', 'vtkPolyData'), ('vtkPolyData',), replaceDoc=True, inputFunctions=None, outputFunctions=None)
llonchj/sentry
refs/heads/master
src/sentry/utils/cursors.py
29
""" sentry.utils.cursors ~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from collections import Sequence class Cursor(object): def __init__(self, value, offset=0, is_prev=False, has_results=None): # XXX: ceil is not entirely correct here, but it's a simple hack # that solves most problems self.value = long(value) self.offset = int(offset) self.is_prev = bool(is_prev) self.has_results = has_results def __str__(self): return '%s:%s:%s' % (self.value, self.offset, int(self.is_prev)) def __repr__(self): return '<%s: value=%s offset=%s is_prev=%s>' % ( type(self), self.value, self.offset, int(self.is_prev)) def __nonzero__(self): return self.has_results @classmethod def from_string(cls, value): bits = value.split(':') if len(bits) != 3: raise ValueError try: bits = float(bits[0]), int(bits[1]), int(bits[2]) except (TypeError, ValueError): raise ValueError return cls(*bits) class CursorResult(Sequence): def __init__(self, results, next, prev): self.results = results self.next = next self.prev = prev def __len__(self): return len(self.results) def __iter__(self): return iter(self.results) def __getitem__(self, key): return self.results[key] def __repr__(self): return '<%s: results=%s>' % (type(self).__name__, len(self.results)) @classmethod def from_ids(self, id_list, key=None, limit=100, cursor=None): from sentry.models import Group group_map = Group.objects.in_bulk(id_list) results = [] for g_id in id_list: try: results.append(group_map[g_id]) except KeyError: pass return build_cursor( results=results, key=key, cursor=cursor, limit=limit, ) def build_cursor(results, key, limit=100, cursor=None): if cursor is None: cursor = Cursor(0, 0, 0) value = cursor.value offset = cursor.offset is_prev = cursor.is_prev num_results = len(results) if is_prev: has_prev = num_results > limit num_results = len(results) elif value or offset: # It's 
likely that there's a previous page if they passed us either offset values has_prev = True else: # we don't know has_prev = False # Default cursor if not present if is_prev: next_value = value next_offset = offset has_next = True elif num_results: if not value: value = long(key(results[0])) # Are there more results than whats on the current page? has_next = num_results > limit # Determine what our next cursor is by ensuring we have a unique offset next_value = long(key(results[-1])) if next_value == value: next_offset = offset + limit else: next_offset = 0 result_iter = reversed(results) # skip the last result result_iter.next() for result in result_iter: if long(key(result)) == next_value: next_offset += 1 else: break else: next_value = value next_offset = offset has_next = False # Determine what our pervious cursor is by ensuring we have a unique offset if is_prev and num_results: prev_value = long(key(results[0])) if num_results > 2: i = 1 while i < num_results and prev_value == long(key(results[i])): i += 1 i -= 1 else: i = 0 # if we iterated every result and the offset didn't change, we need # to simply add the current offset to our total results (visible) if prev_value == value: prev_offset = offset + i else: prev_offset = i else: # previous cursor is easy if we're paginating forward prev_value = value prev_offset = offset # Truncate the list to our original result size now that we've determined the next page results = results[:limit] next_cursor = Cursor(next_value or 0, next_offset, False, has_next) prev_cursor = Cursor(prev_value or 0, prev_offset, True, has_prev) return CursorResult( results=results, next=next_cursor, prev=prev_cursor, )
WadeBarnes/TheOrgBook
refs/heads/master
tob-api/api_indy/indy/__init__.py
12133432
sadaf2605/django
refs/heads/master
tests/i18n/other/__init__.py
12133432
Prashant-Surya/addons-server
refs/heads/master
src/olympia/files/tests/test_models.py
1
# -*- coding: utf-8 -*- import hashlib import json import os import shutil import tempfile import zipfile from datetime import datetime from django import forms from django.core.files.storage import default_storage as storage from django.conf import settings from django.test.utils import override_settings import mock import pytest from mock import patch from olympia import amo from olympia.amo.tests import TestCase from olympia.amo.utils import rm_local_tmp_dir, chunked from olympia.addons.models import Addon from olympia.applications.models import AppVersion from olympia.files.models import ( EXTENSIONS, File, FileUpload, FileValidation, nfd_str, track_file_status_change, ) from olympia.files.helpers import copyfileobj from olympia.files.utils import check_xpi_info, parse_addon, parse_xpi from olympia.versions.models import Version pytestmark = pytest.mark.django_db class UploadTest(TestCase, amo.tests.AMOPaths): """ Base for tests that mess with file uploads, safely using temp directories. """ def file_path(self, *args, **kw): return self.file_fixture_path(*args, **kw) def get_upload(self, filename=None, abspath=None, validation=None): xpi = open(abspath if abspath else self.file_path(filename)).read() upload = FileUpload.from_post([xpi], filename=abspath or filename, size=1234) # Simulate what fetch_manifest() does after uploading an app. upload.validation = (validation or json.dumps(dict(errors=0, warnings=1, notices=2, metadata={}, messages=[]))) upload.save() return upload class TestFile(TestCase, amo.tests.AMOPaths): """ Tests the methods of the File model. 
""" fixtures = ['base/addon_3615', 'base/addon_5579'] def test_get_absolute_url(self): f = File.objects.get(id=67442) url = f.get_absolute_url(src='src') expected = ('/firefox/downloads/file/67442/' 'delicious_bookmarks-2.1.072-fx.xpi?src=src') assert url.endswith(expected), url def test_get_url_path(self): file_ = File.objects.get(id=67442) assert file_.get_url_path('src') == \ file_.get_absolute_url(src='src') def test_get_signed_url(self): file_ = File.objects.get(id=67442) url = file_.get_signed_url('src') expected = ('/api/v3/file/67442/' 'delicious_bookmarks-2.1.072-fx.xpi?src=src') assert url.endswith(expected), url def check_delete(self, file_, filename): """Test that when the File object is deleted, it is removed from the filesystem.""" try: with storage.open(filename, 'w') as f: f.write('sample data\n') assert storage.exists(filename) file_.delete() assert not storage.exists(filename) finally: if storage.exists(filename): storage.delete(filename) def test_delete_by_version(self): """Test that version (soft)delete doesn't delete the file.""" f = File.objects.get(pk=67442) try: with storage.open(f.file_path, 'w') as fi: fi.write('sample data\n') assert storage.exists(f.file_path) f.version.delete() assert storage.exists(f.file_path) finally: if storage.exists(f.file_path): storage.delete(f.file_path) def test_delete_file_path(self): f = File.objects.get(pk=67442) self.check_delete(f, f.file_path) def test_delete_mirror_file_path(self): f = File.objects.get(pk=67442) self.check_delete(f, f.mirror_file_path) def test_delete_no_file(self): # test that the file object can be deleted without the file # being present file = File.objects.get(pk=74797) filename = file.file_path assert not os.path.exists(filename), 'File exists at: %s' % filename file.delete() def test_delete_signal(self): """Test that if there's no filename, the signal is ok.""" file = File.objects.get(pk=67442) file.update(filename='') file.delete() 
@mock.patch('olympia.files.models.File.hide_disabled_file') def test_disable_signal(self, hide_mock): f = File.objects.get(pk=67442) f.status = amo.STATUS_PUBLIC f.save() assert not hide_mock.called f.status = amo.STATUS_DISABLED f.save() assert hide_mock.called @mock.patch('olympia.files.models.File.unhide_disabled_file') def test_unhide_on_enable(self, unhide_mock): f = File.objects.get(pk=67442) f.status = amo.STATUS_PUBLIC f.save() assert not unhide_mock.called f = File.objects.get(pk=67442) f.status = amo.STATUS_DISABLED f.save() assert not unhide_mock.called f = File.objects.get(pk=67442) f.status = amo.STATUS_PUBLIC f.save() assert unhide_mock.called def test_unhide_disabled_files(self): f = File.objects.get(pk=67442) f.status = amo.STATUS_PUBLIC with storage.open(f.guarded_file_path, 'wb') as fp: fp.write('some data\n') f.unhide_disabled_file() assert storage.exists(f.file_path) assert storage.open(f.file_path).size def test_unhide_disabled_file_mirroring(self): tmp = tempfile.mkdtemp() self.addCleanup(lambda: shutil.rmtree(tmp)) fo = File.objects.get(pk=67442) with storage.open(fo.file_path, 'wb') as fp: fp.write('<pretend this is an xpi>') with storage.open(fo.mirror_file_path, 'wb') as fp: fp.write('<pretend this is an xpi>') fo.status = amo.STATUS_DISABLED fo.save() assert not storage.exists(fo.file_path), 'file not hidden' assert not storage.exists(fo.mirror_file_path), ( 'file not removed from mirror') fo = File.objects.get(pk=67442) fo.status = amo.STATUS_PUBLIC fo.save() assert storage.exists(fo.file_path), 'file not un-hidden' assert storage.exists(fo.mirror_file_path), ( 'file not copied back to mirror') @mock.patch('olympia.files.models.File.copy_to_mirror') def test_copy_to_mirror_on_status_change(self, copy_mock): assert amo.STATUS_UNREVIEWED not in amo.MIRROR_STATUSES f = File.objects.get(pk=67442) f.status = amo.STATUS_UNREVIEWED f.save() assert not copy_mock.called copy_mock.reset_mock() for status in amo.MIRROR_STATUSES: f = 
File.objects.get(pk=67442) f.status = status f.save() assert copy_mock.called, "Copy not called" f.status = amo.STATUS_UNREVIEWED f.save() copy_mock.reset_mock() def test_latest_url(self): # With platform. f = File.objects.get(id=74797) base = '/firefox/downloads/latest{2}/' expected = base + '{1}/platform:3/addon-{0}-latest.xpi' actual = f.latest_xpi_url() assert expected.format( f.version.addon_id, f.version.addon.slug, '') == actual actual = f.latest_xpi_url(beta=True) assert expected.format( f.version.addon_id, f.version.addon.slug, '-beta') == actual # No platform. f = File.objects.get(id=67442) expected = base + '{1}/addon-{0}-latest.xpi' actual = f.latest_xpi_url() assert expected.format( f.version.addon_id, f.version.addon.slug, '') == actual actual = f.latest_xpi_url(beta=True) assert expected.format( f.version.addon_id, f.version.addon.slug, '-beta') == actual def test_eula_url(self): f = File.objects.get(id=67442) assert f.eula_url() == '/en-US/firefox/addon/3615/eula/67442' def test_generate_filename(self): f = File.objects.get(id=67442) assert f.generate_filename() == 'delicious_bookmarks-2.1.072-fx.xpi' def test_pretty_filename(self): f = File.objects.get(id=67442) f.generate_filename() assert f.pretty_filename() == 'delicious_bookmarks-2.1.072-fx.xpi' def test_pretty_filename_short(self): f = File.objects.get(id=67442) f.version.addon.name = 'A Place Where The Sea Remembers Your Name' f.generate_filename() assert f.pretty_filename() == 'a_place_where_the...-2.1.072-fx.xpi' def test_generate_filename_platform_specific(self): f = File.objects.get(id=67442) f.platform = amo.PLATFORM_MAC.id assert f.generate_filename() == ( 'delicious_bookmarks-2.1.072-fx-mac.xpi') def test_generate_filename_many_apps(self): f = File.objects.get(id=67442) f.version.compatible_apps = (amo.FIREFOX, amo.THUNDERBIRD) assert f.generate_filename() == 'delicious_bookmarks-2.1.072-fx+tb.xpi' def test_generate_filename_ja(self): f = File() f.version = Version(version='0.1.7') 
f.version.compatible_apps = (amo.FIREFOX,) f.version.addon = Addon(name=u' フォクすけ といっしょ') assert f.generate_filename() == 'addon-0.1.7-fx.xpi' def clean_files(self, f): if f.mirror_file_path and storage.exists(f.mirror_file_path): storage.delete(f.mirror_file_path) if not storage.exists(f.file_path): with storage.open(f.file_path, 'w') as fp: fp.write('sample data\n') def test_copy_to_mirror(self): f = File.objects.get(id=67442) self.clean_files(f) f.copy_to_mirror() assert storage.exists(f.mirror_file_path) def test_generate_hash(self): f = File() f.version = Version.objects.get(pk=81551) fn = self.xpi_path('delicious_bookmarks-2.1.106-fx') assert f.generate_hash(fn).startswith('sha256:fd277d45ab44f6240e') def test_file_is_mirrorable(self): f = File.objects.get(pk=67442) assert f.is_mirrorable() f.update(status=amo.STATUS_DISABLED) assert not f.is_mirrorable() def test_addon(self): f = File.objects.get(pk=67442) addon_id = f.version.addon_id addon = Addon.objects.no_cache().get(pk=addon_id) addon.update(status=amo.STATUS_DELETED) assert f.addon.id == addon_id class TestTrackFileStatusChange(TestCase): def create_file(self, **kwargs): addon = Addon() addon.save() ver = Version(version='0.1') ver.addon = addon ver.save() f = File(**kwargs) f.version = ver f.save() return f def test_track_stats_on_new_file(self): with patch('olympia.files.models.track_file_status_change') as mock_: f = self.create_file() mock_.assert_called_with(f) def test_track_stats_on_updated_file(self): f = self.create_file() with patch('olympia.files.models.track_file_status_change') as mock_: f.update(status=amo.STATUS_PUBLIC) f.reload() assert mock_.call_args[0][0].status == f.status def test_ignore_non_status_changes(self): f = self.create_file() with patch('olympia.files.models.track_file_status_change') as mock_: f.update(size=1024) assert not mock_.called, ( 'Unexpected call: {}'.format(self.mock_.call_args) ) def test_increment_file_status(self): f = 
self.create_file(status=amo.STATUS_PUBLIC) with patch('olympia.files.models.statsd.incr') as mock_incr: track_file_status_change(f) mock_incr.assert_any_call( 'file_status_change.all.status_{}'.format(amo.STATUS_PUBLIC) ) def test_increment_jetpack_sdk_only_status(self): f = self.create_file( status=amo.STATUS_PUBLIC, jetpack_version='1.0', no_restart=True, requires_chrome=False, ) with patch('olympia.files.models.statsd.incr') as mock_incr: track_file_status_change(f) mock_incr.assert_any_call( 'file_status_change.jetpack_sdk_only.status_{}' .format(amo.STATUS_PUBLIC) ) class TestParseXpi(TestCase): def setUp(self): super(TestParseXpi, self).setUp() for version in ('3.0', '3.6.*'): AppVersion.objects.create(application=amo.FIREFOX.id, version=version) def parse(self, addon=None, filename='extension.xpi'): path = 'src/olympia/files/fixtures/files/' + filename xpi = os.path.join(settings.ROOT, path) return parse_addon(open(xpi), addon) def test_parse_basics(self): # Everything but the apps exp = {'guid': 'guid@xpi', 'name': 'xpi name', 'summary': 'xpi description', 'version': '0.1', 'homepage': 'http://homepage.com', 'type': 1} parsed = self.parse() for key, value in exp.items(): assert parsed[key] == value def test_parse_apps(self): exp = (amo.FIREFOX, amo.FIREFOX.id, AppVersion.objects.get(version='3.0'), AppVersion.objects.get(version='3.6.*')) assert self.parse()['apps'] == [exp] def test_parse_apps_bad_appver(self): AppVersion.objects.all().delete() assert self.parse()['apps'] == [] @mock.patch.object(amo.FIREFOX, 'guid', 'iamabadguid') def test_parse_apps_bad_guid(self): assert self.parse()['apps'] == [] def test_guid_match(self): addon = Addon.objects.create(guid='guid@xpi', type=1) parsed = self.parse(addon) assert parsed['guid'] == 'guid@xpi' assert not parsed['is_experiment'] def test_guid_nomatch(self): addon = Addon.objects.create(guid='xxx', type=1) with self.assertRaises(forms.ValidationError) as e: self.parse(addon) assert 
e.exception.messages[0].startswith('The add-on ID in your') def test_guid_dupe(self): Addon.objects.create(guid='guid@xpi', type=1) with self.assertRaises(forms.ValidationError) as e: self.parse() assert e.exception.messages == ['Duplicate add-on ID found.'] def test_guid_no_dupe_webextension_no_id(self): Addon.objects.create(guid=None, type=1) self.parse(filename='webextension_no_id.xpi') def test_guid_dupe_webextension_guid_given(self): Addon.objects.create(guid='@webextension-guid', type=1) with self.assertRaises(forms.ValidationError) as e: self.parse(filename='webextension.xpi') assert e.exception.messages == ['Duplicate add-on ID found.'] def test_guid_nomatch_webextension(self): addon = Addon.objects.create( guid='e2c45b71-6cbb-452c-97a5-7e8039cc6535', type=1) with self.assertRaises(forms.ValidationError) as e: self.parse(addon, filename='webextension.xpi') assert e.exception.messages[0].startswith('The add-on ID in your') def test_guid_nomatch_webextension_supports_no_guid(self): # addon.guid is generated if none is set originally so it doesn't # really matter what we set here, we allow updates to an add-on # with a XPI that has no id. addon = Addon.objects.create( guid='e2c45b71-6cbb-452c-97a5-7e8039cc6535', type=1) info = self.parse(addon, filename='webextension_no_id.xpi') assert info['guid'] == addon.guid def test_match_type(self): addon = Addon.objects.create(guid='guid@xpi', type=4) with self.assertRaises(forms.ValidationError) as e: self.parse(addon) assert e.exception.messages[0].startswith( '<em:type> in your install.rdf') def test_match_type_extension_for_telemetry_experiments(self): parsed = self.parse(filename='telemetry_experiment.xpi') # See bug 1220097: telemetry experiments (type 128) map to extensions. 
assert parsed['type'] == amo.ADDON_EXTENSION assert parsed['is_experiment'] def test_match_type_extension_for_webextension_experiments(self): parsed = self.parse(filename='webextension_experiment.xpi') # See #3315: webextension experiments (type 256) map to extensions. assert parsed['type'] == amo.ADDON_EXTENSION assert parsed['is_experiment'] def test_match_type_extension_for_webextensions(self): parsed = self.parse(filename='webextension.xpi') assert parsed['type'] == amo.ADDON_EXTENSION assert parsed['is_webextension'] def test_xml_for_extension(self): addon = Addon.objects.create(guid='guid@xpi', type=1) with self.assertRaises(forms.ValidationError) as e: self.parse(addon, filename='search.xml') assert e.exception.messages[0].startswith( '<em:type> in your install.rdf') def test_unknown_app(self): data = self.parse(filename='theme-invalid-app.jar') assert data['apps'] == [] def test_bad_zipfile(self): with self.assertRaises(forms.ValidationError) as e: parse_addon('baxmldzip.xpi', None) assert e.exception.messages == ['Could not parse the manifest file.'] def test_parse_dictionary(self): result = self.parse(filename='dictionary-test.xpi') assert result['type'] == amo.ADDON_DICT # We detected it as a dictionary but it's not using the explicit # dictionary type, so it's not restartless. assert not result['no_restart'] def test_parse_dictionary_explicit_type(self): result = self.parse(filename='dictionary-explicit-type-test.xpi') assert result['type'] == amo.ADDON_DICT assert result['no_restart'] def test_parse_dictionary_extension(self): result = self.parse(filename='dictionary-extension-test.xpi') assert result['type'] == amo.ADDON_EXTENSION # It's not a real dictionary, it's an extension, so it's not # restartless. 
assert not result['no_restart'] def test_parse_jar(self): result = self.parse(filename='theme.jar') assert result['type'] == amo.ADDON_THEME def test_parse_theme_by_type(self): result = self.parse(filename='theme-type.xpi') assert result['type'] == amo.ADDON_THEME def test_parse_theme_with_internal_name(self): result = self.parse(filename='theme-internal-name.xpi') assert result['type'] == amo.ADDON_THEME def test_parse_no_type(self): result = self.parse(filename='no-type.xpi') assert result['type'] == amo.ADDON_EXTENSION def test_parse_invalid_type(self): result = self.parse(filename='invalid-type.xpi') assert result['type'] == amo.ADDON_EXTENSION def test_parse_langpack(self): result = self.parse(filename='langpack.xpi') assert result['type'] == amo.ADDON_LPAPP assert result['no_restart'] def test_good_version_number(self): check_xpi_info({'guid': 'guid', 'version': '1.2a-b+32*__yeah'}) check_xpi_info({'guid': 'guid', 'version': '1' * 32}) def test_bad_version_number(self): with self.assertRaises(forms.ValidationError) as e: check_xpi_info({'guid': 'guid', 'version': 'bad #version'}) msg = e.exception.messages[0] assert msg.startswith('Version numbers should only contain'), msg def test_long_version_number(self): with self.assertRaises(forms.ValidationError) as e: check_xpi_info({'guid': 'guid', 'version': '1' * 33}) msg = e.exception.messages[0] assert msg == 'Version numbers should have fewer than 32 characters.' def test_strict_compat_undefined(self): result = self.parse() assert not result['strict_compatibility'] def test_strict_compat_enabled(self): result = self.parse(filename='strict-compat.xpi') assert result['strict_compatibility'] class TestParseAlternateXpi(TestCase, amo.tests.AMOPaths): # This install.rdf is completely different from our other xpis. 
def setUp(self): super(TestParseAlternateXpi, self).setUp() for version in ('3.0', '4.0b3pre'): AppVersion.objects.create(application=amo.FIREFOX.id, version=version) def parse(self, filename='alt-rdf.xpi'): return parse_addon(open(self.file_fixture_path(filename))) def test_parse_basics(self): # Everything but the apps. exp = {'guid': '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}', 'name': 'Delicious Bookmarks', 'summary': 'Access your bookmarks wherever you go and keep ' 'them organized no matter how many you have.', 'homepage': 'http://delicious.com', 'type': amo.ADDON_EXTENSION, 'version': '2.1.106'} parsed = self.parse() for key, value in exp.items(): assert parsed[key] == value def test_parse_apps(self): exp = (amo.FIREFOX, amo.FIREFOX.id, AppVersion.objects.get(version='3.0'), AppVersion.objects.get(version='4.0b3pre')) assert self.parse()['apps'] == [exp] @mock.patch('olympia.files.utils.rdflib.Graph') def test_no_manifest_node(self, graph_mock): rdf_mock = mock.Mock() graph_mock.return_value.parse.return_value = rdf_mock rdf_mock.triples.return_value = iter([]) rdf_mock.subjects.return_value = iter([]) with self.assertRaises(forms.ValidationError) as e: self.parse() assert e.exception.messages == ['Could not parse the manifest file.'] class TestFileUpload(UploadTest): fixtures = ['base/appversion', 'base/addon_3615'] def setUp(self): super(TestFileUpload, self).setUp() self.data = 'file contents' def upload(self, **params): # The data should be in chunks. 
data = [''.join(x) for x in chunked(self.data, 3)] return FileUpload.from_post(data, 'filename.xpi', len(self.data), **params) def test_from_post_write_file(self): assert storage.open(self.upload().path).read() == self.data def test_from_post_filename(self): upload = self.upload() assert upload.uuid assert upload.name == '{0}_filename.xpi'.format(upload.uuid.hex) def test_from_post_hash(self): hash = hashlib.sha256(self.data).hexdigest() assert self.upload().hash == 'sha256:%s' % hash def test_from_post_extra_params(self): upload = self.upload(automated_signing=True, addon_id=3615) assert upload.addon_id == 3615 assert upload.automated_signing def test_from_post_is_one_query(self): with self.assertNumQueries(1): self.upload(automated_signing=True, addon_id=3615) def test_save_without_validation(self): upload = FileUpload.objects.create() assert not upload.valid def test_save_with_validation(self): upload = FileUpload.objects.create( validation='{"errors": 0, "metadata": {}}') assert upload.valid upload = FileUpload.objects.create(validation='{"errors": 1}') assert not upload.valid with self.assertRaises(ValueError): upload = FileUpload.objects.create(validation='wtf') def test_update_with_validation(self): upload = FileUpload.objects.create() upload.validation = '{"errors": 0, "metadata": {}}' upload.save() assert upload.valid def test_update_without_validation(self): upload = FileUpload.objects.create() upload.save() assert not upload.valid def test_ascii_names(self): upload = FileUpload.from_post('', u'jétpack.xpi', 0) assert 'xpi' in upload.name upload = FileUpload.from_post('', u'мозила_србија-0.11-fx.xpi', 0) assert 'xpi' in upload.name upload = FileUpload.from_post('', u'フォクすけといっしょ.xpi', 0) assert 'xpi' in upload.name upload = FileUpload.from_post('', u'\u05d0\u05d5\u05e1\u05e3.xpi', 0) assert 'xpi' in upload.name def test_validator_sets_binary_via_extensions(self): validation = json.dumps({ "errors": 0, "success": True, "warnings": 0, "notices": 0, 
"message_tree": {}, "messages": [], "metadata": { "contains_binary_extension": True, "version": "1.0", "name": "gK0Bes Bot", "id": "gkobes@gkobes", } }) upload = self.get_upload(filename='extension.xpi', validation=validation) version = Version.objects.filter(addon__pk=3615)[0] plat = amo.PLATFORM_LINUX.id file_ = File.from_upload(upload, version, plat) assert file_.binary def test_validator_sets_binary_via_content(self): validation = json.dumps({ "errors": 0, "success": True, "warnings": 0, "notices": 0, "message_tree": {}, "messages": [], "metadata": { "contains_binary_content": True, "version": "1.0", "name": "gK0Bes Bot", "id": "gkobes@gkobes", } }) upload = self.get_upload(filename='extension.xpi', validation=validation) version = Version.objects.filter(addon__pk=3615)[0] file_ = File.from_upload(upload, version, amo.PLATFORM_LINUX.id) assert file_.binary def test_validator_sets_require_chrome(self): validation = json.dumps({ "errors": 0, "success": True, "warnings": 0, "notices": 0, "message_tree": {}, "messages": [], "metadata": { "version": "1.0", "name": "gK0Bes Bot", "id": "gkobes@gkobes", "requires_chrome": True } }) upload = self.get_upload(filename='extension.xpi', validation=validation) version = Version.objects.filter(addon__pk=3615)[0] file_ = File.from_upload(upload, version, amo.PLATFORM_LINUX.id) assert file_.requires_chrome @override_settings(VALIDATOR_MESSAGE_LIMIT=10) def test_limit_validator_warnings(self): data = { "errors": 0, "success": True, "warnings": 500, "notices": 0, "message_tree": {}, "messages": [{ "context": ["<code>", None], "description": ["Something something, see " "https://bugzilla.mozilla.org/"], "column": 0, "line": 1, "file": "chrome/content/down.html", "tier": 2, "message": "Some warning", "type": "warning", "id": [], "uid": "bb9948b604b111e09dfdc42c0301fe38"}] * 12, "metadata": {} } upload = FileUpload(validation=json.dumps(data)) validation = upload.processed_validation assert len(validation['messages']) == 11 assert 
'truncated' in validation['messages'][0]['message'] assert validation['messages'][0]['type'] == 'warning' @override_settings(VALIDATOR_MESSAGE_LIMIT=10) def test_limit_validator_compat_errors(self): data = { "errors": 0, "success": True, "warnings": 100, "notices": 0, "message_tree": {}, "compatibility_summary": {"errors": 100, "warnings": 0, "notices": 0}, "messages": [ { "context": ["<code>", None], "description": ["Something something, see " "https://bugzilla.mozilla.org/"], "column": 0, "line": 1, "file": "chrome/content/down.html", "tier": 2, "message": "Some warning", "type": "warning", "compatibility_type": "warning", "id": [], "uid": "bb9948b604b111e09dfdc42c0301fe38" }, { "context": ["<code>", None], "description": ["Something something, see " "https://bugzilla.mozilla.org/"], "column": 0, "line": 1, "file": "chrome/content/down.html", "tier": 2, "message": "Some error", "type": "warning", "compatibility_type": "warning", "id": [], "uid": "bb9948b604b111e09dfdc42c0301fe38" } ] * 50, "metadata": {} } upload = FileUpload(validation=json.dumps(data)) validation = upload.processed_validation assert len(validation['messages']) == 11 assert 'truncated' in validation['messages'][0]['message'] assert validation['messages'][0]['type'] == 'warning' upload = FileUpload(validation=json.dumps(data), compat_with_app=1) validation = upload.processed_validation assert len(validation['messages']) == 11 assert 'truncated' in validation['messages'][0]['message'] assert validation['messages'][0]['type'] == 'error' @override_settings(VALIDATOR_MESSAGE_LIMIT=10) def test_limit_validator_errors(self): data = { "errors": 100, "success": True, "warnings": 100, "notices": 0, "message_tree": {}, "messages": [ { "context": ["<code>", None], "description": ["Something something, see " "https://bugzilla.mozilla.org/"], "column": 0, "line": 1, "file": "chrome/content/down.html", "tier": 2, "message": "Some warning", "type": "warning", "id": [], "uid": "bb9948b604b111e09dfdc42c0301fe38" 
}, { "context": ["<code>", None], "description": ["Something something, see " "https://bugzilla.mozilla.org/"], "column": 0, "line": 1, "file": "chrome/content/down.html", "tier": 2, "message": "Some error", "type": "error", "id": [], "uid": "bb9948b604b111e09dfdc42c0301fe38" } ] * 50, "metadata": {} } upload = FileUpload(validation=json.dumps(data)) validation = upload.processed_validation assert len(validation['messages']) == 11 assert 'truncated' in validation['messages'][0]['message'] assert validation['messages'][0]['type'] == 'error' def test_file_upload_passed_auto_validation_passed(): upload = FileUpload(validation=json.dumps({ 'passed_auto_validation': True, })) assert upload.passed_auto_validation def test_file_upload_passed_auto_validation_failed(): upload = FileUpload(validation=json.dumps({ 'passed_auto_validation': False, })) assert not upload.passed_auto_validation def test_file_upload_passed_all_validations_processing(): upload = FileUpload(valid=False, validation='') assert not upload.passed_all_validations def test_file_upload_passed_all_validations_valid(): upload = FileUpload( valid=True, validation=json.dumps(amo.VALIDATOR_SKELETON_RESULTS)) assert upload.passed_all_validations def test_file_upload_passed_all_validations_invalid(): upload = FileUpload( valid=False, validation=json.dumps(amo.VALIDATOR_SKELETON_RESULTS)) assert not upload.passed_all_validations class TestFileFromUpload(UploadTest): def setUp(self): super(TestFileFromUpload, self).setUp() appver = {amo.FIREFOX: ['3.0', '3.6', '3.6.*', '4.0b6'], amo.MOBILE: ['0.1', '2.0a1pre']} for app, versions in appver.items(): for version in versions: AppVersion(application=app.id, version=version).save() self.platform = amo.PLATFORM_MAC.id self.addon = Addon.objects.create(guid='guid@jetpack', type=amo.ADDON_EXTENSION, name='xxx') self.version = Version.objects.create(addon=self.addon) def upload(self, name): # Add in `.xpi` if the filename doesn't have a valid file extension. 
if os.path.splitext(name)[-1] not in EXTENSIONS: name = name + '.xpi' v = json.dumps(dict(errors=0, warnings=1, notices=2, metadata={}, signing_summary={'trivial': 0, 'low': 0, 'medium': 0, 'high': 0}, passed_auto_validation=1)) fname = nfd_str(self.xpi_path(name)) if not storage.exists(fname): with storage.open(fname, 'w') as fs: copyfileobj(open(fname), fs) d = dict(path=fname, name=name, hash='sha256:%s' % name, validation=v) return FileUpload.objects.create(**d) def test_jetpack_version(self): upload = self.upload('jetpack') f = File.from_upload(upload, self.version, self.platform) file_ = File.objects.get(id=f.id) assert file_.jetpack_version == '1.0b4' def test_jetpack_with_invalid_json(self): upload = self.upload('jetpack_invalid') f = File.from_upload(upload, self.version, self.platform) file_ = File.objects.get(id=f.id) assert file_.jetpack_version is None def test_filename(self): upload = self.upload('jetpack') f = File.from_upload(upload, self.version, self.platform) assert f.filename == 'xxx-0.1-mac.xpi' def test_filename_no_extension(self): upload = self.upload('jetpack') # Remove the extension. 
upload.name = upload.name.rsplit('.', 1)[0] f = File.from_upload(upload, self.version, self.platform) assert f.filename == 'xxx-0.1-mac.xpi' def test_file_validation(self): upload = self.upload('jetpack') file = File.from_upload(upload, self.version, self.platform) fv = FileValidation.objects.get(file=file) assert json.loads(fv.validation) == json.loads(upload.validation) assert fv.valid assert fv.errors == 0 assert fv.warnings == 1 assert fv.notices == 2 def test_file_hash(self): upload = self.upload('jetpack') f = File.from_upload(upload, self.version, self.platform) assert f.hash.startswith('sha256:') assert len(f.hash) == 64 + 7 # 64 for hash, 7 for 'sha256:' def test_no_restart_true(self): upload = self.upload('jetpack') d = parse_addon(upload.path) f = File.from_upload(upload, self.version, self.platform, parse_data=d) assert f.no_restart def test_no_restart_false(self): upload = self.upload('extension') d = parse_addon(upload.path) f = File.from_upload(upload, self.version, self.platform, parse_data=d) assert not f.no_restart def test_utf8(self): upload = self.upload(u'jétpack') self.version.addon.name = u'jéts!' f = File.from_upload(upload, self.version, self.platform) assert f.filename == u'jets-0.1-mac.xpi' def test_size(self): upload = self.upload('extension') f = File.from_upload(upload, self.version, self.platform) assert f.size == 2264 def test_size_small(self): upload = self.upload('alt-rdf') f = File.from_upload(upload, self.version, self.platform) assert f.size == 675 def test_beta_version_non_public(self): # Only public add-ons can get beta versions. 
upload = self.upload('beta-extension') d = parse_addon(upload.path) self.addon.update(status=amo.STATUS_LITE) assert self.addon.status == amo.STATUS_LITE f = File.from_upload(upload, self.version, self.platform, parse_data=d) assert f.status == amo.STATUS_UNREVIEWED def test_public_to_beta(self): upload = self.upload('beta-extension') d = parse_addon(upload.path) self.addon.update(status=amo.STATUS_PUBLIC) assert self.addon.status == amo.STATUS_PUBLIC f = File.from_upload(upload, self.version, self.platform, is_beta=True, parse_data=d) assert f.status == amo.STATUS_BETA def test_public_to_unreviewed(self): upload = self.upload('extension') d = parse_addon(upload.path) self.addon.update(status=amo.STATUS_PUBLIC) assert self.addon.status == amo.STATUS_PUBLIC f = File.from_upload(upload, self.version, self.platform, parse_data=d) assert f.status == amo.STATUS_UNREVIEWED def test_lite_to_unreviewed(self): upload = self.upload('extension') d = parse_addon(upload.path) self.addon.update(status=amo.STATUS_LITE) assert self.addon.status == amo.STATUS_LITE f = File.from_upload(upload, self.version, self.platform, parse_data=d) assert f.status == amo.STATUS_UNREVIEWED def test_litenominated_to_unreviewed(self): upload = self.upload('extension') d = parse_addon(upload.path) with mock.patch('olympia.addons.models.Addon.update_status'): # mock update_status because it doesn't like Addons without files. 
self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED) assert self.addon.status == amo.STATUS_LITE_AND_NOMINATED f = File.from_upload(upload, self.version, self.platform, parse_data=d) assert f.status == amo.STATUS_UNREVIEWED def test_file_hash_paranoia(self): upload = self.upload('extension') f = File.from_upload(upload, self.version, self.platform) assert f.hash.startswith('sha256:035ae07b4988711') def test_strict_compat(self): upload = self.upload('strict-compat') d = parse_addon(upload.path) f = File.from_upload(upload, self.version, self.platform, parse_data=d) assert f.strict_compatibility def test_theme_extension(self): upload = self.upload('theme.jar') f = File.from_upload(upload, self.version, self.platform) assert f.filename.endswith('.xpi') def test_extension_extension(self): upload = self.upload('extension.xpi') f = File.from_upload(upload, self.version, self.platform) assert f.filename.endswith('.xpi') def test_langpack_extension(self): upload = self.upload('langpack.xpi') d = parse_addon(upload.path) f = File.from_upload(upload, self.version, self.platform, parse_data=d) assert f.filename.endswith('.xpi') assert f.no_restart def test_search_extension(self): upload = self.upload('search.xml') d = parse_addon(upload.path) f = File.from_upload(upload, self.version, self.platform, parse_data=d) assert f.filename.endswith('.xml') assert f.no_restart def test_multi_package(self): upload = self.upload('multi-package') file_ = File.from_upload(upload, self.version, self.platform, parse_data={'is_multi_package': True}) assert file_.is_multi_package def test_not_multi_package(self): upload = self.upload('extension') file_ = File.from_upload(upload, self.version, self.platform) assert not file_.is_multi_package def test_experiment(self): upload = self.upload('telemetry_experiment') file_ = File.from_upload(upload, self.version, self.platform, parse_data={'is_experiment': True}) assert file_.is_experiment def test_not_experiment(self): upload = 
self.upload('extension') file_ = File.from_upload(upload, self.version, self.platform, parse_data={'is_experiment': False}) assert not file_.is_experiment def test_webextension(self): upload = self.upload('webextension') file_ = File.from_upload(upload, self.version, self.platform, parse_data={'is_webextension': True}) assert file_.is_webextension def test_webextension_zip(self): """Test to ensure we accept ZIP uploads, but convert them into XPI files ASAP to keep things simple. """ upload = self.upload('webextension.zip') file_ = File.from_upload(upload, self.version, self.platform, parse_data={'is_webextension': True}) assert file_.filename.endswith('.xpi') assert file_.is_webextension storage.delete(upload.path) def test_webextension_crx(self): """Test to ensure we accept CRX uploads, but convert them into XPI files ASAP to keep things simple. """ upload = self.upload('webextension.crx') file_ = File.from_upload(upload, self.version, self.platform, parse_data={'is_webextension': True}) assert file_.filename.endswith('.xpi') assert file_.is_webextension storage.delete(upload.path) def test_webextension_crx_large(self): """Test to ensure we accept large CRX uploads, because of how we write them to storage. 
""" upload = self.upload('https-everywhere.crx') file_ = File.from_upload(upload, self.version, self.platform, parse_data={'is_webextension': True}) assert file_.filename.endswith('.xpi') assert file_.is_webextension storage.delete(upload.path) def test_extension_zip(self): upload = self.upload('recurse.zip') file_ = File.from_upload(upload, self.version, self.platform) assert file_.filename.endswith('.xpi') assert not file_.is_webextension storage.delete(upload.path) def test_not_webextension(self): upload = self.upload('extension') file_ = File.from_upload(upload, self.version, self.platform, parse_data={}) assert not file_.is_experiment class TestZip(TestCase, amo.tests.AMOPaths): def test_zip(self): # This zip contains just one file chrome/ that we expect # to be unzipped as a directory, not a file. xpi = self.xpi_path('directory-test') # This is to work around: http://bugs.python.org/issue4710 # which was fixed in Python 2.6.2. If the required version # of Python for zamboni goes to 2.6.2 or above, this can # be removed. try: dest = tempfile.mkdtemp() zipfile.ZipFile(xpi).extractall(dest) assert os.path.isdir(os.path.join(dest, 'chrome')) finally: rm_local_tmp_dir(dest) class TestParseSearch(TestCase, amo.tests.AMOPaths): def parse(self, filename='search.xml'): return parse_addon(open(self.file_fixture_path(filename))) def extract(self): # This is the expected return value from extract_search. return {'url': {u'type': u'text/html', u'template': u'http://www.yyy.com?q={searchTerms}'}, 'xmlns': u'http://a9.com/-/spec/opensearch/1.1/', 'name': u'search tool', 'description': u'Search Engine for Firefox'} def test_basics(self): # This test breaks if the day changes. Have fun with that! 
assert self.parse() == { 'guid': None, 'name': u'search tool', 'no_restart': True, 'version': datetime.now().strftime('%Y%m%d'), 'summary': u'Search Engine for Firefox', 'type': amo.ADDON_SEARCH} @mock.patch('olympia.files.utils.extract_search') def test_extract_search_error(self, extract_mock): extract_mock.side_effect = Exception with self.assertRaises(forms.ValidationError) as e: self.parse() assert e.exception.messages[0].startswith('Could not parse ') @mock.patch('olympia.files.utils.parse_xpi') @mock.patch('olympia.files.utils.parse_search') def test_parse_addon(search_mock, xpi_mock): parse_addon('file.xpi', None) xpi_mock.assert_called_with('file.xpi', None, True) parse_addon('file.xml', None) search_mock.assert_called_with('file.xml', None) parse_addon('file.jar', None) xpi_mock.assert_called_with('file.jar', None, True) def test_parse_xpi(): """Fire.fm can sometimes give us errors. Let's prevent that.""" firefm = os.path.join(settings.ROOT, 'src/olympia/files/fixtures/files/firefm.xpi') rdf = parse_xpi(open(firefm)) assert rdf['name'] == 'Fire.fm' class LanguagePackBase(UploadTest): fixtures = ['base/appversion'] def setUp(self): super(LanguagePackBase, self).setUp() self.addon = Addon.objects.create(type=amo.ADDON_LPAPP) self.platform = amo.PLATFORM_ALL.id self.version = Version.objects.create(addon=self.addon) self.addon.update(status=amo.STATUS_PUBLIC) self.addon._current_version = self.version class TestLanguagePack(LanguagePackBase): def file_create(self, path): return (File.objects.create(platform=self.platform, version=self.version, filename=self.xpi_path(path))) def test_extract(self): obj = self.file_create('langpack-localepicker') assert 'title=Select a language' in obj.get_localepicker() def test_extract_no_chrome_manifest(self): obj = self.file_create('langpack') assert obj.get_localepicker() == '' def test_zip_invalid(self): obj = self.file_create('search.xml') assert obj.get_localepicker() == '' 
@mock.patch('olympia.files.utils.SafeUnzip.extract_path') def test_no_locale_browser(self, extract_path): extract_path.return_value = 'some garbage' obj = self.file_create('langpack-localepicker') assert obj.get_localepicker() == '' @mock.patch('olympia.files.utils.SafeUnzip.extract_path') def test_corrupt_locale_browser_path(self, extract_path): extract_path.return_value = 'locale browser de woot?!' obj = self.file_create('langpack-localepicker') assert obj.get_localepicker() == '' extract_path.return_value = 'locale browser de woo:t?!as' # Result should be 'locale browser de woo:t?!as', but we have caching. assert obj.get_localepicker() == '' @mock.patch('olympia.files.utils.SafeUnzip.extract_path') def test_corrupt_locale_browser_data(self, extract_path): extract_path.return_value = 'locale browser de jar:install.rdf!foo' obj = self.file_create('langpack-localepicker') assert obj.get_localepicker() == '' def test_hits_cache(self): obj = self.file_create('langpack-localepicker') assert 'title=Select a language' in obj.get_localepicker() obj.update(filename='garbage') assert 'title=Select a language' in obj.get_localepicker() @mock.patch('olympia.files.models.File.get_localepicker') def test_cache_on_create(self, get_localepicker): self.file_create('langpack-localepicker') assert get_localepicker.called @mock.patch('olympia.files.models.File.get_localepicker') def test_cache_not_on_create(self, get_localepicker): self.addon.update(type=amo.ADDON_DICT) self.file_create('langpack-localepicker') assert not get_localepicker.called
PRIMEDesigner15/PRIMEDesigner15
refs/heads/master
dependencies/Lib/fnmatch.py
894
"""Filename matching with shell patterns. fnmatch(FILENAME, PATTERN) matches according to the local convention. fnmatchcase(FILENAME, PATTERN) always takes case in account. The functions operate by translating the pattern into a regular expression. They cache the compiled regular expressions for speed. The function translate(PATTERN) returns a regular expression corresponding to PATTERN. (It does not compile it.) """ import os import posixpath import re import functools __all__ = ["filter", "fnmatch", "fnmatchcase", "translate"] def fnmatch(name, pat): """Test whether FILENAME matches PATTERN. Patterns are Unix shell style: * matches everything ? matches any single character [seq] matches any character in seq [!seq] matches any char not in seq An initial period in FILENAME is not special. Both FILENAME and PATTERN are first case-normalized if the operating system requires it. If you don't want this, use fnmatchcase(FILENAME, PATTERN). """ name = os.path.normcase(name) pat = os.path.normcase(pat) return fnmatchcase(name, pat) @functools.lru_cache(maxsize=256, typed=True) def _compile_pattern(pat): if isinstance(pat, bytes): pat_str = str(pat, 'ISO-8859-1') res_str = translate(pat_str) res = bytes(res_str, 'ISO-8859-1') else: res = translate(pat) return re.compile(res).match def filter(names, pat): """Return the subset of the list NAMES that match PAT.""" result = [] pat = os.path.normcase(pat) match = _compile_pattern(pat) if os.path is posixpath: # normcase on posix is NOP. Optimize it away from the loop. for name in names: if match(name): result.append(name) else: for name in names: if match(os.path.normcase(name)): result.append(name) return result def fnmatchcase(name, pat): """Test whether FILENAME matches PATTERN, including case. This is a version of fnmatch() which doesn't case-normalize its arguments. """ match = _compile_pattern(pat) return match(name) is not None def translate(pat): """Translate a shell PATTERN to a regular expression. 
There is no way to quote meta-characters. """ i, n = 0, len(pat) res = '' while i < n: c = pat[i] i = i+1 if c == '*': res = res + '.*' elif c == '?': res = res + '.' elif c == '[': j = i if j < n and pat[j] == '!': j = j+1 if j < n and pat[j] == ']': j = j+1 while j < n and pat[j] != ']': j = j+1 if j >= n: res = res + '\\[' else: stuff = pat[i:j].replace('\\','\\\\') i = j+1 if stuff[0] == '!': stuff = '^' + stuff[1:] elif stuff[0] == '^': stuff = '\\' + stuff res = '%s[%s]' % (res, stuff) else: res = res + re.escape(c) return res + '\Z(?ms)'
yhpeng-git/mxnet
refs/heads/master
example/bi-lstm-sort/rnn_model.py
15
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme # pylint: disable=superfluous-parens, no-member, invalid-name import sys sys.path.insert(0, "../../python") import numpy as np import mxnet as mx from lstm import LSTMState, LSTMParam, lstm, bi_lstm_inference_symbol class BiLSTMInferenceModel(object): def __init__(self, seq_len, input_size, num_hidden, num_embed, num_label, arg_params, ctx=mx.cpu(), dropout=0.): self.sym = bi_lstm_inference_symbol(input_size, seq_len, num_hidden, num_embed, num_label, dropout) batch_size = 1 init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(2)] init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(2)] data_shape = [("data", (batch_size, seq_len, ))] input_shapes = dict(init_c + init_h + data_shape) self.executor = self.sym.simple_bind(ctx=mx.cpu(), **input_shapes) for key in self.executor.arg_dict.keys(): if key in arg_params: arg_params[key].copyto(self.executor.arg_dict[key]) state_name = [] for i in range(2): state_name.append("l%d_init_c" % i) state_name.append("l%d_init_h" % i) self.states_dict = dict(zip(state_name, self.executor.outputs[1:])) self.input_arr = mx.nd.zeros(data_shape[0][1]) def forward(self, input_data, new_seq=False): if new_seq == True: for key in self.states_dict.keys(): self.executor.arg_dict[key][:] = 0. input_data.copyto(self.executor.arg_dict["data"]) self.executor.forward() for key in self.states_dict.keys(): self.states_dict[key].copyto(self.executor.arg_dict[key]) prob = self.executor.outputs[0].asnumpy() return prob
dxq-git/zulip
refs/heads/master
zproject/local_settings_template.py
42
# Settings for Zulip Voyager

### MANDATORY SETTINGS
#
# These settings MUST be set in production. In a development environment,
# sensible default values will be used.

# The user-accessible Zulip hostname for this installation, e.g.
# zulip.example.com
EXTERNAL_HOST = 'zulip.example.com'

# The email address for the person or team who maintain the Zulip
# Voyager installation. Will also get support emails.
# (e.g. zulip-admin@example.com)
ZULIP_ADMINISTRATOR = 'zulip-admin@example.com'

# The domain for your organization, e.g. example.com
ADMIN_DOMAIN = 'example.com'

# Enable at least one of the following authentication backends.
AUTHENTICATION_BACKENDS = (
    # 'zproject.backends.EmailAuthBackend', # Email and password
    # 'zproject.backends.ZulipRemoteUserBackend', # Local SSO
    # 'zproject.backends.GoogleBackend', # Google Apps
)

# If you are using the ZulipRemoteUserBackend authentication backend,
# set this to your domain (e.g. if REMOTE_USER is "username" and the
# corresponding email address is "username@example.com", set
# SSO_APPEND_DOMAIN = "example.com")
SSO_APPEND_DOMAIN = None

# Configure the outgoing SMTP server below. For outgoing email
# via a GMail SMTP server, EMAIL_USE_TLS must be True and the
# outgoing port must be 587. The EMAIL_HOST is prepopulated
# for GMail servers, change it for other hosts, or leave it unset
# or empty to skip sending email.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
EMAIL_USE_TLS = True

# The email From address to be used for automatically generated emails
DEFAULT_FROM_EMAIL = "Zulip <zulip@example.com>"
# The noreply address to be used as Reply-To for certain generated emails.
# Messages sent to this address should not be delivered anywhere.
NOREPLY_EMAIL_ADDRESS = "noreply@example.com"

# A list of strings representing the host/domain names that this
# Django site can serve. You should reset it to be a list of
# domains/IP addresses for your site. This is a security measure to
# prevent an attacker from poisoning caches and triggering password
# reset emails with links to malicious hosts by submitting requests
# with a fake HTTP Host header.
# NOTE(review): the shipped default of ['*'] disables the Host-header
# check described above entirely; replace it with the real hostname(s)
# for this deployment before going to production -- confirm.
ALLOWED_HOSTS = ['*']

### OPTIONAL SETTINGS

# Controls whether session cookies expire when the browser closes
SESSION_EXPIRE_AT_BROWSER_CLOSE = False

# Session cookie expiry in seconds after the last page load
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2  # 2 weeks

# Controls whether or not there is a feedback button in the UI.
ENABLE_FEEDBACK = False

# By default, the feedback button will submit feedback to the Zulip
# developers. If you set FEEDBACK_EMAIL to be an email address
# (e.g. ZULIP_ADMINISTRATOR), feedback sent by your users will instead
# be sent to that email address.
FEEDBACK_EMAIL = ZULIP_ADMINISTRATOR

# Controls whether or not error reports are sent to Zulip. Error
# reports are used to improve the quality of the product and do not
# include message contents; please contact Zulip support with any
# questions.
ERROR_REPORTING = True

# Controls whether or not Zulip will provide inline image preview when
# a link to an image is referenced in a message.
INLINE_IMAGE_PREVIEW = True

# By default, files uploaded by users and user avatars are stored
# directly on the Zulip server. If file storage in Amazon S3 (or
# elsewhere, e.g. your corporate fileshare) is desired, please contact
# Zulip Support (support@zulip.com) for further instructions on
# setting up the appropriate integration.
LOCAL_UPLOADS_DIR = "/home/zulip/uploads"

# Controls whether name changes are completely disabled for this installation
# This is useful in settings where you're syncing names from an integrated
# LDAP/Active Directory
NAME_CHANGES_DISABLED = False

# Controls whether users who have not uploaded an avatar will receive an
# avatar from gravatar.com.
ENABLE_GRAVATAR = True

# To override the default avatar image if ENABLE_GRAVATAR is False, place your
# custom default avatar image at /home/zulip/local-static/default-avatar.png
# and uncomment the following line.
#DEFAULT_AVATAR_URI = '/local-static/default-avatar.png'

### TWITTER INTEGRATION

# Zulip supports showing inline Tweet previews when a tweet is linked
# to in a message. To support this, Zulip must have access to the
# Twitter API via OAuth. To obtain the various access tokens needed
# below, you must register a new application under your Twitter
# account by doing the following:
#
# 1. Log in to http://dev.twitter.com.
# 2. In the menu under your username, click My Applications. From this page,
#    create a new application.
# 3. Click on the application you created and click "create my access
#    token". Fill in the requested values.
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
TWITTER_ACCESS_TOKEN_KEY = ''
TWITTER_ACCESS_TOKEN_SECRET = ''

### EMAIL GATEWAY INTEGRATION

# The email gateway provides, for each stream, an email address that
# you can send email to in order to have the email's content be posted
# to that stream. Emails received at the per-stream email address
# will be converted into a Zulip message

# There are two ways to make use of local email mirroring:
#   1. Local delivery: A MTA runs locally and passes mail directly to Zulip
#   2. Polling: Checks an IMAP inbox every minute for new messages.

# A Puppet manifest for local delivery via Postfix is available in
# puppet/zulip/manifests/postfix_localmail.pp. To use the manifest, add it to
# puppet_classes in /etc/zulip/zulip.conf. This manifest assumes you'll
# receive mail addressed to the hostname of your Zulip server.
#
# Users of other mail servers will need to configure it to pass mail to the
# email mirror; see `python manage.py email-mirror --help` for details.

# The email address pattern to use for auto-generated stream emails
# The %s will be replaced with a unique token, and the resulting email
# must be delivered to the EMAIL_GATEWAY_IMAP_FOLDER of the
# EMAIL_GATEWAY_LOGIN account below, or piped in to the email-mirror
# management command as indicated above.
#
# Example: zulip+%s@example.com
EMAIL_GATEWAY_PATTERN = ""

# The following options are relevant if you're using mail polling.
#
# A sample cron job for mail polling is available at
# puppet/zulip/files/cron.d/email-mirror
#
# The Zulip username of the bot that the email pattern should post as.
# Example: emailgateway@example.com
EMAIL_GATEWAY_BOT = ""

# Configuration of the email mirror mailbox
# The IMAP login and password
EMAIL_GATEWAY_LOGIN = ""
EMAIL_GATEWAY_PASSWORD = ""
# The IMAP server & port to connect to
EMAIL_GATEWAY_IMAP_SERVER = ""
EMAIL_GATEWAY_IMAP_PORT = 993
# The IMAP folder name to check for emails. All emails sent to
# EMAIL_GATEWAY_PATTERN above must be delivered to this folder
EMAIL_GATEWAY_IMAP_FOLDER = "INBOX"

### LDAP integration configuration

# Zulip supports retrieving information about users via LDAP, and optionally
# using LDAP as an authentication mechanism.
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType

# URI of your LDAP server. If set, LDAP is used to prepopulate a user's name
# in Zulip. Example: "ldaps://ldap.example.com"
AUTH_LDAP_SERVER_URI = ""

# This DN and password will be used to bind to your server. If unset,
# anonymous binds are performed.
AUTH_LDAP_BIND_DN = ""
AUTH_LDAP_BIND_PASSWORD = ""

# Specify the search base and the property to filter on that corresponds to
# the username.
AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
                                   ldap.SCOPE_SUBTREE, "(uid=%(user)s)")

# If the value of a user's "uid" (or similar) property is not their email
# address, specify the domain to append here.
LDAP_APPEND_DOMAIN = ADMIN_DOMAIN

# This map defines how to populate attributes of a Zulip user from LDAP.
AUTH_LDAP_USER_ATTR_MAP = {
    # Populate the Django user's name from the LDAP directory.
    "full_name": "cn",
}

CAMO_URI = ''
jlmadurga/django-oscar
refs/heads/master
src/oscar/apps/catalogue/migrations/0001_initial.py
57
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import oscar.models.fields.autoslugfield import django.db.models.deletion import django.core.validators import oscar.models.fields class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0001_initial'), ] operations = [ migrations.CreateModel( name='AttributeOption', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('option', models.CharField(max_length=255, verbose_name='Option')), ], options={ 'verbose_name_plural': 'Attribute options', 'verbose_name': 'Attribute option', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='AttributeOptionGroup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=128, verbose_name='Name')), ], options={ 'verbose_name_plural': 'Attribute option groups', 'verbose_name': 'Attribute option group', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('path', models.CharField(unique=True, max_length=255)), ('depth', models.PositiveIntegerField()), ('numchild', models.PositiveIntegerField(default=0)), ('name', models.CharField(max_length=255, db_index=True, verbose_name='Name')), ('description', models.TextField(verbose_name='Description', blank=True)), ('image', models.ImageField(upload_to='categories', verbose_name='Image', max_length=255, blank=True, null=True)), ('slug', models.SlugField(max_length=255, editable=False, verbose_name='Slug')), ('full_name', models.CharField(max_length=255, editable=False, db_index=True, verbose_name='Full Name')), ], options={ 'ordering': ['full_name'], 'verbose_name_plural': 'Categories', 'verbose_name': 'Category', 'abstract': False, }, bases=(models.Model,), ), 
migrations.CreateModel( name='Option', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=128, verbose_name='Name')), ('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Code', max_length=128, editable=False, blank=True)), ('type', models.CharField(default='Required', max_length=128, verbose_name='Status', choices=[('Required', 'Required - a value for this option must be specified'), ('Optional', 'Optional - a value for this option can be omitted')])), ], options={ 'verbose_name_plural': 'Options', 'verbose_name': 'Option', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='Product', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('structure', models.CharField(default='standalone', max_length=10, verbose_name='Product structure', choices=[('standalone', 'Stand-alone product'), ('parent', 'Parent product'), ('child', 'Child product')])), ('upc', oscar.models.fields.NullCharField(unique=True, verbose_name='UPC', max_length=64, help_text='Universal Product Code (UPC) is an identifier for a product which is not specific to a particular supplier. 
Eg an ISBN for a book.')), ('title', models.CharField(max_length=255, verbose_name='Title', blank=True)), ('slug', models.SlugField(max_length=255, verbose_name='Slug')), ('description', models.TextField(verbose_name='Description', blank=True)), ('rating', models.FloatField(editable=False, verbose_name='Rating', null=True)), ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')), ('date_updated', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Date updated')), ('is_discountable', models.BooleanField(default=True, verbose_name='Is discountable?', help_text='This flag indicates if this product can be used in an offer or not')), ], options={ 'ordering': ['-date_created'], 'verbose_name_plural': 'Products', 'verbose_name': 'Product', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='ProductAttribute', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=128, verbose_name='Name')), ('code', models.SlugField(max_length=128, verbose_name='Code', validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z\\-_][0-9a-zA-Z\\-_]*$', message="Code can only contain the letters a-z, A-Z, digits, minus and underscores, and can't start with a digit")])), ('type', models.CharField(default='text', max_length=20, verbose_name='Type', choices=[('text', 'Text'), ('integer', 'Integer'), ('boolean', 'True / False'), ('float', 'Float'), ('richtext', 'Rich Text'), ('date', 'Date'), ('option', 'Option'), ('entity', 'Entity'), ('file', 'File'), ('image', 'Image')])), ('required', models.BooleanField(default=False, verbose_name='Required')), ('option_group', models.ForeignKey(null=True, verbose_name='Option Group', help_text='Select an option group if using type "Option"', to='catalogue.AttributeOptionGroup', blank=True)), ], options={ 'ordering': ['code'], 'verbose_name_plural': 'Product attributes', 'verbose_name': 
'Product attribute', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='ProductAttributeValue', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value_text', models.TextField(blank=True, verbose_name='Text', null=True)), ('value_integer', models.IntegerField(blank=True, verbose_name='Integer', null=True)), ('value_boolean', models.NullBooleanField(verbose_name='Boolean')), ('value_float', models.FloatField(blank=True, verbose_name='Float', null=True)), ('value_richtext', models.TextField(blank=True, verbose_name='Richtext', null=True)), ('value_date', models.DateField(blank=True, verbose_name='Date', null=True)), ('value_file', models.FileField(upload_to='images/products/%Y/%m/', max_length=255, blank=True, null=True)), ('value_image', models.ImageField(upload_to='images/products/%Y/%m/', max_length=255, blank=True, null=True)), ('entity_object_id', models.PositiveIntegerField(blank=True, editable=False, null=True)), ('attribute', models.ForeignKey(verbose_name='Attribute', to='catalogue.ProductAttribute')), ('entity_content_type', models.ForeignKey(null=True, editable=False, to='contenttypes.ContentType', blank=True)), ('product', models.ForeignKey(verbose_name='Product', related_name='attribute_values', to='catalogue.Product')), ('value_option', models.ForeignKey(null=True, verbose_name='Value option', to='catalogue.AttributeOption', blank=True)), ], options={ 'verbose_name_plural': 'Product attribute values', 'verbose_name': 'Product attribute value', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='ProductCategory', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('category', models.ForeignKey(verbose_name='Category', to='catalogue.Category')), ('product', models.ForeignKey(verbose_name='Product', to='catalogue.Product')), ], options={ 'ordering': ['product', 'category'], 
'verbose_name_plural': 'Product categories', 'verbose_name': 'Product category', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='ProductClass', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=128, verbose_name='Name')), ('slug', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)), ('requires_shipping', models.BooleanField(default=True, verbose_name='Requires shipping?')), ('track_stock', models.BooleanField(default=True, verbose_name='Track stock levels?')), ('options', models.ManyToManyField(verbose_name='Options', to='catalogue.Option', blank=True)), ], options={ 'ordering': ['name'], 'verbose_name_plural': 'Product classes', 'verbose_name': 'Product class', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='ProductImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('original', models.ImageField(upload_to='images/products/%Y/%m/', max_length=255, verbose_name='Original')), ('caption', models.CharField(max_length=200, verbose_name='Caption', blank=True)), ('display_order', models.PositiveIntegerField(default=0, verbose_name='Display order', help_text='An image with a display order of zero will be the primary image for a product')), ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')), ('product', models.ForeignKey(verbose_name='Product', related_name='images', to='catalogue.Product')), ], options={ 'ordering': ['display_order'], 'verbose_name_plural': 'Product images', 'verbose_name': 'Product image', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='ProductRecommendation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ranking', 
models.PositiveSmallIntegerField(default=0, verbose_name='Ranking', help_text='Determines order of the products. A product with a higher value will appear before one with a lower ranking.')), ('primary', models.ForeignKey(verbose_name='Primary product', related_name='primary_recommendations', to='catalogue.Product')), ('recommendation', models.ForeignKey(verbose_name='Recommended product', to='catalogue.Product')), ], options={ 'ordering': ['primary', '-ranking'], 'verbose_name_plural': 'Product recomendations', 'verbose_name': 'Product recommendation', 'abstract': False, }, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='productrecommendation', unique_together=set([('primary', 'recommendation')]), ), migrations.AlterUniqueTogether( name='productimage', unique_together=set([('product', 'display_order')]), ), migrations.AlterUniqueTogether( name='productcategory', unique_together=set([('product', 'category')]), ), migrations.AlterUniqueTogether( name='productattributevalue', unique_together=set([('attribute', 'product')]), ), migrations.AddField( model_name='productattribute', name='product_class', field=models.ForeignKey(null=True, verbose_name='Product type', related_name='attributes', to='catalogue.ProductClass', blank=True), preserve_default=True, ), migrations.AddField( model_name='product', name='attributes', field=models.ManyToManyField(verbose_name='Attributes', help_text='A product attribute is something that this product may have, such as a size, as specified by its class', to='catalogue.ProductAttribute', through='catalogue.ProductAttributeValue'), preserve_default=True, ), migrations.AddField( model_name='product', name='categories', field=models.ManyToManyField(through='catalogue.ProductCategory', verbose_name='Categories', to='catalogue.Category'), preserve_default=True, ), migrations.AddField( model_name='product', name='parent', field=models.ForeignKey(null=True, verbose_name='Parent product', related_name='children', help_text="Only 
choose a parent product if you're creating a child product. For example if this is a size 4 of a particular t-shirt. Leave blank if this is a stand-alone product (i.e. there is only one version of this product).", to='catalogue.Product', blank=True), preserve_default=True, ), migrations.AddField( model_name='product', name='product_class', field=models.ForeignKey(verbose_name='Product type', on_delete=django.db.models.deletion.PROTECT, related_name='products', help_text='Choose what type of product this is', to='catalogue.ProductClass', null=True), preserve_default=True, ), migrations.AddField( model_name='product', name='product_options', field=models.ManyToManyField(verbose_name='Product options', help_text="Options are values that can be associated with a item when it is added to a customer's basket. This could be something like a personalised message to be printed on a T-shirt.", to='catalogue.Option', blank=True), preserve_default=True, ), migrations.AddField( model_name='product', name='recommended_products', field=models.ManyToManyField(verbose_name='Recommended products', help_text='These are products that are recommended to accompany the main product.', to='catalogue.Product', through='catalogue.ProductRecommendation', blank=True), preserve_default=True, ), migrations.AddField( model_name='attributeoption', name='group', field=models.ForeignKey(verbose_name='Group', related_name='options', to='catalogue.AttributeOptionGroup'), preserve_default=True, ), ]
ibm-watson-iot/device-kodi
refs/heads/master
service.iotf/lib/requests/packages/urllib3/contrib/pyopenssl.py
488
'''SSL with SNI_-support for Python 2. Follow these instructions if you would like to verify SSL certificates in Python 2. Note, the default libraries do *not* do certificate checking; you need to do additional work to validate certificates yourself. This needs the following packages installed: * pyOpenSSL (tested with 0.13) * ndg-httpsclient (tested with 0.3.2) * pyasn1 (tested with 0.1.6) You can install them with the following command: pip install pyopenssl ndg-httpsclient pyasn1 To activate certificate checking, call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code before you begin making HTTP requests. This can be done in a ``sitecustomize`` module, or at any other time before your application begins using ``urllib3``, like this:: try: import urllib3.contrib.pyopenssl urllib3.contrib.pyopenssl.inject_into_urllib3() except ImportError: pass Now you can use :mod:`urllib3` as you normally would, and it will support SNI when the required modules are installed. Activating this module also has the positive side effect of disabling SSL/TLS compression in Python 2 (see `CRIME attack`_). If you want to configure the default list of supported cipher suites, you can set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. Module Variables ---------------- :var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites. .. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication .. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) ''' try: from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName except SyntaxError as e: raise ImportError(e) import OpenSSL.SSL from pyasn1.codec.der import decoder as der_decoder from pyasn1.type import univ, constraint from socket import _fileobject, timeout import ssl import select from .. import connection from .. 
import util __all__ = ['inject_into_urllib3', 'extract_from_urllib3'] # SNI only *really* works if we can read the subjectAltName of certificates. HAS_SNI = SUBJ_ALT_NAME_SUPPORT # Map from urllib3 to PyOpenSSL compatible parameter-values. _openssl_versions = { ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, } try: _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) except AttributeError: pass _openssl_verify = { ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS orig_util_HAS_SNI = util.HAS_SNI orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket def inject_into_urllib3(): 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' connection.ssl_wrap_socket = ssl_wrap_socket util.HAS_SNI = HAS_SNI def extract_from_urllib3(): 'Undo monkey-patching by :func:`inject_into_urllib3`.' connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket util.HAS_SNI = orig_util_HAS_SNI ### Note: This is a slightly bug-fixed version of same from ndg-httpsclient. class SubjectAltName(BaseSubjectAltName): '''ASN.1 implementation for subjectAltNames support''' # There is no limit to how many SAN certificates a certificate may have, # however this needs to have some limit so we'll set an arbitrarily high # limit. sizeSpec = univ.SequenceOf.sizeSpec + \ constraint.ValueSizeConstraint(1, 1024) ### Note: This is a slightly bug-fixed version of same from ndg-httpsclient. 
def get_subj_alt_name(peer_cert): # Search through extensions dns_name = [] if not SUBJ_ALT_NAME_SUPPORT: return dns_name general_names = SubjectAltName() for i in range(peer_cert.get_extension_count()): ext = peer_cert.get_extension(i) ext_name = ext.get_short_name() if ext_name != 'subjectAltName': continue # PyOpenSSL returns extension data in ASN.1 encoded form ext_dat = ext.get_data() decoded_dat = der_decoder.decode(ext_dat, asn1Spec=general_names) for name in decoded_dat: if not isinstance(name, SubjectAltName): continue for entry in range(len(name)): component = name.getComponentByPosition(entry) if component.getName() != 'dNSName': continue dns_name.append(str(component.getComponent())) return dns_name class WrappedSocket(object): '''API-compatibility wrapper for Python OpenSSL's Connection-class. Note: _makefile_refs, _drop() and _reuse() are needed for the garbage collector of pypy. ''' def __init__(self, connection, socket, suppress_ragged_eofs=True): self.connection = connection self.socket = socket self.suppress_ragged_eofs = suppress_ragged_eofs self._makefile_refs = 0 def fileno(self): return self.socket.fileno() def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) def recv(self, *args, **kwargs): try: data = self.connection.recv(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): return b'' else: raise except OpenSSL.SSL.ZeroReturnError as e: if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: return b'' else: raise except OpenSSL.SSL.WantReadError: rd, wd, ed = select.select( [self.socket], [], [], self.socket.gettimeout()) if not rd: raise timeout('The read operation timed out') else: return self.recv(*args, **kwargs) else: return data def settimeout(self, timeout): return self.socket.settimeout(timeout) def _send_until_done(self, data): while True: try: return self.connection.send(data) except 
OpenSSL.SSL.WantWriteError: _, wlist, _ = select.select([], [self.socket], [], self.socket.gettimeout()) if not wlist: raise timeout() continue def sendall(self, data): while len(data): sent = self._send_until_done(data) data = data[sent:] def close(self): if self._makefile_refs < 1: return self.connection.shutdown() else: self._makefile_refs -= 1 def getpeercert(self, binary_form=False): x509 = self.connection.get_peer_certificate() if not x509: return x509 if binary_form: return OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_ASN1, x509) return { 'subject': ( (('commonName', x509.get_subject().CN),), ), 'subjectAltName': [ ('DNS', value) for value in get_subj_alt_name(x509) ] } def _reuse(self): self._makefile_refs += 1 def _drop(self): if self._makefile_refs < 1: self.close() else: self._makefile_refs -= 1 def _verify_callback(cnx, x509, err_no, err_depth, return_code): return err_no == 0 def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None): ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version]) if certfile: keyfile = keyfile or certfile # Match behaviour of the normal python ssl library ctx.use_certificate_file(certfile) if keyfile: ctx.use_privatekey_file(keyfile) if cert_reqs != ssl.CERT_NONE: ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback) if ca_certs: try: ctx.load_verify_locations(ca_certs, None) except OpenSSL.SSL.Error as e: raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e) else: ctx.set_default_verify_paths() # Disable TLS compression to migitate CRIME attack (issue #309) OP_NO_COMPRESSION = 0x20000 ctx.set_options(OP_NO_COMPRESSION) # Set list of supported ciphersuites. 
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST) cnx = OpenSSL.SSL.Connection(ctx, sock) cnx.set_tlsext_host_name(server_hostname) cnx.set_connect_state() while True: try: cnx.do_handshake() except OpenSSL.SSL.WantReadError: rd, _, _ = select.select([sock], [], [], sock.gettimeout()) if not rd: raise timeout('select timed out') continue except OpenSSL.SSL.Error as e: raise ssl.SSLError('bad handshake', e) break return WrappedSocket(cnx, sock)
bcl/pykickstart
refs/heads/master
pykickstart/handlers/f10.py
3
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
__all__ = ["F10Handler"]

from pykickstart import commands
from pykickstart.base import BaseHandler
from pykickstart.version import F10

class F10Handler(BaseHandler):
    """Kickstart syntax handler for Fedora 10.

    Maps each kickstart command name to the command class implementing the
    F10-era syntax, and each data object name to its data class.  Commands
    whose syntax did not change in F10 reuse the class from the release that
    last changed them (e.g. FC3/FC6/F8/F9 classes below).
    """
    # Version constant consumed by BaseHandler to identify this syntax level.
    version = F10

    # Command name -> command class.  Aliases ("auth"/"authconfig",
    # "halt"/"poweroff"/"reboot"/"shutdown", "part"/"partition",
    # "cmdline"/"graphical"/"text") deliberately share one class.
    commandMap = {
        "auth": commands.authconfig.FC3_Authconfig,
        "authconfig": commands.authconfig.FC3_Authconfig,
        "autopart": commands.autopart.F9_AutoPart,
        "autostep": commands.autostep.FC3_AutoStep,
        "bootloader": commands.bootloader.F8_Bootloader,
        "cdrom": commands.cdrom.FC3_Cdrom,
        "clearpart": commands.clearpart.FC3_ClearPart,
        "cmdline": commands.displaymode.FC3_DisplayMode,
        "device": commands.device.F8_Device,
        "deviceprobe": commands.deviceprobe.FC3_DeviceProbe,
        "dmraid": commands.dmraid.FC6_DmRaid,
        "driverdisk": commands.driverdisk.FC4_DriverDisk,
        "firewall": commands.firewall.F10_Firewall,
        "firstboot": commands.firstboot.FC3_Firstboot,
        "graphical": commands.displaymode.FC3_DisplayMode,
        "halt": commands.reboot.FC6_Reboot,
        "harddrive": commands.harddrive.FC3_HardDrive,
        "ignoredisk": commands.ignoredisk.F8_IgnoreDisk,
        "install": commands.upgrade.FC3_Upgrade,
        "interactive": commands.interactive.FC3_Interactive,
        "iscsi": commands.iscsi.F10_Iscsi,
        "iscsiname": commands.iscsiname.FC6_IscsiName,
        "keyboard": commands.keyboard.FC3_Keyboard,
        "lang": commands.lang.FC3_Lang,
        "logging": commands.logging.FC6_Logging,
        "logvol": commands.logvol.F9_LogVol,
        "mediacheck": commands.mediacheck.FC4_MediaCheck,
        "method": commands.method.FC6_Method,
        "monitor": commands.monitor.F10_Monitor,
        "multipath": commands.multipath.FC6_MultiPath,
        "network": commands.network.F9_Network,
        "nfs": commands.nfs.FC6_NFS,
        "part": commands.partition.F9_Partition,
        "partition": commands.partition.F9_Partition,
        "poweroff": commands.reboot.FC6_Reboot,
        "raid": commands.raid.F9_Raid,
        "reboot": commands.reboot.FC6_Reboot,
        "repo": commands.repo.F8_Repo,
        "rescue": commands.rescue.F10_Rescue,
        "rootpw": commands.rootpw.F8_RootPw,
        "selinux": commands.selinux.FC3_SELinux,
        "services": commands.services.FC6_Services,
        "shutdown": commands.reboot.FC6_Reboot,
        "skipx": commands.skipx.FC3_SkipX,
        "text": commands.displaymode.FC3_DisplayMode,
        "timezone": commands.timezone.FC6_Timezone,
        "updates": commands.updates.F7_Updates,
        "upgrade": commands.upgrade.FC3_Upgrade,
        "url": commands.url.FC3_Url,
        "user": commands.user.F8_User,
        "vnc": commands.vnc.F9_Vnc,
        "volgroup": commands.volgroup.FC3_VolGroup,
        "xconfig": commands.xconfig.F10_XConfig,
        "zerombr": commands.zerombr.F9_ZeroMbr,
        "zfcp": commands.zfcp.FC3_ZFCP,
    }

    # Data object name -> data class used by the commands above for
    # per-instance records (e.g. one NetworkData per "network" line).
    dataMap = {
        "DriverDiskData": commands.driverdisk.FC4_DriverDiskData,
        "DeviceData": commands.device.F8_DeviceData,
        "DmRaidData": commands.dmraid.FC6_DmRaidData,
        "IscsiData": commands.iscsi.F10_IscsiData,
        "LogVolData": commands.logvol.F9_LogVolData,
        "MultiPathData": commands.multipath.FC6_MultiPathData,
        "NetworkData": commands.network.F8_NetworkData,
        "PartData": commands.partition.F9_PartData,
        "RaidData": commands.raid.F9_RaidData,
        "RepoData": commands.repo.F8_RepoData,
        "UserData": commands.user.F8_UserData,
        "VolGroupData": commands.volgroup.FC3_VolGroupData,
        "ZFCPData": commands.zfcp.FC3_ZFCPData,
    }
google-research/google-research
refs/heads/master
neural_guided_symbolic_regression/mcts/policies.py
1
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates actions transform a state to new states.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from neural_guided_symbolic_regression.mcts import states class PolicyBase(object): """Base class of policy. Subclasses should define the following method: * get_new_states_probs """ def get_new_states_probs(self, state): """Gets new states and probabilities by applying actions on input state. Args: state: An object in mcts.states.StateBase. Contains all the information of a state. Returns: new_states: A list of next states. Each state is a result from apply an action in the instance attribute actions to the input state. action_probs: A float numpy array with shape [num_actions,]. The probability of each action in the class attribute actions. """ raise NotImplementedError('Must be implemented by subclass.') class ProductionRuleAppendPolicy(PolicyBase): """Appends a valid production rule on existing list of production rules. An new state is generated by appending a production rule in context-free grammar to the production rule sequence in the current state. Thus, in principle, the number of new states for any state equals to the number of unique production rules in the context-free grammar. However, not all the production rule is valid to append, so some new states are forbidden. 
Inspired from the encoding and decoding methods in "Grammar Variational Autoencoder" (https://arxiv.org/abs/1703.01925), the production rule sequence is the preorder traversal of the parsing tree of expression. For example, a parsing tree of expression 'a + T' can be S | S '+' T | T | 'a' The preorder traversal of the above parsing tree is S -> S '+' T S -> T T -> 'a' Assuming the grammar is S -> S '+' T S -> S '-' T S -> S '*' T S -> S '/' T S -> T T -> 'a' T -> 'b' Among all the 7 grammar production rules, the only allowed production rules for current state 'a + T' are T -> 'a' and T -> 'b', because the next production rule must start with left hand side symbol T according to the preorder traversal. Thus, the prior probabilities of the first 5 production rules will be nan. """ def __init__(self, grammar): """Initializer. Args: grammar: nltk.grammar.CFG object for context-free grammar. """ self._grammar = grammar def get_new_states_probs(self, state): """Gets new state from current state by appending a valid production rule. Args: state: A mcts.states.ProductionRulesState object. Contains a list of nltk.grammar.Production objects in attribute production_rules_sequence. Returns: new_states: A list of next states. Each state is a result from apply an action in the instance attribute actions to the input state. action_probs: A float numpy array with shape [num_actions,]. The probability of each action in the class attribute actions. Raises: TypeError: If input state is not states.ProductionRulesState object. """ if not isinstance(state, states.ProductionRulesState): raise TypeError('Input state shoud be an instance of ' 'states.ProductionRulesState but got %s' % type(state)) new_states = [] action_probs = [] for production_rule in self._grammar.productions(): if state.is_valid_to_append(production_rule): new_state = state.copy() new_state.append_production_rule(production_rule) new_states.append(new_state) action_probs.append(1.) 
else: new_states.append(None) action_probs.append(np.nan) action_probs = np.asarray(action_probs) action_probs /= np.nansum(action_probs) return new_states, action_probs
rrooij/youtube-dl
refs/heads/master
youtube_dl/extractor/xminus.py
87
# coding: utf-8
from __future__ import unicode_literals

import re
import time

from .common import InfoExtractor
from ..compat import (
    compat_ord,
)
from ..utils import (
    int_or_none,
    parse_duration,
)


class XMinusIE(InfoExtractor):
    """Extractor for x-minus.org backing-track ("minus") pages.

    Scrapes artist, title, duration, bitrate/filesize, view count and lyrics
    from the track page, and rebuilds the direct download URL from the
    page's anti-hotlinking token.
    """
    _VALID_URL = r'https?://(?:www\.)?x-minus\.org/track/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html',
        'md5': '401a15f2d2dcf6d592cb95528d72a2a8',
        'info_dict': {
            'id': '4542',
            'ext': 'mp3',
            'title': 'Леонид Агутин-Песенка шофёра',
            'duration': 156,
            'tbr': 320,
            'filesize_approx': 5900000,
            'view_count': int,
            'description': 'md5:03238c5b663810bc79cf42ef3c03e371',
        }
    }

    def _real_extract(self, url):
        # Resolve the numeric track id from the URL and fetch the page HTML.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Title is built as "<artist>-<song title>"; both parts are scraped
        # from separate elements on the page.
        artist = self._html_search_regex(
            r'<a[^>]+href="/artist/\d+">([^<]+)</a>', webpage, 'artist')
        title = artist + '-' + self._html_search_regex(
            r'<span[^>]+class="minustrack-full-title(?:\s+[^"]+)?"[^>]*>([^<]+)',
            webpage, 'title')
        duration = parse_duration(self._html_search_regex(
            r'<span[^>]+class="player-duration(?:\s+[^"]+)?"[^>]*>([^<]+)',
            webpage, 'duration', fatal=False))
        # Bitrate and approximate size appear in one Russian-labelled div,
        # e.g. "320 кбит/c 5.9 мб" (kbit/s and MB).
        mobj = re.search(
            r'<div[^>]+class="dw-info(?:\s+[^"]+)?"[^>]*>(?P<tbr>\d+)\s*кбит/c\s+(?P<filesize>[0-9.]+)\s*мб</div>',
            webpage)
        tbr = filesize_approx = None
        if mobj:
            filesize_approx = float(mobj.group('filesize')) * 1000000
            tbr = float(mobj.group('tbr'))
        view_count = int_or_none(self._html_search_regex(
            r'<span><[^>]+class="icon-chart-bar".*?>(\d+)</span>',
            webpage, 'view count', fatal=False))
        description = self._html_search_regex(
            r'(?s)<pre[^>]+id="lyrics-original"[^>]*>(.*?)</pre>',
            webpage, 'song lyrics', fatal=False)
        if description:
            # Normalize bare-CR line breaks in the scraped lyrics to '\n'.
            description = re.sub(' *\r *', '\n', description)

        # Rebuild the site's download token: the per-page data-k value is
        # summed character-by-character and combined with the track id and
        # the current hour (presumably mirroring the site's JS — verify
        # against the page script if downloads start failing).
        k = self._search_regex(
            r'<div[^>]+id="player-bottom"[^>]+data-k="([^"]+)">', webpage,
            'encoded data')
        h = time.time() / 3600
        a = sum(map(int, [compat_ord(c) for c in k])) + int(video_id) + h
        video_url = 'http://x-minus.me/dl/minus?id=%s&tkn2=%df%d' % (video_id, a, h)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            # The extension is unknown until actual downloading
            'ext': 'mp3',
            'duration': duration,
            'filesize_approx': filesize_approx,
            'tbr': tbr,
            'view_count': view_count,
            'description': description,
        }
gmr/email-normalize
refs/heads/master
tests/test_normalizer.py
1
import asyncio import functools import logging import operator import os import time import unittest import uuid import aiodns from asynctest import mock import email_normalize LOGGER = logging.getLogger(__name__) def async_test(*func): if func: @functools.wraps(func[0]) def wrapper(*args, **kwargs): LOGGER.debug('Starting test with loop %r', args[0]) args[0].loop.run_until_complete(func[0](*args, **kwargs)) LOGGER.debug('Test completed') return wrapper class AsyncTestCase(unittest.TestCase): def setUp(self) -> None: self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) self.loop.set_debug(True) self.timeout = int(os.environ.get('ASYNC_TIMEOUT', '5')) self.timeout_handle = self.loop.call_later( self.timeout, self.on_timeout) self.resolver = aiodns.DNSResolver(loop=self.loop) self.normalizer = email_normalize.Normalizer() self.normalizer._resolver = self.resolver email_normalize.cache = {} def tearDown(self): LOGGER.debug('In AsyncTestCase.tearDown') if not self.timeout_handle.cancelled(): self.timeout_handle.cancel() self.loop.run_until_complete(self.loop.shutdown_asyncgens()) if self.loop.is_running: self.loop.close() super().tearDown() def on_timeout(self): self.loop.stop() raise TimeoutError( 'Test duration exceeded {} seconds'.format(self.timeout)) class NormalizerTestCase(AsyncTestCase): def setUp(self) -> None: super().setUp() if 'gmail.com' in email_normalize.cache: del email_normalize.cache['gmail.com'] @async_test async def test_mx_records(self): result = await self.resolver.query('gmail.com', 'MX') expectation = [] for record in result: expectation.append((record.priority, record.host)) expectation.sort(key=operator.itemgetter(0, 1)) self.assertListEqual( await self.normalizer.mx_records('gmail.com'), expectation) @async_test async def test_cache(self): await self.normalizer.mx_records('gmail.com') await self.normalizer.mx_records('gmail.com') self.assertEqual(email_normalize.cache['gmail.com'].hits, 2) del 
email_normalize.cache['gmail.com'] self.assertNotIn('gmail.com', email_normalize.cache) with self.assertRaises(KeyError): self.assertIsNone(email_normalize.cache['foo']) @async_test async def test_cache_max_size(self): for offset in range(0, self.normalizer.cache_limit): key = 'key-{}'.format(offset) email_normalize.cache[key] = email_normalize.CachedItem([], 60) email_normalize.cache[key].hits = 3 email_normalize.cache[key].last_access = time.monotonic() key1 = 'gmail.com' await self.normalizer.mx_records(key1) self.assertNotIn('key-0', email_normalize.cache) key2 = 'github.com' await self.normalizer.mx_records(key2) self.assertNotIn(key1, email_normalize.cache) self.assertIn(key2, email_normalize.cache) @async_test async def test_cache_expiration(self): await self.normalizer.mx_records('gmail.com') cached_at = email_normalize.cache['gmail.com'].cached_at email_normalize.cache['gmail.com'].ttl = 1 await asyncio.sleep(1) self.assertTrue(email_normalize.cache['gmail.com'].expired) await self.normalizer.mx_records('gmail.com') self.assertGreater( email_normalize.cache['gmail.com'].cached_at, cached_at) @async_test async def test_empty_mx_list(self): with mock.patch.object(self.normalizer, 'mx_records') as mx_records: mx_records.return_value = [] result = await self.normalizer.normalize('foo@bar.com') self.assertEqual(result.normalized_address, 'foo@bar.com') self.assertIsNone(result.mailbox_provider) self.assertListEqual(result.mx_records, []) @async_test async def test_failure_cached(self): key = str(uuid.uuid4()) records = await self.normalizer.mx_records(key) self.assertListEqual(records, []) self.assertIn(key, email_normalize.cache.keys()) @async_test async def test_failure_not_cached(self): email_normalize.cache_failures = False key = str(uuid.uuid4()) records = await self.normalizer.mx_records(key) self.assertListEqual(records, []) email_normalize.cache_failures = True @async_test async def test_weird_mx_list(self): with mock.patch.object(self.normalizer, 
'mx_records') as recs: recs.return_value = [ (1, str(uuid.uuid4())), (10, 'aspmx.l.google.com') ] result = await self.normalizer.normalize('f.o.o+bar@gmail.com') self.assertEqual(result.normalized_address, 'foo@gmail.com') self.assertEqual(result.mailbox_provider, 'Google')
varunkumta/azure-linux-extensions
refs/heads/master
Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_pssunos.py
37
#!/usr/bin/env python # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Sun OS Solaris platform implementation.""" import errno import os import socket import subprocess import sys from collections import namedtuple from psutil import _common from psutil import _psposix from psutil._common import usage_percent, isfile_strict from psutil._compat import PY3 import _psutil_posix import _psutil_sunos as cext __extra__all__ = ["CONN_IDLE", "CONN_BOUND"] PAGE_SIZE = os.sysconf('SC_PAGE_SIZE') CONN_IDLE = "IDLE" CONN_BOUND = "BOUND" PROC_STATUSES = { cext.SSLEEP: _common.STATUS_SLEEPING, cext.SRUN: _common.STATUS_RUNNING, cext.SZOMB: _common.STATUS_ZOMBIE, cext.SSTOP: _common.STATUS_STOPPED, cext.SIDL: _common.STATUS_IDLE, cext.SONPROC: _common.STATUS_RUNNING, # same as run cext.SWAIT: _common.STATUS_WAITING, } TCP_STATUSES = { cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV, cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, cext.TCPS_CLOSED: _common.CONN_CLOSE, cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, cext.TCPS_LISTEN: _common.CONN_LISTEN, cext.TCPS_CLOSING: _common.CONN_CLOSING, cext.PSUTIL_CONN_NONE: _common.CONN_NONE, cext.TCPS_IDLE: CONN_IDLE, # sunos specific cext.TCPS_BOUND: CONN_BOUND, # sunos specific } scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait']) svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free']) pextmem = namedtuple('pextmem', ['rss', 'vms']) pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked']) pmmap_ext = namedtuple( 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) # set later from __init__.py NoSuchProcess = None AccessDenied = None 
TimeoutExpired = None # --- functions disk_io_counters = cext.disk_io_counters net_io_counters = cext.net_io_counters disk_usage = _psposix.disk_usage def virtual_memory(): # we could have done this with kstat, but imho this is good enough total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE # note: there's no difference on Solaris free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE used = total - free percent = usage_percent(used, total, _round=1) return svmem(total, avail, percent, used, free) def swap_memory(): sin, sout = cext.swap_mem() # XXX # we are supposed to get total/free by doing so: # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/ # usr/src/cmd/swap/swap.c # ...nevertheless I can't manage to obtain the same numbers as 'swap' # cmdline utility, so let's parse its output (sigh!) p = subprocess.Popen(['swap', '-l', '-k'], stdout=subprocess.PIPE) stdout, stderr = p.communicate() if PY3: stdout = stdout.decode(sys.stdout.encoding) if p.returncode != 0: raise RuntimeError("'swap -l -k' failed (retcode=%s)" % p.returncode) lines = stdout.strip().split('\n')[1:] if not lines: raise RuntimeError('no swap device(s) configured') total = free = 0 for line in lines: line = line.split() t, f = line[-2:] t = t.replace('K', '') f = f.replace('K', '') total += int(int(t) * 1024) free += int(int(f) * 1024) used = total - free percent = usage_percent(used, total, _round=1) return _common.sswap(total, used, free, percent, sin * PAGE_SIZE, sout * PAGE_SIZE) def pids(): """Returns a list of PIDs currently running on the system.""" return [int(x) for x in os.listdir('/proc') if x.isdigit()] def pid_exists(pid): """Check for the existence of a unix pid.""" return _psposix.pid_exists(pid) def cpu_times(): """Return system-wide CPU times as a named tuple""" ret = cext.per_cpu_times() return scputimes(*[sum(x) for x in zip(*ret)]) def per_cpu_times(): """Return system per-CPU times as a list of named tuples""" ret = cext.per_cpu_times() return [scputimes(*x) for x in 
ret] def cpu_count_logical(): """Return the number of logical CPUs in the system.""" try: return os.sysconf("SC_NPROCESSORS_ONLN") except ValueError: # mimic os.cpu_count() behavior return None def cpu_count_physical(): """Return the number of physical CPUs in the system.""" return cext.cpu_count_phys() def boot_time(): """The system boot time expressed in seconds since the epoch.""" return cext.boot_time() def users(): """Return currently connected users as a list of namedtuples.""" retlist = [] rawlist = cext.users() localhost = (':0.0', ':0') for item in rawlist: user, tty, hostname, tstamp, user_process = item # note: the underlying C function includes entries about # system boot, run level and others. We might want # to use them in the future. if not user_process: continue if hostname in localhost: hostname = 'localhost' nt = _common.suser(user, tty, hostname, tstamp) retlist.append(nt) return retlist def disk_partitions(all=False): """Return system disk partitions.""" # TODO - the filtering logic should be better checked so that # it tries to reflect 'df' as much as possible retlist = [] partitions = cext.disk_partitions() for partition in partitions: device, mountpoint, fstype, opts = partition if device == 'none': device = '' if not all: # Differently from, say, Linux, we don't have a list of # common fs types so the best we can do, AFAIK, is to # filter by filesystem having a total size > 0. if not disk_usage(mountpoint).total: continue ntuple = _common.sdiskpart(device, mountpoint, fstype, opts) retlist.append(ntuple) return retlist def net_connections(kind, _pid=-1): """Return socket connections. If pid == -1 return system-wide connections (as opposed to connections opened by one process only). Only INET sockets are returned (UNIX are not). 
""" cmap = _common.conn_tmap.copy() if _pid == -1: cmap.pop('unix', 0) if kind not in cmap: raise ValueError("invalid %r kind argument; choose between %s" % (kind, ', '.join([repr(x) for x in cmap]))) families, types = _common.conn_tmap[kind] rawlist = cext.net_connections(_pid, families, types) ret = [] for item in rawlist: fd, fam, type_, laddr, raddr, status, pid = item if fam not in families: continue if type_ not in types: continue status = TCP_STATUSES[status] if _pid == -1: nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid) else: nt = _common.pconn(fd, fam, type_, laddr, raddr, status) ret.append(nt) return ret def wrap_exceptions(fun): """Call callable into a try/except clause and translate ENOENT, EACCES and EPERM in NoSuchProcess or AccessDenied exceptions. """ def wrapper(self, *args, **kwargs): try: return fun(self, *args, **kwargs) except EnvironmentError as err: # support for private module import if NoSuchProcess is None or AccessDenied is None: raise # ENOENT (no such file or directory) gets raised on open(). # ESRCH (no such process) can get raised on read() if # process is gone in meantime. if err.errno in (errno.ENOENT, errno.ESRCH): raise NoSuchProcess(self.pid, self._name) if err.errno in (errno.EPERM, errno.EACCES): raise AccessDenied(self.pid, self._name) raise return wrapper class Process(object): """Wrapper class around underlying C implementation.""" __slots__ = ["pid", "_name"] def __init__(self, pid): self.pid = pid self._name = None @wrap_exceptions def name(self): # note: max len == 15 return cext.proc_name_and_args(self.pid)[0] @wrap_exceptions def exe(self): # Will be guess later from cmdline but we want to explicitly # invoke cmdline here in order to get an AccessDenied # exception if the user has not enough privileges. 
self.cmdline() return "" @wrap_exceptions def cmdline(self): return cext.proc_name_and_args(self.pid)[1].split(' ') @wrap_exceptions def create_time(self): return cext.proc_basic_info(self.pid)[3] @wrap_exceptions def num_threads(self): return cext.proc_basic_info(self.pid)[5] @wrap_exceptions def nice_get(self): # For some reason getpriority(3) return ESRCH (no such process) # for certain low-pid processes, no matter what (even as root). # The process actually exists though, as it has a name, # creation time, etc. # The best thing we can do here appears to be raising AD. # Note: tested on Solaris 11; on Open Solaris 5 everything is # fine. try: return _psutil_posix.getpriority(self.pid) except EnvironmentError as err: if err.errno in (errno.ENOENT, errno.ESRCH): if pid_exists(self.pid): raise AccessDenied(self.pid, self._name) raise @wrap_exceptions def nice_set(self, value): if self.pid in (2, 3): # Special case PIDs: internally setpriority(3) return ESRCH # (no such process), no matter what. # The process actually exists though, as it has a name, # creation time, etc. 
raise AccessDenied(self.pid, self._name) return _psutil_posix.setpriority(self.pid, value) @wrap_exceptions def ppid(self): return cext.proc_basic_info(self.pid)[0] @wrap_exceptions def uids(self): real, effective, saved, _, _, _ = cext.proc_cred(self.pid) return _common.puids(real, effective, saved) @wrap_exceptions def gids(self): _, _, _, real, effective, saved = cext.proc_cred(self.pid) return _common.puids(real, effective, saved) @wrap_exceptions def cpu_times(self): user, system = cext.proc_cpu_times(self.pid) return _common.pcputimes(user, system) @wrap_exceptions def terminal(self): hit_enoent = False tty = wrap_exceptions( cext.proc_basic_info(self.pid)[0]) if tty != cext.PRNODEV: for x in (0, 1, 2, 255): try: return os.readlink('/proc/%d/path/%d' % (self.pid, x)) except OSError as err: if err.errno == errno.ENOENT: hit_enoent = True continue raise if hit_enoent: # raise NSP if the process disappeared on us os.stat('/proc/%s' % self.pid) @wrap_exceptions def cwd(self): # /proc/PID/path/cwd may not be resolved by readlink() even if # it exists (ls shows it). If that's the case and the process # is still alive return None (we can return None also on BSD). # Reference: http://goo.gl/55XgO try: return os.readlink("/proc/%s/path/cwd" % self.pid) except OSError as err: if err.errno == errno.ENOENT: os.stat("/proc/%s" % self.pid) return None raise @wrap_exceptions def memory_info(self): ret = cext.proc_basic_info(self.pid) rss, vms = ret[1] * 1024, ret[2] * 1024 return _common.pmem(rss, vms) # it seems Solaris uses rss and vms only memory_info_ex = memory_info @wrap_exceptions def status(self): code = cext.proc_basic_info(self.pid)[6] # XXX is '?' legit? 
(we're not supposed to return it anyway) return PROC_STATUSES.get(code, '?') @wrap_exceptions def threads(self): ret = [] tids = os.listdir('/proc/%d/lwp' % self.pid) hit_enoent = False for tid in tids: tid = int(tid) try: utime, stime = cext.query_process_thread( self.pid, tid) except EnvironmentError as err: # ENOENT == thread gone in meantime if err.errno == errno.ENOENT: hit_enoent = True continue raise else: nt = _common.pthread(tid, utime, stime) ret.append(nt) if hit_enoent: # raise NSP if the process disappeared on us os.stat('/proc/%s' % self.pid) return ret @wrap_exceptions def open_files(self): retlist = [] hit_enoent = False pathdir = '/proc/%d/path' % self.pid for fd in os.listdir('/proc/%d/fd' % self.pid): path = os.path.join(pathdir, fd) if os.path.islink(path): try: file = os.readlink(path) except OSError as err: # ENOENT == file which is gone in the meantime if err.errno == errno.ENOENT: hit_enoent = True continue raise else: if isfile_strict(file): retlist.append(_common.popenfile(file, int(fd))) if hit_enoent: # raise NSP if the process disappeared on us os.stat('/proc/%s' % self.pid) return retlist def _get_unix_sockets(self, pid): """Get UNIX sockets used by process by parsing 'pfiles' output.""" # TODO: rewrite this in C (...but the damn netstat source code # does not include this part! Argh!!) 
cmd = "pfiles %s" % pid p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if PY3: stdout, stderr = [x.decode(sys.stdout.encoding) for x in (stdout, stderr)] if p.returncode != 0: if 'permission denied' in stderr.lower(): raise AccessDenied(self.pid, self._name) if 'no such process' in stderr.lower(): raise NoSuchProcess(self.pid, self._name) raise RuntimeError("%r command error\n%s" % (cmd, stderr)) lines = stdout.split('\n')[2:] for i, line in enumerate(lines): line = line.lstrip() if line.startswith('sockname: AF_UNIX'): path = line.split(' ', 2)[2] type = lines[i - 2].strip() if type == 'SOCK_STREAM': type = socket.SOCK_STREAM elif type == 'SOCK_DGRAM': type = socket.SOCK_DGRAM else: type = -1 yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE) @wrap_exceptions def connections(self, kind='inet'): ret = net_connections(kind, _pid=self.pid) # The underlying C implementation retrieves all OS connections # and filters them by PID. At this point we can't tell whether # an empty list means there were no connections for process or # process is no longer active so we force NSP in case the PID # is no longer there. 
if not ret: os.stat('/proc/%s' % self.pid) # will raise NSP if process is gone # UNIX sockets if kind in ('all', 'unix'): ret.extend([_common.pconn(*conn) for conn in self._get_unix_sockets(self.pid)]) return ret nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked') nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked') @wrap_exceptions def memory_maps(self): def toaddr(start, end): return '%s-%s' % (hex(start)[2:].strip('L'), hex(end)[2:].strip('L')) retlist = [] rawlist = cext.proc_memory_maps(self.pid) hit_enoent = False for item in rawlist: addr, addrsize, perm, name, rss, anon, locked = item addr = toaddr(addr, addrsize) if not name.startswith('['): try: name = os.readlink('/proc/%s/path/%s' % (self.pid, name)) except OSError as err: if err.errno == errno.ENOENT: # sometimes the link may not be resolved by # readlink() even if it exists (ls shows it). # If that's the case we just return the # unresolved link path. # This seems an incosistency with /proc similar # to: http://goo.gl/55XgO name = '/proc/%s/path/%s' % (self.pid, name) hit_enoent = True else: raise retlist.append((addr, perm, name, rss, anon, locked)) if hit_enoent: # raise NSP if the process disappeared on us os.stat('/proc/%s' % self.pid) return retlist @wrap_exceptions def num_fds(self): return len(os.listdir("/proc/%s/fd" % self.pid)) @wrap_exceptions def num_ctx_switches(self): return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid)) @wrap_exceptions def wait(self, timeout=None): try: return _psposix.wait_pid(self.pid, timeout) except _psposix.TimeoutExpired: # support for private module import if TimeoutExpired is None: raise raise TimeoutExpired(timeout, self.pid, self._name)
brychanrobot/orrt-star-cpp
refs/heads/master
scripts/plot_online_rewiring.py
1
from operator import itemgetter import json import numpy as np import matplotlib.pyplot as plt j = json.load(open('scripts/onlineRewiring.json', 'r')) for spot in j: results = spot['results'] radius = spot['radius'] plt.plot([result['time'] for result in results], [result['entropy'] for result in results], label='radius=%.2f' % radius) plt.xlabel('time (s)') plt.ylabel('entropy') plt.legend(loc='lower left') plt.figure() final_entropies = [] radii = [] for spot in j: results = spot['results'] radii.append(spot['radius']) final_entropies.append(results[-1]['entropy']) plt.plot(radii, final_entropies) plt.xlabel('radius') plt.ylabel('entropy') plt.show()
open-synergy/partner-contact
refs/heads/8.0
partner_phone_extension/models/__init__.py
2
# -*- coding: utf-8 -*- # Copyright 2013-2014 Savoir-faire Linux # (<http://www.savoirfairelinux.com>). # Copyright 2017 Eficent Business and IT Consulting Services S.L. # (http://www.eficent.com) # Copyright 2017 Serpent Consulting Services Pvt. Ltd. # (<http://www.serpentcs.com>) # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html). from . import res_partner
andybak/ansible-modules-extras
refs/heads/devel
cloud/webfaction/__init__.py
12133432
mcsosa121/cafa
refs/heads/master
cafaenv/lib/python2.7/site-packages/django/middleware/__init__.py
12133432
Azure/azure-sdk-for-python
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_11_01/_container_service_client.py
1
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from msrest import Deserializer, Serializer if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Optional from azure.core.credentials import TokenCredential from azure.core.pipeline.transport import HttpRequest, HttpResponse from ._configuration import ContainerServiceClientConfiguration from .operations import Operations from .operations import ManagedClustersOperations from .operations import AgentPoolsOperations from . import models class ContainerServiceClient(object): """The Container Service Client. :ivar operations: Operations operations :vartype operations: azure.mgmt.containerservice.v2019_11_01.operations.Operations :ivar managed_clusters: ManagedClustersOperations operations :vartype managed_clusters: azure.mgmt.containerservice.v2019_11_01.operations.ManagedClustersOperations :ivar agent_pools: AgentPoolsOperations operations :vartype agent_pools: azure.mgmt.containerservice.v2019_11_01.operations.AgentPoolsOperations :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. 
:type subscription_id: str :param str base_url: Service URL :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. """ def __init__( self, credential, # type: "TokenCredential" subscription_id, # type: str base_url=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> None if not base_url: base_url = 'https://management.azure.com' self._config = ContainerServiceClientConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._serialize.client_side_validation = False self._deserialize = Deserializer(client_models) self.operations = Operations( self._client, self._config, self._serialize, self._deserialize) self.managed_clusters = ManagedClustersOperations( self._client, self._config, self._serialize, self._deserialize) self.agent_pools = AgentPoolsOperations( self._client, self._config, self._serialize, self._deserialize) def _send_request(self, http_request, **kwargs): # type: (HttpRequest, Any) -> HttpResponse """Runs the network request through the client's chained policies. :param http_request: The network request you want to make. Required. :type http_request: ~azure.core.pipeline.transport.HttpRequest :keyword bool stream: Whether the response payload will be streamed. Defaults to True. :return: The response of your network call. Does not do error handling on your response. 
:rtype: ~azure.core.pipeline.transport.HttpResponse """ path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } http_request.url = self._client.format_url(http_request.url, **path_format_arguments) stream = kwargs.pop("stream", True) pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) return pipeline_response.http_response def close(self): # type: () -> None self._client.close() def __enter__(self): # type: () -> ContainerServiceClient self._client.__enter__() return self def __exit__(self, *exc_details): # type: (Any) -> None self._client.__exit__(*exc_details)
ChenJunor/hue
refs/heads/master
desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/layer1.py
135
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import boto from boto.compat import json from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo from boto.exception import JSONResponseError from boto.ec2containerservice import exceptions class EC2ContainerServiceConnection(AWSQueryConnection): """ Amazon EC2 Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes it easy to run, stop, and manage Docker containers on a cluster of Amazon EC2 instances. Amazon ECS lets you launch and stop container-enabled applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features like security groups, Amazon EBS volumes, and IAM roles. 
You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon EC2 Container Service eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure. """ APIVersion = "2014-11-13" DefaultRegionName = "us-east-1" DefaultRegionEndpoint = "ecs.us-east-1.amazonaws.com" ResponseError = JSONResponseError _faults = { "ServerException": exceptions.ServerException, "ClientException": exceptions.ClientException, } def __init__(self, **kwargs): region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) if 'host' not in kwargs or kwargs['host'] is None: kwargs['host'] = region.endpoint super(EC2ContainerServiceConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): return ['hmac-v4'] def create_cluster(self, cluster_name=None): """ Creates a new Amazon ECS cluster. By default, your account will receive a `default` cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the `CreateCluster` action. During the preview, each account is limited to two clusters. :type cluster_name: string :param cluster_name: The name of your cluster. If you do not specify a name for your cluster, you will create a cluster named `default`. """ params = {} if cluster_name is not None: params['clusterName'] = cluster_name return self._make_request( action='CreateCluster', verb='POST', path='/', params=params) def delete_cluster(self, cluster): """ Deletes the specified cluster. You must deregister all container instances from this cluster before you may delete it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance. 
:type cluster: string :param cluster: The cluster you want to delete. """ params = {'cluster': cluster, } return self._make_request( action='DeleteCluster', verb='POST', path='/', params=params) def deregister_container_instance(self, container_instance, cluster=None, force=None): """ Deregisters an Amazon ECS container instance from the specified cluster. This instance will no longer be available to run tasks. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance you want to deregister. If you do not specify a cluster, the default cluster is assumed. :type container_instance: string :param container_instance: The container instance UUID or full Amazon Resource Name (ARN) of the container instance you want to deregister. The ARN contains the `arn:aws:ecs` namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the `container-instance` namespace, and then the container instance UUID. For example, arn:aws:ecs: region : aws_account_id :container-instance/ container_instance_UUID . :type force: boolean :param force: Force the deregistration of the container instance. You can use the `force` parameter if you have several tasks running on a container instance and you don't want to run `StopTask` for each task before deregistering the container instance. """ params = {'containerInstance': container_instance, } if cluster is not None: params['cluster'] = cluster if force is not None: params['force'] = str( force).lower() return self._make_request( action='DeregisterContainerInstance', verb='POST', path='/', params=params) def deregister_task_definition(self, task_definition): """ Deregisters the specified task definition. You will no longer be able to run tasks from this definition after deregistration. 
:type task_definition: string :param task_definition: The `family` and `revision` ( `family:revision`) or full Amazon Resource Name (ARN) of the task definition that you want to deregister. """ params = {'taskDefinition': task_definition, } return self._make_request( action='DeregisterTaskDefinition', verb='POST', path='/', params=params) def describe_clusters(self, clusters=None): """ Describes one or more of your clusters. :type clusters: list :param clusters: A space-separated list of cluster names or full cluster Amazon Resource Name (ARN) entries. If you do not specify a cluster, the default cluster is assumed. """ params = {} if clusters is not None: self.build_list_params(params, clusters, 'clusters.member') return self._make_request( action='DescribeClusters', verb='POST', path='/', params=params) def describe_container_instances(self, container_instances, cluster=None): """ Describes Amazon EC2 Container Service container instances. Returns metadata about registered and remaining resources on each container instance requested. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances you want to describe. If you do not specify a cluster, the default cluster is assumed. :type container_instances: list :param container_instances: A space-separated list of container instance UUIDs or full Amazon Resource Name (ARN) entries. """ params = {} self.build_list_params(params, container_instances, 'containerInstances.member') if cluster is not None: params['cluster'] = cluster return self._make_request( action='DescribeContainerInstances', verb='POST', path='/', params=params) def describe_task_definition(self, task_definition): """ Describes a task definition. :type task_definition: string :param task_definition: The `family` and `revision` ( `family:revision`) or full Amazon Resource Name (ARN) of the task definition that you want to describe. 
""" params = {'taskDefinition': task_definition, } return self._make_request( action='DescribeTaskDefinition', verb='POST', path='/', params=params) def describe_tasks(self, tasks, cluster=None): """ Describes a specified task or tasks. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task you want to describe. If you do not specify a cluster, the default cluster is assumed. :type tasks: list :param tasks: A space-separated list of task UUIDs or full Amazon Resource Name (ARN) entries. """ params = {} self.build_list_params(params, tasks, 'tasks.member') if cluster is not None: params['cluster'] = cluster return self._make_request( action='DescribeTasks', verb='POST', path='/', params=params) def discover_poll_endpoint(self, container_instance=None): """ This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent. Returns an endpoint for the Amazon EC2 Container Service agent to poll for updates. :type container_instance: string :param container_instance: The container instance UUID or full Amazon Resource Name (ARN) of the container instance. The ARN contains the `arn:aws:ecs` namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the `container-instance` namespace, and then the container instance UUID. For example, arn:aws:ecs: region : aws_account_id :container- instance/ container_instance_UUID . """ params = {} if container_instance is not None: params['containerInstance'] = container_instance return self._make_request( action='DiscoverPollEndpoint', verb='POST', path='/', params=params) def list_clusters(self, next_token=None, max_results=None): """ Returns a list of existing clusters. 
:type next_token: string :param next_token: The `nextToken` value returned from a previous paginated `ListClusters` request where `maxResults` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the `nextToken` value. This value is `null` when there are no more results to return. :type max_results: integer :param max_results: The maximum number of cluster results returned by `ListClusters` in paginated output. When this parameter is used, `ListClusters` only returns `maxResults` results in a single page along with a `nextToken` response element. The remaining results of the initial request can be seen by sending another `ListClusters` request with the returned `nextToken` value. This value can be between 1 and 100. If this parameter is not used, then `ListClusters` returns up to 100 results and a `nextToken` value if applicable. """ params = {} if next_token is not None: params['nextToken'] = next_token if max_results is not None: params['maxResults'] = max_results return self._make_request( action='ListClusters', verb='POST', path='/', params=params) def list_container_instances(self, cluster=None, next_token=None, max_results=None): """ Returns a list of container instances in a specified cluster. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances you want to list. If you do not specify a cluster, the default cluster is assumed.. :type next_token: string :param next_token: The `nextToken` value returned from a previous paginated `ListContainerInstances` request where `maxResults` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the `nextToken` value. This value is `null` when there are no more results to return. 
:type max_results: integer :param max_results: The maximum number of container instance results returned by `ListContainerInstances` in paginated output. When this parameter is used, `ListContainerInstances` only returns `maxResults` results in a single page along with a `nextToken` response element. The remaining results of the initial request can be seen by sending another `ListContainerInstances` request with the returned `nextToken` value. This value can be between 1 and 100. If this parameter is not used, then `ListContainerInstances` returns up to 100 results and a `nextToken` value if applicable. """ params = {} if cluster is not None: params['cluster'] = cluster if next_token is not None: params['nextToken'] = next_token if max_results is not None: params['maxResults'] = max_results return self._make_request( action='ListContainerInstances', verb='POST', path='/', params=params) def list_task_definitions(self, family_prefix=None, next_token=None, max_results=None): """ Returns a list of task definitions that are registered to your account. You can filter the results by family name with the `familyPrefix` parameter. :type family_prefix: string :param family_prefix: The name of the family that you want to filter the `ListTaskDefinitions` results with. Specifying a `familyPrefix` will limit the listed task definitions to definitions that belong to that family. :type next_token: string :param next_token: The `nextToken` value returned from a previous paginated `ListTaskDefinitions` request where `maxResults` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the `nextToken` value. This value is `null` when there are no more results to return. :type max_results: integer :param max_results: The maximum number of task definition results returned by `ListTaskDefinitions` in paginated output. 
When this parameter is used, `ListTaskDefinitions` only returns `maxResults` results in a single page along with a `nextToken` response element. The remaining results of the initial request can be seen by sending another `ListTaskDefinitions` request with the returned `nextToken` value. This value can be between 1 and 100. If this parameter is not used, then `ListTaskDefinitions` returns up to 100 results and a `nextToken` value if applicable. """ params = {} if family_prefix is not None: params['familyPrefix'] = family_prefix if next_token is not None: params['nextToken'] = next_token if max_results is not None: params['maxResults'] = max_results return self._make_request( action='ListTaskDefinitions', verb='POST', path='/', params=params) def list_tasks(self, cluster=None, container_instance=None, family=None, next_token=None, max_results=None): """ Returns a list of tasks for a specified cluster. You can filter the results by family name or by a particular container instance with the `family` and `containerInstance` parameters. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the tasks you want to list. If you do not specify a cluster, the default cluster is assumed.. :type container_instance: string :param container_instance: The container instance UUID or full Amazon Resource Name (ARN) of the container instance that you want to filter the `ListTasks` results with. Specifying a `containerInstance` will limit the results to tasks that belong to that container instance. :type family: string :param family: The name of the family that you want to filter the `ListTasks` results with. Specifying a `family` will limit the results to tasks that belong to that family. :type next_token: string :param next_token: The `nextToken` value returned from a previous paginated `ListTasks` request where `maxResults` was used and the results exceeded the value of that parameter. 
Pagination continues from the end of the previous results that returned the `nextToken` value. This value is `null` when there are no more results to return. :type max_results: integer :param max_results: The maximum number of task results returned by `ListTasks` in paginated output. When this parameter is used, `ListTasks` only returns `maxResults` results in a single page along with a `nextToken` response element. The remaining results of the initial request can be seen by sending another `ListTasks` request with the returned `nextToken` value. This value can be between 1 and 100. If this parameter is not used, then `ListTasks` returns up to 100 results and a `nextToken` value if applicable. """ params = {} if cluster is not None: params['cluster'] = cluster if container_instance is not None: params['containerInstance'] = container_instance if family is not None: params['family'] = family if next_token is not None: params['nextToken'] = next_token if max_results is not None: params['maxResults'] = max_results return self._make_request( action='ListTasks', verb='POST', path='/', params=params) def register_container_instance(self, cluster=None, instance_identity_document=None, instance_identity_document_signature=None, total_resources=None): """ This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent. Registers an Amazon EC2 instance into the specified cluster. This instance will become available to place containers on. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that you want to register your container instance with. If you do not specify a cluster, the default cluster is assumed.. 
:type instance_identity_document: string :param instance_identity_document: :type instance_identity_document_signature: string :param instance_identity_document_signature: :type total_resources: list :param total_resources: """ params = {} if cluster is not None: params['cluster'] = cluster if instance_identity_document is not None: params['instanceIdentityDocument'] = instance_identity_document if instance_identity_document_signature is not None: params['instanceIdentityDocumentSignature'] = instance_identity_document_signature if total_resources is not None: self.build_complex_list_params( params, total_resources, 'totalResources.member', ('name', 'type', 'doubleValue', 'longValue', 'integerValue', 'stringSetValue')) return self._make_request( action='RegisterContainerInstance', verb='POST', path='/', params=params) def register_task_definition(self, family, container_definitions): """ Registers a new task definition from the supplied `family` and `containerDefinitions`. :type family: string :param family: You can specify a `family` for a task definition, which allows you to track multiple versions of the same task definition. You can think of the `family` as a name for your task definition. :type container_definitions: list :param container_definitions: A list of container definitions in JSON format that describe the different containers that make up your task. """ params = {'family': family, } self.build_complex_list_params( params, container_definitions, 'containerDefinitions.member', ('name', 'image', 'cpu', 'memory', 'links', 'portMappings', 'essential', 'entryPoint', 'command', 'environment')) return self._make_request( action='RegisterTaskDefinition', verb='POST', path='/', params=params) def run_task(self, task_definition, cluster=None, overrides=None, count=None): """ Start a task using random placement and the default Amazon ECS scheduler. If you want to use your own scheduler or place a task on a specific container instance, use `StartTask` instead. 
:type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that you want to run your task on. If you do not specify a cluster, the default cluster is assumed.. :type task_definition: string :param task_definition: The `family` and `revision` ( `family:revision`) or full Amazon Resource Name (ARN) of the task definition that you want to run. :type overrides: dict :param overrides: :type count: integer :param count: The number of instances of the specified task that you would like to place on your cluster. """ params = {'taskDefinition': task_definition, } if cluster is not None: params['cluster'] = cluster if overrides is not None: params['overrides'] = overrides if count is not None: params['count'] = count return self._make_request( action='RunTask', verb='POST', path='/', params=params) def start_task(self, task_definition, container_instances, cluster=None, overrides=None): """ Starts a new task from the specified task definition on the specified container instance or instances. If you want to use the default Amazon ECS scheduler to place your task, use `RunTask` instead. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that you want to start your task on. If you do not specify a cluster, the default cluster is assumed.. :type task_definition: string :param task_definition: The `family` and `revision` ( `family:revision`) or full Amazon Resource Name (ARN) of the task definition that you want to start. :type overrides: dict :param overrides: :type container_instances: list :param container_instances: The container instance UUIDs or full Amazon Resource Name (ARN) entries for the container instances on which you would like to place your task. 
""" params = {'taskDefinition': task_definition, } self.build_list_params(params, container_instances, 'containerInstances.member') if cluster is not None: params['cluster'] = cluster if overrides is not None: params['overrides'] = overrides return self._make_request( action='StartTask', verb='POST', path='/', params=params) def stop_task(self, task, cluster=None): """ Stops a running task. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task you want to stop. If you do not specify a cluster, the default cluster is assumed.. :type task: string :param task: The task UUIDs or full Amazon Resource Name (ARN) entry of the task you would like to stop. """ params = {'task': task, } if cluster is not None: params['cluster'] = cluster return self._make_request( action='StopTask', verb='POST', path='/', params=params) def submit_container_state_change(self, cluster=None, task=None, container_name=None, status=None, exit_code=None, reason=None, network_bindings=None): """ This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent. Sent to acknowledge that a container changed states. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container. :type task: string :param task: The task UUID or full Amazon Resource Name (ARN) of the task that hosts the container. :type container_name: string :param container_name: The name of the container. :type status: string :param status: The status of the state change request. :type exit_code: integer :param exit_code: The exit code returned for the state change request. :type reason: string :param reason: The reason for the state change request. :type network_bindings: list :param network_bindings: The network bindings of the container. 
""" params = {} if cluster is not None: params['cluster'] = cluster if task is not None: params['task'] = task if container_name is not None: params['containerName'] = container_name if status is not None: params['status'] = status if exit_code is not None: params['exitCode'] = exit_code if reason is not None: params['reason'] = reason if network_bindings is not None: self.build_complex_list_params( params, network_bindings, 'networkBindings.member', ('bindIP', 'containerPort', 'hostPort')) return self._make_request( action='SubmitContainerStateChange', verb='POST', path='/', params=params) def submit_task_state_change(self, cluster=None, task=None, status=None, reason=None): """ This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent. Sent to acknowledge that a task changed states. :type cluster: string :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task. :type task: string :param task: The task UUID or full Amazon Resource Name (ARN) of the task in the state change request. :type status: string :param status: The status of the state change request. :type reason: string :param reason: The reason for the state change request. 
""" params = {} if cluster is not None: params['cluster'] = cluster if task is not None: params['task'] = task if status is not None: params['status'] = status if reason is not None: params['reason'] = reason return self._make_request( action='SubmitTaskStateChange', verb='POST', path='/', params=params) def _make_request(self, action, verb, path, params): params['ContentType'] = 'JSON' response = self.make_request(action=action, verb='POST', path='/', params=params) body = response.read().decode('utf-8') boto.log.debug(body) if response.status == 200: return json.loads(body) else: json_body = json.loads(body) fault_name = json_body.get('Error', {}).get('Code', None) exception_class = self._faults.get(fault_name, self.ResponseError) raise exception_class(response.status, response.reason, body=json_body)
ecogroup/p2pool-ecocoin
refs/heads/master
wstools/XMLSchema.py
289
# Copyright (c) 2003, The Regents of the University of California, # through Lawrence Berkeley National Laboratory (subject to receipt of # any required approvals from the U.S. Dept. of Energy). All rights # reserved. # # Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. ident = "$Id$" import types, weakref, sys, warnings from Namespaces import SCHEMA, XMLNS, SOAP, APACHE from Utility import DOM, DOMException, Collection, SplitQName, basejoin from StringIO import StringIO # If we have no threading, this should be a no-op try: from threading import RLock except ImportError: class RLock: def acquire(): pass def release(): pass # # Collections in XMLSchema class # TYPES = 'types' ATTRIBUTE_GROUPS = 'attr_groups' ATTRIBUTES = 'attr_decl' ELEMENTS = 'elements' MODEL_GROUPS = 'model_groups' BUILT_IN_NAMESPACES = [SOAP.ENC,] + SCHEMA.XSD_LIST + [APACHE.AXIS_NS] def GetSchema(component): """convience function for finding the parent XMLSchema instance. """ parent = component while not isinstance(parent, XMLSchema): parent = parent._parent() return parent class SchemaReader: """A SchemaReader creates XMLSchema objects from urls and xml data. """ namespaceToSchema = {} def __init__(self, domReader=None, base_url=None): """domReader -- class must implement DOMAdapterInterface base_url -- base url string """ self.__base_url = base_url self.__readerClass = domReader if not self.__readerClass: self.__readerClass = DOMAdapter self._includes = {} self._imports = {} def __setImports(self, schema): """Add dictionary of imports to schema instance. 
schema -- XMLSchema instance """ for ns,val in schema.imports.items(): if self._imports.has_key(ns): schema.addImportSchema(self._imports[ns]) def __setIncludes(self, schema): """Add dictionary of includes to schema instance. schema -- XMLSchema instance """ for schemaLocation, val in schema.includes.items(): if self._includes.has_key(schemaLocation): schema.addIncludeSchema(schemaLocation, self._imports[schemaLocation]) def addSchemaByLocation(self, location, schema): """provide reader with schema document for a location. """ self._includes[location] = schema def addSchemaByNamespace(self, schema): """provide reader with schema document for a targetNamespace. """ self._imports[schema.targetNamespace] = schema def loadFromNode(self, parent, element): """element -- DOM node or document parent -- WSDLAdapter instance """ reader = self.__readerClass(element) schema = XMLSchema(parent) #HACK to keep a reference schema.wsdl = parent schema.setBaseUrl(self.__base_url) schema.load(reader) return schema def loadFromStream(self, file, url=None): """Return an XMLSchema instance loaded from a file object. file -- file object url -- base location for resolving imports/includes. """ reader = self.__readerClass() reader.loadDocument(file) schema = XMLSchema() if url is not None: schema.setBaseUrl(url) schema.load(reader) self.__setIncludes(schema) self.__setImports(schema) return schema def loadFromString(self, data): """Return an XMLSchema instance loaded from an XML string. data -- XML string """ return self.loadFromStream(StringIO(data)) def loadFromURL(self, url, schema=None): """Return an XMLSchema instance loaded from the given url. url -- URL to dereference schema -- Optional XMLSchema instance. 
""" reader = self.__readerClass() if self.__base_url: url = basejoin(self.__base_url,url) reader.loadFromURL(url) schema = schema or XMLSchema() schema.setBaseUrl(url) schema.load(reader) self.__setIncludes(schema) self.__setImports(schema) return schema def loadFromFile(self, filename): """Return an XMLSchema instance loaded from the given file. filename -- name of file to open """ if self.__base_url: filename = basejoin(self.__base_url,filename) file = open(filename, 'rb') try: schema = self.loadFromStream(file, filename) finally: file.close() return schema class SchemaError(Exception): pass class NoSchemaLocationWarning(Exception): pass ########################### # DOM Utility Adapters ########################## class DOMAdapterInterface: def hasattr(self, attr, ns=None): """return true if node has attribute attr -- attribute to check for ns -- namespace of attribute, by default None """ raise NotImplementedError, 'adapter method not implemented' def getContentList(self, *contents): """returns an ordered list of child nodes *contents -- list of node names to return """ raise NotImplementedError, 'adapter method not implemented' def setAttributeDictionary(self, attributes): """set attribute dictionary """ raise NotImplementedError, 'adapter method not implemented' def getAttributeDictionary(self): """returns a dict of node's attributes """ raise NotImplementedError, 'adapter method not implemented' def getNamespace(self, prefix): """returns namespace referenced by prefix. 
""" raise NotImplementedError, 'adapter method not implemented' def getTagName(self): """returns tagName of node """ raise NotImplementedError, 'adapter method not implemented' def getParentNode(self): """returns parent element in DOMAdapter or None """ raise NotImplementedError, 'adapter method not implemented' def loadDocument(self, file): """load a Document from a file object file -- """ raise NotImplementedError, 'adapter method not implemented' def loadFromURL(self, url): """load a Document from an url url -- URL to dereference """ raise NotImplementedError, 'adapter method not implemented' class DOMAdapter(DOMAdapterInterface): """Adapter for ZSI.Utility.DOM """ def __init__(self, node=None): """Reset all instance variables. element -- DOM document, node, or None """ if hasattr(node, 'documentElement'): self.__node = node.documentElement else: self.__node = node self.__attributes = None def getNode(self): return self.__node def hasattr(self, attr, ns=None): """attr -- attribute ns -- optional namespace, None means unprefixed attribute. 
""" if not self.__attributes: self.setAttributeDictionary() if ns: return self.__attributes.get(ns,{}).has_key(attr) return self.__attributes.has_key(attr) def getContentList(self, *contents): nodes = [] ELEMENT_NODE = self.__node.ELEMENT_NODE for child in DOM.getElements(self.__node, None): if child.nodeType == ELEMENT_NODE and\ SplitQName(child.tagName)[1] in contents: nodes.append(child) return map(self.__class__, nodes) def setAttributeDictionary(self): self.__attributes = {} for v in self.__node._attrs.values(): self.__attributes[v.nodeName] = v.nodeValue def getAttributeDictionary(self): if not self.__attributes: self.setAttributeDictionary() return self.__attributes def getTagName(self): return self.__node.tagName def getParentNode(self): if self.__node.parentNode.nodeType == self.__node.ELEMENT_NODE: return DOMAdapter(self.__node.parentNode) return None def getNamespace(self, prefix): """prefix -- deference namespace prefix in node's context. Ascends parent nodes until found. """ namespace = None if prefix == 'xmlns': namespace = DOM.findDefaultNS(prefix, self.__node) else: try: namespace = DOM.findNamespaceURI(prefix, self.__node) except DOMException, ex: if prefix != 'xml': raise SchemaError, '%s namespace not declared for %s'\ %(prefix, self.__node._get_tagName()) namespace = XMLNS.XML return namespace def loadDocument(self, file): self.__node = DOM.loadDocument(file) if hasattr(self.__node, 'documentElement'): self.__node = self.__node.documentElement def loadFromURL(self, url): self.__node = DOM.loadFromURL(url) if hasattr(self.__node, 'documentElement'): self.__node = self.__node.documentElement class XMLBase: """ These class variables are for string indentation. 
""" tag = None __indent = 0 __rlock = RLock() def __str__(self): XMLBase.__rlock.acquire() XMLBase.__indent += 1 tmp = "<" + str(self.__class__) + '>\n' for k,v in self.__dict__.items(): tmp += "%s* %s = %s\n" %(XMLBase.__indent*' ', k, v) XMLBase.__indent -= 1 XMLBase.__rlock.release() return tmp """Marker Interface: can determine something about an instances properties by using the provided convenience functions. """ class DefinitionMarker: """marker for definitions """ pass class DeclarationMarker: """marker for declarations """ pass class AttributeMarker: """marker for attributes """ pass class AttributeGroupMarker: """marker for attribute groups """ pass class WildCardMarker: """marker for wildcards """ pass class ElementMarker: """marker for wildcards """ pass class ReferenceMarker: """marker for references """ pass class ModelGroupMarker: """marker for model groups """ pass class AllMarker(ModelGroupMarker): """marker for all model group """ pass class ChoiceMarker(ModelGroupMarker): """marker for choice model group """ pass class SequenceMarker(ModelGroupMarker): """marker for sequence model group """ pass class ExtensionMarker: """marker for extensions """ pass class RestrictionMarker: """marker for restrictions """ facets = ['enumeration', 'length', 'maxExclusive', 'maxInclusive',\ 'maxLength', 'minExclusive', 'minInclusive', 'minLength',\ 'pattern', 'fractionDigits', 'totalDigits', 'whiteSpace'] class SimpleMarker: """marker for simple type information """ pass class ListMarker: """marker for simple type list """ pass class UnionMarker: """marker for simple type Union """ pass class ComplexMarker: """marker for complex type information """ pass class LocalMarker: """marker for complex type information """ pass class MarkerInterface: def isDefinition(self): return isinstance(self, DefinitionMarker) def isDeclaration(self): return isinstance(self, DeclarationMarker) def isAttribute(self): return isinstance(self, AttributeMarker) def isAttributeGroup(self): 
return isinstance(self, AttributeGroupMarker) def isElement(self): return isinstance(self, ElementMarker) def isReference(self): return isinstance(self, ReferenceMarker) def isWildCard(self): return isinstance(self, WildCardMarker) def isModelGroup(self): return isinstance(self, ModelGroupMarker) def isAll(self): return isinstance(self, AllMarker) def isChoice(self): return isinstance(self, ChoiceMarker) def isSequence(self): return isinstance(self, SequenceMarker) def isExtension(self): return isinstance(self, ExtensionMarker) def isRestriction(self): return isinstance(self, RestrictionMarker) def isSimple(self): return isinstance(self, SimpleMarker) def isComplex(self): return isinstance(self, ComplexMarker) def isLocal(self): return isinstance(self, LocalMarker) def isList(self): return isinstance(self, ListMarker) def isUnion(self): return isinstance(self, UnionMarker) ########################################################## # Schema Components ######################################################### class XMLSchemaComponent(XMLBase, MarkerInterface): """ class variables: required -- list of required attributes attributes -- dict of default attribute values, including None. Value can be a function for runtime dependencies. contents -- dict of namespace keyed content lists. 'xsd' content of xsd namespace. xmlns_key -- key for declared xmlns namespace. xmlns -- xmlns is special prefix for namespace dictionary xml -- special xml prefix for xml namespace. 
""" required = [] attributes = {} contents = {} xmlns_key = '' xmlns = 'xmlns' xml = 'xml' def __init__(self, parent=None): """parent -- parent instance instance variables: attributes -- dictionary of node's attributes """ self.attributes = None self._parent = parent if self._parent: self._parent = weakref.ref(parent) if not self.__class__ == XMLSchemaComponent\ and not (type(self.__class__.required) == type(XMLSchemaComponent.required)\ and type(self.__class__.attributes) == type(XMLSchemaComponent.attributes)\ and type(self.__class__.contents) == type(XMLSchemaComponent.contents)): raise RuntimeError, 'Bad type for a class variable in %s' %self.__class__ def getItemTrace(self): """Returns a node trace up to the <schema> item. """ item, path, name, ref = self, [], 'name', 'ref' while not isinstance(item,XMLSchema) and not isinstance(item,WSDLToolsAdapter): attr = item.getAttribute(name) if not attr: attr = item.getAttribute(ref) if not attr: path.append('<%s>' %(item.tag)) else: path.append('<%s ref="%s">' %(item.tag, attr)) else: path.append('<%s name="%s">' %(item.tag,attr)) item = item._parent() try: tns = item.getTargetNamespace() except: tns = '' path.append('<%s targetNamespace="%s">' %(item.tag, tns)) path.reverse() return ''.join(path) def getTargetNamespace(self): """return targetNamespace """ parent = self targetNamespace = 'targetNamespace' tns = self.attributes.get(targetNamespace) while not tns and parent and parent._parent is not None: parent = parent._parent() tns = parent.attributes.get(targetNamespace) return tns or '' def getAttributeDeclaration(self, attribute): """attribute -- attribute with a QName value (eg. type). collection -- check types collection in parent Schema instance """ return self.getQNameAttribute(ATTRIBUTES, attribute) def getAttributeGroup(self, attribute): """attribute -- attribute with a QName value (eg. type). 
collection -- check types collection in parent Schema instance """ return self.getQNameAttribute(ATTRIBUTE_GROUPS, attribute) def getTypeDefinition(self, attribute): """attribute -- attribute with a QName value (eg. type). collection -- check types collection in parent Schema instance """ return self.getQNameAttribute(TYPES, attribute) def getElementDeclaration(self, attribute): """attribute -- attribute with a QName value (eg. element). collection -- check elements collection in parent Schema instance. """ return self.getQNameAttribute(ELEMENTS, attribute) def getModelGroup(self, attribute): """attribute -- attribute with a QName value (eg. ref). collection -- check model_group collection in parent Schema instance. """ return self.getQNameAttribute(MODEL_GROUPS, attribute) def getQNameAttribute(self, collection, attribute): """returns object instance representing QName --> (namespace,name), or if does not exist return None. attribute -- an information item attribute, with a QName value. collection -- collection in parent Schema instance to search. """ tdc = self.getAttributeQName(attribute) if not tdc: return obj = self.getSchemaItem(collection, tdc.getTargetNamespace(), tdc.getName()) if obj: return obj # raise SchemaError, 'No schema item "%s" in collection %s' %(tdc, collection) return def getSchemaItem(self, collection, namespace, name): """returns object instance representing namespace, name, or if does not exist return None if built-in, else raise SchemaError. namespace -- namespace item defined in. name -- name of item. collection -- collection in parent Schema instance to search. 
""" parent = GetSchema(self) if parent.targetNamespace == namespace: try: obj = getattr(parent, collection)[name] except KeyError, ex: raise KeyError, 'targetNamespace(%s) collection(%s) has no item(%s)'\ %(namespace, collection, name) return obj if not parent.imports.has_key(namespace): if namespace in BUILT_IN_NAMESPACES: # built-in just return # WARNING: expecting import if "redefine" or add to built-in namespace. return raise SchemaError, 'schema "%s" does not import namespace "%s"' %( parent.targetNamespace, namespace) # Lazy Eval schema = parent.imports[namespace] if not isinstance(schema, XMLSchema): schema = schema.getSchema() if schema is not None: parent.imports[namespace] = schema if schema is None: if namespace in BUILT_IN_NAMESPACES: # built-in just return return raise SchemaError, 'no schema instance for imported namespace (%s).'\ %(namespace) if not isinstance(schema, XMLSchema): raise TypeError, 'expecting XMLSchema instance not "%r"' %schema try: obj = getattr(schema, collection)[name] except KeyError, ex: raise KeyError, 'targetNamespace(%s) collection(%s) has no item(%s)'\ %(namespace, collection, name) return obj def getXMLNS(self, prefix=None): """deference prefix or by default xmlns, returns namespace. 
""" if prefix == XMLSchemaComponent.xml: return XMLNS.XML parent = self ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\ XMLSchemaComponent.xmlns_key) while not ns: parent = parent._parent() ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\ XMLSchemaComponent.xmlns_key) if not ns and isinstance(parent, WSDLToolsAdapter): if prefix is None: return '' raise SchemaError, 'unknown prefix %s' %prefix return ns def getAttribute(self, attribute): """return requested attribute value or None """ if type(attribute) in (list, tuple): if len(attribute) != 2: raise LookupError, 'To access attributes must use name or (namespace,name)' ns_dict = self.attributes.get(attribute[0]) if ns_dict is None: return None return ns_dict.get(attribute[1]) return self.attributes.get(attribute) def getAttributeQName(self, attribute): """return requested attribute value as (namespace,name) or None """ qname = self.getAttribute(attribute) if isinstance(qname, TypeDescriptionComponent) is True: return qname if qname is None: return None prefix,ncname = SplitQName(qname) namespace = self.getXMLNS(prefix) return TypeDescriptionComponent((namespace,ncname)) def getAttributeName(self): """return attribute name or None """ return self.getAttribute('name') def setAttributes(self, node): """Sets up attribute dictionary, checks for required attributes and sets default attribute values. attr is for default attribute values determined at runtime. structure of attributes dictionary ['xmlns'][xmlns_key] -- xmlns namespace ['xmlns'][prefix] -- declared namespace prefix [namespace][prefix] -- attributes declared in a namespace [attribute] -- attributes w/o prefix, default namespaces do not directly apply to attributes, ie Name can't collide with QName. 
""" self.attributes = {XMLSchemaComponent.xmlns:{}} for k,v in node.getAttributeDictionary().items(): prefix,value = SplitQName(k) if value == XMLSchemaComponent.xmlns: self.attributes[value][prefix or XMLSchemaComponent.xmlns_key] = v elif prefix: ns = node.getNamespace(prefix) if not ns: raise SchemaError, 'no namespace for attribute prefix %s'\ %prefix if not self.attributes.has_key(ns): self.attributes[ns] = {} elif self.attributes[ns].has_key(value): raise SchemaError, 'attribute %s declared multiple times in %s'\ %(value, ns) self.attributes[ns][value] = v elif not self.attributes.has_key(value): self.attributes[value] = v else: raise SchemaError, 'attribute %s declared multiple times' %value if not isinstance(self, WSDLToolsAdapter): self.__checkAttributes() self.__setAttributeDefaults() #set QNames for k in ['type', 'element', 'base', 'ref', 'substitutionGroup', 'itemType']: if self.attributes.has_key(k): prefix, value = SplitQName(self.attributes.get(k)) self.attributes[k] = \ TypeDescriptionComponent((self.getXMLNS(prefix), value)) #Union, memberTypes is a whitespace separated list of QNames for k in ['memberTypes']: if self.attributes.has_key(k): qnames = self.attributes[k] self.attributes[k] = [] for qname in qnames.split(): prefix, value = SplitQName(qname) self.attributes['memberTypes'].append(\ TypeDescriptionComponent(\ (self.getXMLNS(prefix), value))) def getContents(self, node): """retrieve xsd contents """ return node.getContentList(*self.__class__.contents['xsd']) def __setAttributeDefaults(self): """Looks for default values for unset attributes. If class variable representing attribute is None, then it must be defined as an instance variable. 
""" for k,v in self.__class__.attributes.items(): if v is not None and self.attributes.has_key(k) is False: if isinstance(v, types.FunctionType): self.attributes[k] = v(self) else: self.attributes[k] = v def __checkAttributes(self): """Checks that required attributes have been defined, attributes w/default cannot be required. Checks all defined attributes are legal, attribute references are not subject to this test. """ for a in self.__class__.required: if not self.attributes.has_key(a): raise SchemaError,\ 'class instance %s, missing required attribute %s'\ %(self.__class__, a) for a,v in self.attributes.items(): # attribute #other, ie. not in empty namespace if type(v) is dict: continue # predefined prefixes xmlns, xml if a in (XMLSchemaComponent.xmlns, XMLNS.XML): continue if (a not in self.__class__.attributes.keys()) and not\ (self.isAttribute() and self.isReference()): raise SchemaError, '%s, unknown attribute(%s,%s)' \ %(self.getItemTrace(), a, self.attributes[a]) class WSDLToolsAdapter(XMLSchemaComponent): """WSDL Adapter to grab the attributes from the wsdl document node. """ attributes = {'name':None, 'targetNamespace':None} tag = 'definitions' def __init__(self, wsdl): XMLSchemaComponent.__init__(self, parent=wsdl) self.setAttributes(DOMAdapter(wsdl.document)) def getImportSchemas(self): """returns WSDLTools.WSDL types Collection """ return self._parent().types class Notation(XMLSchemaComponent): """<notation> parent: schema attributes: id -- ID name -- NCName, Required public -- token, Required system -- anyURI contents: annotation? 
""" required = ['name', 'public'] attributes = {'id':None, 'name':None, 'public':None, 'system':None} contents = {'xsd':('annotation')} tag = 'notation' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) for i in contents: component = SplitQName(i.getTagName())[1] if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) class Annotation(XMLSchemaComponent): """<annotation> parent: all,any,anyAttribute,attribute,attributeGroup,choice,complexContent, complexType,element,extension,field,group,import,include,key,keyref, list,notation,redefine,restriction,schema,selector,simpleContent, simpleType,union,unique attributes: id -- ID contents: (documentation | appinfo)* """ attributes = {'id':None} contents = {'xsd':('documentation', 'appinfo')} tag = 'annotation' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.content = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) content = [] for i in contents: component = SplitQName(i.getTagName())[1] if component == 'documentation': #print_debug('class %s, documentation skipped' %self.__class__, 5) continue elif component == 'appinfo': #print_debug('class %s, appinfo skipped' %self.__class__, 5) continue else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.content = tuple(content) class Documentation(XMLSchemaComponent): """<documentation> parent: annotation attributes: source, anyURI xml:lang, language contents: mixed, any """ attributes = {'source':None, 'xml:lang':None} contents = {'xsd':('mixed', 'any')} tag = 'documentation' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.content = None def fromDom(self, node): self.setAttributes(node) contents = 
self.getContents(node) content = [] for i in contents: component = SplitQName(i.getTagName())[1] if component == 'mixed': #print_debug('class %s, mixed skipped' %self.__class__, 5) continue elif component == 'any': #print_debug('class %s, any skipped' %self.__class__, 5) continue else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.content = tuple(content) class Appinfo(XMLSchemaComponent): """<appinfo> parent: annotation attributes: source, anyURI contents: mixed, any """ attributes = {'source':None, 'anyURI':None} contents = {'xsd':('mixed', 'any')} tag = 'appinfo' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.content = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) content = [] for i in contents: component = SplitQName(i.getTagName())[1] if component == 'mixed': #print_debug('class %s, mixed skipped' %self.__class__, 5) continue elif component == 'any': #print_debug('class %s, any skipped' %self.__class__, 5) continue else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.content = tuple(content) class XMLSchemaFake: # This is temporary, for the benefit of WSDL until the real thing works. def __init__(self, element): self.targetNamespace = DOM.getAttr(element, 'targetNamespace') self.element = element class XMLSchema(XMLSchemaComponent): """A schema is a collection of schema components derived from one or more schema documents, that is, one or more <schema> element information items. It represents the abstract notion of a schema rather than a single schema document (or other representation). 
       <schema>
       parent:
           ROOT
       attributes:
           id -- ID
           version -- token
           xml:lang -- language
           targetNamespace -- anyURI
           attributeFormDefault -- 'qualified' | 'unqualified', 'unqualified'
           elementFormDefault -- 'qualified' | 'unqualified', 'unqualified'
           blockDefault -- '#all' | list of
               ('substitution | 'extension' | 'restriction')
           finalDefault -- '#all' | list of
               ('extension' | 'restriction' | 'list' | 'union')

       contents:
           ((include | import | redefine | annotation)*,
            (attribute, attributeGroup, complexType, element, group,
             notation, simpleType)*, annotation*)*

       attributes -- schema attributes
       imports -- import statements
       includes -- include statements
       redefines --
       types -- global simpleType, complexType definitions
       elements -- global element declarations
       attr_decl -- global attribute declarations
       attr_groups -- attribute Groups
       model_groups -- model Groups
       notations -- global notations
    """
    attributes = {'id':None,
        'version':None,
        'xml:lang':None,
        'targetNamespace':None,
        'attributeFormDefault':'unqualified',
        'elementFormDefault':'unqualified',
        'blockDefault':None,
        'finalDefault':None}
    contents = {'xsd':('include', 'import', 'redefine', 'annotation',
                       'attribute', 'attributeGroup', 'complexType',
                       'element', 'group', 'notation', 'simpleType',
                       'annotation')}
    empty_namespace = ''
    tag = 'schema'

    def __init__(self, parent=None):
        """parent --
           instance variables:
           targetNamespace -- schema's declared targetNamespace, or empty string.
           _imported_schemas -- namespace keyed dict of schema dependencies, if
              a schema is provided instance will not resolve import statement.
           _included_schemas -- schemaLocation keyed dict of component schemas,
              if schema is provided instance will not resolve include statement.
           _base_url -- needed for relative URLs support, only works with URLs
               relative to initial document.
           includes -- collection of include statements
           imports -- collection of import statements
           elements -- collection of global element declarations
           types -- collection of global type definitions
           attr_decl -- collection of global attribute declarations
           attr_groups -- collection of global attribute group definitions
           model_groups -- collection of model group definitions
           notations -- collection of notations
        """
        self.__node = None
        self.targetNamespace = None
        XMLSchemaComponent.__init__(self, parent)
        # Collections are keyed by the relevant attribute of each item:
        # name for most, namespace for imports, schemaLocation for includes.
        f = lambda k: k.attributes['name']
        ns = lambda k: k.attributes['namespace']
        sl = lambda k: k.attributes['schemaLocation']
        self.includes = Collection(self, key=sl)
        self.imports = Collection(self, key=ns)
        self.elements = Collection(self, key=f)
        self.types = Collection(self, key=f)
        self.attr_decl = Collection(self, key=f)
        self.attr_groups = Collection(self, key=f)
        self.model_groups = Collection(self, key=f)
        self.notations = Collection(self, key=f)

        self._imported_schemas = {}
        self._included_schemas = {}
        self._base_url = None

    def getNode(self):
        """
        Interacting with the underlying DOM tree.
        """
        return self.__node

    def addImportSchema(self, schema):
        """for resolving import statements in Schema instance
           schema -- schema instance
           _imported_schemas
        """
        if not isinstance(schema, XMLSchema):
            raise TypeError, 'expecting a Schema instance'
        # An import must target a different namespace than this schema's.
        if schema.targetNamespace != self.targetNamespace:
            self._imported_schemas[schema.targetNamespace] = schema
        else:
            raise SchemaError, 'import schema bad targetNamespace'

    def addIncludeSchema(self, schemaLocation, schema):
        """for resolving include statements in Schema instance
           schemaLocation -- schema location
           schema -- schema instance
           _included_schemas
        """
        if not isinstance(schema, XMLSchema):
            raise TypeError, 'expecting a Schema instance'
        # An include must share this schema's namespace (or be chameleon,
        # i.e. have no targetNamespace of its own).
        if not schema.targetNamespace or\
             schema.targetNamespace == self.targetNamespace:
            self._included_schemas[schemaLocation] = schema
        else:
            raise SchemaError, 'include schema bad targetNamespace'

    def setImportSchemas(self, schema_dict):
        """set the import schema dictionary, which is used to
           reference depedent schemas.
        """
        self._imported_schemas = schema_dict

    def getImportSchemas(self):
        """get the import schema dictionary, which is used to
           reference depedent schemas.
        """
        return self._imported_schemas

    def getSchemaNamespacesToImport(self):
        """returns tuple of namespaces the schema instance has declared
           itself to be depedent upon.
        """
        # NOTE(review): returns include keys (schemaLocations) although the
        # name and docstring speak of imported namespaces -- looks like it
        # should be self.imports.keys(); confirm against callers.
        return tuple(self.includes.keys())

    def setIncludeSchemas(self, schema_dict):
        """set the include schema dictionary, which is keyed with
           schemaLocation (uri).
           This is a means of providing
           schemas to the current schema for content inclusion.
        """
        self._included_schemas = schema_dict

    def getIncludeSchemas(self):
        """get the include schema dictionary, which is keyed with
           schemaLocation (uri).
""" return self._included_schemas def getBaseUrl(self): """get base url, used for normalizing all relative uri's """ return self._base_url def setBaseUrl(self, url): """set base url, used for normalizing all relative uri's """ self._base_url = url def getElementFormDefault(self): """return elementFormDefault attribute """ return self.attributes.get('elementFormDefault') def isElementFormDefaultQualified(self): return self.attributes.get('elementFormDefault') == 'qualified' def getAttributeFormDefault(self): """return attributeFormDefault attribute """ return self.attributes.get('attributeFormDefault') def getBlockDefault(self): """return blockDefault attribute """ return self.attributes.get('blockDefault') def getFinalDefault(self): """return finalDefault attribute """ return self.attributes.get('finalDefault') def load(self, node, location=None): self.__node = node pnode = node.getParentNode() if pnode: pname = SplitQName(pnode.getTagName())[1] if pname == 'types': attributes = {} self.setAttributes(pnode) attributes.update(self.attributes) self.setAttributes(node) for k,v in attributes['xmlns'].items(): if not self.attributes['xmlns'].has_key(k): self.attributes['xmlns'][k] = v else: self.setAttributes(node) else: self.setAttributes(node) self.targetNamespace = self.getTargetNamespace() for childNode in self.getContents(node): component = SplitQName(childNode.getTagName())[1] if component == 'include': tp = self.__class__.Include(self) tp.fromDom(childNode) sl = tp.attributes['schemaLocation'] schema = tp.getSchema() if not self.getIncludeSchemas().has_key(sl): self.addIncludeSchema(sl, schema) self.includes[sl] = tp pn = childNode.getParentNode().getNode() pn.removeChild(childNode.getNode()) for child in schema.getNode().getNode().childNodes: pn.appendChild(child.cloneNode(1)) for collection in ['imports','elements','types', 'attr_decl','attr_groups','model_groups', 'notations']: for k,v in getattr(schema,collection).items(): if not 
getattr(self,collection).has_key(k): v._parent = weakref.ref(self) getattr(self,collection)[k] = v else: warnings.warn("Not keeping schema component.") elif component == 'import': slocd = SchemaReader.namespaceToSchema tp = self.__class__.Import(self) tp.fromDom(childNode) import_ns = tp.getAttribute('namespace') or\ self.__class__.empty_namespace schema = slocd.get(import_ns) if schema is None: schema = XMLSchema() slocd[import_ns] = schema try: tp.loadSchema(schema) except NoSchemaLocationWarning, ex: # Dependency declaration, hopefully implementation # is aware of this namespace (eg. SOAP,WSDL,?) print "IMPORT: ", import_ns print ex del slocd[import_ns] continue except SchemaError, ex: #warnings.warn(\ # '<import namespace="%s" schemaLocation=?>, %s'\ # %(import_ns, 'failed to load schema instance') #) print ex del slocd[import_ns] class _LazyEvalImport(str): '''Lazy evaluation of import, replace entry in self.imports.''' #attributes = dict(namespace=import_ns) def getSchema(namespace): schema = slocd.get(namespace) if schema is None: parent = self._parent() wstypes = parent if isinstance(parent, WSDLToolsAdapter): wstypes = parent.getImportSchemas() schema = wstypes.get(namespace) if isinstance(schema, XMLSchema): self.imports[namespace] = schema return schema return None self.imports[import_ns] = _LazyEvalImport(import_ns) continue else: tp._schema = schema if self.getImportSchemas().has_key(import_ns): warnings.warn(\ 'Detected multiple imports of the namespace "%s" '\ %import_ns) self.addImportSchema(schema) # spec says can have multiple imports of same namespace # but purpose of import is just dependency declaration. 
self.imports[import_ns] = tp elif component == 'redefine': warnings.warn('redefine is ignored') elif component == 'annotation': warnings.warn('annotation is ignored') elif component == 'attribute': tp = AttributeDeclaration(self) tp.fromDom(childNode) self.attr_decl[tp.getAttribute('name')] = tp elif component == 'attributeGroup': tp = AttributeGroupDefinition(self) tp.fromDom(childNode) self.attr_groups[tp.getAttribute('name')] = tp elif component == 'element': tp = ElementDeclaration(self) tp.fromDom(childNode) self.elements[tp.getAttribute('name')] = tp elif component == 'group': tp = ModelGroupDefinition(self) tp.fromDom(childNode) self.model_groups[tp.getAttribute('name')] = tp elif component == 'notation': tp = Notation(self) tp.fromDom(childNode) self.notations[tp.getAttribute('name')] = tp elif component == 'complexType': tp = ComplexType(self) tp.fromDom(childNode) self.types[tp.getAttribute('name')] = tp elif component == 'simpleType': tp = SimpleType(self) tp.fromDom(childNode) self.types[tp.getAttribute('name')] = tp else: break class Import(XMLSchemaComponent): """<import> parent: schema attributes: id -- ID namespace -- anyURI schemaLocation -- anyURI contents: annotation? """ attributes = {'id':None, 'namespace':None, 'schemaLocation':None} contents = {'xsd':['annotation']} tag = 'import' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self._schema = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) if self.attributes['namespace'] == self.getTargetNamespace(): raise SchemaError, 'namespace of schema and import match' for i in contents: component = SplitQName(i.getTagName())[1] if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) def getSchema(self): """if schema is not defined, first look for a Schema class instance in parent Schema. 
               Else if not defined resolve schemaLocation
               and create a new Schema class instance, and keep a hard reference.
            """
            if not self._schema:
                ns = self.attributes['namespace']
                # First consult the parent schema's import cache; for schemas
                # embedded in WSDL, also check the grandparent (the adapter).
                schema = self._parent().getImportSchemas().get(ns)
                if not schema and self._parent()._parent:
                    schema = self._parent()._parent().getImportSchemas().get(ns)

                if not schema:
                    url = self.attributes.get('schemaLocation')
                    if not url:
                        raise SchemaError, 'namespace(%s) is unknown' %ns
                    base_url = self._parent().getBaseUrl()
                    reader = SchemaReader(base_url=base_url)
                    # Share the parent's caches so the loaded schema can
                    # resolve its own imports/includes against them.
                    reader._imports = self._parent().getImportSchemas()
                    reader._includes = self._parent().getIncludeSchemas()
                    self._schema = reader.loadFromURL(url)
            return self._schema or schema

        def loadSchema(self, schema):
            """
            Load the schema referenced by this import's schemaLocation into
            the given (pre-registered) schema instance.  Raises
            NoSchemaLocationWarning when no schemaLocation is present.
            """
            base_url = self._parent().getBaseUrl()
            reader = SchemaReader(base_url=base_url)
            reader._imports = self._parent().getImportSchemas()
            reader._includes = self._parent().getIncludeSchemas()
            self._schema = schema

            if not self.attributes.has_key('schemaLocation'):
                raise NoSchemaLocationWarning('no schemaLocation attribute in import')

            reader.loadFromURL(self.attributes.get('schemaLocation'), schema)

    class Include(XMLSchemaComponent):
        """<include schemaLocation>
           parent:
               schema
           attributes:
               id -- ID
               schemaLocation -- anyURI, required
           contents:
               annotation?
        """
        required = ['schemaLocation']
        attributes = {'id':None,
            'schemaLocation':None}
        contents = {'xsd':['annotation']}
        tag = 'include'

        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
            self._schema = None

        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)

            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

        def getSchema(self):
            """if schema is not defined, first look for a Schema class instance
               in parent Schema.
               Else if not defined resolve schemaLocation
               and create a new Schema class instance.
            """
            if not self._schema:
                schema = self._parent()
                self._schema = schema.getIncludeSchemas().get(\
                                   self.attributes['schemaLocation']
                                   )
                if not self._schema:
                    url = self.attributes['schemaLocation']
                    reader = SchemaReader(base_url=schema.getBaseUrl())
                    reader._imports = schema.getImportSchemas()
                    reader._includes = schema.getIncludeSchemas()

                    # create schema before loading so chameleon include
                    # will evalute targetNamespace correctly.
                    self._schema = XMLSchema(schema)
                    reader.loadFromURL(url, self._schema)

            return self._schema


class AttributeDeclaration(XMLSchemaComponent,\
                           AttributeMarker,\
                           DeclarationMarker):
    """<attribute name>
       parent:
           schema
       attributes:
           id -- ID
           name -- NCName, required
           type -- QName
           default -- string
           fixed -- string
       contents:
           annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation','simpleType']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        """ No list or union support
        """
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            elif component == 'simpleType':
                # Anonymous inline type definition for this attribute.
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())


class LocalAttributeDeclaration(AttributeDeclaration,\
                                AttributeMarker,\
                                LocalMarker,\
                                DeclarationMarker):
    """<attribute name>
       parent:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           name -- NCName,  required
           type -- QName
           form -- ('qualified' | 'unqualified'), schema.attributeFormDefault
           use -- ('optional' | 'prohibited' | 'required'), optional
           default -- string
           fixed --
           string
       contents:
           annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        # form default is computed lazily from the enclosing schema's
        # attributeFormDefault (see __setAttributeDefaults).
        'form':lambda self: GetSchema(self).getAttributeFormDefault(),
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation','simpleType']}

    def __init__(self, parent):
        AttributeDeclaration.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            elif component == 'simpleType':
                # Anonymous inline type definition for this attribute.
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())


class AttributeWildCard(XMLSchemaComponent,\
                        AttributeMarker,\
                        DeclarationMarker,\
                        WildCardMarker):
    """<anyAttribute>
       parents:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           namespace -- '##any' | '##other' |
                        (anyURI* | '##targetNamespace' | '##local'), ##any
           processContents -- 'lax' | 'skip' | 'strict', strict
       contents:
           annotation?
    """
    attributes = {'id':None,
        'namespace':'##any',
        'processContents':'strict'}
    contents = {'xsd':['annotation']}
    tag = 'anyAttribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())


class AttributeReference(XMLSchemaComponent,\
                         AttributeMarker,\
                         ReferenceMarker):
    """<attribute ref>
       parents:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           ref -- QName, required
           use -- ('optional' | 'prohibited' | 'required'), optional
           default -- string
           fixed -- string
       contents:
           annotation?
    """
    required = ['ref']
    attributes = {'id':None,
        'ref':None,
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getAttributeDeclaration(self, attribute='ref'):
        # Resolve the referenced global attribute declaration (defaults to
        # following this reference's own 'ref' attribute).
        return XMLSchemaComponent.getAttributeDeclaration(self, attribute)

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())


class AttributeGroupDefinition(XMLSchemaComponent,\
                               AttributeGroupMarker,\
                               DefinitionMarker):
    """<attributeGroup name>
       parents:
           schema, redefine
       attributes:
           id -- ID
           name -- NCName,  required
       contents:
           annotation?, (attribute | attributeGroup)*, anyAttribute?
""" required = ['name'] attributes = {'id':None, 'name':None} contents = {'xsd':['annotation', 'attribute', 'attributeGroup', 'anyAttribute']} tag = 'attributeGroup' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.attr_content = None def getAttributeContent(self): return self.attr_content def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) content = [] for indx in range(len(contents)): component = SplitQName(contents[indx].getTagName())[1] if (component == 'annotation') and (not indx): self.annotation = Annotation(self) self.annotation.fromDom(contents[indx]) elif component == 'attribute': if contents[indx].hasattr('name'): content.append(LocalAttributeDeclaration(self)) elif contents[indx].hasattr('ref'): content.append(AttributeReference(self)) else: raise SchemaError, 'Unknown attribute type' content[-1].fromDom(contents[indx]) elif component == 'attributeGroup': content.append(AttributeGroupReference(self)) content[-1].fromDom(contents[indx]) elif component == 'anyAttribute': if len(contents) != indx+1: raise SchemaError, 'anyAttribute is out of order in %s' %self.getItemTrace() content.append(AttributeWildCard(self)) content[-1].fromDom(contents[indx]) else: raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName()) self.attr_content = tuple(content) class AttributeGroupReference(XMLSchemaComponent,\ AttributeGroupMarker,\ ReferenceMarker): """<attributeGroup ref> parents: complexType, restriction, extension, attributeGroup attributes: id -- ID ref -- QName, required contents: annotation? """ required = ['ref'] attributes = {'id':None, 'ref':None} contents = {'xsd':['annotation']} tag = 'attributeGroup' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None def getAttributeGroup(self, attribute='ref'): """attribute -- attribute with a QName value (eg. type). 
collection -- check types collection in parent Schema instance """ return XMLSchemaComponent.getAttributeGroup(self, attribute) def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) for i in contents: component = SplitQName(i.getTagName())[1] if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) ###################################################### # Elements ##################################################### class IdentityConstrants(XMLSchemaComponent): """Allow one to uniquely identify nodes in a document and ensure the integrity of references between them. attributes -- dictionary of attributes selector -- XPath to selected nodes fields -- list of XPath to key field """ def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.selector = None self.fields = None self.annotation = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) fields = [] for i in contents: component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) elif component == 'selector': self.selector = self.Selector(self) self.selector.fromDom(i) continue elif component == 'field': fields.append(self.Field(self)) fields[-1].fromDom(i) continue else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.fields = tuple(fields) class Constraint(XMLSchemaComponent): def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) for i in contents: component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 
'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) class Selector(Constraint): """<selector xpath> parent: unique, key, keyref attributes: id -- ID xpath -- XPath subset, required contents: annotation? """ required = ['xpath'] attributes = {'id':None, 'xpath':None} contents = {'xsd':['annotation']} tag = 'selector' class Field(Constraint): """<field xpath> parent: unique, key, keyref attributes: id -- ID xpath -- XPath subset, required contents: annotation? """ required = ['xpath'] attributes = {'id':None, 'xpath':None} contents = {'xsd':['annotation']} tag = 'field' class Unique(IdentityConstrants): """<unique name> Enforce fields are unique w/i a specified scope. parent: element attributes: id -- ID name -- NCName, required contents: annotation?, selector, field+ """ required = ['name'] attributes = {'id':None, 'name':None} contents = {'xsd':['annotation', 'selector', 'field']} tag = 'unique' class Key(IdentityConstrants): """<key name> Enforce fields are unique w/i a specified scope, and all field values are present w/i document. Fields cannot be nillable. parent: element attributes: id -- ID name -- NCName, required contents: annotation?, selector, field+ """ required = ['name'] attributes = {'id':None, 'name':None} contents = {'xsd':['annotation', 'selector', 'field']} tag = 'key' class KeyRef(IdentityConstrants): """<keyref name refer> Ensure a match between two sets of values in an instance. 
parent: element attributes: id -- ID name -- NCName, required refer -- QName, required contents: annotation?, selector, field+ """ required = ['name', 'refer'] attributes = {'id':None, 'name':None, 'refer':None} contents = {'xsd':['annotation', 'selector', 'field']} tag = 'keyref' class ElementDeclaration(XMLSchemaComponent,\ ElementMarker,\ DeclarationMarker): """<element name> parents: schema attributes: id -- ID name -- NCName, required type -- QName default -- string fixed -- string nillable -- boolean, false abstract -- boolean, false substitutionGroup -- QName block -- ('#all' | ('substition' | 'extension' | 'restriction')*), schema.blockDefault final -- ('#all' | ('extension' | 'restriction')*), schema.finalDefault contents: annotation?, (simpleType,complexType)?, (key | keyref | unique)* """ required = ['name'] attributes = {'id':None, 'name':None, 'type':None, 'default':None, 'fixed':None, 'nillable':0, 'abstract':0, 'substitutionGroup':None, 'block':lambda self: self._parent().getBlockDefault(), 'final':lambda self: self._parent().getFinalDefault()} contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\ 'keyref', 'unique']} tag = 'element' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None self.constraints = () def isQualified(self): """Global elements are always qualified. """ return True def getAttribute(self, attribute): """return attribute. 
If attribute is type and it's None, and no simple or complex content, return the default type "xsd:anyType" """ value = XMLSchemaComponent.getAttribute(self, attribute) if attribute != 'type' or value is not None: return value if self.content is not None: return None parent = self while 1: nsdict = parent.attributes[XMLSchemaComponent.xmlns] for k,v in nsdict.items(): if v not in SCHEMA.XSD_LIST: continue return TypeDescriptionComponent((v, 'anyType')) if isinstance(parent, WSDLToolsAdapter)\ or not hasattr(parent, '_parent'): break parent = parent._parent() raise SchemaError, 'failed to locate the XSD namespace' def getElementDeclaration(self, attribute): raise Warning, 'invalid operation for <%s>' %self.tag def getTypeDefinition(self, attribute=None): """If attribute is None, "type" is assumed, return the corresponding representation of the global type definition (TypeDefinition), or the local definition if don't find "type". To maintain backwards compat, if attribute is provided call base class method. 
""" if attribute: return XMLSchemaComponent.getTypeDefinition(self, attribute) gt = XMLSchemaComponent.getTypeDefinition(self, 'type') if gt: return gt return self.content def getConstraints(self): return self._constraints def setConstraints(self, constraints): self._constraints = tuple(constraints) constraints = property(getConstraints, setConstraints, None, "tuple of key, keyref, unique constraints") def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) constraints = [] for i in contents: component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) elif component == 'simpleType' and not self.content: self.content = AnonymousSimpleType(self) self.content.fromDom(i) elif component == 'complexType' and not self.content: self.content = LocalComplexType(self) self.content.fromDom(i) elif component == 'key': constraints.append(Key(self)) constraints[-1].fromDom(i) elif component == 'keyref': constraints.append(KeyRef(self)) constraints[-1].fromDom(i) elif component == 'unique': constraints.append(Unique(self)) constraints[-1].fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.constraints = constraints class LocalElementDeclaration(ElementDeclaration,\ LocalMarker): """<element> parents: all, choice, sequence attributes: id -- ID name -- NCName, required form -- ('qualified' | 'unqualified'), schema.elementFormDefault type -- QName minOccurs -- Whole Number, 1 maxOccurs -- (Whole Number | 'unbounded'), 1 default -- string fixed -- string nillable -- boolean, false block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault contents: annotation?, (simpleType,complexType)?, (key | keyref | unique)* """ required = ['name'] attributes = {'id':None, 'name':None, 'form':lambda self: 
GetSchema(self).getElementFormDefault(), 'type':None, 'minOccurs':'1', 'maxOccurs':'1', 'default':None, 'fixed':None, 'nillable':0, 'abstract':0, 'block':lambda self: GetSchema(self).getBlockDefault()} contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\ 'keyref', 'unique']} def isQualified(self): """ Local elements can be qualified or unqualifed according to the attribute form, or the elementFormDefault. By default local elements are unqualified. """ form = self.getAttribute('form') if form == 'qualified': return True if form == 'unqualified': return False raise SchemaError, 'Bad form (%s) for element: %s' %(form, self.getItemTrace()) class ElementReference(XMLSchemaComponent,\ ElementMarker,\ ReferenceMarker): """<element ref> parents: all, choice, sequence attributes: id -- ID ref -- QName, required minOccurs -- Whole Number, 1 maxOccurs -- (Whole Number | 'unbounded'), 1 contents: annotation? """ required = ['ref'] attributes = {'id':None, 'ref':None, 'minOccurs':'1', 'maxOccurs':'1'} contents = {'xsd':['annotation']} tag = 'element' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None def getElementDeclaration(self, attribute=None): """If attribute is None, "ref" is assumed, return the corresponding representation of the global element declaration (ElementDeclaration), To maintain backwards compat, if attribute is provided call base class method. 
""" if attribute: return XMLSchemaComponent.getElementDeclaration(self, attribute) return XMLSchemaComponent.getElementDeclaration(self, 'ref') def fromDom(self, node): self.annotation = None self.setAttributes(node) for i in self.getContents(node): component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) class ElementWildCard(LocalElementDeclaration, WildCardMarker): """<any> parents: choice, sequence attributes: id -- ID minOccurs -- Whole Number, 1 maxOccurs -- (Whole Number | 'unbounded'), 1 namespace -- '##any' | '##other' | (anyURI* | '##targetNamespace' | '##local'), ##any processContents -- 'lax' | 'skip' | 'strict', strict contents: annotation? """ required = [] attributes = {'id':None, 'minOccurs':'1', 'maxOccurs':'1', 'namespace':'##any', 'processContents':'strict'} contents = {'xsd':['annotation']} tag = 'any' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None def isQualified(self): """ Global elements are always qualified, but if processContents are not strict could have dynamically generated local elements. """ return GetSchema(self).isElementFormDefaultQualified() def getAttribute(self, attribute): """return attribute. 
""" return XMLSchemaComponent.getAttribute(self, attribute) def getTypeDefinition(self, attribute): raise Warning, 'invalid operation for <%s>' % self.tag def fromDom(self, node): self.annotation = None self.setAttributes(node) for i in self.getContents(node): component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) ###################################################### # Model Groups ##################################################### class Sequence(XMLSchemaComponent,\ SequenceMarker): """<sequence> parents: complexType, extension, restriction, group, choice, sequence attributes: id -- ID minOccurs -- Whole Number, 1 maxOccurs -- (Whole Number | 'unbounded'), 1 contents: annotation?, (element | group | choice | sequence | any)* """ attributes = {'id':None, 'minOccurs':'1', 'maxOccurs':'1'} contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\ 'any']} tag = 'sequence' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) content = [] for i in contents: component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) continue elif component == 'element': if i.hasattr('ref'): content.append(ElementReference(self)) else: content.append(LocalElementDeclaration(self)) elif component == 'group': content.append(ModelGroupReference(self)) elif component == 'choice': content.append(Choice(self)) elif component == 'sequence': content.append(Sequence(self)) elif component == 'any': content.append(ElementWildCard(self)) else: raise SchemaError, 'Unknown 
component (%s)' %(i.getTagName()) content[-1].fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.content = tuple(content) class All(XMLSchemaComponent,\ AllMarker): """<all> parents: complexType, extension, restriction, group attributes: id -- ID minOccurs -- '0' | '1', 1 maxOccurs -- '1', 1 contents: annotation?, element* """ attributes = {'id':None, 'minOccurs':'1', 'maxOccurs':'1'} contents = {'xsd':['annotation', 'element']} tag = 'all' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) content = [] for i in contents: component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) continue elif component == 'element': if i.hasattr('ref'): content.append(ElementReference(self)) else: content.append(LocalElementDeclaration(self)) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) content[-1].fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.content = tuple(content) class Choice(XMLSchemaComponent,\ ChoiceMarker): """<choice> parents: complexType, extension, restriction, group, choice, sequence attributes: id -- ID minOccurs -- Whole Number, 1 maxOccurs -- (Whole Number | 'unbounded'), 1 contents: annotation?, (element | group | choice | sequence | any)* """ attributes = {'id':None, 'minOccurs':'1', 'maxOccurs':'1'} contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\ 'any']} tag = 'choice' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) content = [] for i in contents: component = SplitQName(i.getTagName())[1] if component in 
self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) continue elif component == 'element': if i.hasattr('ref'): content.append(ElementReference(self)) else: content.append(LocalElementDeclaration(self)) elif component == 'group': content.append(ModelGroupReference(self)) elif component == 'choice': content.append(Choice(self)) elif component == 'sequence': content.append(Sequence(self)) elif component == 'any': content.append(ElementWildCard(self)) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) content[-1].fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.content = tuple(content) class ModelGroupDefinition(XMLSchemaComponent,\ ModelGroupMarker,\ DefinitionMarker): """<group name> parents: redefine, schema attributes: id -- ID name -- NCName, required contents: annotation?, (all | choice | sequence)? """ required = ['name'] attributes = {'id':None, 'name':None} contents = {'xsd':['annotation', 'all', 'choice', 'sequence']} tag = 'group' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) for i in contents: component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) continue elif component == 'all' and not self.content: self.content = All(self) elif component == 'choice' and not self.content: self.content = Choice(self) elif component == 'sequence' and not self.content: self.content = Sequence(self) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.content.fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) class ModelGroupReference(XMLSchemaComponent,\ ModelGroupMarker,\ 
ReferenceMarker): """<group ref> parents: choice, complexType, extension, restriction, sequence attributes: id -- ID ref -- NCName, required minOccurs -- Whole Number, 1 maxOccurs -- (Whole Number | 'unbounded'), 1 contents: annotation? """ required = ['ref'] attributes = {'id':None, 'ref':None, 'minOccurs':'1', 'maxOccurs':'1'} contents = {'xsd':['annotation']} tag = 'group' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None def getModelGroupReference(self): return self.getModelGroup('ref') def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) for i in contents: component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) class ComplexType(XMLSchemaComponent,\ DefinitionMarker,\ ComplexMarker): """<complexType name> parents: redefine, schema attributes: id -- ID name -- NCName, required mixed -- boolean, false abstract -- boolean, false block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault final -- ('#all' | ('extension' | 'restriction')*), schema.finalDefault contents: annotation?, (simpleContent | complexContent | ((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?)) """ required = ['name'] attributes = {'id':None, 'name':None, 'mixed':0, 'abstract':0, 'block':lambda self: self._parent().getBlockDefault(), 'final':lambda self: self._parent().getFinalDefault()} contents = {'xsd':['annotation', 'simpleContent', 'complexContent',\ 'group', 'all', 'choice', 'sequence', 'attribute', 'attributeGroup',\ 'anyAttribute', 'any']} tag = 'complexType' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None self.attr_content 
= None def isMixed(self): m = self.getAttribute('mixed') if m == 0 or m == False: return False if isinstance(m, basestring) is True: if m in ('false', '0'): return False if m in ('true', '1'): return True raise SchemaError, 'invalid value for attribute mixed(%s): %s'\ %(m, self.getItemTrace()) def getAttributeContent(self): return self.attr_content def getElementDeclaration(self, attribute): raise Warning, 'invalid operation for <%s>' %self.tag def getTypeDefinition(self, attribute): raise Warning, 'invalid operation for <%s>' %self.tag def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) indx = 0 num = len(contents) if not num: return component = SplitQName(contents[indx].getTagName())[1] if component == 'annotation': self.annotation = Annotation(self) self.annotation.fromDom(contents[indx]) indx += 1 if indx < num: component = SplitQName(contents[indx].getTagName())[1] self.content = None if component == 'simpleContent': self.content = self.__class__.SimpleContent(self) self.content.fromDom(contents[indx]) elif component == 'complexContent': self.content = self.__class__.ComplexContent(self) self.content.fromDom(contents[indx]) else: if component == 'all': self.content = All(self) elif component == 'choice': self.content = Choice(self) elif component == 'sequence': self.content = Sequence(self) elif component == 'group': self.content = ModelGroupReference(self) if self.content: self.content.fromDom(contents[indx]) indx += 1 self.attr_content = [] while indx < num: component = SplitQName(contents[indx].getTagName())[1] if component == 'attribute': if contents[indx].hasattr('ref'): self.attr_content.append(AttributeReference(self)) else: self.attr_content.append(LocalAttributeDeclaration(self)) elif component == 'attributeGroup': self.attr_content.append(AttributeGroupReference(self)) elif component == 'anyAttribute': self.attr_content.append(AttributeWildCard(self)) else: raise SchemaError, 'Unknown component (%s): %s' \ 
%(contents[indx].getTagName(),self.getItemTrace()) self.attr_content[-1].fromDom(contents[indx]) indx += 1 class _DerivedType(XMLSchemaComponent): def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None # XXX remove attribute derivation, inconsistent self.derivation = None self.content = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) for i in contents: component = SplitQName(i.getTagName())[1] if component in self.__class__.contents['xsd']: if component == 'annotation' and not self.annotation: self.annotation = Annotation(self) self.annotation.fromDom(i) continue elif component == 'restriction' and not self.derivation: self.derivation = self.__class__.Restriction(self) elif component == 'extension' and not self.derivation: self.derivation = self.__class__.Extension(self) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.derivation.fromDom(i) self.content = self.derivation class ComplexContent(_DerivedType,\ ComplexMarker): """<complexContent> parents: complexType attributes: id -- ID mixed -- boolean, false contents: annotation?, (restriction | extension) """ attributes = {'id':None, 'mixed':0} contents = {'xsd':['annotation', 'restriction', 'extension']} tag = 'complexContent' def isMixed(self): m = self.getAttribute('mixed') if m == 0 or m == False: return False if isinstance(m, basestring) is True: if m in ('false', '0'): return False if m in ('true', '1'): return True raise SchemaError, 'invalid value for attribute mixed(%s): %s'\ %(m, self.getItemTrace()) class _DerivationBase(XMLSchemaComponent): """<extension>,<restriction> parents: complexContent attributes: id -- ID base -- QName, required contents: annotation?, (group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute? 
""" required = ['base'] attributes = {'id':None, 'base':None } contents = {'xsd':['annotation', 'group', 'all', 'choice',\ 'sequence', 'attribute', 'attributeGroup', 'anyAttribute']} def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None self.attr_content = None def getAttributeContent(self): return self.attr_content def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) indx = 0 num = len(contents) #XXX ugly if not num: return component = SplitQName(contents[indx].getTagName())[1] if component == 'annotation': self.annotation = Annotation(self) self.annotation.fromDom(contents[indx]) indx += 1 component = SplitQName(contents[indx].getTagName())[1] if component == 'all': self.content = All(self) self.content.fromDom(contents[indx]) indx += 1 elif component == 'choice': self.content = Choice(self) self.content.fromDom(contents[indx]) indx += 1 elif component == 'sequence': self.content = Sequence(self) self.content.fromDom(contents[indx]) indx += 1 elif component == 'group': self.content = ModelGroupReference(self) self.content.fromDom(contents[indx]) indx += 1 else: self.content = None self.attr_content = [] while indx < num: component = SplitQName(contents[indx].getTagName())[1] if component == 'attribute': if contents[indx].hasattr('ref'): self.attr_content.append(AttributeReference(self)) else: self.attr_content.append(LocalAttributeDeclaration(self)) elif component == 'attributeGroup': if contents[indx].hasattr('ref'): self.attr_content.append(AttributeGroupReference(self)) else: self.attr_content.append(AttributeGroupDefinition(self)) elif component == 'anyAttribute': self.attr_content.append(AttributeWildCard(self)) else: raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName()) self.attr_content[-1].fromDom(contents[indx]) indx += 1 class Extension(_DerivationBase, ExtensionMarker): """<extension base> parents: complexContent attributes: id -- ID base -- 
QName, required contents: annotation?, (group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute? """ tag = 'extension' class Restriction(_DerivationBase,\ RestrictionMarker): """<restriction base> parents: complexContent attributes: id -- ID base -- QName, required contents: annotation?, (group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute? """ tag = 'restriction' class SimpleContent(_DerivedType,\ SimpleMarker): """<simpleContent> parents: complexType attributes: id -- ID contents: annotation?, (restriction | extension) """ attributes = {'id':None} contents = {'xsd':['annotation', 'restriction', 'extension']} tag = 'simpleContent' class Extension(XMLSchemaComponent,\ ExtensionMarker): """<extension base> parents: simpleContent attributes: id -- ID base -- QName, required contents: annotation?, (attribute | attributeGroup)*, anyAttribute? """ required = ['base'] attributes = {'id':None, 'base':None } contents = {'xsd':['annotation', 'attribute', 'attributeGroup', 'anyAttribute']} tag = 'extension' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.attr_content = None def getAttributeContent(self): return self.attr_content def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) indx = 0 num = len(contents) if num: component = SplitQName(contents[indx].getTagName())[1] if component == 'annotation': self.annotation = Annotation(self) self.annotation.fromDom(contents[indx]) indx += 1 component = SplitQName(contents[indx].getTagName())[1] content = [] while indx < num: component = SplitQName(contents[indx].getTagName())[1] if component == 'attribute': if contents[indx].hasattr('ref'): content.append(AttributeReference(self)) else: content.append(LocalAttributeDeclaration(self)) elif component == 'attributeGroup': content.append(AttributeGroupReference(self)) elif component == 'anyAttribute': content.append(AttributeWildCard(self)) else: raise 
SchemaError, 'Unknown component (%s)'\ %(contents[indx].getTagName()) content[-1].fromDom(contents[indx]) indx += 1 self.attr_content = tuple(content) class Restriction(XMLSchemaComponent,\ RestrictionMarker): """<restriction base> parents: simpleContent attributes: id -- ID base -- QName, required contents: annotation?, simpleType?, (enumeration | length | maxExclusive | maxInclusive | maxLength | minExclusive | minInclusive | minLength | pattern | fractionDigits | totalDigits | whiteSpace)*, (attribute | attributeGroup)*, anyAttribute? """ required = ['base'] attributes = {'id':None, 'base':None } contents = {'xsd':['annotation', 'simpleType', 'attribute',\ 'attributeGroup', 'anyAttribute'] + RestrictionMarker.facets} tag = 'restriction' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None self.attr_content = None def getAttributeContent(self): return self.attr_content def fromDom(self, node): self.content = [] self.setAttributes(node) contents = self.getContents(node) indx = 0 num = len(contents) component = SplitQName(contents[indx].getTagName())[1] if component == 'annotation': self.annotation = Annotation(self) self.annotation.fromDom(contents[indx]) indx += 1 component = SplitQName(contents[indx].getTagName())[1] content = [] while indx < num: component = SplitQName(contents[indx].getTagName())[1] if component == 'attribute': if contents[indx].hasattr('ref'): content.append(AttributeReference(self)) else: content.append(LocalAttributeDeclaration(self)) elif component == 'attributeGroup': content.append(AttributeGroupReference(self)) elif component == 'anyAttribute': content.append(AttributeWildCard(self)) elif component == 'simpleType': self.content.append(AnonymousSimpleType(self)) self.content[-1].fromDom(contents[indx]) else: raise SchemaError, 'Unknown component (%s)'\ %(contents[indx].getTagName()) content[-1].fromDom(contents[indx]) indx += 1 self.attr_content = tuple(content) class 
LocalComplexType(ComplexType,\ LocalMarker): """<complexType> parents: element attributes: id -- ID mixed -- boolean, false contents: annotation?, (simpleContent | complexContent | ((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?)) """ required = [] attributes = {'id':None, 'mixed':0} tag = 'complexType' class SimpleType(XMLSchemaComponent,\ DefinitionMarker,\ SimpleMarker): """<simpleType name> parents: redefine, schema attributes: id -- ID name -- NCName, required final -- ('#all' | ('extension' | 'restriction' | 'list' | 'union')*), schema.finalDefault contents: annotation?, (restriction | list | union) """ required = ['name'] attributes = {'id':None, 'name':None, 'final':lambda self: self._parent().getFinalDefault()} contents = {'xsd':['annotation', 'restriction', 'list', 'union']} tag = 'simpleType' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None def getElementDeclaration(self, attribute): raise Warning, 'invalid operation for <%s>' %self.tag def getTypeDefinition(self, attribute): raise Warning, 'invalid operation for <%s>' %self.tag def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) for child in contents: component = SplitQName(child.getTagName())[1] if component == 'annotation': self.annotation = Annotation(self) self.annotation.fromDom(child) continue break else: return if component == 'restriction': self.content = self.__class__.Restriction(self) elif component == 'list': self.content = self.__class__.List(self) elif component == 'union': self.content = self.__class__.Union(self) else: raise SchemaError, 'Unknown component (%s)' %(component) self.content.fromDom(child) class Restriction(XMLSchemaComponent,\ RestrictionMarker): """<restriction base> parents: simpleType attributes: id -- ID base -- QName, required or simpleType child contents: annotation?, simpleType?, (enumeration | length | maxExclusive | maxInclusive | 
maxLength | minExclusive | minInclusive | minLength | pattern | fractionDigits | totalDigits | whiteSpace)* """ attributes = {'id':None, 'base':None } contents = {'xsd':['annotation', 'simpleType']+RestrictionMarker.facets} tag = 'restriction' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None self.facets = None def getAttributeBase(self): return XMLSchemaComponent.getAttribute(self, 'base') def getTypeDefinition(self, attribute='base'): return XMLSchemaComponent.getTypeDefinition(self, attribute) def getSimpleTypeContent(self): for el in self.content: if el.isSimple(): return el return None def fromDom(self, node): self.facets = [] self.setAttributes(node) contents = self.getContents(node) content = [] for indx in range(len(contents)): component = SplitQName(contents[indx].getTagName())[1] if (component == 'annotation') and (not indx): self.annotation = Annotation(self) self.annotation.fromDom(contents[indx]) continue elif (component == 'simpleType') and (not indx or indx == 1): content.append(AnonymousSimpleType(self)) content[-1].fromDom(contents[indx]) elif component in RestrictionMarker.facets: self.facets.append(contents[indx]) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.content = tuple(content) class Union(XMLSchemaComponent, UnionMarker): """<union> parents: simpleType attributes: id -- ID memberTypes -- list of QNames, required or simpleType child. 
contents: annotation?, simpleType* """ attributes = {'id':None, 'memberTypes':None } contents = {'xsd':['annotation', 'simpleType']} tag = 'union' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None def fromDom(self, node): self.setAttributes(node) contents = self.getContents(node) content = [] for indx in range(len(contents)): component = SplitQName(contents[indx].getTagName())[1] if (component == 'annotation') and (not indx): self.annotation = Annotation(self) self.annotation.fromDom(contents[indx]) elif (component == 'simpleType'): content.append(AnonymousSimpleType(self)) content[-1].fromDom(contents[indx]) else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) self.content = tuple(content) class List(XMLSchemaComponent, ListMarker): """<list> parents: simpleType attributes: id -- ID itemType -- QName, required or simpleType child. contents: annotation?, simpleType? """ attributes = {'id':None, 'itemType':None } contents = {'xsd':['annotation', 'simpleType']} tag = 'list' def __init__(self, parent): XMLSchemaComponent.__init__(self, parent) self.annotation = None self.content = None def getItemType(self): return self.attributes.get('itemType') def getTypeDefinition(self, attribute='itemType'): """ return the type refered to by itemType attribute or the simpleType content. If returns None, then the type refered to by itemType is primitive. 
""" tp = XMLSchemaComponent.getTypeDefinition(self, attribute) return tp or self.content def fromDom(self, node): self.annotation = None self.content = None self.setAttributes(node) contents = self.getContents(node) for indx in range(len(contents)): component = SplitQName(contents[indx].getTagName())[1] if (component == 'annotation') and (not indx): self.annotation = Annotation(self) self.annotation.fromDom(contents[indx]) elif (component == 'simpleType'): self.content = AnonymousSimpleType(self) self.content.fromDom(contents[indx]) break else: raise SchemaError, 'Unknown component (%s)' %(i.getTagName()) class AnonymousSimpleType(SimpleType,\ SimpleMarker,\ LocalMarker): """<simpleType> parents: attribute, element, list, restriction, union attributes: id -- ID contents: annotation?, (restriction | list | union) """ required = [] attributes = {'id':None} tag = 'simpleType' class Redefine: """<redefine> parents: attributes: contents: """ tag = 'redefine' ########################### ########################### if sys.version_info[:2] >= (2, 2): tupleClass = tuple else: import UserTuple tupleClass = UserTuple.UserTuple class TypeDescriptionComponent(tupleClass): """Tuple of length 2, consisting of a namespace and unprefixed name. """ def __init__(self, args): """args -- (namespace, name) Remove the name's prefix, irrelevant. """ if len(args) != 2: raise TypeError, 'expecting tuple (namespace, name), got %s' %args elif args[1].find(':') >= 0: args = (args[0], SplitQName(args[1])[1]) tuple.__init__(self, args) return def getTargetNamespace(self): return self[0] def getName(self): return self[1]
jlopezmalla/spark
refs/heads/master
examples/src/main/python/mllib/tf_idf_example.py
126
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function from pyspark import SparkContext # $example on$ from pyspark.mllib.feature import HashingTF, IDF # $example off$ if __name__ == "__main__": sc = SparkContext(appName="TFIDFExample") # SparkContext # $example on$ # Load documents (one per line). documents = sc.textFile("data/mllib/kmeans_data.txt").map(lambda line: line.split(" ")) hashingTF = HashingTF() tf = hashingTF.transform(documents) # While applying HashingTF only needs a single pass to the data, applying IDF needs two passes: # First to compute the IDF vector and second to scale the term frequencies by IDF. tf.cache() idf = IDF().fit(tf) tfidf = idf.transform(tf) # spark.mllib's IDF implementation provides an option for ignoring terms # which occur in less than a minimum number of documents. # In such cases, the IDF for these terms is set to 0. # This feature can be used by passing the minDocFreq value to the IDF constructor. idfIgnore = IDF(minDocFreq=2).fit(tf) tfidfIgnore = idfIgnore.transform(tf) # $example off$ print("tfidf:") for each in tfidf.collect(): print(each) print("tfidfIgnore:") for each in tfidfIgnore.collect(): print(each) sc.stop()
mvidalgarcia/indico
refs/heads/master
indico/modules/events/contributions/lists.py
2
# This file is part of Indico. # Copyright (C) 2002 - 2019 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from __future__ import unicode_literals from collections import OrderedDict from datetime import timedelta from operator import attrgetter from flask import flash, request, session from sqlalchemy.orm import joinedload, subqueryload from indico.core.db import db from indico.modules.events.contributions.models.contributions import Contribution from indico.modules.events.util import ListGeneratorBase from indico.util.i18n import _ from indico.web.flask.templating import get_template_module class ContributionListGenerator(ListGeneratorBase): """Listing and filtering actions in the contribution list.""" endpoint = '.manage_contributions' list_link_type = 'contribution' check_access = False def __init__(self, event): super(ContributionListGenerator, self).__init__(event) self.default_list_config = {'filters': {'items': {}}} session_empty = {None: _('No session')} track_empty = {None: _('No track')} type_empty = {None: _('No type')} session_choices = OrderedDict((unicode(s.id), s.title) for s in sorted(self.event.sessions, key=attrgetter('title'))) track_choices = OrderedDict((unicode(t.id), t.title) for t in sorted(self.event.tracks, key=attrgetter('title'))) type_choices = OrderedDict((unicode(t.id), t.name) for t in sorted(self.event.contribution_types, key=attrgetter('name'))) self.static_items = OrderedDict([ ('session', {'title': _('Session'), 'filter_choices': OrderedDict(session_empty.items() + session_choices.items())}), ('track', {'title': _('Track'), 'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}), ('type', {'title': _('Type'), 'filter_choices': OrderedDict(type_empty.items() + type_choices.items())}), ('status', {'title': _('Status'), 'filter_choices': {'scheduled': _('Scheduled'), 'unscheduled': _('Not scheduled')}}) ]) 
self.list_config = self._get_config() def _build_query(self): timetable_entry_strategy = joinedload('timetable_entry') timetable_entry_strategy.lazyload('*') return (Contribution.query.with_parent(self.event) .order_by(Contribution.friendly_id) .options(timetable_entry_strategy, joinedload('session'), subqueryload('person_links'), db.undefer('subcontribution_count'), db.undefer('attachment_count'), db.undefer('is_scheduled'))) def _filter_list_entries(self, query, filters): if not filters.get('items'): return query criteria = [] if 'status' in filters['items']: filtered_statuses = filters['items']['status'] status_criteria = [] if 'scheduled' in filtered_statuses: status_criteria.append(Contribution.is_scheduled) if 'unscheduled' in filtered_statuses: status_criteria.append(~Contribution.is_scheduled) if status_criteria: criteria.append(db.or_(*status_criteria)) filter_cols = {'session': Contribution.session_id, 'track': Contribution.track_id, 'type': Contribution.type_id} for key, column in filter_cols.iteritems(): ids = set(filters['items'].get(key, ())) if not ids: continue column_criteria = [] if None in ids: column_criteria.append(column.is_(None)) if ids - {None}: column_criteria.append(column.in_(ids - {None})) criteria.append(db.or_(*column_criteria)) return query.filter(*criteria) def get_list_kwargs(self): if self.check_access: self.event.preload_all_acl_entries() contributions_query = self._build_query() total_entries = (sum(1 for c in contributions_query if c.can_access(session.user)) if self.check_access else contributions_query.count()) contributions = [c for c in self._filter_list_entries(contributions_query, self.list_config['filters']) if not self.check_access or c.can_access(session.user)] sessions = [{'id': s.id, 'title': s.title, 'colors': s.colors} for s in self.event.sessions] tracks = [{'id': int(t.id), 'title': t.title} for t in self.event.tracks] total_duration = (sum((c.duration for c in contributions), timedelta()), sum((c.duration for c 
in contributions if c.timetable_entry), timedelta())) selected_entry = request.args.get('selected') selected_entry = int(selected_entry) if selected_entry else None return {'contribs': contributions, 'sessions': sessions, 'tracks': tracks, 'total_entries': total_entries, 'total_duration': total_duration, 'selected_entry': selected_entry} def render_list(self, contrib=None): """Render the contribution list template components. :param contrib: Used in RHs responsible for CRUD operations on a contribution. :return: dict containing the list's entries, the fragment of displayed entries and whether the contribution passed is displayed in the results. """ contrib_list_kwargs = self.get_list_kwargs() total_entries = contrib_list_kwargs.pop('total_entries') selected_entry = contrib_list_kwargs.pop('selected_entry') tpl_contrib = get_template_module('events/contributions/management/_contribution_list.html') tpl_lists = get_template_module('events/management/_lists.html') contribs = contrib_list_kwargs['contribs'] filter_statistics = tpl_lists.render_filter_statistics(len(contribs), total_entries, contrib_list_kwargs.pop('total_duration')) return {'html': tpl_contrib.render_contrib_list(self.event, total_entries, **contrib_list_kwargs), 'hide_contrib': contrib not in contribs if contrib else None, 'filter_statistics': filter_statistics, 'selected_entry': selected_entry} def flash_info_message(self, contrib): flash(_("The contribution '{}' is not displayed in the list due to the enabled filters") .format(contrib.title), 'info') class ContributionDisplayListGenerator(ContributionListGenerator): endpoint = '.contribution_list' list_link_type = 'contribution_display' check_access = True def render_contribution_list(self): """Render the contribution list template components. :return: dict containing the list's entries, the fragment of displayed entries and whether the contribution passed is displayed in the results. 
""" contrib_list_kwargs = self.get_list_kwargs() total_entries = contrib_list_kwargs.pop('total_entries') contribs = contrib_list_kwargs['contribs'] tpl = get_template_module('events/contributions/display/_contribution_list.html') tpl_lists = get_template_module('events/management/_lists.html') return {'html': tpl.render_contribution_list(self.event, self.event.display_tzinfo, contribs, total_entries), 'counter': tpl_lists.render_displayed_entries_fragment(len(contribs), total_entries)}
shingonoide/odoo
refs/heads/deverp_8.0
addons/website_hr/controllers/__init__.py
7372
import main
Buggaarde/youtube-dl
refs/heads/master
youtube_dl/extractor/tunein.py
120
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ExtractorError class TuneInIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:www\.)? (?: tunein\.com/ (?: radio/.*?-s| station/.*?StationId\= )(?P<id>[0-9]+) |tun\.in/(?P<redirect_id>[A-Za-z0-9]+) ) ''' _API_URL_TEMPLATE = 'http://tunein.com/tuner/tune/?stationId={0:}&tuneType=Station' _INFO_DICT = { 'id': '34682', 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2', 'ext': 'aac', 'thumbnail': 're:^https?://.*\.png$', 'location': 'Tacoma, WA', } _TESTS = [ { 'url': 'http://tunein.com/radio/Jazz24-885-s34682/', 'info_dict': _INFO_DICT, 'params': { 'skip_download': True, # live stream }, }, { # test redirection 'url': 'http://tun.in/ser7s', 'info_dict': _INFO_DICT, 'params': { 'skip_download': True, # live stream }, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) redirect_id = mobj.group('redirect_id') if redirect_id: # The server doesn't support HEAD requests urlh = self._request_webpage( url, redirect_id, note='Downloading redirect page') url = urlh.geturl() self.to_screen('Following redirect: %s' % url) mobj = re.match(self._VALID_URL, url) station_id = mobj.group('id') station_info = self._download_json( self._API_URL_TEMPLATE.format(station_id), station_id, note='Downloading station JSON') title = station_info['Title'] thumbnail = station_info.get('Logo') location = station_info.get('Location') streams_url = station_info.get('StreamUrl') if not streams_url: raise ExtractorError('No downloadable streams found', expected=True) stream_data = self._download_webpage( streams_url, station_id, note='Downloading stream data') streams = json.loads(self._search_regex( r'\((.*)\);', stream_data, 'stream info'))['Streams'] is_live = None formats = [] for stream in streams: if stream.get('Type') == 'Live': is_live = True reliability = stream.get('Reliability') format_note = ( 'Reliability: %d%%' % reliability if reliability 
is not None else None) formats.append({ 'preference': ( 0 if reliability is None or reliability > 90 else 1), 'abr': stream.get('Bandwidth'), 'ext': stream.get('MediaType').lower(), 'acodec': stream.get('MediaType'), 'vcodec': 'none', 'url': stream.get('Url'), 'source_preference': reliability, 'format_note': format_note, }) self._sort_formats(formats) return { 'id': station_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'location': location, 'is_live': is_live, }
chirilo/remo
refs/heads/master
remo/reports/tests/__init__.py
3
import datetime from random import randint from django.db.models.signals import post_save from django.utils.timezone import now import factory from factory import fuzzy from remo.profiles.models import FunctionalArea from remo.profiles.tests import UserFactory from remo.reports.models import (Activity, Campaign, NGReport, NGReportComment, email_commenters_on_add_ng_report_comment) class ActivityFactory(factory.django.DjangoModelFactory): FACTORY_FOR = Activity name = factory.Sequence(lambda n: 'Activity #{0}'.format(n)) class CampaignFactory(factory.django.DjangoModelFactory): FACTORY_FOR = Campaign name = factory.Sequence(lambda n: 'Campaign #{0}'.format(n)) class NGReportFactory(factory.django.DjangoModelFactory): FACTORY_FOR = NGReport user = factory.SubFactory(UserFactory, userprofile__initial_council=True) mentor = factory.SelfAttribute('user.userprofile.mentor') activity = factory.SubFactory(ActivityFactory) latitude = fuzzy.FuzzyDecimal(low=-90.0, high=90.0, precision=5) longitude = fuzzy.FuzzyDecimal(low=-180.0, high=180.0, precision=5) location = 'Activity Location' is_passive = False link = 'www.example.com' report_date = fuzzy.FuzzyDate(datetime.date(2013, 01, 01), now().date()) @factory.post_generation def functional_areas(self, create, extracted, **kwargs): """Add functional areas list after object generation.""" if not create: return if extracted: for area in extracted: self.functional_areas.add(area) else: # create random rand_int = randint(1, 6) for area in (FunctionalArea.active_objects.all() .order_by('?')[:rand_int]): self.functional_areas.add(area) class NGReportCommentFactory(factory.django.DjangoModelFactory): FACTORY_FOR = NGReportComment user = factory.SubFactory(UserFactory, userprofile__initial_council=True) report = factory.SubFactory(NGReportFactory) comment = factory.Sequence(lambda n: 'Comment #{0}'.format(n)) class NGReportCommentFactoryNoSignals(NGReportCommentFactory): @classmethod def _create(cls, target_class, *args, **kwargs): 
dispatch_uid = 'email_commenters_on_add_ng_report_comment_signal' post_save.disconnect(email_commenters_on_add_ng_report_comment, NGReportComment, dispatch_uid=dispatch_uid) comment = super(NGReportCommentFactory, cls)._create(target_class, *args, **kwargs) post_save.connect(email_commenters_on_add_ng_report_comment, NGReportComment, dispatch_uid=dispatch_uid) return comment
cloudera/recordservice
refs/heads/master
thirdparty/thrift-0.9.0/contrib/zeromq/TZmqServer.py
108
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import logging import zmq import thrift.server.TServer import thrift.transport.TTransport class TZmqServer(thrift.server.TServer.TServer): def __init__(self, processor, ctx, endpoint, sock_type): thrift.server.TServer.TServer.__init__(self, processor, None) self.zmq_type = sock_type self.socket = ctx.socket(sock_type) self.socket.bind(endpoint) def serveOne(self): msg = self.socket.recv() itrans = thrift.transport.TTransport.TMemoryBuffer(msg) otrans = thrift.transport.TTransport.TMemoryBuffer() iprot = self.inputProtocolFactory.getProtocol(itrans) oprot = self.outputProtocolFactory.getProtocol(otrans) try: self.processor.process(iprot, oprot) except Exception: logging.exception("Exception while processing request") # Fall through and send back a response, even if empty or incomplete. 
if self.zmq_type == zmq.REP: msg = otrans.getvalue() self.socket.send(msg) def serve(self): while True: self.serveOne() class TZmqMultiServer(object): def __init__(self): self.servers = [] def serveOne(self, timeout = -1): self._serveActive(self._setupPoll(), timeout) def serveForever(self): poll_info = self._setupPoll() while True: self._serveActive(poll_info, -1) def _setupPoll(self): server_map = {} poller = zmq.Poller() for server in self.servers: server_map[server.socket] = server poller.register(server.socket, zmq.POLLIN) return (server_map, poller) def _serveActive(self, poll_info, timeout): (server_map, poller) = poll_info ready = dict(poller.poll()) for sock, state in ready.items(): assert (state & zmq.POLLIN) != 0 server_map[sock].serveOne()
synergeticsedx/deployment-wipro
refs/heads/oxa/master.fic
common/djangoapps/xblock_django/migrations/0002_auto_20160204_0809.py
60
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('xblock_django', '0001_initial'), ] operations = [ migrations.AddField( model_name='xblockdisableconfig', name='disabled_create_blocks', field=models.TextField(default=b'', help_text='Space-separated list of XBlock types whose creation to disable in Studio.', blank=True), ), ]
jerli/sympy
refs/heads/master
sympy/combinatorics/tests/test_group_constructs.py
129
from sympy.combinatorics.group_constructs import DirectProduct from sympy.combinatorics.named_groups import CyclicGroup, DihedralGroup def test_direct_product_n(): C = CyclicGroup(4) D = DihedralGroup(4) G = DirectProduct(C, C, C) assert G.order() == 64 assert G.degree == 12 assert len(G.orbits()) == 3 assert G.is_abelian is True H = DirectProduct(D, C) assert H.order() == 32 assert H.is_abelian is False
grundgruen/zipline
refs/heads/master
zipline/gens/composites.py
43
# # Copyright 2013 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import heapq def _decorate_source(source): for message in source: yield ((message.dt, message.source_id), message) def date_sorted_sources(*sources): """ Takes an iterable of sources, generating namestrings and piping their output into date_sort. """ sorted_stream = heapq.merge(*(_decorate_source(s) for s in sources)) # Strip out key decoration for _, message in sorted_stream: yield message