repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
thomasgilgenast/spqr-nonrel
refs/heads/master
filetransfers/backends/delegate.py
29
from django.conf import settings

from filetransfers.api import prepare_upload as delegate


def prepare_upload(*args, **kwargs):
    """Delegate an upload to the public or private backend.

    Chooses the backend from ``settings`` based on the ``private``
    keyword argument and forwards everything else unchanged to
    ``filetransfers.api.prepare_upload``.

    A missing ``private`` kwarg is treated as ``private=False`` (public
    backend); the original raised ``KeyError`` in that case.
    """
    if kwargs.get('private'):
        kwargs['backend'] = settings.PRIVATE_PREPARE_UPLOAD_BACKEND
    else:
        kwargs['backend'] = settings.PUBLIC_PREPARE_UPLOAD_BACKEND
    return delegate(*args, **kwargs)
mightbejosh/dj-braintree
refs/heads/transactions
tests/test_middleware.py
1
# import datetime # import decimal # # from django.contrib.auth import get_user_model # from django.contrib.auth.models import AnonymousUser # from django.test import TestCase # from django.test.client import RequestFactory # from django.test.utils import override_settings # from django.utils import timezone # # from djbraintree.models import Customer, CurrentSubscription # from djbraintree.middleware import SubscriptionPaymentMiddleware # # # class MiddlewareURLTest(TestCase): # # def setUp(self): # self.settings(ROOT_URLCONF='tests.test_urls') # self.factory = RequestFactory() # self.user = get_user_model().objects.create_user(username="pydanny", # email="pydanny@gmail.com") # self.middleware = SubscriptionPaymentMiddleware() # # def test_appname(self): # request = self.factory.get("/admin/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response, None) # # def test_namespace(self): # request = self.factory.get("/djbraintree/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response, None) # # def test_namespace_and_url(self): # request = self.factory.get("/testapp_namespaced/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response, None) # # def test_url(self): # request = self.factory.get("/testapp/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response, None) # # @override_settings(DEBUG=True) # def test_djdt(self): # request = self.factory.get("/__debug__/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response, None) # # def test_fnmatch(self): # request = self.factory.get("/test_fnmatch/extra_text/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response, None) # # # class MiddlewareLogicTest(TestCase): # urls = 
'tests.test_urls' # # def setUp(self): # period_start = datetime.datetime(2013, 4, 1, tzinfo=timezone.utc) # period_end = datetime.datetime(2013, 4, 30, tzinfo=timezone.utc) # start = datetime.datetime(2013, 1, 1, tzinfo=timezone.utc) # # self.factory = RequestFactory() # self.user = get_user_model().objects.create_user(username="pydanny", # email="pydanny@gmail.com") # self.customer = Customer.objects.create( # subscriber=self.user, # stripe_id="cus_xxxxxxxxxxxxxxx", # card_fingerprint="YYYYYYYY", # card_last_4="2342", # card_kind="Visa" # ) # self.subscription = CurrentSubscription.objects.create( # customer=self.customer, # plan="test", # current_period_start=period_start, # current_period_end=period_end, # amount=(500 / decimal.Decimal("100.0")), # status="active", # start=start, # quantity=1, # cancel_at_period_end=True # ) # self.middleware = SubscriptionPaymentMiddleware() # # def test_anonymous(self): # request = self.factory.get("/djbraintree/") # request.user = AnonymousUser() # # response = self.middleware.process_request(request) # self.assertEqual(response, None) # # def test_is_staff(self): # self.user.is_staff = True # self.user.save() # # request = self.factory.get("/djbraintree/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response, None) # # def test_is_superuser(self): # self.user.is_superuser = True # self.user.save() # # request = self.factory.get("/djbraintree/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response, None) # # def test_customer_has_inactive_subscription(self): # request = self.factory.get("/testapp_content/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response.status_code, 302) # # def test_customer_has_active_subscription(self): # end_date = datetime.datetime(2100, 4, 30, tzinfo=timezone.utc) # self.subscription.current_period_end = end_date # 
self.subscription.save() # # request = self.factory.get("/testapp_content/") # request.user = self.user # # response = self.middleware.process_request(request) # self.assertEqual(response, None)
MiLk/ansible
refs/heads/devel
lib/ansible/modules/cloud/amazon/ec2_instance_facts.py
20
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ec2_instance_facts short_description: Gather facts about ec2 instances in AWS description: - Gather facts about ec2 instances in AWS version_added: "2.4" author: - Michael Schuett, @michaeljs1990 - Rob White, @wimnat options: instance_ids: description: - If you specify one or more instance IDs, only instances that have the specified IDs are returned. required: false version_added: 2.4 filters: description: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter names and values are case sensitive. required: false default: {} extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Gather facts about all instances - ec2_instance_facts: # Gather facts about all instances in AZ ap-southeast-2a - ec2_instance_facts: filters: availability-zone: ap-southeast-2a # Gather facts about a particular instance using ID - ec2_instance_facts: instance_ids: - i-12345678 # Gather facts about any instance with a tag key Name and value Example - ec2_instance_facts: filters: "tag:Name": Example ''' RETURN = ''' instances: description: a list of ec2 instances returned: always type: complex contains: ami_launch_index: description: The AMI launch index, which can be used to find this instance in the launch group. returned: always type: int sample: 0 architecture: description: The architecture of the image returned: always type: string sample: x86_64 block_device_mappings: description: Any block device mapping entries for the instance. returned: always type: complex contains: device_name: description: The device name exposed to the instance (for example, /dev/sdh or xvdh). returned: always type: string sample: /dev/sdh ebs: description: Parameters used to automatically set up EBS volumes when the instance is launched. returned: always type: complex contains: attach_time: description: The time stamp when the attachment initiated. returned: always type: string sample: "2017-03-23T22:51:24+00:00" delete_on_termination: description: Indicates whether the volume is deleted on instance termination. returned: always type: bool sample: true status: description: The attachment state. returned: always type: string sample: attached volume_id: description: The ID of the EBS volume returned: always type: string sample: vol-12345678 client_token: description: The idempotency token you provided when you launched the instance, if applicable. returned: always type: string sample: mytoken ebs_optimized: description: Indicates whether the instance is optimized for EBS I/O. returned: always type: bool sample: false hypervisor: description: The hypervisor type of the instance. 
returned: always type: string sample: xen iam_instance_profile: description: The IAM instance profile associated with the instance, if applicable. returned: always type: complex contains: arn: description: The Amazon Resource Name (ARN) of the instance profile. returned: always type: string sample: "arn:aws:iam::000012345678:instance-profile/myprofile" id: description: The ID of the instance profile returned: always type: string sample: JFJ397FDG400FG9FD1N image_id: description: The ID of the AMI used to launch the instance. returned: always type: string sample: ami-0011223344 instance_id: description: The ID of the instance. returned: always type: string sample: i-012345678 instance_type: description: The instance type size of the running instance. returned: always type: string sample: t2.micro key_name: description: The name of the key pair, if this instance was launched with an associated key pair. returned: always type: string sample: my-key launch_time: description: The time the instance was launched. returned: always type: string sample: "2017-03-23T22:51:24+00:00" monitoring: description: The monitoring for the instance. returned: always type: complex contains: state: description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled. returned: always type: string sample: disabled network_interfaces: description: One or more network interfaces for the instance. returned: always type: complex contains: association: description: The association information for an Elastic IPv4 associated with the network interface. returned: always type: complex contains: ip_owner_id: description: The ID of the owner of the Elastic IP address. returned: always type: string sample: amazon public_dns_name: description: The public DNS name. returned: always type: string sample: "" public_ip: description: The public IP address or Elastic IP address bound to the network interface. 
returned: always type: string sample: 1.2.3.4 attachment: description: The network interface attachment. returned: always type: complex contains: attach_time: description: The time stamp when the attachment initiated. returned: always type: string sample: "2017-03-23T22:51:24+00:00" attachment_id: description: The ID of the network interface attachment. returned: always type: string sample: eni-attach-3aff3f delete_on_termination: description: Indicates whether the network interface is deleted when the instance is terminated. returned: always type: bool sample: true device_index: description: The index of the device on the instance for the network interface attachment. returned: always type: int sample: 0 status: description: The attachment state. returned: always type: string sample: attached description: description: The description. returned: always type: string sample: My interface groups: description: One or more security groups. returned: always type: complex contains: - group_id: description: The ID of the security group. returned: always type: string sample: sg-abcdef12 group_name: description: The name of the security group. returned: always type: string sample: mygroup ipv6_addresses: description: One or more IPv6 addresses associated with the network interface. returned: always type: complex contains: - ipv6_address: description: The IPv6 address. returned: always type: string sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334" mac_address: description: The MAC address. returned: always type: string sample: "00:11:22:33:44:55" network_interface_id: description: The ID of the network interface. returned: always type: string sample: eni-01234567 owner_id: description: The AWS account ID of the owner of the network interface. returned: always type: string sample: 01234567890 private_ip_address: description: The IPv4 address of the network interface within the subnet. 
returned: always type: string sample: 10.0.0.1 private_ip_addresses: description: The private IPv4 addresses associated with the network interface. returned: always type: complex contains: - association: description: The association information for an Elastic IP address (IPv4) associated with the network interface. returned: always type: complex contains: ip_owner_id: description: The ID of the owner of the Elastic IP address. returned: always type: string sample: amazon public_dns_name: description: The public DNS name. returned: always type: string sample: "" public_ip: description: The public IP address or Elastic IP address bound to the network interface. returned: always type: string sample: 1.2.3.4 primary: description: Indicates whether this IPv4 address is the primary private IP address of the network interface. returned: always type: bool sample: true private_ip_address: description: The private IPv4 address of the network interface. returned: always type: string sample: 10.0.0.1 source_dest_check: description: Indicates whether source/destination checking is enabled. returned: always type: bool sample: true status: description: The status of the network interface. returned: always type: string sample: in-use subnet_id: description: The ID of the subnet for the network interface. returned: always type: string sample: subnet-0123456 vpc_id: description: The ID of the VPC for the network interface. returned: always type: string sample: vpc-0123456 placement: description: The location where the instance launched, if applicable. returned: always type: complex contains: availability_zone: description: The Availability Zone of the instance. returned: always type: string sample: ap-southeast-2a group_name: description: The name of the placement group the instance is in (for cluster compute instances). returned: always type: string sample: "" tenancy: description: The tenancy of the instance (if the instance is running in a VPC). 
returned: always type: string sample: default private_dns_name: description: The private DNS name. returned: always type: string sample: ip-10-0-0-1.ap-southeast-2.compute.internal private_ip_address: description: The IPv4 address of the network interface within the subnet. returned: always type: string sample: 10.0.0.1 product_codes: description: One or more product codes. returned: always type: complex contains: - product_code_id: description: The product code. returned: always type: string sample: aw0evgkw8ef3n2498gndfgasdfsd5cce product_code_type: description: The type of product code. returned: always type: string sample: marketplace public_dns_name: description: The public DNS name assigned to the instance. returned: always type: string sample: public_ip_address: description: The public IPv4 address assigned to the instance returned: always type: string sample: 52.0.0.1 root_device_name: description: The device name of the root device returned: always type: string sample: /dev/sda1 root_device_type: description: The type of root device used by the AMI. returned: always type: string sample: ebs security_groups: description: One or more security groups for the instance. returned: always type: complex contains: - group_id: description: The ID of the security group. returned: always type: string sample: sg-0123456 - group_name: description: The name of the security group. returned: always type: string sample: my-security-group source_dest_check: description: Indicates whether source/destination checking is enabled. returned: always type: bool sample: true state: description: The current state of the instance. returned: always type: complex contains: code: description: The low byte represents the state. returned: always type: int sample: 16 name: description: The name of the state. returned: always type: string sample: running state_transition_reason: description: The reason for the most recent state transition. 
returned: always type: string sample: subnet_id: description: The ID of the subnet in which the instance is running. returned: always type: string sample: subnet-00abcdef tags: description: Any tags assigned to the instance. returned: always type: dict sample: virtualization_type: description: The type of virtualization of the AMI. returned: always type: string sample: hvm vpc_id: description: The ID of the VPC the instance is in. returned: always type: dict sample: vpc-0011223344 ''' import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info) try: import boto3 from botocore.exceptions import ClientError, NoCredentialsError HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False def list_ec2_instances(connection, module): instance_ids = module.params.get("instance_ids") filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) try: reservations_paginator = connection.get_paginator('describe_instances') reservations = reservations_paginator.paginate(InstanceIds=instance_ids, Filters=filters).build_full_result() except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # Get instances from reservations instances = [] for reservation in reservations['Reservations']: instances = instances + reservation['Instances'] # Turn the boto3 result in to ansible_friendly_snaked_names snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances] # Turn the boto3 result in to ansible friendly tag dictionary for instance in snaked_instances: if 'tags' in instance: instance['tags'] = boto3_tag_list_to_ansible_dict(instance['tags']) module.exit_json(instances=snaked_instances) def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( instance_ids=dict(default=[], 
type='list'), filters=dict(default={}, type='dict') ) ) module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[ ['instance_ids', 'filters'] ], supports_check_mode=True ) if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params) else: module.fail_json(msg="region must be specified") list_ec2_instances(connection, module) if __name__ == '__main__': main()
syaiful6/django
refs/heads/master
django/middleware/cache.py
372
""" Cache middleware. If enabled, each Django-powered page will be cached based on URL. The canonical way to enable cache middleware is to set ``UpdateCacheMiddleware`` as your first piece of middleware, and ``FetchFromCacheMiddleware`` as the last:: MIDDLEWARE_CLASSES = [ 'django.middleware.cache.UpdateCacheMiddleware', ... 'django.middleware.cache.FetchFromCacheMiddleware' ] This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run last during the response phase, which processes middleware bottom-up; ``FetchFromCacheMiddleware`` needs to run last during the request phase, which processes middleware top-down. The single-class ``CacheMiddleware`` can be used for some simple sites. However, if any other piece of middleware needs to affect the cache key, you'll need to use the two-part ``UpdateCacheMiddleware`` and ``FetchFromCacheMiddleware``. This'll most often happen when you're using Django's ``LocaleMiddleware``. More details about how the caching works: * Only GET or HEAD-requests with status code 200 are cached. * The number of seconds each page is stored for is set by the "max-age" section of the response's "Cache-Control" header, falling back to the CACHE_MIDDLEWARE_SECONDS setting if the section was not found. * This middleware expects that a HEAD request is answered with the same response headers exactly like the corresponding GET request. * When a hit occurs, a shallow copy of the original response object is returned from process_request. * Pages will be cached based on the contents of the request headers listed in the response's "Vary" header. * This middleware also sets ETag, Last-Modified, Expires and Cache-Control headers on the response object. 
""" from django.conf import settings from django.core.cache import DEFAULT_CACHE_ALIAS, caches from django.utils.cache import ( get_cache_key, get_max_age, has_vary_header, learn_cache_key, patch_response_headers, ) class UpdateCacheMiddleware(object): """ Response-phase cache middleware that updates the cache if the response is cacheable. Must be used as part of the two-part update/fetch cache middleware. UpdateCacheMiddleware must be the first piece of middleware in MIDDLEWARE_CLASSES so that it'll get called last during the response phase. """ def __init__(self): self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache = caches[self.cache_alias] def _should_update_cache(self, request, response): return hasattr(request, '_cache_update_cache') and request._cache_update_cache def process_response(self, request, response): """Sets the cache, if needed.""" if not self._should_update_cache(request, response): # We don't need to update the cache, just return. return response if response.streaming or response.status_code != 200: return response # Don't cache responses that set a user-specific (and maybe security # sensitive) cookie in response to a cookie-less request. if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'): return response # Try to get the timeout from the "max-age" section of the "Cache- # Control" header before reverting to using the default cache_timeout # length. timeout = get_max_age(response) if timeout is None: timeout = self.cache_timeout elif timeout == 0: # max-age was set to 0, don't bother caching. 
return response patch_response_headers(response, timeout) if timeout: cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache) if hasattr(response, 'render') and callable(response.render): response.add_post_render_callback( lambda r: self.cache.set(cache_key, r, timeout) ) else: self.cache.set(cache_key, response, timeout) return response class FetchFromCacheMiddleware(object): """ Request-phase cache middleware that fetches a page from the cache. Must be used as part of the two-part update/fetch cache middleware. FetchFromCacheMiddleware must be the last piece of middleware in MIDDLEWARE_CLASSES so that it'll get called last during the request phase. """ def __init__(self): self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache = caches[self.cache_alias] def process_request(self, request): """ Checks whether the page is already cached and returns the cached version if available. """ if request.method not in ('GET', 'HEAD'): request._cache_update_cache = False return None # Don't bother checking the cache. # try and get the cached GET response cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache) if cache_key is None: request._cache_update_cache = True return None # No cache information available, need to rebuild. response = self.cache.get(cache_key) # if it wasn't found and we are looking for a HEAD, try looking just for that if response is None and request.method == 'HEAD': cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache) response = self.cache.get(cache_key) if response is None: request._cache_update_cache = True return None # No cache information available, need to rebuild. # hit, return cached response request._cache_update_cache = False return response class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware): """ Cache middleware that provides basic behavior for many simple sites. 
Also used as the hook point for the cache decorator, which is generated using the decorator-from-middleware utility. """ def __init__(self, cache_timeout=None, **kwargs): # We need to differentiate between "provided, but using default value", # and "not provided". If the value is provided using a default, then # we fall back to system defaults. If it is not provided at all, # we need to use middleware defaults. try: key_prefix = kwargs['key_prefix'] if key_prefix is None: key_prefix = '' except KeyError: key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.key_prefix = key_prefix try: cache_alias = kwargs['cache_alias'] if cache_alias is None: cache_alias = DEFAULT_CACHE_ALIAS except KeyError: cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache_alias = cache_alias if cache_timeout is None: cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS self.cache_timeout = cache_timeout self.cache = caches[self.cache_alias]
johnkit/vtk-dev
refs/heads/master
ThirdParty/Twisted/twisted/plugins/twisted_telnet.py
71
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.application.service import ServiceMaker

# Plugin entry point: registers the telnet shell server so ``twistd``
# can start it via the ``telnet`` tap name.
TwistedTelnet = ServiceMaker(
    "Twisted Telnet Shell Server",
    "twisted.tap.telnet",
    "A simple, telnet-based remote debugging service.",
    "telnet",
)
klen/sailplay
refs/heads/develop
setup.py
1
#!/usr/bin/env python
"""Packaging script for sailplay (API client for sailplay.ru)."""
import re
import sys
from os import path as op

from setuptools import setup
from setuptools.command.test import test as TestCommand


def _read(fname):
    """Return the contents of *fname* next to this file, or '' if unreadable.

    Uses a context manager so the file handle is always closed (the
    original `open(...).read()` leaked the handle).
    """
    try:
        with open(op.join(op.dirname(__file__), fname)) as fh:
            return fh.read()
    except IOError:
        return ''


# Project metadata is parsed out of the module source itself so it is
# defined in exactly one place.
_meta = _read('sailplay.py')
_license = re.search(r'^__license__\s*=\s*"(.*)"', _meta, re.M).group(1)
_project = re.search(r'^__project__\s*=\s*"(.*)"', _meta, re.M).group(1)
_version = re.search(r'^__version__\s*=\s*"(.*)"', _meta, re.M).group(1)

install_requires = [
    l for l in _read('requirements.txt').split('\n')
    if l and not l.startswith('#')]

tests_require = [
    l for l in _read('requirements-tests.txt').split('\n')
    if l and not l.startswith('#')]


class __PyTest(TestCommand):
    # `python setup.py test` entry point that runs the suite via pytest.

    test_args = []
    test_suite = True

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)


setup(
    name=_project,
    version=_version,
    license=_license,
    description='API Client for sailplay.ru',
    long_description=_read('README.rst'),
    # BUG FIX: ('Any') is just the string 'Any' — a one-element tuple
    # needs a trailing comma.  A list is the unambiguous spelling.
    platforms=['Any'],
    keywords="django flask sqlalchemy testing mock stub mongoengine data".split(),  # noqa
    author='Kirill Klenov',
    author_email='horneds@gmail.com',
    url='https://github.com/klen/sailplay',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Natural Language :: Russian',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Testing',
        'Topic :: Utilities',
    ],
    py_modules=['sailplay'],
    include_package_data=True,
    install_requires=install_requires,
    tests_require=tests_require,
    cmdclass={'test': __PyTest},
)

# pylama:ignore=D
ashokrajbathu/boabrock
refs/heads/master
frappe/patches/v4_0/fix_attach_field_file_url.py
32
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals

import frappe


def execute():
    """Prefix legacy 'files/...' Attach-field values with '/' so they are URLs.

    Attach fields can be declared on the doctype itself (tabDocField) or
    added later as customizations (tabCustom Field); both sources are
    patched.
    """
    standard_fields = frappe.db.sql("""select parent, fieldname from `tabDocField` where fieldtype='Attach'""")
    custom_fields = frappe.db.sql("""select dt, fieldname from `tabCustom Field` where fieldtype='Attach'""")

    for doctype, fieldname in standard_fields + custom_fields:
        # Only rows still using the relative 'files/...' form are touched;
        # already-migrated values are left alone.
        frappe.db.sql("""update `tab{doctype}` set `{fieldname}`=concat("/", `{fieldname}`) where `{fieldname}` like 'files/%'""".format(
            doctype=doctype, fieldname=fieldname))
taoyunxing/trafficserver
refs/heads/master
tests/tools/traffic-replay/RandomReplay.py
5
#!/bin/env python3
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import requests
import os
from threading import Thread
import sys
from multiprocessing import current_process
import sessionvalidation.sessionvalidation as sv
from collections import deque
import collections
import lib.result as result
import extractHeader
import mainProcess
import json
import gzip
import NonSSL
import SSLReplay
import h2Replay
import itertools
import random

# Shared shutdown flag read by every worker thread.
bSTOP = False


def session_replay(input, proxy, result_queue):
    '''Replay all transactions of every queued session.

    Sessions are pulled from *input* until the 'STOP' sentinel is seen.
    Each session is replayed inside one requests.Session (one socket /
    TCP connection); every transaction is dispatched pseudo-randomly to
    the non-SSL, SSL or HTTP/2 replayer so all three get exercised.
    '''
    global bSTOP

    while not bSTOP:
        for session in iter(input.get, 'STOP'):
            with requests.Session() as request_session:
                request_session.proxies = proxy
                for txn in session.getTransactionIter():
                    # `type` shadowed the builtin in the original; pick
                    # one of the three replayers at random.
                    choice = random.randint(1, 1000) % 3
                    try:
                        if choice == 0:
                            NonSSL.txn_replay(session._filename, txn, proxy, result_queue, request_session)
                        elif choice == 1:
                            SSLReplay.txn_replay(session._filename, txn, proxy, result_queue, request_session)
                        else:
                            h2Replay.txn_replay(session._filename, txn, proxy, result_queue, request_session)
                    except Exception:
                        # Narrowed from a bare ``except:`` which also
                        # swallowed KeyboardInterrupt/SystemExit.  Any
                        # replay failure aborts this worker and, via the
                        # re-queued sentinel, its siblings (original
                        # behavior).
                        e = sys.exc_info()
                        print("ERROR in replaying: ", e, txn.getRequest().getHeaders())
                        bSTOP = True
                        input.put('STOP')
                        break
        # BUG FIX: two-argument iter() stops *before* yielding the
        # sentinel, so the original in-loop ``if session == 'STOP'``
        # check was unreachable and workers blocked forever on an empty
        # queue.  Flag shutdown here and put the sentinel back for the
        # other worker threads.
        if not bSTOP:
            print("Queue is empty")
            bSTOP = True
            input.put('STOP')


def client_replay(input, proxy, result_queue, nThread):
    '''Start nThread triples of worker threads (SSL, non-SSL, HTTP/2
    session replayers) and block until all of them finish.'''
    threads = []
    for _ in range(nThread):
        # Same start order as the original: SSL, non-SSL, then HTTP/2.
        workers = [
            Thread(target=SSLReplay.session_replay, args=[input, proxy, result_queue]),
            Thread(target=NonSSL.session_replay, args=[input, proxy, result_queue]),
            Thread(target=h2Replay.session_replay, args=[input, proxy, result_queue]),
        ]
        for worker in workers:
            worker.start()
        threads.extend(workers)

    for worker in threads:
        worker.join()
rizumu/django
refs/heads/master
tests/migrations/test_migrations_conflict/0002_second.py
564
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Second migration in the chain: builds on 0001_initial.

    dependencies = [
        ("migrations", "0001_initial"),
    ]

    operations = [
        # Drop the Tribble model and Author's obsolete field.
        migrations.DeleteModel("Tribble"),
        migrations.RemoveField("Author", "silly_field"),
        # Authors gain an integer rating, defaulting to 0.
        migrations.AddField("Author", "rating", models.IntegerField(default=0)),
        # New Book model with a nullable FK to Author.
        migrations.CreateModel(
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("author", models.ForeignKey("migrations.Author", models.SET_NULL, null=True)),
            ],
        ),
    ]
nzavagli/UnrealPy
refs/heads/master
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/cryptography-0.9.3/src/cryptography/hazmat/bindings/openssl/cmac.py
15
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function INCLUDES = """ #if OPENSSL_VERSION_NUMBER >= 0x10001000L #include <openssl/cmac.h> #endif """ TYPES = """ static const int Cryptography_HAS_CMAC; typedef ... CMAC_CTX; """ FUNCTIONS = """ """ MACROS = """ CMAC_CTX *CMAC_CTX_new(void); int CMAC_Init(CMAC_CTX *, const void *, size_t, const EVP_CIPHER *, ENGINE *); int CMAC_Update(CMAC_CTX *, const void *, size_t); int CMAC_Final(CMAC_CTX *, unsigned char *, size_t *); int CMAC_CTX_copy(CMAC_CTX *, const CMAC_CTX *); void CMAC_CTX_free(CMAC_CTX *); """ CUSTOMIZATIONS = """ #if OPENSSL_VERSION_NUMBER < 0x10001000L static const long Cryptography_HAS_CMAC = 0; typedef void CMAC_CTX; CMAC_CTX *(*CMAC_CTX_new)(void) = NULL; int (*CMAC_Init)(CMAC_CTX *, const void *, size_t, const EVP_CIPHER *, ENGINE *) = NULL; int (*CMAC_Update)(CMAC_CTX *, const void *, size_t) = NULL; int (*CMAC_Final)(CMAC_CTX *, unsigned char *, size_t *) = NULL; int (*CMAC_CTX_copy)(CMAC_CTX *, const CMAC_CTX *) = NULL; void (*CMAC_CTX_free)(CMAC_CTX *) = NULL; #else static const long Cryptography_HAS_CMAC = 1; #endif """ CONDITIONAL_NAMES = { "Cryptography_HAS_CMAC": [ "CMAC_CTX_new", "CMAC_Init", "CMAC_Update", "CMAC_Final", "CMAC_CTX_copy", "CMAC_CTX_free", ], }
GoogleCloudPlatform/functions-framework-python
refs/heads/master
tests/test_cli.py
1
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pretend import pytest from click.testing import CliRunner import functions_framework from functions_framework._cli import _cli def test_cli_no_arguments(): runner = CliRunner() result = runner.invoke(_cli) assert result.exit_code == 2 assert "Missing option '--target'" in result.output @pytest.mark.parametrize( "args, env, create_app_calls, run_calls", [ ( ["--target", "foo"], {}, [pretend.call("foo", None, "http")], [pretend.call("0.0.0.0", 8080)], ), ( [], {"FUNCTION_TARGET": "foo"}, [pretend.call("foo", None, "http")], [pretend.call("0.0.0.0", 8080)], ), ( ["--target", "foo", "--source", "/path/to/source.py"], {}, [pretend.call("foo", "/path/to/source.py", "http")], [pretend.call("0.0.0.0", 8080)], ), ( [], {"FUNCTION_TARGET": "foo", "FUNCTION_SOURCE": "/path/to/source.py"}, [pretend.call("foo", "/path/to/source.py", "http")], [pretend.call("0.0.0.0", 8080)], ), ( ["--target", "foo", "--signature-type", "event"], {}, [pretend.call("foo", None, "event")], [pretend.call("0.0.0.0", 8080)], ), ( [], {"FUNCTION_TARGET": "foo", "FUNCTION_SIGNATURE_TYPE": "event"}, [pretend.call("foo", None, "event")], [pretend.call("0.0.0.0", 8080)], ), ( ["--target", "foo", "--dry-run"], {}, [pretend.call("foo", None, "http")], [], ), ( [], {"FUNCTION_TARGET": "foo", "DRY_RUN": "True"}, [pretend.call("foo", None, "http")], [], ), ( ["--target", "foo", "--host", "127.0.0.1"], {}, [pretend.call("foo", None, "http")], 
[pretend.call("127.0.0.1", 8080)], ), ( ["--target", "foo", "--debug"], {}, [pretend.call("foo", None, "http")], [pretend.call("0.0.0.0", 8080)], ), ( [], {"FUNCTION_TARGET": "foo", "DEBUG": "True"}, [pretend.call("foo", None, "http")], [pretend.call("0.0.0.0", 8080)], ), ], ) def test_cli(monkeypatch, args, env, create_app_calls, run_calls): wsgi_server = pretend.stub(run=pretend.call_recorder(lambda *a, **kw: None)) wsgi_app = pretend.stub(run=pretend.call_recorder(lambda *a, **kw: None)) create_app = pretend.call_recorder(lambda *a, **kw: wsgi_app) monkeypatch.setattr(functions_framework._cli, "create_app", create_app) create_server = pretend.call_recorder(lambda *a, **kw: wsgi_server) monkeypatch.setattr(functions_framework._cli, "create_server", create_server) runner = CliRunner(env=env) result = runner.invoke(_cli, args) assert result.exit_code == 0 assert create_app.calls == create_app_calls assert wsgi_server.run.calls == run_calls
RicardoJohann/um
refs/heads/master
erpnext/setup/doctype/sales_email_settings/sales_email_settings.py
41
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import _ from frappe.utils import cint from frappe.model.document import Document class SalesEmailSettings(Document): def validate(self): if cint(self.extract_emails) and not (self.email_id and self.host and \ self.username and self.password): frappe.msgprint(_("""Host, Email and Password required if emails are to be pulled"""), raise_exception=True)
atsolakid/edx-platform
refs/heads/master
lms/djangoapps/django_comment_client/tests/test_utils.py
5
# -*- coding: utf-8 -*- import datetime import json import mock from nose.plugins.attrib import attr from pytz import UTC from django.utils.timezone import UTC as django_utc from django.core.urlresolvers import reverse from django.test import TestCase, RequestFactory from edxmako import add_lookup from django_comment_client.tests.factories import RoleFactory from django_comment_client.tests.unicode import UnicodeTestMixin import django_comment_client.utils as utils from courseware.tests.factories import InstructorFactory from courseware.tabs import get_course_tab_list from openedx.core.djangoapps.course_groups.cohorts import set_course_cohort_settings from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory from openedx.core.djangoapps.content.course_structures.models import CourseStructure from openedx.core.djangoapps.util.testing import ContentGroupTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase @attr('shard_1') class DictionaryTestCase(TestCase): def test_extract(self): d = {'cats': 'meow', 'dogs': 'woof'} k = ['cats', 'dogs', 'hamsters'] expected = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None} self.assertEqual(utils.extract(d, k), expected) def test_strip_none(self): d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None} expected = {'cats': 'meow', 'dogs': 'woof'} self.assertEqual(utils.strip_none(d), expected) def test_strip_blank(self): d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': ' ', 'yetis': ''} expected = {'cats': 'meow', 'dogs': 'woof'} self.assertEqual(utils.strip_blank(d), expected) def test_merge_dict(self): d1 = {'cats': 'meow', 'dogs': 'woof'} d2 = {'lions': 'roar', 'ducks': 'quack'} expected = {'cats': 'meow', 'dogs': 'woof', 'lions': 'roar', 'ducks': 'quack'} self.assertEqual(utils.merge_dict(d1, d2), expected) @attr('shard_1') class AccessUtilsTestCase(ModuleStoreTestCase): """ Base testcase class for 
access and roles for the comment client service integration """ def setUp(self): super(AccessUtilsTestCase, self).setUp(create_user=False) self.course = CourseFactory.create() self.course_id = self.course.id self.student_role = RoleFactory(name='Student', course_id=self.course_id) self.moderator_role = RoleFactory(name='Moderator', course_id=self.course_id) self.community_ta_role = RoleFactory(name='Community TA', course_id=self.course_id) self.student1 = UserFactory(username='student', email='student@edx.org') self.student1_enrollment = CourseEnrollmentFactory(user=self.student1) self.student_role.users.add(self.student1) self.student2 = UserFactory(username='student2', email='student2@edx.org') self.student2_enrollment = CourseEnrollmentFactory(user=self.student2) self.moderator = UserFactory(username='moderator', email='staff@edx.org', is_staff=True) self.moderator_enrollment = CourseEnrollmentFactory(user=self.moderator) self.moderator_role.users.add(self.moderator) self.community_ta1 = UserFactory(username='community_ta1', email='community_ta1@edx.org') self.community_ta_role.users.add(self.community_ta1) self.community_ta2 = UserFactory(username='community_ta2', email='community_ta2@edx.org') self.community_ta_role.users.add(self.community_ta2) def test_get_role_ids(self): ret = utils.get_role_ids(self.course_id) expected = {u'Moderator': [3], u'Community TA': [4, 5]} self.assertEqual(ret, expected) def test_has_forum_access(self): ret = utils.has_forum_access('student', self.course_id, 'Student') self.assertTrue(ret) ret = utils.has_forum_access('not_a_student', self.course_id, 'Student') self.assertFalse(ret) ret = utils.has_forum_access('student', self.course_id, 'NotARole') self.assertFalse(ret) @attr('shard_1') class CoursewareContextTestCase(ModuleStoreTestCase): """ Base testcase class for courseware context for the comment client service integration """ def setUp(self): super(CoursewareContextTestCase, self).setUp(create_user=True) self.course = 
CourseFactory.create(org="TestX", number="101", display_name="Test Course") self.discussion1 = ItemFactory.create( parent_location=self.course.location, category="discussion", discussion_id="discussion1", discussion_category="Chapter", discussion_target="Discussion 1" ) self.discussion2 = ItemFactory.create( parent_location=self.course.location, category="discussion", discussion_id="discussion2", discussion_category="Chapter / Section / Subsection", discussion_target="Discussion 2" ) def test_empty(self): utils.add_courseware_context([], self.course, self.user) def test_missing_commentable_id(self): orig = {"commentable_id": "non-inline"} modified = dict(orig) utils.add_courseware_context([modified], self.course, self.user) self.assertEqual(modified, orig) def test_basic(self): threads = [ {"commentable_id": self.discussion1.discussion_id}, {"commentable_id": self.discussion2.discussion_id} ] utils.add_courseware_context(threads, self.course, self.user) def assertThreadCorrect(thread, discussion, expected_title): # pylint: disable=invalid-name """Asserts that the given thread has the expected set of properties""" self.assertEqual( set(thread.keys()), set(["commentable_id", "courseware_url", "courseware_title"]) ) self.assertEqual( thread.get("courseware_url"), reverse( "jump_to", kwargs={ "course_id": self.course.id.to_deprecated_string(), "location": discussion.location.to_deprecated_string() } ) ) self.assertEqual(thread.get("courseware_title"), expected_title) assertThreadCorrect(threads[0], self.discussion1, "Chapter / Discussion 1") assertThreadCorrect(threads[1], self.discussion2, "Subsection / Discussion 2") class CachedDiscussionIdMapTestCase(ModuleStoreTestCase): """ Tests that using the cache of discussion id mappings has the same behavior as searching through the course. 
""" def setUp(self): super(CachedDiscussionIdMapTestCase, self).setUp(create_user=True) self.course = CourseFactory.create(org='TestX', number='101', display_name='Test Course') self.discussion = ItemFactory.create( parent_location=self.course.location, category='discussion', discussion_id='test_discussion_id', discussion_category='Chapter', discussion_target='Discussion 1' ) self.discussion2 = ItemFactory.create( parent_location=self.course.location, category='discussion', discussion_id='test_discussion_id_2', discussion_category='Chapter 2', discussion_target='Discussion 2' ) self.private_discussion = ItemFactory.create( parent_location=self.course.location, category='discussion', discussion_id='private_discussion_id', discussion_category='Chapter 3', discussion_target='Beta Testing', visible_to_staff_only=True ) self.bad_discussion = ItemFactory.create( parent_location=self.course.location, category='discussion', discussion_id='bad_discussion_id', discussion_category=None, discussion_target=None ) def test_cache_returns_correct_key(self): usage_key = utils.get_cached_discussion_key(self.course, 'test_discussion_id') self.assertEqual(usage_key, self.discussion.location) def test_cache_returns_none_if_id_is_not_present(self): usage_key = utils.get_cached_discussion_key(self.course, 'bogus_id') self.assertIsNone(usage_key) def test_cache_raises_exception_if_course_structure_not_cached(self): CourseStructure.objects.all().delete() with self.assertRaises(utils.DiscussionIdMapIsNotCached): utils.get_cached_discussion_key(self.course, 'test_discussion_id') def test_cache_raises_exception_if_discussion_id_not_cached(self): cache = CourseStructure.objects.get(course_id=self.course.id) cache.discussion_id_map_json = None cache.save() with self.assertRaises(utils.DiscussionIdMapIsNotCached): utils.get_cached_discussion_key(self.course, 'test_discussion_id') def test_module_does_not_have_required_keys(self): self.assertTrue(utils.has_required_keys(self.discussion)) 
self.assertFalse(utils.has_required_keys(self.bad_discussion)) def verify_discussion_metadata(self): """Retrieves the metadata for self.discussion and self.discussion2 and verifies that it is correct""" metadata = utils.get_cached_discussion_id_map( self.course, ['test_discussion_id', 'test_discussion_id_2'], self.user ) discussion1 = metadata[self.discussion.discussion_id] discussion2 = metadata[self.discussion2.discussion_id] self.assertEqual(discussion1['location'], self.discussion.location) self.assertEqual(discussion1['title'], 'Chapter / Discussion 1') self.assertEqual(discussion2['location'], self.discussion2.location) self.assertEqual(discussion2['title'], 'Chapter 2 / Discussion 2') def test_get_discussion_id_map_from_cache(self): self.verify_discussion_metadata() def test_get_discussion_id_map_without_cache(self): CourseStructure.objects.all().delete() self.verify_discussion_metadata() def test_get_missing_discussion_id_map_from_cache(self): metadata = utils.get_cached_discussion_id_map(self.course, ['bogus_id'], self.user) self.assertEqual(metadata, {}) def test_get_discussion_id_map_from_cache_without_access(self): user = UserFactory.create() metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], self.user) self.assertEqual(metadata['private_discussion_id']['title'], 'Chapter 3 / Beta Testing') metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], user) self.assertEqual(metadata, {}) def test_get_bad_discussion_id(self): metadata = utils.get_cached_discussion_id_map(self.course, ['bad_discussion_id'], self.user) self.assertEqual(metadata, {}) def test_discussion_id_accessible(self): self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'test_discussion_id')) def test_bad_discussion_id_not_accessible(self): self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bad_discussion_id')) def test_missing_discussion_id_not_accessible(self): 
self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bogus_id')) def test_discussion_id_not_accessible_without_access(self): user = UserFactory.create() self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'private_discussion_id')) self.assertFalse(utils.discussion_category_id_access(self.course, user, 'private_discussion_id')) class CategoryMapTestMixin(object): """ Provides functionality for classes that test `get_discussion_category_map`. """ def assert_category_map_equals(self, expected, requesting_user=None): """ Call `get_discussion_category_map`, and verify that it returns what is expected. """ self.assertEqual( utils.get_discussion_category_map(self.course, requesting_user or self.user), expected ) @attr('shard_1') class CategoryMapTestCase(CategoryMapTestMixin, ModuleStoreTestCase): """ Base testcase class for discussion categories for the comment client service integration """ def setUp(self): super(CategoryMapTestCase, self).setUp(create_user=True) self.course = CourseFactory.create( org="TestX", number="101", display_name="Test Course", # This test needs to use a course that has already started -- # discussion topics only show up if the course has already started, # and the default start date for courses is Jan 1, 2030. 
start=datetime.datetime(2012, 2, 3, tzinfo=UTC) ) # Courses get a default discussion topic on creation, so remove it self.course.discussion_topics = {} self.course.save() self.discussion_num = 0 self.instructor = InstructorFactory(course_key=self.course.id) self.maxDiff = None # pylint: disable=invalid-name def create_discussion(self, discussion_category, discussion_target, **kwargs): self.discussion_num += 1 return ItemFactory.create( parent_location=self.course.location, category="discussion", discussion_id="discussion{}".format(self.discussion_num), discussion_category=discussion_category, discussion_target=discussion_target, **kwargs ) def assert_category_map_equals(self, expected, cohorted_if_in_list=False, exclude_unstarted=True): # pylint: disable=arguments-differ """ Asserts the expected map with the map returned by get_discussion_category_map method. """ self.assertEqual( utils.get_discussion_category_map(self.course, self.instructor, cohorted_if_in_list, exclude_unstarted), expected ) def test_empty(self): self.assert_category_map_equals({"entries": {}, "subcategories": {}, "children": []}) def test_configured_topics(self): self.course.discussion_topics = { "Topic A": {"id": "Topic_A"}, "Topic B": {"id": "Topic_B"}, "Topic C": {"id": "Topic_C"} } def check_cohorted_topics(expected_ids): # pylint: disable=missing-docstring self.assert_category_map_equals( { "entries": { "Topic A": {"id": "Topic_A", "sort_key": "Topic A", "is_cohorted": "Topic_A" in expected_ids}, "Topic B": {"id": "Topic_B", "sort_key": "Topic B", "is_cohorted": "Topic_B" in expected_ids}, "Topic C": {"id": "Topic_C", "sort_key": "Topic C", "is_cohorted": "Topic_C" in expected_ids}, }, "subcategories": {}, "children": ["Topic A", "Topic B", "Topic C"] } ) check_cohorted_topics([]) # default (empty) cohort config set_course_cohort_settings(course_key=self.course.id, is_cohorted=False, cohorted_discussions=[]) check_cohorted_topics([]) set_course_cohort_settings(course_key=self.course.id, 
is_cohorted=True, cohorted_discussions=[]) check_cohorted_topics([]) set_course_cohort_settings( course_key=self.course.id, is_cohorted=True, cohorted_discussions=["Topic_B", "Topic_C"], always_cohort_inline_discussions=False, ) check_cohorted_topics(["Topic_B", "Topic_C"]) set_course_cohort_settings( course_key=self.course.id, is_cohorted=True, cohorted_discussions=["Topic_A", "Some_Other_Topic"], always_cohort_inline_discussions=False, ) check_cohorted_topics(["Topic_A"]) # unlikely case, but make sure it works. set_course_cohort_settings( course_key=self.course.id, is_cohorted=False, cohorted_discussions=["Topic_A"], always_cohort_inline_discussions=False, ) check_cohorted_topics([]) def test_single_inline(self): self.create_discussion("Chapter", "Discussion") self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion": { "id": "discussion1", "sort_key": None, "is_cohorted": False, } }, "subcategories": {}, "children": ["Discussion"] } }, "children": ["Chapter"] } ) def test_inline_with_always_cohort_inline_discussion_flag(self): self.create_discussion("Chapter", "Discussion") set_course_cohort_settings(course_key=self.course.id, is_cohorted=True) self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion": { "id": "discussion1", "sort_key": None, "is_cohorted": True, } }, "subcategories": {}, "children": ["Discussion"] } }, "children": ["Chapter"] } ) def test_inline_without_always_cohort_inline_discussion_flag(self): self.create_discussion("Chapter", "Discussion") set_course_cohort_settings(course_key=self.course.id, is_cohorted=True, always_cohort_inline_discussions=False) self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion": { "id": "discussion1", "sort_key": None, "is_cohorted": False, } }, "subcategories": {}, "children": ["Discussion"] } }, "children": ["Chapter"] }, cohorted_if_in_list=True ) def 
test_get_unstarted_discussion_modules(self): later = datetime.datetime(datetime.MAXYEAR, 1, 1, tzinfo=django_utc()) self.create_discussion("Chapter 1", "Discussion 1", start=later) self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter 1": { "entries": { "Discussion 1": { "id": "discussion1", "sort_key": None, "is_cohorted": False, "start_date": later } }, "subcategories": {}, "children": ["Discussion 1"], "start_date": later, "sort_key": "Chapter 1" } }, "children": ["Chapter 1"] }, cohorted_if_in_list=True, exclude_unstarted=False ) def test_tree(self): self.create_discussion("Chapter 1", "Discussion 1") self.create_discussion("Chapter 1", "Discussion 2") self.create_discussion("Chapter 2", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion") self.create_discussion("Chapter 3 / Section 1", "Discussion") def check_cohorted(is_cohorted): self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter 1": { "entries": { "Discussion 1": { "id": "discussion1", "sort_key": None, "is_cohorted": is_cohorted, }, "Discussion 2": { "id": "discussion2", "sort_key": None, "is_cohorted": is_cohorted, } }, "subcategories": {}, "children": ["Discussion 1", "Discussion 2"] }, "Chapter 2": { "entries": { "Discussion": { "id": "discussion3", "sort_key": None, "is_cohorted": is_cohorted, } }, "subcategories": { "Section 1": { "entries": {}, "subcategories": { "Subsection 1": { "entries": { "Discussion": { "id": "discussion4", "sort_key": None, "is_cohorted": is_cohorted, } }, "subcategories": {}, "children": ["Discussion"] }, "Subsection 2": { "entries": { "Discussion": { "id": "discussion5", "sort_key": None, "is_cohorted": is_cohorted, } }, "subcategories": {}, "children": ["Discussion"] } }, "children": ["Subsection 1", "Subsection 2"] } }, "children": ["Discussion", "Section 1"] }, "Chapter 3": { "entries": {}, "subcategories": { 
"Section 1": { "entries": { "Discussion": { "id": "discussion6", "sort_key": None, "is_cohorted": is_cohorted, } }, "subcategories": {}, "children": ["Discussion"] } }, "children": ["Section 1"] } }, "children": ["Chapter 1", "Chapter 2", "Chapter 3"] } ) # empty / default config check_cohorted(False) # explicitly disabled cohorting set_course_cohort_settings(course_key=self.course.id, is_cohorted=False) check_cohorted(False) # explicitly enabled cohorting set_course_cohort_settings(course_key=self.course.id, is_cohorted=True) check_cohorted(True) def test_tree_with_duplicate_targets(self): self.create_discussion("Chapter 1", "Discussion A") self.create_discussion("Chapter 1", "Discussion B") self.create_discussion("Chapter 1", "Discussion A") # duplicate self.create_discussion("Chapter 1", "Discussion A") # another duplicate self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") # duplicate category_map = utils.get_discussion_category_map(self.course, self.user) chapter1 = category_map["subcategories"]["Chapter 1"] chapter1_discussions = set(["Discussion A", "Discussion B", "Discussion A (1)", "Discussion A (2)"]) self.assertEqual(set(chapter1["children"]), chapter1_discussions) self.assertEqual(set(chapter1["entries"].keys()), chapter1_discussions) chapter2 = category_map["subcategories"]["Chapter 2"] subsection1 = chapter2["subcategories"]["Section 1"]["subcategories"]["Subsection 1"] subsection1_discussions = set(["Discussion", "Discussion (1)"]) self.assertEqual(set(subsection1["children"]), subsection1_discussions) self.assertEqual(set(subsection1["entries"].keys()), subsection1_discussions) def test_start_date_filter(self): now = datetime.datetime.now() later = datetime.datetime.max self.create_discussion("Chapter 1", "Discussion 1", start=now) self.create_discussion("Chapter 1", "Discussion 2 обсуждение", start=later) self.create_discussion("Chapter 2", 
"Discussion", start=now) self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=later) self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=later) self.create_discussion("Chapter 3 / Section 1", "Discussion", start=later) self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter 1": { "entries": { "Discussion 1": { "id": "discussion1", "sort_key": None, "is_cohorted": False, } }, "subcategories": {}, "children": ["Discussion 1"] }, "Chapter 2": { "entries": { "Discussion": { "id": "discussion3", "sort_key": None, "is_cohorted": False, } }, "subcategories": {}, "children": ["Discussion"] } }, "children": ["Chapter 1", "Chapter 2"] } ) self.maxDiff = None def test_sort_inline_explicit(self): self.create_discussion("Chapter", "Discussion 1", sort_key="D") self.create_discussion("Chapter", "Discussion 2", sort_key="A") self.create_discussion("Chapter", "Discussion 3", sort_key="E") self.create_discussion("Chapter", "Discussion 4", sort_key="C") self.create_discussion("Chapter", "Discussion 5", sort_key="B") self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion 1": { "id": "discussion1", "sort_key": "D", "is_cohorted": False, }, "Discussion 2": { "id": "discussion2", "sort_key": "A", "is_cohorted": False, }, "Discussion 3": { "id": "discussion3", "sort_key": "E", "is_cohorted": False, }, "Discussion 4": { "id": "discussion4", "sort_key": "C", "is_cohorted": False, }, "Discussion 5": { "id": "discussion5", "sort_key": "B", "is_cohorted": False, } }, "subcategories": {}, "children": [ "Discussion 2", "Discussion 5", "Discussion 4", "Discussion 1", "Discussion 3" ] } }, "children": ["Chapter"] } ) def test_sort_configured_topics_explicit(self): self.course.discussion_topics = { "Topic A": {"id": "Topic_A", "sort_key": "B"}, "Topic B": {"id": "Topic_B", "sort_key": "C"}, "Topic C": {"id": "Topic_C", "sort_key": "A"} } 
self.assert_category_map_equals( { "entries": { "Topic A": {"id": "Topic_A", "sort_key": "B", "is_cohorted": False}, "Topic B": {"id": "Topic_B", "sort_key": "C", "is_cohorted": False}, "Topic C": {"id": "Topic_C", "sort_key": "A", "is_cohorted": False}, }, "subcategories": {}, "children": ["Topic C", "Topic A", "Topic B"] } ) def test_sort_alpha(self): self.course.discussion_sort_alpha = True self.course.save() self.create_discussion("Chapter", "Discussion D") self.create_discussion("Chapter", "Discussion A") self.create_discussion("Chapter", "Discussion E") self.create_discussion("Chapter", "Discussion C") self.create_discussion("Chapter", "Discussion B") self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter": { "entries": { "Discussion D": { "id": "discussion1", "sort_key": "Discussion D", "is_cohorted": False, }, "Discussion A": { "id": "discussion2", "sort_key": "Discussion A", "is_cohorted": False, }, "Discussion E": { "id": "discussion3", "sort_key": "Discussion E", "is_cohorted": False, }, "Discussion C": { "id": "discussion4", "sort_key": "Discussion C", "is_cohorted": False, }, "Discussion B": { "id": "discussion5", "sort_key": "Discussion B", "is_cohorted": False, } }, "subcategories": {}, "children": [ "Discussion A", "Discussion B", "Discussion C", "Discussion D", "Discussion E" ] } }, "children": ["Chapter"] } ) def test_sort_intermediates(self): self.create_discussion("Chapter B", "Discussion 2") self.create_discussion("Chapter C", "Discussion") self.create_discussion("Chapter A", "Discussion 1") self.create_discussion("Chapter B", "Discussion 1") self.create_discussion("Chapter A", "Discussion 2") self.assert_category_map_equals( { "entries": {}, "subcategories": { "Chapter A": { "entries": { "Discussion 1": { "id": "discussion3", "sort_key": None, "is_cohorted": False, }, "Discussion 2": { "id": "discussion5", "sort_key": None, "is_cohorted": False, } }, "subcategories": {}, "children": ["Discussion 1", "Discussion 2"] }, 
"Chapter B": { "entries": { "Discussion 1": { "id": "discussion4", "sort_key": None, "is_cohorted": False, }, "Discussion 2": { "id": "discussion1", "sort_key": None, "is_cohorted": False, } }, "subcategories": {}, "children": ["Discussion 1", "Discussion 2"] }, "Chapter C": { "entries": { "Discussion": { "id": "discussion2", "sort_key": None, "is_cohorted": False, } }, "subcategories": {}, "children": ["Discussion"] } }, "children": ["Chapter A", "Chapter B", "Chapter C"] } ) def test_ids_empty(self): self.assertEqual(utils.get_discussion_categories_ids(self.course, self.user), []) def test_ids_configured_topics(self): self.course.discussion_topics = { "Topic A": {"id": "Topic_A"}, "Topic B": {"id": "Topic_B"}, "Topic C": {"id": "Topic_C"} } self.assertItemsEqual( utils.get_discussion_categories_ids(self.course, self.user), ["Topic_A", "Topic_B", "Topic_C"] ) def test_ids_inline(self): self.create_discussion("Chapter 1", "Discussion 1") self.create_discussion("Chapter 1", "Discussion 2") self.create_discussion("Chapter 2", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion") self.create_discussion("Chapter 3 / Section 1", "Discussion") self.assertItemsEqual( utils.get_discussion_categories_ids(self.course, self.user), ["discussion1", "discussion2", "discussion3", "discussion4", "discussion5", "discussion6"] ) def test_ids_mixed(self): self.course.discussion_topics = { "Topic A": {"id": "Topic_A"}, "Topic B": {"id": "Topic_B"}, "Topic C": {"id": "Topic_C"} } self.create_discussion("Chapter 1", "Discussion 1") self.create_discussion("Chapter 2", "Discussion") self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") self.assertItemsEqual( utils.get_discussion_categories_ids(self.course, self.user), ["Topic_A", "Topic_B", "Topic_C", "discussion1", "discussion2", "discussion3"] ) @attr('shard_1') class 
ContentGroupCategoryMapTestCase(CategoryMapTestMixin, ContentGroupTestCase): """ Tests `get_discussion_category_map` on discussion modules which are only visible to some content groups. """ def test_staff_user(self): """ Verify that the staff user can access the alpha, beta, and global discussion topics. """ self.assert_category_map_equals( { 'subcategories': { 'Week 1': { 'subcategories': {}, 'children': [ 'Visible to Alpha', 'Visible to Beta', 'Visible to Everyone' ], 'entries': { 'Visible to Alpha': { 'sort_key': None, 'is_cohorted': True, 'id': 'alpha_group_discussion' }, 'Visible to Beta': { 'sort_key': None, 'is_cohorted': True, 'id': 'beta_group_discussion' }, 'Visible to Everyone': { 'sort_key': None, 'is_cohorted': True, 'id': 'global_group_discussion' } } } }, 'children': ['General', 'Week 1'], 'entries': { 'General': { 'sort_key': 'General', 'is_cohorted': False, 'id': 'i4x-org-number-course-run' } } }, requesting_user=self.staff_user ) def test_alpha_user(self): """ Verify that the alpha user can access the alpha and global discussion topics. """ self.assert_category_map_equals( { 'subcategories': { 'Week 1': { 'subcategories': {}, 'children': [ 'Visible to Alpha', 'Visible to Everyone' ], 'entries': { 'Visible to Alpha': { 'sort_key': None, 'is_cohorted': True, 'id': 'alpha_group_discussion' }, 'Visible to Everyone': { 'sort_key': None, 'is_cohorted': True, 'id': 'global_group_discussion' } } } }, 'children': ['General', 'Week 1'], 'entries': { 'General': { 'sort_key': 'General', 'is_cohorted': False, 'id': 'i4x-org-number-course-run' } } }, requesting_user=self.alpha_user ) def test_beta_user(self): """ Verify that the beta user can access the beta and global discussion topics. 
""" self.assert_category_map_equals( { 'subcategories': { 'Week 1': { 'subcategories': {}, 'children': [ 'Visible to Beta', 'Visible to Everyone' ], 'entries': { 'Visible to Beta': { 'sort_key': None, 'is_cohorted': True, 'id': 'beta_group_discussion' }, 'Visible to Everyone': { 'sort_key': None, 'is_cohorted': True, 'id': 'global_group_discussion' } } } }, 'children': ['General', 'Week 1'], 'entries': { 'General': { 'sort_key': 'General', 'is_cohorted': False, 'id': 'i4x-org-number-course-run' } } }, requesting_user=self.beta_user ) def test_non_cohorted_user(self): """ Verify that the non-cohorted user can access the global discussion topic. """ self.assert_category_map_equals( { 'subcategories': { 'Week 1': { 'subcategories': {}, 'children': [ 'Visible to Everyone' ], 'entries': { 'Visible to Everyone': { 'sort_key': None, 'is_cohorted': True, 'id': 'global_group_discussion' } } } }, 'children': ['General', 'Week 1'], 'entries': { 'General': { 'sort_key': 'General', 'is_cohorted': False, 'id': 'i4x-org-number-course-run' } } }, requesting_user=self.non_cohorted_user ) class JsonResponseTestCase(TestCase, UnicodeTestMixin): def _test_unicode_data(self, text): response = utils.JsonResponse(text) reparsed = json.loads(response.content) self.assertEqual(reparsed, text) @attr('shard_1') class RenderMustacheTests(TestCase): """ Test the `render_mustache` utility function. """ @mock.patch('edxmako.LOOKUP', {}) def test_it(self): """ Basic test. """ add_lookup('main', '', package=__name__) self.assertEqual(utils.render_mustache('test.mustache', {}), 'Testing 1 2 3.\n') class DiscussionTabTestCase(ModuleStoreTestCase): """ Test visibility of the discussion tab. 
""" def setUp(self): super(DiscussionTabTestCase, self).setUp() self.course = CourseFactory.create() self.enrolled_user = UserFactory.create() self.staff_user = AdminFactory.create() CourseEnrollmentFactory.create(user=self.enrolled_user, course_id=self.course.id) self.unenrolled_user = UserFactory.create() def discussion_tab_present(self, user): """ Returns true if the user has access to the discussion tab. """ request = RequestFactory().request() request.user = user all_tabs = get_course_tab_list(request, self.course) return any(tab.type == 'discussion' for tab in all_tabs) def test_tab_access(self): with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': True}): self.assertTrue(self.discussion_tab_present(self.staff_user)) self.assertTrue(self.discussion_tab_present(self.enrolled_user)) self.assertFalse(self.discussion_tab_present(self.unenrolled_user)) @mock.patch('ccx.overrides.get_current_ccx') def test_tab_settings(self, mock_get_ccx): mock_get_ccx.return_value = True with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': False}): self.assertFalse(self.discussion_tab_present(self.enrolled_user)) with self.settings(FEATURES={'CUSTOM_COURSES_EDX': True}): self.assertFalse(self.discussion_tab_present(self.enrolled_user))
LearningProgress/LearningProgress
refs/heads/master
tests/test_home.py
2
from django.test import TestCase
from django.test.client import Client


class Home(TestCase):
    """
    Tests home view.
    """

    def test_get(self):
        # The root URL should respond successfully to a plain GET.
        resp = Client().get('/')
        self.assertEqual(resp.status_code, 200)
moylop260/odoo-dev
refs/heads/master
addons/google_calendar/__init__.py
436
import res_config import google_calendar import controllers
will-iam/Variant
refs/heads/master
casepy/eulerRuO2/nSedov128x128/chars.py
2
import os
import sys

sys.path.insert(1, os.path.join(sys.path[0], '../../../'))

import script.rio as io
import script.initial_condition.sedov as sedov

# Domain properties
lx = 1.2
ly = 1.2
Nx = 128
Ny = 128

# Scheme execution options
T = 1.0
CFL = 0.5
gamma = 1.4
BClayer = 1

quantityList = ['rho', 'rhou_x', 'rhou_y', 'rhoE']


def buildme(quantityDict, coords_to_uid, coords_to_bc):
    """Populate *quantityDict* with the Sedov initial condition.

    Delegates to the shared Sedov builder, passing this case's grid
    resolution (Nx x Ny), physical extent (lx x ly) and boundary-layer
    width (BClayer).
    """
    sedov.build(quantityDict, coords_to_uid, coords_to_bc,
                Nx, Ny, lx, ly, BClayer)
kosz85/django
refs/heads/master
tests/string_lookup/__init__.py
12133432
t0n15/final-project
refs/heads/master
tailbone/static/__init__.py
12133432
mancoast/CPythonPyc_test
refs/heads/master
fail/332_test_types.py
28
# Python test set -- part 6, built-in types from test.support import run_unittest, run_with_locale import collections import locale import sys import types import unittest class TypesTests(unittest.TestCase): def test_truth_values(self): if None: self.fail('None is true instead of false') if 0: self.fail('0 is true instead of false') if 0.0: self.fail('0.0 is true instead of false') if '': self.fail('\'\' is true instead of false') if not 1: self.fail('1 is false instead of true') if not 1.0: self.fail('1.0 is false instead of true') if not 'x': self.fail('\'x\' is false instead of true') if not {'x': 1}: self.fail('{\'x\': 1} is false instead of true') def f(): pass class C: pass x = C() if not f: self.fail('f is false instead of true') if not C: self.fail('C is false instead of true') if not sys: self.fail('sys is false instead of true') if not x: self.fail('x is false instead of true') def test_boolean_ops(self): if 0 or 0: self.fail('0 or 0 is true instead of false') if 1 and 1: pass else: self.fail('1 and 1 is false instead of true') if not 1: self.fail('not 1 is true instead of false') def test_comparisons(self): if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass else: self.fail('int comparisons failed') if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass else: self.fail('float comparisons failed') if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass else: self.fail('string comparisons failed') if None is None: pass else: self.fail('identity test failed') def test_float_constructor(self): self.assertRaises(ValueError, float, '') self.assertRaises(ValueError, float, '5\0') def test_zero_division(self): try: 5.0 / 0.0 except ZeroDivisionError: pass else: self.fail("5.0 / 0.0 didn't raise ZeroDivisionError") try: 5.0 // 0.0 except ZeroDivisionError: pass else: self.fail("5.0 // 0.0 didn't raise ZeroDivisionError") try: 5.0 % 0.0 except ZeroDivisionError: pass else: self.fail("5.0 % 0.0 didn't raise ZeroDivisionError") try: 5 / 0 except ZeroDivisionError: pass else: 
self.fail("5 / 0 didn't raise ZeroDivisionError") try: 5 // 0 except ZeroDivisionError: pass else: self.fail("5 // 0 didn't raise ZeroDivisionError") try: 5 % 0 except ZeroDivisionError: pass else: self.fail("5 % 0 didn't raise ZeroDivisionError") def test_numeric_types(self): if 0 != 0.0 or 1 != 1.0 or -1 != -1.0: self.fail('int/float value not equal') # calling built-in types without argument must return 0 if int() != 0: self.fail('int() does not return 0') if float() != 0.0: self.fail('float() does not return 0.0') if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass else: self.fail('int() does not round properly') if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass else: self.fail('float() does not work properly') def test_float_to_string(self): def test(f, result): self.assertEqual(f.__format__('e'), result) self.assertEqual('%e' % f, result) # test all 2 digit exponents, both with __format__ and with # '%' formatting for i in range(-99, 100): test(float('1.5e'+str(i)), '1.500000e{0:+03d}'.format(i)) # test some 3 digit exponents self.assertEqual(1.5e100.__format__('e'), '1.500000e+100') self.assertEqual('%e' % 1.5e100, '1.500000e+100') self.assertEqual(1.5e101.__format__('e'), '1.500000e+101') self.assertEqual('%e' % 1.5e101, '1.500000e+101') self.assertEqual(1.5e-100.__format__('e'), '1.500000e-100') self.assertEqual('%e' % 1.5e-100, '1.500000e-100') self.assertEqual(1.5e-101.__format__('e'), '1.500000e-101') self.assertEqual('%e' % 1.5e-101, '1.500000e-101') self.assertEqual('%g' % 1.0, '1') self.assertEqual('%#g' % 1.0, '1.00000') def test_normal_integers(self): # Ensure the first 256 integers are shared a = 256 b = 128*2 if a is not b: self.fail('256 is not shared') if 12 + 24 != 36: self.fail('int op') if 12 + (-24) != -12: self.fail('int op') if (-12) + 24 != 12: self.fail('int op') if (-12) + (-24) != -36: self.fail('int op') if not 12 < 24: self.fail('int op') if not -24 < -12: self.fail('int op') # Test for a particular 
bug in integer multiply xsize, ysize, zsize = 238, 356, 4 if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912): self.fail('int mul commutativity') # And another. m = -sys.maxsize - 1 for divisor in 1, 2, 4, 8, 16, 32: j = m // divisor prod = divisor * j if prod != m: self.fail("%r * %r == %r != %r" % (divisor, j, prod, m)) if type(prod) is not int: self.fail("expected type(prod) to be int, not %r" % type(prod)) # Check for unified integral type for divisor in 1, 2, 4, 8, 16, 32: j = m // divisor - 1 prod = divisor * j if type(prod) is not int: self.fail("expected type(%r) to be int, not %r" % (prod, type(prod))) # Check for unified integral type m = sys.maxsize for divisor in 1, 2, 4, 8, 16, 32: j = m // divisor + 1 prod = divisor * j if type(prod) is not int: self.fail("expected type(%r) to be int, not %r" % (prod, type(prod))) x = sys.maxsize self.assertIsInstance(x + 1, int, "(sys.maxsize + 1) should have returned int") self.assertIsInstance(-x - 1, int, "(-sys.maxsize - 1) should have returned int") self.assertIsInstance(-x - 2, int, "(-sys.maxsize - 2) should have returned int") try: 5 << -5 except ValueError: pass else: self.fail('int negative shift <<') try: 5 >> -5 except ValueError: pass else: self.fail('int negative shift >>') def test_floats(self): if 12.0 + 24.0 != 36.0: self.fail('float op') if 12.0 + (-24.0) != -12.0: self.fail('float op') if (-12.0) + 24.0 != 12.0: self.fail('float op') if (-12.0) + (-24.0) != -36.0: self.fail('float op') if not 12.0 < 24.0: self.fail('float op') if not -24.0 < -12.0: self.fail('float op') def test_strings(self): if len('') != 0: self.fail('len(\'\')') if len('a') != 1: self.fail('len(\'a\')') if len('abcdef') != 6: self.fail('len(\'abcdef\')') if 'xyz' + 'abcde' != 'xyzabcde': self.fail('string concatenation') if 'xyz'*3 != 'xyzxyzxyz': self.fail('string repetition *3') if 0*'abcde' != '': self.fail('string repetition 0*') if min('abc') != 'a' or max('abc') != 'c': self.fail('min/max string') if 'a' in 'abc' 
and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass else: self.fail('in/not in string') x = 'x'*103 if '%s!'%x != x+'!': self.fail('nasty string formatting bug') #extended slices for strings a = '0123456789' self.assertEqual(a[::], a) self.assertEqual(a[::2], '02468') self.assertEqual(a[1::2], '13579') self.assertEqual(a[::-1],'9876543210') self.assertEqual(a[::-2], '97531') self.assertEqual(a[3::-2], '31') self.assertEqual(a[-100:100:], a) self.assertEqual(a[100:-100:-1], a[::-1]) self.assertEqual(a[-100:100:2], '02468') def test_type_function(self): self.assertRaises(TypeError, type, 1, 2) self.assertRaises(TypeError, type, 1, 2, 3, 4) def test_int__format__(self): def test(i, format_spec, result): # just make sure we have the unified type for integers assert type(i) == int assert type(format_spec) == str self.assertEqual(i.__format__(format_spec), result) test(123456789, 'd', '123456789') test(123456789, 'd', '123456789') test(1, 'c', '\01') # sign and aligning are interdependent test(1, "-", '1') test(-1, "-", '-1') test(1, "-3", ' 1') test(-1, "-3", ' -1') test(1, "+3", ' +1') test(-1, "+3", ' -1') test(1, " 3", ' 1') test(-1, " 3", ' -1') test(1, " ", ' 1') test(-1, " ", '-1') # hex test(3, "x", "3") test(3, "X", "3") test(1234, "x", "4d2") test(-1234, "x", "-4d2") test(1234, "8x", " 4d2") test(-1234, "8x", " -4d2") test(1234, "x", "4d2") test(-1234, "x", "-4d2") test(-3, "x", "-3") test(-3, "X", "-3") test(int('be', 16), "x", "be") test(int('be', 16), "X", "BE") test(-int('be', 16), "x", "-be") test(-int('be', 16), "X", "-BE") # octal test(3, "o", "3") test(-3, "o", "-3") test(65, "o", "101") test(-65, "o", "-101") test(1234, "o", "2322") test(-1234, "o", "-2322") test(1234, "-o", "2322") test(-1234, "-o", "-2322") test(1234, " o", " 2322") test(-1234, " o", "-2322") test(1234, "+o", "+2322") test(-1234, "+o", "-2322") # binary test(3, "b", "11") test(-3, "b", "-11") test(1234, "b", "10011010010") test(-1234, "b", "-10011010010") test(1234, "-b", 
"10011010010") test(-1234, "-b", "-10011010010") test(1234, " b", " 10011010010") test(-1234, " b", "-10011010010") test(1234, "+b", "+10011010010") test(-1234, "+b", "-10011010010") # alternate (#) formatting test(0, "#b", '0b0') test(0, "-#b", '0b0') test(1, "-#b", '0b1') test(-1, "-#b", '-0b1') test(-1, "-#5b", ' -0b1') test(1, "+#5b", ' +0b1') test(100, "+#b", '+0b1100100') test(100, "#012b", '0b0001100100') test(-100, "#012b", '-0b001100100') test(0, "#o", '0o0') test(0, "-#o", '0o0') test(1, "-#o", '0o1') test(-1, "-#o", '-0o1') test(-1, "-#5o", ' -0o1') test(1, "+#5o", ' +0o1') test(100, "+#o", '+0o144') test(100, "#012o", '0o0000000144') test(-100, "#012o", '-0o000000144') test(0, "#x", '0x0') test(0, "-#x", '0x0') test(1, "-#x", '0x1') test(-1, "-#x", '-0x1') test(-1, "-#5x", ' -0x1') test(1, "+#5x", ' +0x1') test(100, "+#x", '+0x64') test(100, "#012x", '0x0000000064') test(-100, "#012x", '-0x000000064') test(123456, "#012x", '0x000001e240') test(-123456, "#012x", '-0x00001e240') test(0, "#X", '0X0') test(0, "-#X", '0X0') test(1, "-#X", '0X1') test(-1, "-#X", '-0X1') test(-1, "-#5X", ' -0X1') test(1, "+#5X", ' +0X1') test(100, "+#X", '+0X64') test(100, "#012X", '0X0000000064') test(-100, "#012X", '-0X000000064') test(123456, "#012X", '0X000001E240') test(-123456, "#012X", '-0X00001E240') test(123, ',', '123') test(-123, ',', '-123') test(1234, ',', '1,234') test(-1234, ',', '-1,234') test(123456, ',', '123,456') test(-123456, ',', '-123,456') test(1234567, ',', '1,234,567') test(-1234567, ',', '-1,234,567') # issue 5782, commas with no specifier type test(1234, '010,', '00,001,234') # Unified type for integers test(10**100, 'd', '1' + '0' * 100) test(10**100+100, 'd', '1' + '0' * 97 + '100') # make sure these are errors # precision disallowed self.assertRaises(ValueError, 3 .__format__, "1.3") # sign not allowed with 'c' self.assertRaises(ValueError, 3 .__format__, "+c") # format spec must be string self.assertRaises(TypeError, 3 .__format__, None) 
self.assertRaises(TypeError, 3 .__format__, 0) # can't have ',' with 'n' self.assertRaises(ValueError, 3 .__format__, ",n") # can't have ',' with 'c' self.assertRaises(ValueError, 3 .__format__, ",c") # ensure that only int and float type specifiers work for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] + [chr(x) for x in range(ord('A'), ord('Z')+1)]): if not format_spec in 'bcdoxXeEfFgGn%': self.assertRaises(ValueError, 0 .__format__, format_spec) self.assertRaises(ValueError, 1 .__format__, format_spec) self.assertRaises(ValueError, (-1) .__format__, format_spec) # ensure that float type specifiers work; format converts # the int to a float for format_spec in 'eEfFgG%': for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]: self.assertEqual(value.__format__(format_spec), float(value).__format__(format_spec)) # Issue 6902 test(123456, "0<20", '12345600000000000000') test(123456, "1<20", '12345611111111111111') test(123456, "*<20", '123456**************') test(123456, "0>20", '00000000000000123456') test(123456, "1>20", '11111111111111123456') test(123456, "*>20", '**************123456') test(123456, "0=20", '00000000000000123456') test(123456, "1=20", '11111111111111123456') test(123456, "*=20", '**************123456') @run_with_locale('LC_NUMERIC', 'en_US.UTF8') def test_float__format__locale(self): # test locale support for __format__ code 'n' for i in range(-10, 10): x = 1234567890.0 * (10.0 ** i) self.assertEqual(locale.format('%g', x, grouping=True), format(x, 'n')) self.assertEqual(locale.format('%.10g', x, grouping=True), format(x, '.10n')) @run_with_locale('LC_NUMERIC', 'en_US.UTF8') def test_int__format__locale(self): # test locale support for __format__ code 'n' for integers x = 123456789012345678901234567890 for i in range(0, 30): self.assertEqual(locale.format('%d', x, grouping=True), format(x, 'n')) # move to the next integer to test x = x // 10 rfmt = ">20n" lfmt = "<20n" cfmt = "^20n" for x in (1234, 12345, 123456, 1234567, 
12345678, 123456789, 1234567890, 12345678900): self.assertEqual(len(format(0, rfmt)), len(format(x, rfmt))) self.assertEqual(len(format(0, lfmt)), len(format(x, lfmt))) self.assertEqual(len(format(0, cfmt)), len(format(x, cfmt))) def test_float__format__(self): def test(f, format_spec, result): self.assertEqual(f.__format__(format_spec), result) self.assertEqual(format(f, format_spec), result) test(0.0, 'f', '0.000000') # the default is 'g', except for empty format spec test(0.0, '', '0.0') test(0.01, '', '0.01') test(0.01, 'g', '0.01') # test for issue 3411 test(1.23, '1', '1.23') test(-1.23, '1', '-1.23') test(1.23, '1g', '1.23') test(-1.23, '1g', '-1.23') test( 1.0, ' g', ' 1') test(-1.0, ' g', '-1') test( 1.0, '+g', '+1') test(-1.0, '+g', '-1') test(1.1234e200, 'g', '1.1234e+200') test(1.1234e200, 'G', '1.1234E+200') test(1.0, 'f', '1.000000') test(-1.0, 'f', '-1.000000') test( 1.0, ' f', ' 1.000000') test(-1.0, ' f', '-1.000000') test( 1.0, '+f', '+1.000000') test(-1.0, '+f', '-1.000000') # Python versions <= 3.0 switched from 'f' to 'g' formatting for # values larger than 1e50. No longer. f = 1.1234e90 for fmt in 'f', 'F': # don't do a direct equality check, since on some # platforms only the first few digits of dtoa # will be reliable result = f.__format__(fmt) self.assertEqual(len(result), 98) self.assertEqual(result[-7], '.') self.assertIn(result[:12], ('112340000000', '112339999999')) f = 1.1234e200 for fmt in 'f', 'F': result = f.__format__(fmt) self.assertEqual(len(result), 208) self.assertEqual(result[-7], '.') self.assertIn(result[:12], ('112340000000', '112339999999')) test( 1.0, 'e', '1.000000e+00') test(-1.0, 'e', '-1.000000e+00') test( 1.0, 'E', '1.000000E+00') test(-1.0, 'E', '-1.000000E+00') test(1.1234e20, 'e', '1.123400e+20') test(1.1234e20, 'E', '1.123400E+20') # No format code means use g, but must have a decimal # and a number after the decimal. This is tricky, because # a totaly empty format specifier means something else. 
# So, just use a sign flag test(1e200, '+g', '+1e+200') test(1e200, '+', '+1e+200') test(1.1e200, '+g', '+1.1e+200') test(1.1e200, '+', '+1.1e+200') # 0 padding test(1234., '010f', '1234.000000') test(1234., '011f', '1234.000000') test(1234., '012f', '01234.000000') test(-1234., '011f', '-1234.000000') test(-1234., '012f', '-1234.000000') test(-1234., '013f', '-01234.000000') test(-1234.12341234, '013f', '-01234.123412') test(-123456.12341234, '011.2f', '-0123456.12') # issue 5782, commas with no specifier type test(1.2, '010,.2', '0,000,001.2') # 0 padding with commas test(1234., '011,f', '1,234.000000') test(1234., '012,f', '1,234.000000') test(1234., '013,f', '01,234.000000') test(-1234., '012,f', '-1,234.000000') test(-1234., '013,f', '-1,234.000000') test(-1234., '014,f', '-01,234.000000') test(-12345., '015,f', '-012,345.000000') test(-123456., '016,f', '-0,123,456.000000') test(-123456., '017,f', '-0,123,456.000000') test(-123456.12341234, '017,f', '-0,123,456.123412') test(-123456.12341234, '013,.2f', '-0,123,456.12') # % formatting test(-1.0, '%', '-100.000000%') # format spec must be string self.assertRaises(TypeError, 3.0.__format__, None) self.assertRaises(TypeError, 3.0.__format__, 0) # other format specifiers shouldn't work on floats, # in particular int specifiers for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] + [chr(x) for x in range(ord('A'), ord('Z')+1)]): if not format_spec in 'eEfFgGn%': self.assertRaises(ValueError, format, 0.0, format_spec) self.assertRaises(ValueError, format, 1.0, format_spec) self.assertRaises(ValueError, format, -1.0, format_spec) self.assertRaises(ValueError, format, 1e100, format_spec) self.assertRaises(ValueError, format, -1e100, format_spec) self.assertRaises(ValueError, format, 1e-100, format_spec) self.assertRaises(ValueError, format, -1e-100, format_spec) # Alternate float formatting test(1.0, '.0e', '1e+00') test(1.0, '#.0e', '1.e+00') test(1.0, '.0f', '1') test(1.0, '#.0f', '1.') test(1.1, 'g', 
'1.1') test(1.1, '#g', '1.10000') test(1.0, '.0%', '100%') test(1.0, '#.0%', '100.%') # Issue 7094: Alternate formatting (specified by #) test(1.0, '0e', '1.000000e+00') test(1.0, '#0e', '1.000000e+00') test(1.0, '0f', '1.000000' ) test(1.0, '#0f', '1.000000') test(1.0, '.1e', '1.0e+00') test(1.0, '#.1e', '1.0e+00') test(1.0, '.1f', '1.0') test(1.0, '#.1f', '1.0') test(1.0, '.1%', '100.0%') test(1.0, '#.1%', '100.0%') # Issue 6902 test(12345.6, "0<20", '12345.60000000000000') test(12345.6, "1<20", '12345.61111111111111') test(12345.6, "*<20", '12345.6*************') test(12345.6, "0>20", '000000000000012345.6') test(12345.6, "1>20", '111111111111112345.6') test(12345.6, "*>20", '*************12345.6') test(12345.6, "0=20", '000000000000012345.6') test(12345.6, "1=20", '111111111111112345.6') test(12345.6, "*=20", '*************12345.6') def test_format_spec_errors(self): # int, float, and string all share the same format spec # mini-language parser. # Check that we can't ask for too many digits. This is # probably a CPython specific test. It tries to put the width # into a C long. self.assertRaises(ValueError, format, 0, '1'*10000 + 'd') # Similar with the precision. self.assertRaises(ValueError, format, 0, '.' + '1'*10000 + 'd') # And may as well test both. self.assertRaises(ValueError, format, 0, '1'*1000 + '.' 
+ '1'*10000 + 'd') # Make sure commas aren't allowed with various type codes for code in 'xXobns': self.assertRaises(ValueError, format, 0, ',' + code) def test_internal_sizes(self): self.assertGreater(object.__basicsize__, 0) self.assertGreater(tuple.__itemsize__, 0) class MappingProxyTests(unittest.TestCase): mappingproxy = types.MappingProxyType def test_constructor(self): class userdict(dict): pass mapping = {'x': 1, 'y': 2} self.assertEqual(self.mappingproxy(mapping), mapping) mapping = userdict(x=1, y=2) self.assertEqual(self.mappingproxy(mapping), mapping) mapping = collections.ChainMap({'x': 1}, {'y': 2}) self.assertEqual(self.mappingproxy(mapping), mapping) self.assertRaises(TypeError, self.mappingproxy, 10) self.assertRaises(TypeError, self.mappingproxy, ("a", "tuple")) self.assertRaises(TypeError, self.mappingproxy, ["a", "list"]) def test_methods(self): attrs = set(dir(self.mappingproxy({}))) - set(dir(object())) self.assertEqual(attrs, { '__contains__', '__getitem__', '__iter__', '__len__', 'copy', 'get', 'items', 'keys', 'values', }) def test_get(self): view = self.mappingproxy({'a': 'A', 'b': 'B'}) self.assertEqual(view['a'], 'A') self.assertEqual(view['b'], 'B') self.assertRaises(KeyError, view.__getitem__, 'xxx') self.assertEqual(view.get('a'), 'A') self.assertIsNone(view.get('xxx')) self.assertEqual(view.get('xxx', 42), 42) def test_missing(self): class dictmissing(dict): def __missing__(self, key): return "missing=%s" % key view = self.mappingproxy(dictmissing(x=1)) self.assertEqual(view['x'], 1) self.assertEqual(view['y'], 'missing=y') self.assertEqual(view.get('x'), 1) self.assertEqual(view.get('y'), None) self.assertEqual(view.get('y', 42), 42) self.assertTrue('x' in view) self.assertFalse('y' in view) def test_customdict(self): class customdict(dict): def __contains__(self, key): if key == 'magic': return True else: return dict.__contains__(self, key) def __iter__(self): return iter(('iter',)) def __len__(self): return 500 def copy(self): 
return 'copy' def keys(self): return 'keys' def items(self): return 'items' def values(self): return 'values' def __getitem__(self, key): return "getitem=%s" % dict.__getitem__(self, key) def get(self, key, default=None): return "get=%s" % dict.get(self, key, 'default=%r' % default) custom = customdict({'key': 'value'}) view = self.mappingproxy(custom) self.assertTrue('key' in view) self.assertTrue('magic' in view) self.assertFalse('xxx' in view) self.assertEqual(view['key'], 'getitem=value') self.assertRaises(KeyError, view.__getitem__, 'xxx') self.assertEqual(tuple(view), ('iter',)) self.assertEqual(len(view), 500) self.assertEqual(view.copy(), 'copy') self.assertEqual(view.get('key'), 'get=value') self.assertEqual(view.get('xxx'), 'get=default=None') self.assertEqual(view.items(), 'items') self.assertEqual(view.keys(), 'keys') self.assertEqual(view.values(), 'values') def test_chainmap(self): d1 = {'x': 1} d2 = {'y': 2} mapping = collections.ChainMap(d1, d2) view = self.mappingproxy(mapping) self.assertTrue('x' in view) self.assertTrue('y' in view) self.assertFalse('z' in view) self.assertEqual(view['x'], 1) self.assertEqual(view['y'], 2) self.assertRaises(KeyError, view.__getitem__, 'z') self.assertEqual(tuple(sorted(view)), ('x', 'y')) self.assertEqual(len(view), 2) copy = view.copy() self.assertIsNot(copy, mapping) self.assertIsInstance(copy, collections.ChainMap) self.assertEqual(copy, mapping) self.assertEqual(view.get('x'), 1) self.assertEqual(view.get('y'), 2) self.assertIsNone(view.get('z')) self.assertEqual(tuple(sorted(view.items())), (('x', 1), ('y', 2))) self.assertEqual(tuple(sorted(view.keys())), ('x', 'y')) self.assertEqual(tuple(sorted(view.values())), (1, 2)) def test_contains(self): view = self.mappingproxy(dict.fromkeys('abc')) self.assertTrue('a' in view) self.assertTrue('b' in view) self.assertTrue('c' in view) self.assertFalse('xxx' in view) def test_views(self): mapping = {} view = self.mappingproxy(mapping) keys = view.keys() values = 
view.values() items = view.items() self.assertEqual(list(keys), []) self.assertEqual(list(values), []) self.assertEqual(list(items), []) mapping['key'] = 'value' self.assertEqual(list(keys), ['key']) self.assertEqual(list(values), ['value']) self.assertEqual(list(items), [('key', 'value')]) def test_len(self): for expected in range(6): data = dict.fromkeys('abcde'[:expected]) self.assertEqual(len(data), expected) view = self.mappingproxy(data) self.assertEqual(len(view), expected) def test_iterators(self): keys = ('x', 'y') values = (1, 2) items = tuple(zip(keys, values)) view = self.mappingproxy(dict(items)) self.assertEqual(set(view), set(keys)) self.assertEqual(set(view.keys()), set(keys)) self.assertEqual(set(view.values()), set(values)) self.assertEqual(set(view.items()), set(items)) def test_copy(self): original = {'key1': 27, 'key2': 51, 'key3': 93} view = self.mappingproxy(original) copy = view.copy() self.assertEqual(type(copy), dict) self.assertEqual(copy, original) original['key1'] = 70 self.assertEqual(view['key1'], 70) self.assertEqual(copy['key1'], 27) class ClassCreationTests(unittest.TestCase): class Meta(type): def __init__(cls, name, bases, ns, **kw): super().__init__(name, bases, ns) @staticmethod def __new__(mcls, name, bases, ns, **kw): return super().__new__(mcls, name, bases, ns) @classmethod def __prepare__(mcls, name, bases, **kw): ns = super().__prepare__(name, bases) ns["y"] = 1 ns.update(kw) return ns def test_new_class_basics(self): C = types.new_class("C") self.assertEqual(C.__name__, "C") self.assertEqual(C.__bases__, (object,)) def test_new_class_subclass(self): C = types.new_class("C", (int,)) self.assertTrue(issubclass(C, int)) def test_new_class_meta(self): Meta = self.Meta settings = {"metaclass": Meta, "z": 2} # We do this twice to make sure the passed in dict isn't mutated for i in range(2): C = types.new_class("C" + str(i), (), settings) self.assertIsInstance(C, Meta) self.assertEqual(C.y, 1) self.assertEqual(C.z, 2) def 
test_new_class_exec_body(self): Meta = self.Meta def func(ns): ns["x"] = 0 C = types.new_class("C", (), {"metaclass": Meta, "z": 2}, func) self.assertIsInstance(C, Meta) self.assertEqual(C.x, 0) self.assertEqual(C.y, 1) self.assertEqual(C.z, 2) def test_new_class_metaclass_keywords(self): #Test that keywords are passed to the metaclass: def meta_func(name, bases, ns, **kw): return name, bases, ns, kw res = types.new_class("X", (int, object), dict(metaclass=meta_func, x=0)) self.assertEqual(res, ("X", (int, object), {}, {"x": 0})) def test_new_class_defaults(self): # Test defaults/keywords: C = types.new_class("C", (), {}, None) self.assertEqual(C.__name__, "C") self.assertEqual(C.__bases__, (object,)) def test_new_class_meta_with_base(self): Meta = self.Meta def func(ns): ns["x"] = 0 C = types.new_class(name="C", bases=(int,), kwds=dict(metaclass=Meta, z=2), exec_body=func) self.assertTrue(issubclass(C, int)) self.assertIsInstance(C, Meta) self.assertEqual(C.x, 0) self.assertEqual(C.y, 1) self.assertEqual(C.z, 2) # Many of the following tests are derived from test_descr.py def test_prepare_class(self): # Basic test of metaclass derivation expected_ns = {} class A(type): def __new__(*args, **kwargs): return type.__new__(*args, **kwargs) def __prepare__(*args): return expected_ns B = types.new_class("B", (object,)) C = types.new_class("C", (object,), {"metaclass": A}) # The most derived metaclass of D is A rather than type. 
meta, ns, kwds = types.prepare_class("D", (B, C), {"metaclass": type}) self.assertIs(meta, A) self.assertIs(ns, expected_ns) self.assertEqual(len(kwds), 0) def test_metaclass_derivation(self): # issue1294232: correct metaclass calculation new_calls = [] # to check the order of __new__ calls class AMeta(type): def __new__(mcls, name, bases, ns): new_calls.append('AMeta') return super().__new__(mcls, name, bases, ns) @classmethod def __prepare__(mcls, name, bases): return {} class BMeta(AMeta): def __new__(mcls, name, bases, ns): new_calls.append('BMeta') return super().__new__(mcls, name, bases, ns) @classmethod def __prepare__(mcls, name, bases): ns = super().__prepare__(name, bases) ns['BMeta_was_here'] = True return ns A = types.new_class("A", (), {"metaclass": AMeta}) self.assertEqual(new_calls, ['AMeta']) new_calls.clear() B = types.new_class("B", (), {"metaclass": BMeta}) # BMeta.__new__ calls AMeta.__new__ with super: self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() C = types.new_class("C", (A, B)) # The most derived metaclass is BMeta: self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() # BMeta.__prepare__ should've been called: self.assertIn('BMeta_was_here', C.__dict__) # The order of the bases shouldn't matter: C2 = types.new_class("C2", (B, A)) self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() self.assertIn('BMeta_was_here', C2.__dict__) # Check correct metaclass calculation when a metaclass is declared: D = types.new_class("D", (C,), {"metaclass": type}) self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() self.assertIn('BMeta_was_here', D.__dict__) E = types.new_class("E", (C,), {"metaclass": AMeta}) self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() self.assertIn('BMeta_was_here', E.__dict__) def test_metaclass_override_function(self): # Special case: the given metaclass isn't a class, # so there is no metaclass calculation. 
class A(metaclass=self.Meta): pass marker = object() def func(*args, **kwargs): return marker X = types.new_class("X", (), {"metaclass": func}) Y = types.new_class("Y", (object,), {"metaclass": func}) Z = types.new_class("Z", (A,), {"metaclass": func}) self.assertIs(marker, X) self.assertIs(marker, Y) self.assertIs(marker, Z) def test_metaclass_override_callable(self): # The given metaclass is a class, # but not a descendant of type. new_calls = [] # to check the order of __new__ calls prepare_calls = [] # to track __prepare__ calls class ANotMeta: def __new__(mcls, *args, **kwargs): new_calls.append('ANotMeta') return super().__new__(mcls) @classmethod def __prepare__(mcls, name, bases): prepare_calls.append('ANotMeta') return {} class BNotMeta(ANotMeta): def __new__(mcls, *args, **kwargs): new_calls.append('BNotMeta') return super().__new__(mcls) @classmethod def __prepare__(mcls, name, bases): prepare_calls.append('BNotMeta') return super().__prepare__(name, bases) A = types.new_class("A", (), {"metaclass": ANotMeta}) self.assertIs(ANotMeta, type(A)) self.assertEqual(prepare_calls, ['ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['ANotMeta']) new_calls.clear() B = types.new_class("B", (), {"metaclass": BNotMeta}) self.assertIs(BNotMeta, type(B)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() C = types.new_class("C", (A, B)) self.assertIs(BNotMeta, type(C)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() C2 = types.new_class("C2", (B, A)) self.assertIs(BNotMeta, type(C2)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() # This is a TypeError, because of a metaclass conflict: # BNotMeta is neither a subclass, nor a superclass of type with 
self.assertRaises(TypeError): D = types.new_class("D", (C,), {"metaclass": type}) E = types.new_class("E", (C,), {"metaclass": ANotMeta}) self.assertIs(BNotMeta, type(E)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() F = types.new_class("F", (object(), C)) self.assertIs(BNotMeta, type(F)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() F2 = types.new_class("F2", (C, object())) self.assertIs(BNotMeta, type(F2)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() # TypeError: BNotMeta is neither a # subclass, nor a superclass of int with self.assertRaises(TypeError): X = types.new_class("X", (C, int())) with self.assertRaises(TypeError): X = types.new_class("X", (int(), C)) class SimpleNamespaceTests(unittest.TestCase): def test_constructor(self): ns1 = types.SimpleNamespace() ns2 = types.SimpleNamespace(x=1, y=2) ns3 = types.SimpleNamespace(**dict(x=1, y=2)) with self.assertRaises(TypeError): types.SimpleNamespace(1, 2, 3) self.assertEqual(len(ns1.__dict__), 0) self.assertEqual(vars(ns1), {}) self.assertEqual(len(ns2.__dict__), 2) self.assertEqual(vars(ns2), {'y': 2, 'x': 1}) self.assertEqual(len(ns3.__dict__), 2) self.assertEqual(vars(ns3), {'y': 2, 'x': 1}) def test_unbound(self): ns1 = vars(types.SimpleNamespace()) ns2 = vars(types.SimpleNamespace(x=1, y=2)) self.assertEqual(ns1, {}) self.assertEqual(ns2, {'y': 2, 'x': 1}) def test_underlying_dict(self): ns1 = types.SimpleNamespace() ns2 = types.SimpleNamespace(x=1, y=2) ns3 = types.SimpleNamespace(a=True, b=False) mapping = ns3.__dict__ del ns3 self.assertEqual(ns1.__dict__, {}) self.assertEqual(ns2.__dict__, {'y': 2, 'x': 1}) self.assertEqual(mapping, dict(a=True, b=False)) def test_attrget(self): ns = 
types.SimpleNamespace(x=1, y=2, w=3) self.assertEqual(ns.x, 1) self.assertEqual(ns.y, 2) self.assertEqual(ns.w, 3) with self.assertRaises(AttributeError): ns.z def test_attrset(self): ns1 = types.SimpleNamespace() ns2 = types.SimpleNamespace(x=1, y=2, w=3) ns1.a = 'spam' ns1.b = 'ham' ns2.z = 4 ns2.theta = None self.assertEqual(ns1.__dict__, dict(a='spam', b='ham')) self.assertEqual(ns2.__dict__, dict(x=1, y=2, w=3, z=4, theta=None)) def test_attrdel(self): ns1 = types.SimpleNamespace() ns2 = types.SimpleNamespace(x=1, y=2, w=3) with self.assertRaises(AttributeError): del ns1.spam with self.assertRaises(AttributeError): del ns2.spam del ns2.y self.assertEqual(vars(ns2), dict(w=3, x=1)) ns2.y = 'spam' self.assertEqual(vars(ns2), dict(w=3, x=1, y='spam')) del ns2.y self.assertEqual(vars(ns2), dict(w=3, x=1)) ns1.spam = 5 self.assertEqual(vars(ns1), dict(spam=5)) del ns1.spam self.assertEqual(vars(ns1), {}) def test_repr(self): ns1 = types.SimpleNamespace(x=1, y=2, w=3) ns2 = types.SimpleNamespace() ns2.x = "spam" ns2._y = 5 self.assertEqual(repr(ns1), "namespace(w=3, x=1, y=2)") self.assertEqual(repr(ns2), "namespace(_y=5, x='spam')") def test_nested(self): ns1 = types.SimpleNamespace(a=1, b=2) ns2 = types.SimpleNamespace() ns3 = types.SimpleNamespace(x=ns1) ns2.spam = ns1 ns2.ham = '?' 
ns2.spam = ns3 self.assertEqual(vars(ns1), dict(a=1, b=2)) self.assertEqual(vars(ns2), dict(spam=ns3, ham='?')) self.assertEqual(ns2.spam, ns3) self.assertEqual(vars(ns3), dict(x=ns1)) self.assertEqual(ns3.x.a, 1) def test_recursive(self): ns1 = types.SimpleNamespace(c='cookie') ns2 = types.SimpleNamespace() ns3 = types.SimpleNamespace(x=1) ns1.spam = ns1 ns2.spam = ns3 ns3.spam = ns2 self.assertEqual(ns1.spam, ns1) self.assertEqual(ns1.spam.spam, ns1) self.assertEqual(ns1.spam.spam, ns1.spam) self.assertEqual(ns2.spam, ns3) self.assertEqual(ns3.spam, ns2) self.assertEqual(ns2.spam.spam, ns2) def test_recursive_repr(self): ns1 = types.SimpleNamespace(c='cookie') ns2 = types.SimpleNamespace() ns3 = types.SimpleNamespace(x=1) ns1.spam = ns1 ns2.spam = ns3 ns3.spam = ns2 self.assertEqual(repr(ns1), "namespace(c='cookie', spam=namespace(...))") self.assertEqual(repr(ns2), "namespace(spam=namespace(spam=namespace(...), x=1))") def test_as_dict(self): ns = types.SimpleNamespace(spam='spamspamspam') with self.assertRaises(TypeError): len(ns) with self.assertRaises(TypeError): iter(ns) with self.assertRaises(TypeError): 'spam' in ns with self.assertRaises(TypeError): ns['spam'] def test_subclass(self): class Spam(types.SimpleNamespace): pass spam = Spam(ham=8, eggs=9) self.assertIs(type(spam), Spam) self.assertEqual(vars(spam), {'ham': 8, 'eggs': 9}) def test_main(): run_unittest(TypesTests, MappingProxyTests, ClassCreationTests, SimpleNamespaceTests) if __name__ == '__main__': test_main()
TNT-Samuel/Coding-Projects
refs/heads/master
DNS Server/Source - Copy/Lib/tkinter/test/test_tkinter/test_font.py
10
import unittest import tkinter from tkinter import font from test.support import requires, run_unittest, gc_collect from tkinter.test.support import AbstractTkTest requires('gui') fontname = "TkDefaultFont" class FontTest(AbstractTkTest, unittest.TestCase): @classmethod def setUpClass(cls): AbstractTkTest.setUpClass.__func__(cls) try: cls.font = font.Font(root=cls.root, name=fontname, exists=True) except tkinter.TclError: cls.font = font.Font(root=cls.root, name=fontname, exists=False) def test_configure(self): options = self.font.configure() self.assertGreaterEqual(set(options), {'family', 'size', 'weight', 'slant', 'underline', 'overstrike'}) for key in options: self.assertEqual(self.font.cget(key), options[key]) self.assertEqual(self.font[key], options[key]) for key in 'family', 'weight', 'slant': self.assertIsInstance(options[key], str) self.assertIsInstance(self.font.cget(key), str) self.assertIsInstance(self.font[key], str) sizetype = int if self.wantobjects else str for key in 'size', 'underline', 'overstrike': self.assertIsInstance(options[key], sizetype) self.assertIsInstance(self.font.cget(key), sizetype) self.assertIsInstance(self.font[key], sizetype) def test_unicode_family(self): family = 'MS \u30b4\u30b7\u30c3\u30af' try: f = font.Font(root=self.root, family=family, exists=True) except tkinter.TclError: f = font.Font(root=self.root, family=family, exists=False) self.assertEqual(f.cget('family'), family) del f gc_collect() def test_actual(self): options = self.font.actual() self.assertGreaterEqual(set(options), {'family', 'size', 'weight', 'slant', 'underline', 'overstrike'}) for key in options: self.assertEqual(self.font.actual(key), options[key]) for key in 'family', 'weight', 'slant': self.assertIsInstance(options[key], str) self.assertIsInstance(self.font.actual(key), str) sizetype = int if self.wantobjects else str for key in 'size', 'underline', 'overstrike': self.assertIsInstance(options[key], sizetype) 
self.assertIsInstance(self.font.actual(key), sizetype) def test_name(self): self.assertEqual(self.font.name, fontname) self.assertEqual(str(self.font), fontname) def test_eq(self): font1 = font.Font(root=self.root, name=fontname, exists=True) font2 = font.Font(root=self.root, name=fontname, exists=True) self.assertIsNot(font1, font2) self.assertEqual(font1, font2) self.assertNotEqual(font1, font1.copy()) self.assertNotEqual(font1, 0) def test_measure(self): self.assertIsInstance(self.font.measure('abc'), int) def test_metrics(self): metrics = self.font.metrics() self.assertGreaterEqual(set(metrics), {'ascent', 'descent', 'linespace', 'fixed'}) for key in metrics: self.assertEqual(self.font.metrics(key), metrics[key]) self.assertIsInstance(metrics[key], int) self.assertIsInstance(self.font.metrics(key), int) def test_families(self): families = font.families(self.root) self.assertIsInstance(families, tuple) self.assertTrue(families) for family in families: self.assertIsInstance(family, str) self.assertTrue(family) def test_names(self): names = font.names(self.root) self.assertIsInstance(names, tuple) self.assertTrue(names) for name in names: self.assertIsInstance(name, str) self.assertTrue(name) self.assertIn(fontname, names) tests_gui = (FontTest, ) if __name__ == "__main__": run_unittest(*tests_gui)
mojeto/django
refs/heads/master
django/db/models/fields/related_descriptors.py
19
""" Accessors for related objects. When a field defines a relation between two models, each model class provides an attribute to access related instances of the other model class (unless the reverse accessor has been disabled with related_name='+'). Accessors are implemented as descriptors in order to customize access and assignment. This module defines the descriptor classes. Forward accessors follow foreign keys. Reverse accessors trace them back. For example, with the following models:: class Parent(Model): pass class Child(Model): parent = ForeignKey(Parent, related_name='children') ``child.parent`` is a forward many-to-one relation. ``parent.children`` is a reverse many-to-one relation. There are three types of relations (many-to-one, one-to-one, and many-to-many) and two directions (forward and reverse) for a total of six combinations. 1. Related instance on the forward side of a many-to-one relation: ``ForwardManyToOneDescriptor``. Uniqueness of foreign key values is irrelevant to accessing the related instance, making the many-to-one and one-to-one cases identical as far as the descriptor is concerned. The constraint is checked upstream (unicity validation in forms) or downstream (unique indexes in the database). 2. Related instance on the forward side of a one-to-one relation: ``ForwardOneToOneDescriptor``. It avoids querying the database when accessing the parent link field in a multi-table inheritance scenario. 3. Related instance on the reverse side of a one-to-one relation: ``ReverseOneToOneDescriptor``. One-to-one relations are asymmetrical, despite the apparent symmetry of the name, because they're implemented in the database with a foreign key from one table to another. As a consequence ``ReverseOneToOneDescriptor`` is slightly different from ``ForwardManyToOneDescriptor``. 4. Related objects manager for related instances on the reverse side of a many-to-one relation: ``ReverseManyToOneDescriptor``. 
Unlike the previous two classes, this one provides access to a collection of objects. It returns a manager rather than an instance. 5. Related objects manager for related instances on the forward or reverse sides of a many-to-many relation: ``ManyToManyDescriptor``. Many-to-many relations are symmetrical. The syntax of Django models requires declaring them on one side but that's an implementation detail. They could be declared on the other side without any change in behavior. Therefore the forward and reverse descriptors can be the same. If you're looking for ``ForwardManyToManyDescriptor`` or ``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead. """ from operator import attrgetter from django.db import connections, router, transaction from django.db.models import Q, signals from django.db.models.query import QuerySet from django.utils.functional import cached_property class ForwardManyToOneDescriptor: """ Accessor to the related object on the forward side of a many-to-one or one-to-one (via ForwardOneToOneDescriptor subclass) relation. In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``child.parent`` is a ``ForwardManyToOneDescriptor`` instance. """ def __init__(self, field_with_rel): self.field = field_with_rel self.cache_name = self.field.get_cache_name() @cached_property def RelatedObjectDoesNotExist(self): # The exception can't be created at initialization time since the # related model might not be resolved yet; `rel.model` might still be # a string model reference. 
return type( 'RelatedObjectDoesNotExist', (self.field.remote_field.model.DoesNotExist, AttributeError), {} ) def is_cached(self, instance): return hasattr(instance, self.cache_name) def get_queryset(self, **hints): return self.field.remote_field.model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.field.get_foreign_related_value instance_attr = self.field.get_local_related_value instances_dict = {instance_attr(inst): inst for inst in instances} related_field = self.field.foreign_related_fields[0] # FIXME: This will need to be revisited when we introduce support for # composite fields. In the meantime we take this practical approach to # solve a regression on 1.6 when the reverse manager in hidden # (related_name ends with a '+'). Refs #21410. # The check for len(...) == 1 is a special case that allows the query # to be join-less and smaller. Refs #21760. if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1: query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)} else: query = {'%s__in' % self.field.related_query_name(): instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. if not self.field.remote_field.multiple: rel_obj_cache_name = self.field.remote_field.get_cache_name() for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, rel_obj_cache_name, instance) return queryset, rel_obj_attr, instance_attr, True, self.cache_name def get_object(self, instance): qs = self.get_queryset(instance=instance) # Assuming the database enforces foreign keys, this won't fail. 
return qs.get(self.field.get_reverse_related_filter(instance)) def __get__(self, instance, cls=None): """ Get the related instance through the forward relation. With the example above, when getting ``child.parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``cls`` is the ``Child`` class (we don't need it) """ if instance is None: return self # The related instance is loaded from the database and then cached in # the attribute defined in self.cache_name. It can also be pre-cached # by the reverse accessor (ReverseOneToOneDescriptor). try: rel_obj = getattr(instance, self.cache_name) except AttributeError: val = self.field.get_local_related_value(instance) if None in val: rel_obj = None else: rel_obj = self.get_object(instance) # If this is a one-to-one relation, set the reverse accessor # cache on the related object to the current instance to avoid # an extra SQL query if it's accessed later on. if not self.field.remote_field.multiple: setattr(rel_obj, self.field.remote_field.get_cache_name(), instance) setattr(instance, self.cache_name, rel_obj) if rel_obj is None and not self.field.null: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (self.field.model.__name__, self.field.name) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the forward relation. With the example above, when setting ``child.parent = parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``value`` is the ``parent`` instance on the right of the equal sign """ # An object must be an instance of the related class. if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model): raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' 
% ( value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name, ) ) elif value is not None: if instance._state.db is None: instance._state.db = router.db_for_write(instance.__class__, instance=value) elif value._state.db is None: value._state.db = router.db_for_write(value.__class__, instance=instance) elif value._state.db is not None and instance._state.db is not None: if not router.allow_relation(value, instance): raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value) # If we're setting the value of a OneToOneField to None, we need to clear # out the cache on any old related object. Otherwise, deleting the # previously-related object will also cause this object to be deleted, # which is wrong. if value is None: # Look up the previously-related object, which may still be available # since we've not yet cleared out the related field. # Use the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing # the object to invalidate the accessor cache, so there's no # need to populate the cache just to expire it again. related = getattr(instance, self.cache_name, None) # If we've got an old related object, we need to clear out its # cache. This cache also might not exist if the related object # hasn't been accessed yet. if related is not None: setattr(related, self.field.remote_field.get_cache_name(), None) for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, None) # Set the values of the related field. else: for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, getattr(value, rh_field.attname)) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. 
setattr(instance, self.cache_name, value) # If this is a one-to-one relation, set the reverse accessor cache on # the related object to the current instance to avoid an extra SQL # query if it's accessed later on. if value is not None and not self.field.remote_field.multiple: setattr(value, self.field.remote_field.get_cache_name(), instance) class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor): """ Accessor to the related object on the forward side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance. """ def get_object(self, instance): if self.field.remote_field.parent_link: deferred = instance.get_deferred_fields() # Because it's a parent link, all the data is available in the # instance, so populate the parent model with this data. rel_model = self.field.remote_field.model fields = [field.attname for field in rel_model._meta.concrete_fields] # If any of the related model's fields are deferred, fallback to # fetching all fields from the related model. This avoids a query # on the related model for every deferred field. if not any(field in fields for field in deferred): kwargs = {field: getattr(instance, field) for field in fields} return rel_model(**kwargs) return super().get_object(instance) class ReverseOneToOneDescriptor: """ Accessor to the related object on the reverse side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance. """ def __init__(self, related): self.related = related self.cache_name = related.get_cache_name() @cached_property def RelatedObjectDoesNotExist(self): # The exception isn't created at initialization time for the sake of # consistency with `ForwardManyToOneDescriptor`. 
return type( 'RelatedObjectDoesNotExist', (self.related.related_model.DoesNotExist, AttributeError), {} ) def is_cached(self, instance): return hasattr(instance, self.cache_name) def get_queryset(self, **hints): return self.related.related_model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = attrgetter(self.related.field.attname) def instance_attr(obj): return obj._get_pk_val() instances_dict = {instance_attr(inst): inst for inst in instances} query = {'%s__in' % self.related.field.name: instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. rel_obj_cache_name = self.related.field.get_cache_name() for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, rel_obj_cache_name, instance) return queryset, rel_obj_attr, instance_attr, True, self.cache_name def __get__(self, instance, cls=None): """ Get the related instance through the reverse relation. With the example above, when getting ``place.restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``cls`` is the ``Place`` class (unused) Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ if instance is None: return self # The related instance is loaded from the database and then cached in # the attribute defined in self.cache_name. It can also be pre-cached # by the forward accessor (ForwardManyToOneDescriptor). 
try: rel_obj = getattr(instance, self.cache_name) except AttributeError: related_pk = instance._get_pk_val() if related_pk is None: rel_obj = None else: filter_args = self.related.field.get_forward_related_filter(instance) try: rel_obj = self.get_queryset(instance=instance).get(**filter_args) except self.related.related_model.DoesNotExist: rel_obj = None else: # Set the forward accessor cache on the related object to # the current instance to avoid an extra SQL query if it's # accessed later on. setattr(rel_obj, self.related.field.get_cache_name(), instance) setattr(instance, self.cache_name, rel_obj) if rel_obj is None: raise self.RelatedObjectDoesNotExist( "%s has no %s." % ( instance.__class__.__name__, self.related.get_accessor_name() ) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the reverse relation. With the example above, when setting ``place.restaurant = restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``value`` is the ``restaurant`` instance on the right of the equal sign Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ # The similarity of the code below to the code in # ForwardManyToOneDescriptor is annoying, but there's a bunch # of small differences that would make a common base class convoluted. if value is None: # Update the cached related instance (if any) & clear the cache. try: rel_obj = getattr(instance, self.cache_name) except AttributeError: pass else: delattr(instance, self.cache_name) setattr(rel_obj, self.related.field.name, None) elif not isinstance(value, self.related.related_model): # An object must be an instance of the related class. raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' 
% ( value, instance._meta.object_name, self.related.get_accessor_name(), self.related.related_model._meta.object_name, ) ) else: if instance._state.db is None: instance._state.db = router.db_for_write(instance.__class__, instance=value) elif value._state.db is None: value._state.db = router.db_for_write(value.__class__, instance=instance) elif value._state.db is not None and instance._state.db is not None: if not router.allow_relation(value, instance): raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value) related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields) # Set the value of the related field to the value of the related object's related field for index, field in enumerate(self.related.field.local_related_fields): setattr(value, field.attname, related_pk[index]) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. setattr(instance, self.cache_name, value) # Set the forward accessor cache on the related object to the current # instance to avoid an extra SQL query if it's accessed later on. setattr(value, self.related.field.get_cache_name(), instance) class ReverseManyToOneDescriptor: """ Accessor to the related objects manager on the reverse side of a many-to-one relation. In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``parent.children`` is a ``ReverseManyToOneDescriptor`` instance. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. 
""" def __init__(self, rel): self.rel = rel self.field = rel.field @cached_property def related_manager_cls(self): related_model = self.rel.related_model return create_reverse_many_to_one_manager( related_model._default_manager.__class__, self.rel, ) def __get__(self, instance, cls=None): """ Get the related objects through the reverse relation. With the example above, when getting ``parent.children``: - ``self`` is the descriptor managing the ``children`` attribute - ``instance`` is the ``parent`` instance - ``cls`` is the ``Parent`` class (unused) """ if instance is None: return self return self.related_manager_cls(instance) def _get_set_deprecation_msg_params(self): return ( 'reverse side of a related set', self.rel.get_accessor_name(), ) def __set__(self, instance, value): raise TypeError( 'Direct assignment to the %s is prohibited. Use %s.set() instead.' % self._get_set_deprecation_msg_params(), ) def create_reverse_many_to_one_manager(superclass, rel): """ Create a manager for the reverse side of a many-to-one relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-one relations. """ class RelatedManager(superclass): def __init__(self, instance): super().__init__() self.instance = instance self.model = rel.related_model self.field = rel.field self.core_filters = {self.field.name: instance} def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_reverse_many_to_one_manager(manager.__class__, rel) return manager_class(self.instance) do_not_call_in_templates = True def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. 
""" db = self._db or router.db_for_read(self.model, instance=self.instance) empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset = queryset.filter(**self.core_filters) for field in self.field.foreign_related_fields: val = getattr(self.instance, field.attname) if val is None or (val == '' and empty_strings_as_null): return queryset.none() queryset._known_related_objects = {self.field: {self.instance.pk: self.instance}} return queryset def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.field.related_query_name()) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.field.related_query_name()] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) rel_obj_attr = self.field.get_local_related_value instance_attr = self.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} query = {'%s__in' % self.field.name: instances} queryset = queryset.filter(**query) # Since we just bypassed this class' get_queryset(), we must manage # the reverse relation manually. 
for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, self.field.name, instance) cache_name = self.field.related_query_name() return queryset, rel_obj_attr, instance_attr, False, cache_name def add(self, *objs, bulk=True): self._remove_prefetched_objects() objs = list(objs) db = router.db_for_write(self.model, instance=self.instance) def check_and_update_obj(obj): if not isinstance(obj, self.model): raise TypeError("'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, )) setattr(obj, self.field.name, self.instance) if bulk: pks = [] for obj in objs: check_and_update_obj(obj) if obj._state.adding or obj._state.db != db: raise ValueError( "%r instance isn't saved. Use bulk=False or save " "the object first." % obj ) pks.append(obj.pk) self.model._base_manager.using(db).filter(pk__in=pks).update(**{ self.field.name: self.instance, }) else: with transaction.atomic(using=db, savepoint=False): for obj in objs: check_and_update_obj(obj) obj.save() add.alters_data = True def create(self, **kwargs): kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).create(**kwargs) create.alters_data = True def get_or_create(self, **kwargs): kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs) get_or_create.alters_data = True def update_or_create(self, **kwargs): kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs) update_or_create.alters_data = True # remove() and clear() are only provided if the ForeignKey can have a value of null. 
if rel.field.null: def remove(self, *objs, bulk=True): if not objs: return val = self.field.get_foreign_related_value(self.instance) old_ids = set() for obj in objs: # Is obj actually part of this descriptor set? if self.field.get_local_related_value(obj) == val: old_ids.add(obj.pk) else: raise self.field.remote_field.model.DoesNotExist( "%r is not related to %r." % (obj, self.instance) ) self._clear(self.filter(pk__in=old_ids), bulk) remove.alters_data = True def clear(self, *, bulk=True): self._clear(self, bulk) clear.alters_data = True def _clear(self, queryset, bulk): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: # `QuerySet.update()` is intrinsically atomic. queryset.update(**{self.field.name: None}) else: with transaction.atomic(using=db, savepoint=False): for obj in queryset: setattr(obj, self.field.name, None) obj.save(update_fields=[self.field.name]) _clear.alters_data = True def set(self, objs, *, bulk=True, clear=False): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) if self.field.null: db = router.db_for_write(self.model, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, bulk=bulk) else: old_objs = set(self.using(db).all()) new_objs = [] for obj in objs: if obj in old_objs: old_objs.remove(obj) else: new_objs.append(obj) self.remove(*old_objs, bulk=bulk) self.add(*new_objs, bulk=bulk) else: self.add(*objs, bulk=bulk) set.alters_data = True return RelatedManager class ManyToManyDescriptor(ReverseManyToOneDescriptor): """ Accessor to the related objects manager on the forward and reverse sides of a many-to-many relation. In the example:: class Pizza(Model): toppings = ManyToManyField(Topping, related_name='pizzas') ``pizza.toppings`` and ``topping.pizzas`` are ``ManyToManyDescriptor`` instances. 
Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. """ def __init__(self, rel, reverse=False): super().__init__(rel) self.reverse = reverse @property def through(self): # through is provided so that you have easy access to the through # model (Book.authors.through) for inlines, etc. This is done as # a property to ensure that the fully resolved value is returned. return self.rel.through @cached_property def related_manager_cls(self): related_model = self.rel.related_model if self.reverse else self.rel.model return create_forward_many_to_many_manager( related_model._default_manager.__class__, self.rel, reverse=self.reverse, ) def _get_set_deprecation_msg_params(self): return ( '%s side of a many-to-many set' % ('reverse' if self.reverse else 'forward'), self.rel.get_accessor_name() if self.reverse else self.field.name, ) def create_forward_many_to_many_manager(superclass, rel, reverse): """ Create a manager for the either side of a many-to-many relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-many relations. 
""" class ManyRelatedManager(superclass): def __init__(self, instance=None): super().__init__() self.instance = instance if not reverse: self.model = rel.model self.query_field_name = rel.field.related_query_name() self.prefetch_cache_name = rel.field.name self.source_field_name = rel.field.m2m_field_name() self.target_field_name = rel.field.m2m_reverse_field_name() self.symmetrical = rel.symmetrical else: self.model = rel.related_model self.query_field_name = rel.field.name self.prefetch_cache_name = rel.field.related_query_name() self.source_field_name = rel.field.m2m_reverse_field_name() self.target_field_name = rel.field.m2m_field_name() self.symmetrical = False self.through = rel.through self.reverse = reverse self.source_field = self.through._meta.get_field(self.source_field_name) self.target_field = self.through._meta.get_field(self.target_field_name) self.core_filters = {} self.pk_field_names = {} for lh_field, rh_field in self.source_field.related_fields: core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name) self.core_filters[core_filter_key] = getattr(instance, rh_field.attname) self.pk_field_names[lh_field.name] = rh_field.name self.related_val = self.source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError('"%r" needs to have a value for field "%s" before ' 'this many-to-many relationship can be used.' % (instance, self.pk_field_names[self.source_field_name])) # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if instance.pk is None: raise ValueError("%r instance needs to have a primary key value before " "a many-to-many relationship can be used." 
% instance.__class__.__name__) def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse) return manager_class(instance=self.instance) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q(**{self.source_field_name: self.related_val}) # No need to add a subquery condition if removed_vals is a QuerySet without # filters. removed_vals_filters = (not isinstance(removed_vals, QuerySet) or removed_vals._has_filters()) if removed_vals_filters: filters &= Q(**{'%s__in' % self.target_field_name: removed_vals}) if self.symmetrical: symmetrical_filters = Q(**{self.target_field_name: self.related_val}) if removed_vals_filters: symmetrical_filters &= Q( **{'%s__in' % self.source_field_name: removed_vals}) filters |= symmetrical_filters return filters def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) return queryset._next_is_sticky().filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) query = {'%s__in' % self.query_field_name: instances} queryset = queryset._next_is_sticky().filter(**query) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. 
We know that # there will already be a join on the join table, so we can just add # the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. fk = self.through._meta.get_field(self.source_field_name) join_table = fk.model._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra(select={ '_prefetch_related_val_%s' % f.attname: '%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields}) return ( queryset, lambda result: tuple( getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields ), lambda inst: tuple( f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields ), False, self.prefetch_cache_name, ) def add(self, *objs): if not rel.through._meta.auto_created: opts = self.through._meta raise AttributeError( "Cannot use add() on a ManyToManyField which specifies an " "intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name) ) self._remove_prefetched_objects() db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): self._add_items(self.source_field_name, self.target_field_name, *objs) # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table if self.symmetrical: self._add_items(self.target_field_name, self.source_field_name, *objs) add.alters_data = True def remove(self, *objs): if not rel.through._meta.auto_created: opts = self.through._meta raise AttributeError( "Cannot use remove() on a ManyToManyField which specifies " "an intermediary model. Use %s.%s's Manager instead." 
% (opts.app_label, opts.object_name) ) self._remove_prefetched_objects() self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): signals.m2m_changed.send( sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) self._remove_prefetched_objects() filters = self._build_remove_filters(super().get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) clear.alters_data = True def set(self, objs, *, clear=False): if not rel.through._meta.auto_created: opts = self.through._meta raise AttributeError( "Cannot set values on a ManyToManyField which specifies an " "intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name) ) # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs) else: old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True)) new_objs = [] for obj in objs: fk_val = ( self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else obj ) if fk_val in old_ids: old_ids.remove(fk_val) else: new_objs.append(obj) self.remove(*old_ids) self.add(*new_objs) set.alters_data = True def create(self, **kwargs): # This check needs to be done here, since we can't later remove this # from the method lookup table, as we do with add and remove. 
if not self.through._meta.auto_created: opts = self.through._meta raise AttributeError( "Cannot use create() on a ManyToManyField which specifies " "an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name) ) db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj) return new_obj create.alters_data = True def get_or_create(self, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj) return obj, created get_or_create.alters_data = True def update_or_create(self, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj) return obj, created update_or_create.alters_data = True def _add_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK fieldname in join table for the source object # target_field_name: the PK fieldname in join table for the target object # *objs - objects to add. Either object instances, or primary keys of object instances. # If there aren't any objects, there is nothing to do. 
from django.db.models import Model if objs: new_ids = set() for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) fk_val = self.through._meta.get_field( target_field_name).get_foreign_related_value(obj)[0] if fk_val is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) new_ids.add(fk_val) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: new_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) vals = (self.through._default_manager.using(db) .values_list(target_field_name, flat=True) .filter(**{ source_field_name: self.related_val[0], '%s__in' % target_field_name: new_ids, })) new_ids = new_ids - set(vals) with transaction.atomic(using=db, savepoint=False): if self.reverse or source_field_name == self.source_field_name: # Don't send the signal when we are inserting the # duplicate data row for symmetrical reverse entries. signals.m2m_changed.send( sender=self.through, action='pre_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db, ) # Add the ones that aren't there already self.through._default_manager.using(db).bulk_create([ self.through(**{ '%s_id' % source_field_name: self.related_val[0], '%s_id' % target_field_name: obj_id, }) for obj_id in new_ids ]) if self.reverse or source_field_name == self.source_field_name: # Don't send the signal when we are inserting the # duplicate data row for symmetrical reverse entries. 
signals.m2m_changed.send( sender=self.through, action='post_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db, ) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source object # target_field_name: the PK colname in join table for the target object # *objs - objects to remove if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): # Send a signal to the other end if need be. signals.m2m_changed.send( sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) target_model_qs = super().get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter(**{ '%s__in' % self.target_field.target_field.attname: old_ids}) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) return ManyRelatedManager
tudorvio/nova
refs/heads/master
nova/virt/xenapi/vm_utils.py
20
# Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2011 Piston Cloud Computing, Inc. # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ import contextlib import os import time import urllib import uuid from xml.dom import minidom from xml.parsers import expat from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import versionutils import six from six.moves import range import six.moves.urllib.parse as urlparse from nova.api.metadata import base as instance_metadata from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode from nova import exception from nova.i18n import _, _LE, _LI, _LW from nova.network import model as network_model from nova import utils from nova.virt import configdrive from nova.virt import diagnostics from nova.virt.disk import api as disk from nova.virt.disk.vfs import localfs as vfsimpl from nova.virt import hardware from nova.virt.image import model as imgmodel from nova.virt import netutils from nova.virt.xenapi import agent from nova.virt.xenapi.image import 
utils as image_utils LOG = logging.getLogger(__name__) xenapi_vm_utils_opts = [ cfg.StrOpt('cache_images', default='all', choices=('all', 'some', 'none'), help='Cache glance images locally. `all` will cache all' ' images, `some` will only cache images that have the' ' image_property `cache_in_nova=True`, and `none` turns' ' off caching entirely'), cfg.IntOpt('image_compression_level', help='Compression level for images, e.g., 9 for gzip -9.' ' Range is 1-9, 9 being most compressed but most CPU' ' intensive on dom0.'), cfg.StrOpt('default_os_type', default='linux', help='Default OS type'), cfg.IntOpt('block_device_creation_timeout', default=10, help='Time to wait for a block device to be created'), cfg.IntOpt('max_kernel_ramdisk_size', default=16 * units.Mi, help='Maximum size in bytes of kernel or ramdisk images'), cfg.StrOpt('sr_matching_filter', default='default-sr:true', help='Filter for finding the SR to be used to install guest ' 'instances on. To use the Local Storage in default ' 'XenServer/XCP installations set this flag to ' 'other-config:i18n-key=local-storage. To select an SR ' 'with a different matching criteria, you could set it to ' 'other-config:my_favorite_sr=true. On the other hand, to ' 'fall back on the Default SR, as displayed by XenCenter, ' 'set this flag to: default-sr:true'), cfg.BoolOpt('sparse_copy', default=True, help='Whether to use sparse_copy for copying data on a ' 'resize down (False will use standard dd). This speeds ' 'up resizes down considerably since large runs of zeros ' 'won\'t have to be rsynced'), cfg.IntOpt('num_vbd_unplug_retries', default=10, help='Maximum number of retries to unplug VBD. 
if <=0, ' 'should try once and no retry'), cfg.StrOpt('torrent_images', default='none', choices=('all', 'some', 'none'), help='Whether or not to download images via Bit Torrent.'), cfg.StrOpt('ipxe_network_name', help='Name of network to use for booting iPXE ISOs'), cfg.StrOpt('ipxe_boot_menu_url', help='URL to the iPXE boot menu'), cfg.StrOpt('ipxe_mkisofs_cmd', default='mkisofs', help='Name and optionally path of the tool used for ' 'ISO image creation'), ] CONF = cfg.CONF CONF.register_opts(xenapi_vm_utils_opts, 'xenserver') CONF.import_opt('default_ephemeral_format', 'nova.virt.driver') CONF.import_opt('use_cow_images', 'nova.virt.driver') CONF.import_opt('use_ipv6', 'nova.netconf') XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, 'Paused': power_state.PAUSED, 'Suspended': power_state.SUSPENDED, 'Crashed': power_state.CRASHED} SECTOR_SIZE = 512 MBR_SIZE_SECTORS = 63 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE KERNEL_DIR = '/boot/guest' MAX_VDI_CHAIN_SIZE = 16 PROGRESS_INTERVAL_SECONDS = 300 # Fudge factor to allow for the VHD chain to be slightly larger than # the partitioned space. Otherwise, legitimate images near their # maximum allowed size can fail on build with FlavorDiskTooSmall. 
VHD_SIZE_CHECK_FUDGE_FACTOR_GB = 10 class ImageType(object): """Enumeration class for distinguishing different image types | 0 - kernel image (goes on dom0's filesystem) | 1 - ramdisk image (goes on dom0's filesystem) | 2 - disk image (local SR, partitioned by objectstore plugin) | 3 - raw disk image (local SR, NOT partitioned by plugin) | 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for | linux, HVM assumed for Windows) | 5 - ISO disk image (local SR, NOT partitioned by plugin) | 6 - config drive """ KERNEL = 0 RAMDISK = 1 DISK = 2 DISK_RAW = 3 DISK_VHD = 4 DISK_ISO = 5 DISK_CONFIGDRIVE = 6 _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO, DISK_CONFIGDRIVE) KERNEL_STR = "kernel" RAMDISK_STR = "ramdisk" DISK_STR = "root" DISK_RAW_STR = "os_raw" DISK_VHD_STR = "vhd" DISK_ISO_STR = "iso" DISK_CONFIGDRIVE_STR = "configdrive" _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR, DISK_ISO_STR, DISK_CONFIGDRIVE_STR) @classmethod def to_string(cls, image_type): return dict(zip(cls._ids, ImageType._strs)).get(image_type) @classmethod def get_role(cls, image_type_id): """Get the role played by the image, based on its type.""" return { cls.KERNEL: 'kernel', cls.RAMDISK: 'ramdisk', cls.DISK: 'root', cls.DISK_RAW: 'root', cls.DISK_VHD: 'root', cls.DISK_ISO: 'iso', cls.DISK_CONFIGDRIVE: 'configdrive' }.get(image_type_id) def get_vm_device_id(session, image_properties): # NOTE: device_id should be 2 for windows VMs which run new xentools # (>=6.1). Refer to http://support.citrix.com/article/CTX135099 for more # information. 
if image_properties is None: image_properties = {} device_id = image_properties.get('xenapi_device_id') # The device_id is required to be set for hypervisor version 6.1 and above if device_id: hypervisor_version = session.product_version if _hypervisor_supports_device_id(hypervisor_version): return device_id else: msg = _("Device id %(id)s specified is not supported by " "hypervisor version %(version)s") % {'id': device_id, 'version': hypervisor_version} raise exception.NovaException(msg) def _hypervisor_supports_device_id(version): version_as_string = '.'.join(str(v) for v in version) return(versionutils.is_compatible('6.1', version_as_string)) def create_vm(session, instance, name_label, kernel, ramdisk, use_pv_kernel=False, device_id=None): """Create a VM record. Returns new VM reference. the use_pv_kernel flag indicates whether the guest is HVM or PV There are 3 scenarios: 1. Using paravirtualization, kernel passed in 2. Using paravirtualization, kernel within the image 3. Using hardware virtualization """ flavor = instance.get_flavor() mem = str(long(flavor.memory_mb) * units.Mi) vcpus = str(flavor.vcpus) vcpu_weight = flavor.vcpu_weight vcpu_params = {} if vcpu_weight is not None: # NOTE(johngarbutt) bug in XenServer 6.1 and 6.2 means # we need to specify both weight and cap for either to apply vcpu_params = {"weight": str(vcpu_weight), "cap": "0"} cpu_mask_list = hardware.get_vcpu_pin_set() if cpu_mask_list: cpu_mask = hardware.format_cpu_spec(cpu_mask_list, allow_ranges=False) vcpu_params["mask"] = cpu_mask viridian = 'true' if instance['os_type'] == 'windows' else 'false' rec = { 'actions_after_crash': 'destroy', 'actions_after_reboot': 'restart', 'actions_after_shutdown': 'destroy', 'affinity': '', 'blocked_operations': {}, 'ha_always_run': False, 'ha_restart_priority': '', 'HVM_boot_params': {}, 'HVM_boot_policy': '', 'is_a_template': False, 'memory_dynamic_min': mem, 'memory_dynamic_max': mem, 'memory_static_min': '0', 'memory_static_max': mem, 
'memory_target': mem, 'name_description': '', 'name_label': name_label, 'other_config': {'nova_uuid': str(instance['uuid'])}, 'PCI_bus': '', 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true', 'viridian': viridian, 'timeoffset': '0'}, 'PV_args': '', 'PV_bootloader': '', 'PV_bootloader_args': '', 'PV_kernel': '', 'PV_legacy_args': '', 'PV_ramdisk': '', 'recommendations': '', 'tags': [], 'user_version': '0', 'VCPUs_at_startup': vcpus, 'VCPUs_max': vcpus, 'VCPUs_params': vcpu_params, 'xenstore_data': {'vm-data/allowvssprovider': 'false'}} # Complete VM configuration record according to the image type # non-raw/raw with PV kernel/raw in HVM mode if use_pv_kernel: rec['platform']['nx'] = 'false' if instance['kernel_id']: # 1. Kernel explicitly passed in, use that rec['PV_args'] = 'root=/dev/xvda1' rec['PV_kernel'] = kernel rec['PV_ramdisk'] = ramdisk else: # 2. Use kernel within the image rec['PV_bootloader'] = 'pygrub' else: # 3. Using hardware virtualization rec['platform']['nx'] = 'true' rec['HVM_boot_params'] = {'order': 'dc'} rec['HVM_boot_policy'] = 'BIOS order' if device_id: rec['platform']['device_id'] = device_id vm_ref = session.VM.create(rec) LOG.debug('Created VM', instance=instance) return vm_ref def destroy_vm(session, instance, vm_ref): """Destroys a VM record.""" try: session.VM.destroy(vm_ref) except session.XenAPI.Failure: LOG.exception(_LE('Destroy VM failed')) return LOG.debug("VM destroyed", instance=instance) def clean_shutdown_vm(session, instance, vm_ref): if is_vm_shutdown(session, vm_ref): LOG.warning(_LW("VM already halted, skipping shutdown..."), instance=instance) return True LOG.debug("Shutting down VM (cleanly)", instance=instance) try: session.call_xenapi('VM.clean_shutdown', vm_ref) except session.XenAPI.Failure: LOG.exception(_LE('Shutting down VM (cleanly) failed.')) return False return True def hard_shutdown_vm(session, instance, vm_ref): if is_vm_shutdown(session, vm_ref): LOG.warning(_LW("VM already halted, skipping 
shutdown..."), instance=instance) return True LOG.debug("Shutting down VM (hard)", instance=instance) try: session.call_xenapi('VM.hard_shutdown', vm_ref) except session.XenAPI.Failure: LOG.exception(_LE('Shutting down VM (hard) failed')) return False return True def is_vm_shutdown(session, vm_ref): state = get_power_state(session, vm_ref) if state == power_state.SHUTDOWN: return True return False def is_enough_free_mem(session, instance): flavor = instance.get_flavor() mem = long(flavor.memory_mb) * units.Mi host_free_mem = long(session.call_xenapi("host.compute_free_memory", session.host_ref)) return host_free_mem >= mem def _should_retry_unplug_vbd(err): # Retry if unplug failed with DEVICE_DETACH_REJECTED # For reasons which we don't understand, # we're seeing the device still in use, even when all processes # using the device should be dead. # Since XenServer 6.2, we also need to retry if we get # INTERNAL_ERROR, as that error goes away when you retry. return (err == 'DEVICE_DETACH_REJECTED' or err == 'INTERNAL_ERROR') def unplug_vbd(session, vbd_ref, this_vm_ref): # make sure that perform at least once max_attempts = max(0, CONF.xenserver.num_vbd_unplug_retries) + 1 for num_attempt in range(1, max_attempts + 1): try: if num_attempt > 1: greenthread.sleep(1) session.VBD.unplug(vbd_ref, this_vm_ref) return except session.XenAPI.Failure as exc: err = len(exc.details) > 0 and exc.details[0] if err == 'DEVICE_ALREADY_DETACHED': LOG.info(_LI('VBD %s already detached'), vbd_ref) return elif _should_retry_unplug_vbd(err): LOG.info(_LI('VBD %(vbd_ref)s uplug failed with "%(err)s", ' 'attempt %(num_attempt)d/%(max_attempts)d'), {'vbd_ref': vbd_ref, 'num_attempt': num_attempt, 'max_attempts': max_attempts, 'err': err}) else: LOG.exception(_LE('Unable to unplug VBD')) raise exception.StorageError( reason=_('Unable to unplug VBD %s') % vbd_ref) raise exception.StorageError( reason=_('Reached maximum number of retries ' 'trying to unplug VBD %s') % vbd_ref) def 
destroy_vbd(session, vbd_ref): """Destroy VBD from host database.""" try: session.call_xenapi('VBD.destroy', vbd_ref) except session.XenAPI.Failure: LOG.exception(_LE('Unable to destroy VBD')) raise exception.StorageError( reason=_('Unable to destroy VBD %s') % vbd_ref) def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk', read_only=False, bootable=False, osvol=False, empty=False, unpluggable=True): """Create a VBD record and returns its reference.""" vbd_rec = {} vbd_rec['VM'] = vm_ref if vdi_ref is None: vdi_ref = 'OpaqueRef:NULL' vbd_rec['VDI'] = vdi_ref vbd_rec['userdevice'] = str(userdevice) vbd_rec['bootable'] = bootable vbd_rec['mode'] = read_only and 'RO' or 'RW' vbd_rec['type'] = vbd_type vbd_rec['unpluggable'] = unpluggable vbd_rec['empty'] = empty vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] LOG.debug('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,' ' VDI %(vdi_ref)s ... 
', {'vbd_type': vbd_type, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref}) vbd_ref = session.call_xenapi('VBD.create', vbd_rec) LOG.debug('Created VBD %(vbd_ref)s for VM %(vm_ref)s,' ' VDI %(vdi_ref)s.', {'vbd_ref': vbd_ref, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref}) if osvol: # set osvol=True in other-config to indicate this is an # attached nova (or cinder) volume session.call_xenapi('VBD.add_to_other_config', vbd_ref, 'osvol', 'True') return vbd_ref def attach_cd(session, vm_ref, vdi_ref, userdevice): """Create an empty VBD, then insert the CD.""" vbd_ref = create_vbd(session, vm_ref, None, userdevice, vbd_type='cd', read_only=True, bootable=True, empty=True, unpluggable=False) session.call_xenapi('VBD.insert', vbd_ref, vdi_ref) return vbd_ref def destroy_vdi(session, vdi_ref): try: session.call_xenapi('VDI.destroy', vdi_ref) except session.XenAPI.Failure: msg = "Unable to destroy VDI %s" % vdi_ref LOG.debug(msg, exc_info=True) msg = _("Unable to destroy VDI %s") % vdi_ref LOG.error(msg) raise exception.StorageError(reason=msg) def safe_destroy_vdis(session, vdi_refs): """Tries to destroy the requested VDIs, but ignores any errors.""" for vdi_ref in vdi_refs: try: destroy_vdi(session, vdi_ref) except exception.StorageError: msg = "Ignoring error while destroying VDI: %s" % vdi_ref LOG.debug(msg) def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size, read_only=False): """Create a VDI record and returns its reference.""" vdi_ref = session.call_xenapi("VDI.create", {'name_label': name_label, 'name_description': disk_type, 'SR': sr_ref, 'virtual_size': str(virtual_size), 'type': 'User', 'sharable': False, 'read_only': read_only, 'xenstore_data': {}, 'other_config': _get_vdi_other_config(disk_type, instance=instance), 'sm_config': {}, 'tags': []}) LOG.debug('Created VDI %(vdi_ref)s (%(name_label)s,' ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.', {'vdi_ref': vdi_ref, 'name_label': name_label, 'virtual_size': virtual_size, 'read_only': read_only, 
'sr_ref': sr_ref}) return vdi_ref @contextlib.contextmanager def _dummy_vm(session, instance, vdi_ref): """This creates a temporary VM so that we can snapshot a VDI. VDI's can't be snapshotted directly since the API expects a `vm_ref`. To work around this, we need to create a temporary VM and then map the VDI to the VM using a temporary VBD. """ name_label = "dummy" vm_ref = create_vm(session, instance, name_label, None, None) try: vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect', read_only=True) try: yield vm_ref finally: try: destroy_vbd(session, vbd_ref) except exception.StorageError: # destroy_vbd() will log error pass finally: destroy_vm(session, instance, vm_ref) def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref): """Copy a VDI and return the new VDIs reference. This function differs from the XenAPI `VDI.copy` call in that the copy is atomic and isolated, meaning we don't see half-downloaded images. It accomplishes this by copying the VDI's into a temporary directory and then atomically renaming them into the SR when the copy is completed. The correct long term solution is to fix `VDI.copy` so that it is atomic and isolated. 
""" with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref: label = "snapshot" with snapshot_attached_here( session, instance, vm_ref, label) as vdi_uuids: imported_vhds = session.call_plugin_serialized( 'workarounds', 'safe_copy_vdis', sr_path=get_sr_path(session, sr_ref=sr_ref), vdi_uuids=vdi_uuids, uuid_stack=_make_uuid_stack()) root_uuid = imported_vhds['root']['uuid'] # rescan to discover new VHDs scan_default_sr(session) vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid) return vdi_ref def _clone_vdi(session, vdi_to_clone_ref): """Clones a VDI and return the new VDIs reference.""" vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref) LOG.debug('Cloned VDI %(vdi_ref)s from VDI ' '%(vdi_to_clone_ref)s', {'vdi_ref': vdi_ref, 'vdi_to_clone_ref': vdi_to_clone_ref}) return vdi_ref def _get_vdi_other_config(disk_type, instance=None): """Return metadata to store in VDI's other_config attribute. `nova_instance_uuid` is used to associate a VDI with a particular instance so that, if it becomes orphaned from an unclean shutdown of a compute-worker, we can safely detach it. 
""" other_config = {'nova_disk_type': disk_type} # create_vdi may be called simply while creating a volume # hence information about instance may or may not be present if instance: other_config['nova_instance_uuid'] = instance['uuid'] return other_config def _set_vdi_info(session, vdi_ref, vdi_type, name_label, description, instance): existing_other_config = session.call_xenapi('VDI.get_other_config', vdi_ref) session.call_xenapi('VDI.set_name_label', vdi_ref, name_label) session.call_xenapi('VDI.set_name_description', vdi_ref, description) other_config = _get_vdi_other_config(vdi_type, instance=instance) for key, value in six.iteritems(other_config): if key not in existing_other_config: session.call_xenapi( "VDI.add_to_other_config", vdi_ref, key, value) def _vm_get_vbd_refs(session, vm_ref): return session.call_xenapi("VM.get_VBDs", vm_ref) def _vbd_get_rec(session, vbd_ref): return session.call_xenapi("VBD.get_record", vbd_ref) def _vdi_get_rec(session, vdi_ref): return session.call_xenapi("VDI.get_record", vdi_ref) def _vdi_get_uuid(session, vdi_ref): return session.call_xenapi("VDI.get_uuid", vdi_ref) def _vdi_snapshot(session, vdi_ref): return session.call_xenapi("VDI.snapshot", vdi_ref, {}) def get_vdi_for_vm_safely(session, vm_ref, userdevice='0'): """Retrieves the primary VDI for a VM.""" vbd_refs = _vm_get_vbd_refs(session, vm_ref) for vbd_ref in vbd_refs: vbd_rec = _vbd_get_rec(session, vbd_ref) # Convention dictates the primary VDI will be userdevice 0 if vbd_rec['userdevice'] == userdevice: vdi_ref = vbd_rec['VDI'] vdi_rec = _vdi_get_rec(session, vdi_ref) return vdi_ref, vdi_rec raise exception.NovaException(_("No primary VDI found for %s") % vm_ref) def get_all_vdi_uuids_for_vm(session, vm_ref, min_userdevice=0): vbd_refs = _vm_get_vbd_refs(session, vm_ref) for vbd_ref in vbd_refs: vbd_rec = _vbd_get_rec(session, vbd_ref) if int(vbd_rec['userdevice']) >= min_userdevice: vdi_ref = vbd_rec['VDI'] yield _vdi_get_uuid(session, vdi_ref) def 
def _try_strip_base_mirror_from_vdi(session, vdi_ref):
    """Best-effort removal of the base_mirror sm_config key from a VDI.

    The key may already be gone, in which case XenAPI raises and we just
    log at debug level.
    """
    try:
        session.call_xenapi("VDI.remove_from_sm_config", vdi_ref,
                            "base_mirror")
    except session.XenAPI.Failure:
        LOG.debug("Error while removing sm_config", exc_info=True)


def strip_base_mirror_from_vdis(session, vm_ref):
    """Strip the base_mirror marker from every VDI attached to a VM."""
    # NOTE(johngarbutt) part of workaround for XenServer bug CA-98606
    vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
    for vbd_ref in vbd_refs:
        vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
        _try_strip_base_mirror_from_vdi(session, vdi_ref)


def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref):
    """Destroy stale snapshot VDIs found in a VM's VHD chain.

    Everything beyond the leaf (the first entry of vdi_uuid_chain) is a
    candidate old snapshot; any found are destroyed and the SR is rescanned
    so XenServer's garbage collector can coalesce.
    """
    possible_snapshot_parents = vdi_uuid_chain[1:]

    if len(possible_snapshot_parents) == 0:
        LOG.debug("No VHD chain.", instance=instance)
        return

    snapshot_uuids = _child_vhds(session, sr_ref, possible_snapshot_parents,
                                 old_snapshots_only=True)
    number_of_snapshots = len(snapshot_uuids)

    if number_of_snapshots <= 0:
        LOG.debug("No snapshots to remove.", instance=instance)
        return

    vdi_refs = [session.VDI.get_by_uuid(vdi_uuid)
                for vdi_uuid in snapshot_uuids]
    safe_destroy_vdis(session, vdi_refs)

    # ensure garbage collector has been run
    _scan_sr(session, sr_ref)

    # BUG FIX: pass the count as a lazy formatting argument instead of
    # eagerly interpolating with '%' (oslo.log convention).
    LOG.info(_LI("Deleted %s snapshots."), number_of_snapshots,
             instance=instance)


def remove_old_snapshots(session, instance, vm_ref):
    """See if there is an snapshot present that should be removed."""
    LOG.debug("Starting remove_old_snapshots for VM", instance=instance)
    vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
    chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
    vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
    sr_ref = vm_vdi_rec["SR"]
    _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref)


@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0',
                           post_snapshot_callback=None):
    """Context manager yielding the VDI uuid chain of a fresh snapshot."""
    # impl method allow easier patching for tests
    return _snapshot_attached_here_impl(session, instance, vm_ref, label,
                                        userdevice, post_snapshot_callback)
def _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice,
                                 post_snapshot_callback):
    """Snapshot the root disk only.

    Yields a list of VDI uuids for the VHDs in the snapshot's chain.
    The snapshot VDI is destroyed on exit, so callers must copy the
    VHDs away while inside the context.
    """
    LOG.debug("Starting snapshot for VM", instance=instance)

    # Memorize the VDI chain so we can poll for coalesce
    vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref,
                                                   userdevice)
    chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
    vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
    sr_ref = vm_vdi_rec["SR"]

    # clean up after any interrupted snapshot attempts
    _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain,
                                   sr_ref)

    snapshot_ref = _vdi_snapshot(session, vm_vdi_ref)
    if post_snapshot_callback is not None:
        post_snapshot_callback(task_state=task_states.IMAGE_PENDING_UPLOAD)
    try:
        # When the VDI snapshot is taken a new parent is introduced.
        # If we have taken a snapshot before, the new parent can be coalesced.
        # We need to wait for this to happen before trying to copy the chain.
        _wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
                               vdi_uuid_chain)

        snapshot_uuid = _vdi_get_uuid(session, snapshot_ref)
        chain = _walk_vdi_chain(session, snapshot_uuid)
        vdi_uuids = [vdi_rec['uuid'] for vdi_rec in chain]
        yield vdi_uuids
    finally:
        # Always destroy the snapshot VDI, even if the caller raised while
        # consuming the yielded chain.
        safe_destroy_vdis(session, [snapshot_ref])
        # TODO(johngarbut) we need to check the snapshot has been coalesced
        # now its associated VDI has been deleted.
def get_sr_path(session, sr_ref=None):
    """Return the path to our storage repository

    This is used when we're dealing with VHDs directly, either by taking
    snapshots or by restoring an image in the DISK_VHD format.
    """
    if sr_ref is None:
        sr_ref = safe_find_sr(session)
    pbd_rec = session.call_xenapi("PBD.get_all_records_where",
                                  'field "host"="%s" and '
                                  'field "SR"="%s"' %
                                  (session.host_ref, sr_ref))

    # NOTE(bobball): There can only be one PBD for a host/SR pair, but path is
    # not always present - older versions of XS do not set it.
    # BUG FIX: dict.keys() is a non-indexable view on Python 3; convert to
    # a list before taking the first (only) element.
    pbd_ref = list(pbd_rec.keys())[0]
    device_config = pbd_rec[pbd_ref]['device_config']
    if 'path' in device_config:
        return device_config['path']
    sr_rec = session.call_xenapi("SR.get_record", sr_ref)
    sr_uuid = sr_rec["uuid"]
    if sr_rec["type"] not in ["ext", "nfs"]:
        raise exception.NovaException(
            _("Only file-based SRs (ext/NFS) are supported by this feature."
              " SR %(uuid)s is of type %(type)s") %
            {"uuid": sr_uuid, "type": sr_rec["type"]})

    return os.path.join(CONF.xenserver.sr_base_path, sr_uuid)


def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
    """Destroy used or unused cached images.

    A cached image that is being used by at least one VM is said to be 'used'.

    In the case of an 'unused' image, the cached image will be the only
    descendent of the base-copy. So when we delete the cached-image, the
    refcount will drop to zero and XenServer will automatically destroy the
    base-copy for us.

    The default behavior of this function is to destroy only 'unused' cached
    images. To destroy all cached images, use the `all_cached=True` kwarg.

    Returns the set of destroyed (or, with dry_run, would-be-destroyed)
    VDI uuids.
    """
    cached_images = _find_cached_images(session, sr_ref)
    destroyed = set()

    def destroy_cached_vdi(vdi_uuid, vdi_ref):
        # BUG FIX: the format-args dict was missing, so the literal
        # '%(vdi_uuid)s' placeholder was emitted in the log.
        LOG.debug("Destroying cached VDI '%(vdi_uuid)s'",
                  {'vdi_uuid': vdi_uuid})
        if not dry_run:
            destroy_vdi(session, vdi_ref)
        destroyed.add(vdi_uuid)

    for vdi_ref in cached_images.values():
        vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)

        if all_cached:
            destroy_cached_vdi(vdi_uuid, vdi_ref)
            continue

        # Unused-Only: Search for siblings

        # Chain length greater than two implies a VM must be holding a ref to
        # the base-copy (otherwise it would have coalesced), so consider this
        # cached image used.
        chain = list(_walk_vdi_chain(session, vdi_uuid))
        if len(chain) > 2:
            continue
        elif len(chain) == 2:
            # Siblings imply cached image is used
            root_vdi_rec = chain[-1]
            children = _child_vhds(session, sr_ref, [root_vdi_rec['uuid']])
            if len(children) > 1:
                continue

        destroy_cached_vdi(vdi_uuid, vdi_ref)

    return destroyed


def _find_cached_images(session, sr_ref):
    """Return a dict(uuid=vdi_ref) representing all cached images."""
    cached_images = {}
    for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
        try:
            image_id = vdi_rec['other_config']['image-id']
        except KeyError:
            # Not a cached image; skip it.
            continue

        cached_images[image_id] = vdi_ref

    return cached_images


def _find_cached_image(session, image_id, sr_ref):
    """Returns the vdi-ref of the cached image, or None if not cached."""
    name_label = _get_image_vdi_label(image_id)
    recs = session.call_xenapi("VDI.get_all_records_where",
                               'field "name__label"="%s"'
                               % name_label)
    number_found = len(recs)
    if number_found > 0:
        if number_found > 1:
            LOG.warning(_LW("Multiple base images for image: %s"), image_id)
        # BUG FIX: dict.keys() is not indexable on Python 3.
        return list(recs.keys())[0]
All recent versions # should use VDI.resize if version and brand: xcp = brand == 'XCP' r1_2_or_above = (version[0] == 1 and version[1] > 1) or version[0] > 1 xenserver = brand == 'XenServer' r6_or_above = version[0] > 5 if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above): return 'VDI.resize_online' return 'VDI.resize' def _vdi_get_virtual_size(session, vdi_ref): size = session.call_xenapi('VDI.get_virtual_size', vdi_ref) return int(size) def _vdi_resize(session, vdi_ref, new_size): resize_func_name = _get_resize_func_name(session) session.call_xenapi(resize_func_name, vdi_ref, str(new_size)) def update_vdi_virtual_size(session, instance, vdi_ref, new_gb): virtual_size = _vdi_get_virtual_size(session, vdi_ref) new_disk_size = new_gb * units.Gi msg = ("Resizing up VDI %(vdi_ref)s from %(virtual_size)d " "to %(new_disk_size)d") LOG.debug(msg, {'vdi_ref': vdi_ref, 'virtual_size': virtual_size, 'new_disk_size': new_disk_size}, instance=instance) if virtual_size < new_disk_size: # For resize up. 
Simple VDI resize will do the trick _vdi_resize(session, vdi_ref, new_disk_size) elif virtual_size == new_disk_size: LOG.debug("No need to change vdi virtual size.", instance=instance) else: # NOTE(johngarbutt): we should never get here # but if we don't raise an exception, a user might be able to use # more storage than allowed by their chosen instance flavor msg = _("VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger " "than flavor size of %(new_disk_size)d bytes.") msg = msg % {'vdi_ref': vdi_ref, 'virtual_size': virtual_size, 'new_disk_size': new_disk_size} LOG.debug(msg, instance=instance) raise exception.ResizeError(reason=msg) def resize_disk(session, instance, vdi_ref, flavor): size_gb = flavor.root_gb if size_gb == 0: reason = _("Can't resize a disk to 0 GB.") raise exception.ResizeError(reason=reason) sr_ref = safe_find_sr(session) clone_ref = _clone_vdi(session, vdi_ref) try: # Resize partition and filesystem down _auto_configure_disk(session, clone_ref, size_gb) # Create new VDI vdi_size = size_gb * units.Gi # NOTE(johannes): No resizing allowed for rescue instances, so # using instance['name'] is safe here new_ref = create_vdi(session, sr_ref, instance, instance['name'], 'root', vdi_size) new_uuid = session.call_xenapi('VDI.get_uuid', new_ref) # Manually copy contents over virtual_size = size_gb * units.Gi _copy_partition(session, clone_ref, new_ref, 1, virtual_size) return new_ref, new_uuid finally: destroy_vdi(session, clone_ref) def _auto_configure_disk(session, vdi_ref, new_gb): """Partition and resize FS to match the size specified by flavors.root_gb. This is a fail-safe to prevent accidentally destroying data on a disk erroneously marked as auto_disk_config=True. The criteria for allowing resize are: 1. 'auto_disk_config' must be true for the instance (and image). (If we've made it here, then auto_disk_config=True.) 2. The disk must have only one partition. 3. The file-system on the one partition must be ext3 or ext4. 
def _auto_configure_disk(session, vdi_ref, new_gb):
    """Partition and resize FS to match the size specified by
    flavors.root_gb.

    This is a fail-safe to prevent accidentally destroying data on a disk
    erroneously marked as auto_disk_config=True.

    The criteria for allowing resize are:

        1. 'auto_disk_config' must be true for the instance (and image).
           (If we've made it here, then auto_disk_config=True.)

        2. The disk must have only one partition.

        3. The file-system on the one partition must be ext3 or ext4.

    Raises CannotResizeDisk when the criteria are not met.
    """
    if new_gb == 0:
        LOG.debug("Skipping auto_config_disk as destination size is 0GB")
        return

    with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
        partitions = _get_partitions(dev)

        if len(partitions) != 1:
            reason = _('Disk must have only one partition.')
            raise exception.CannotResizeDisk(reason=reason)

        num, start, old_sectors, fstype, name, flags = partitions[0]
        if fstype not in ('ext3', 'ext4'):
            reason = _('Disk contains a filesystem '
                       'we are unable to resize: %s')
            raise exception.CannotResizeDisk(reason=(reason % fstype))

        if num != 1:
            reason = _('The only partition should be partition 1.')
            raise exception.CannotResizeDisk(reason=reason)

        # NOTE(review): '/' is true division under Python 3, which would
        # make new_sectors a float — confirm target interpreter before
        # porting.
        new_sectors = new_gb * units.Gi / SECTOR_SIZE
        _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags)


def try_auto_configure_disk(session, vdi_ref, new_gb):
    # Best-effort wrapper: a refused resize is logged, never raised.
    try:
        _auto_configure_disk(session, vdi_ref, new_gb)
    except exception.CannotResizeDisk as e:
        msg = _LW('Attempted auto_configure_disk failed because: %s')
        LOG.warn(msg % e)


def _make_partition(session, dev, partition_start, partition_end):
    """Create a single primary msdos partition on dev and return its path."""
    dev_path = utils.make_dev_path(dev)

    # NOTE(bobball) If this runs in Dom0, parted will error trying
    # to re-read the partition table and return a generic error
    utils.execute('parted', '--script', dev_path,
                  'mklabel', 'msdos', run_as_root=True,
                  check_exit_code=not session.is_local_connection)
    utils.execute('parted', '--script', dev_path, '--',
                  'mkpart', 'primary',
                  partition_start,
                  partition_end,
                  run_as_root=True,
                  check_exit_code=not session.is_local_connection)

    partition_path = utils.make_dev_path(dev, partition=1)
    if session.is_local_connection:
        # Need to refresh the partitions
        utils.trycmd('kpartx', '-a', dev_path,
                     run_as_root=True,
                     discard_warnings=True)

        # Sometimes the partition gets created under /dev/mapper, depending
        # on the setup in dom0.
        mapper_path = '/dev/mapper/%s' % os.path.basename(partition_path)
        if os.path.exists(mapper_path):
            return mapper_path

    return partition_path


def _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   disk_type, size_mb, fs_type):
    """Steps to programmatically generate a disk:

        1. Create VDI of desired size

        2. Attach VDI to compute worker

        3. Create partition

        4. Create VBD between instance VM and VDI

    On any failure the freshly created VDI is destroyed before re-raising.
    """
    # 1. Create VDI
    sr_ref = safe_find_sr(session)
    ONE_MEG = units.Mi
    virtual_size = size_mb * ONE_MEG
    vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
                         virtual_size)

    try:
        # 2. Attach VDI to compute worker (VBD hotplug)
        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
            # 3. Create partition
            partition_start = "0"
            partition_end = "-0"

            partition_path = _make_partition(session, dev,
                                             partition_start, partition_end)

            if fs_type == 'linux-swap':
                utils.execute('mkswap', partition_path, run_as_root=True)
            elif fs_type is not None:
                utils.execute('mkfs', '-t', fs_type, partition_path,
                              run_as_root=True)

        # 4. Create VBD between instance VM and VDI
        if vm_ref:
            create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
    except Exception:
        with excutils.save_and_reraise_exception():
            msg = "Error while generating disk number: %s" % userdevice
            LOG.debug(msg, instance=instance, exc_info=True)
            safe_destroy_vdis(session, [vdi_ref])

    return vdi_ref


def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
    """Create and attach a swap disk of swap_mb megabytes."""
    # NOTE(jk0): We use a FAT32 filesystem for the Windows swap
    # partition because that is what parted supports.
    is_windows = instance['os_type'] == "windows"
    fs_type = "vfat" if is_windows else "linux-swap"

    _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   'swap', swap_mb, fs_type)
def get_ephemeral_disk_sizes(total_size_gb):
    """Yield the per-VHD sizes (in GB) for an ephemeral disk of
    total_size_gb, splitting it into chunks no larger than the maximum
    single-VHD size.
    """
    if not total_size_gb:
        return

    max_size_gb = 1024 if total_size_gb % 1024 == 0 else 2000

    remaining_gb = total_size_gb
    while remaining_gb > 0:
        chunk_gb = min(max_size_gb, remaining_gb)
        yield chunk_gb
        remaining_gb -= chunk_gb


def generate_single_ephemeral(session, instance, vm_ref, userdevice,
                              size_gb, instance_name_label=None):
    """Create one ephemeral disk VHD and return its VDI reference."""
    if instance_name_label is None:
        instance_name_label = instance["name"]

    name_label = "%s ephemeral" % instance_name_label
    # TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here
    suffix_index = int(userdevice) - 4
    if suffix_index > 0:
        name_label = "%s (%d)" % (name_label, suffix_index)

    return _generate_disk(session, instance, vm_ref, str(userdevice),
                          name_label, 'ephemeral', size_gb * 1024,
                          CONF.default_ephemeral_format)


def generate_ephemeral(session, instance, vm_ref, first_userdevice,
                       instance_name_label, total_size_gb):
    """Create the full set of ephemeral disks, cleaning up on failure."""
    # NOTE(johngarbutt): max possible size of a VHD disk is 2043GB
    sizes = get_ephemeral_disk_sizes(total_size_gb)
    first_userdevice = int(first_userdevice)

    created_refs = []
    try:
        for userdevice, size_gb in enumerate(sizes, start=first_userdevice):
            ref = generate_single_ephemeral(session, instance, vm_ref,
                                            userdevice, size_gb,
                                            instance_name_label)
            created_refs.append(ref)
    except Exception as exc:
        with excutils.save_and_reraise_exception():
            LOG.debug("Error when generating ephemeral disk. "
                      "Device: %(userdevice)s Size GB: %(size_gb)s "
                      "Error: %(exc)s", {
                          'userdevice': userdevice,
                          'size_gb': size_gb,
                          'exc': exc})
            safe_destroy_vdis(session, created_refs)


def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice,
                                 name_label, size_gb):
    """Create an empty formatted root disk for an ISO-booted instance."""
    _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   'user', size_gb * 1024, CONF.default_ephemeral_format)


def generate_configdrive(session, instance, vm_ref, userdevice,
                         network_info, admin_password=None, files=None):
    """Build a config-drive VDI and attach it read-only to the instance.

    On any failure the config-drive VDI is destroyed before re-raising.
    """
    sr_ref = safe_find_sr(session)
    vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
                         'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
    try:
        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
            extra_md = {}
            if admin_password:
                extra_md['admin_pass'] = admin_password
            inst_md = instance_metadata.InstanceMetadata(
                instance, content=files, extra_md=extra_md,
                network_info=network_info)
            with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
                with utils.tempdir() as tmp_path:
                    tmp_file = os.path.join(tmp_path, 'configdrive')
                    cdb.make_drive(tmp_file)

                    # Stream the built drive image straight onto the
                    # attached block device.
                    dev_path = utils.make_dev_path(dev)
                    utils.execute('dd',
                                  'if=%s' % tmp_file,
                                  'of=%s' % dev_path,
                                  'oflag=direct,sync',
                                  run_as_root=True)

        create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
                   read_only=True)
    except Exception:
        with excutils.save_and_reraise_exception():
            msg = "Error while generating config drive"
            LOG.debug(msg, instance=instance, exc_info=True)
            safe_destroy_vdis(session, [vdi_ref])
def _create_kernel_image(context, session, instance, name_label, image_id,
                         image_type):
    """Creates kernel/ramdisk file from the image stored in the cache.
    If the image is not present in the cache, it streams it from glance.

    Returns: A list of dictionaries that describe VDIs
    """
    filename = ""
    if CONF.xenserver.cache_images:
        args = {}
        args['cached-image'] = image_id
        args['new-image-uuid'] = str(uuid.uuid4())
        # The dom0 plugin returns "" when the image is not in its cache.
        filename = session.call_plugin('kernel', 'create_kernel_ramdisk',
                                       args)

    if filename == "":
        # Cache miss (or caching disabled): stream from glance instead.
        return _fetch_disk_image(context, session, instance, name_label,
                                 image_id, image_type)
    else:
        vdi_type = ImageType.to_string(image_type)
        return {vdi_type: dict(uuid=None, file=filename)}


def create_kernel_and_ramdisk(context, session, instance, name_label):
    """Fetch this instance's kernel and ramdisk images, if any.

    Returns a (kernel_file, ramdisk_file) tuple; either element is None
    when the instance does not use that image.
    """
    kernel_file = None
    ramdisk_file = None
    if instance['kernel_id']:
        vdis = _create_kernel_image(context, session,
                                    instance, name_label,
                                    instance['kernel_id'], ImageType.KERNEL)
        kernel_file = vdis['kernel'].get('file')
    if instance['ramdisk_id']:
        vdis = _create_kernel_image(context, session,
                                    instance, name_label,
                                    instance['ramdisk_id'], ImageType.RAMDISK)
        ramdisk_file = vdis['ramdisk'].get('file')
    return kernel_file, ramdisk_file


def destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
    """Ask the dom0 plugin to delete kernel/ramdisk files, if given."""
    args = {}
    if kernel:
        args['kernel-file'] = kernel
    if ramdisk:
        args['ramdisk-file'] = ramdisk
    if args:
        LOG.debug("Removing kernel/ramdisk files from dom0",
                  instance=instance)
        session.call_plugin('kernel', 'remove_kernel_ramdisk', args)


def _get_image_vdi_label(image_id):
    # This label is how cached image VDIs are found again later
    # (see _find_cached_image).
    return 'Glance Image %s' % image_id


def _create_cached_image(context, session, instance, name_label,
                         image_id, image_type):
    """Create a VDI for the instance from the image cache.

    On a cache miss the image is downloaded once (under a per-image lock)
    and tagged as the cache entry; the instance then gets a clone/copy of
    the cached VDI.  Returns (downloaded, vdis).
    """
    sr_ref = safe_find_sr(session)
    sr_type = session.call_xenapi('SR.get_type', sr_ref)

    if CONF.use_cow_images and sr_type != "ext":
        LOG.warning(_LW("Fast cloning is only supported on default local SR "
                        "of type ext. SR on this system was found to be of "
                        "type %s. Ignoring the cow flag."), sr_type)

    # Serialize per image id so concurrent spawns don't download the same
    # image twice.
    @utils.synchronized('xenapi-image-cache' + image_id)
    def _create_cached_image_impl(context, session, instance, name_label,
                                  image_id, image_type, sr_ref):
        cache_vdi_ref = _find_cached_image(session, image_id, sr_ref)
        downloaded = False
        if cache_vdi_ref is None:
            downloaded = True
            vdis = _fetch_image(context, session, instance, name_label,
                                image_id, image_type)

            cache_vdi_ref = session.call_xenapi(
                    'VDI.get_by_uuid', vdis['root']['uuid'])
            # Tag the freshly downloaded VDI so it is found as the cache
            # entry next time.
            session.call_xenapi('VDI.set_name_label', cache_vdi_ref,
                                _get_image_vdi_label(image_id))
            session.call_xenapi('VDI.set_name_description', cache_vdi_ref,
                                'root')
            session.call_xenapi('VDI.add_to_other_config',
                                cache_vdi_ref, 'image-id', str(image_id))

        if CONF.use_cow_images:
            new_vdi_ref = _clone_vdi(session, cache_vdi_ref)
        elif sr_type == 'ext':
            new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance,
                                         cache_vdi_ref)
        else:
            new_vdi_ref = session.call_xenapi("VDI.copy", cache_vdi_ref,
                                              sr_ref)

        # Strip the cache tagging from the instance's own copy.
        session.call_xenapi('VDI.set_name_label', new_vdi_ref, '')
        session.call_xenapi('VDI.set_name_description', new_vdi_ref, '')
        session.call_xenapi('VDI.remove_from_other_config',
                            new_vdi_ref, 'image-id')

        vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
        return downloaded, vdi_uuid

    downloaded, vdi_uuid = _create_cached_image_impl(context, session,
                                                     instance, name_label,
                                                     image_id, image_type,
                                                     sr_ref)

    vdis = {}
    vdi_type = ImageType.get_role(image_type)
    vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
    return downloaded, vdis
def create_image(context, session, instance, name_label, image_id,
                 image_type):
    """Creates VDI from the image stored in the local cache. If the image
    is not present in the cache, it streams it from glance.

    Returns: A list of dictionaries that describe VDIs
    """
    cache_images = CONF.xenserver.cache_images.lower()

    # Determine if the image is cacheable
    if image_type == ImageType.DISK_ISO:
        cache = False
    elif cache_images == 'all':
        cache = True
    elif cache_images == 'some':
        # Per-instance opt-in via system metadata.
        sys_meta = utils.instance_sys_meta(instance)
        try:
            cache = strutils.bool_from_string(sys_meta['image_cache_in_nova'])
        except KeyError:
            cache = False
    elif cache_images == 'none':
        cache = False
    else:
        LOG.warning(_LW("Unrecognized cache_images value '%s', defaulting to"
                        " True"), CONF.xenserver.cache_images)
        cache = True

    # Fetch (and cache) the image
    start_time = timeutils.utcnow()
    if cache:
        downloaded, vdis = _create_cached_image(context, session, instance,
                                                name_label, image_id,
                                                image_type)
    else:
        vdis = _fetch_image(context, session, instance, name_label,
                            image_id, image_type)
        downloaded = True
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    LOG.info(_LI("Image creation data, cacheable: %(cache)s, "
                 "downloaded: %(downloaded)s duration: %(duration).2f secs "
                 "for image %(image_id)s"),
             {'image_id': image_id, 'cache': cache, 'downloaded': downloaded,
              'duration': duration})

    # Stamp every resulting VDI with our standard labels/metadata.
    for vdi_type, vdi in six.iteritems(vdis):
        vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi['uuid'])
        _set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type,
                      instance)

    return vdis
def _fetch_image(context, session, instance, name_label, image_id,
                 image_type):
    """Fetch image from glance based on image type.

    Returns: A single filename if image_type is KERNEL or RAMDISK
             A list of dictionaries that describe VDIs, otherwise
    """
    if image_type == ImageType.DISK_VHD:
        vdis = _fetch_vhd_image(context, session, instance, image_id)
    else:
        vdis = _fetch_disk_image(context, session, instance, name_label,
                                 image_id, image_type)

    for vdi_type, vdi in six.iteritems(vdis):
        LOG.debug("Fetched VDIs of type '%(vdi_type)s' with UUID"
                  " '%(vdi_uuid)s'",
                  {'vdi_type': vdi_type, 'vdi_uuid': vdi['uuid']},
                  instance=instance)

    return vdis


def _make_uuid_stack():
    # NOTE(sirp): The XenAPI plugins run under Python 2.4
    # which does not have the `uuid` module. To work around this,
    # we generate the uuids here (under Python 2.6+) and
    # pass them as arguments
    return [str(uuid.uuid4()) for _unused in range(MAX_VDI_CHAIN_SIZE)]


def _image_uses_bittorrent(context, instance):
    """Return True when this image should be fetched via bittorrent."""
    torrent_images = CONF.xenserver.torrent_images.lower()

    if torrent_images == 'all':
        return True
    if torrent_images == 'some':
        # Per-instance opt-in via system metadata.
        sys_meta = utils.instance_sys_meta(instance)
        try:
            return strutils.bool_from_string(sys_meta['image_bittorrent'])
        except KeyError:
            return False
    if torrent_images != 'none':
        LOG.warning(_LW("Invalid value '%s' for torrent_images"),
                    torrent_images)
    return False


def _default_download_handler():
    # TODO(sirp): This should be configurable like upload_handler
    return importutils.import_object(
            'nova.virt.xenapi.image.glance.GlanceStore')


def _choose_download_handler(context, instance):
    """Pick the bittorrent store when enabled, glance otherwise."""
    if _image_uses_bittorrent(context, instance):
        return importutils.import_object(
                'nova.virt.xenapi.image.bittorrent.BittorrentStore')
    return _default_download_handler()


def get_compression_level():
    """Return the configured image compression level (1-9), or None."""
    level = CONF.xenserver.image_compression_level
    if level is None:
        return None
    if 1 <= level <= 9:
        return level
    LOG.warning(_LW("Invalid value '%d' for image_compression_level"),
                level)
    return None
def _fetch_vhd_image(context, session, instance, image_id):
    """Tell glance to download an image and put the VHDs into the SR

    Returns: A list of dictionaries that describe VDIs
    """
    LOG.debug("Asking xapi to fetch vhd image %s", image_id,
              instance=instance)

    handler = _choose_download_handler(context, instance)

    try:
        vdis = handler.download_image(context, session, instance, image_id)
    except Exception:
        default_handler = _default_download_handler()

        # Using type() instead of isinstance() so instance of subclass doesn't
        # test as equivalent
        if type(handler) == type(default_handler):
            raise

        LOG.exception(_LE("Download handler '%(handler)s' raised an"
                          " exception, falling back to default handler"
                          " '%(default_handler)s'"),
                      {'handler': handler,
                       'default_handler': default_handler})

        vdis = default_handler.download_image(
                context, session, instance, image_id)

    # Ensure we can see the import VHDs as VDIs
    scan_default_sr(session)

    vdi_uuid = vdis['root']['uuid']
    try:
        _check_vdi_size(context, session, instance, vdi_uuid)
    except Exception:
        with excutils.save_and_reraise_exception():
            msg = "Error while checking vdi size"
            LOG.debug(msg, instance=instance, exc_info=True)
            # Clean up every imported VDI before re-raising.
            for vdi in vdis.values():
                vdi_uuid = vdi['uuid']
                vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
                safe_destroy_vdis(session, [vdi_ref])

    return vdis


def _get_vdi_chain_size(session, vdi_uuid):
    """Compute the total size of a VDI chain, starting with the specified
    VDI UUID.

    This will walk the VDI chain to the root, add the size of each VDI into
    the total.
    """
    size_bytes = 0
    for vdi_rec in _walk_vdi_chain(session, vdi_uuid):
        cur_vdi_uuid = vdi_rec['uuid']
        vdi_size_bytes = int(vdi_rec['physical_utilisation'])
        LOG.debug('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
                  '%(vdi_size_bytes)d',
                  {'cur_vdi_uuid': cur_vdi_uuid,
                   'vdi_size_bytes': vdi_size_bytes})
        size_bytes += vdi_size_bytes
    return size_bytes


def _check_vdi_size(context, session, instance, vdi_uuid):
    """Raise FlavorDiskTooSmall when the VDI chain exceeds the flavor's
    root disk size (plus a fudge factor).  root_gb=0 disables the check.
    """
    flavor = instance.get_flavor()
    allowed_size = (flavor.root_gb +
                    VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi
    if not flavor.root_gb:
        # root_gb=0 indicates that we're disabling size checks
        return

    size = _get_vdi_chain_size(session, vdi_uuid)
    if size > allowed_size:
        LOG.error(_LE("Image size %(size)d exceeded flavor "
                      "allowed size %(allowed_size)d"),
                  {'size': size, 'allowed_size': allowed_size},
                  instance=instance)

        raise exception.FlavorDiskTooSmall()


def _fetch_disk_image(context, session, instance, name_label, image_id,
                      image_type):
    """Fetch the image from Glance

    NOTE:
    Unlike _fetch_vhd_image, this method does not use the Glance
    plugin; instead, it streams the disks through domU to the VDI
    directly.

    Returns: A single filename if image_type is KERNEL_RAMDISK
             A list of dictionaries that describe VDIs, otherwise
    """
    # FIXME(sirp): Since the Glance plugin seems to be required for the
    # VHD disk, it may be worth using the plugin for both VHD and RAW and
    # DISK restores
    image_type_str = ImageType.to_string(image_type)
    LOG.debug("Fetching image %(image_id)s, type %(image_type_str)s",
              {'image_id': image_id, 'image_type_str': image_type_str},
              instance=instance)

    if image_type == ImageType.DISK_ISO:
        sr_ref = _safe_find_iso_sr(session)
    else:
        sr_ref = safe_find_sr(session)

    glance_image = image_utils.GlanceImage(context, image_id)
    if glance_image.is_raw_tgz():
        image = image_utils.RawTGZImage(glance_image)
    else:
        image = image_utils.RawImage(glance_image)

    virtual_size = image.get_size()
    vdi_size = virtual_size
    LOG.debug("Size for image %(image_id)s: %(virtual_size)d",
              {'image_id': image_id, 'virtual_size': virtual_size},
              instance=instance)
    if image_type == ImageType.DISK:
        # Make room for MBR.
        vdi_size += MBR_SIZE_BYTES
    elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
          vdi_size > CONF.xenserver.max_kernel_ramdisk_size):
        max_size = CONF.xenserver.max_kernel_ramdisk_size
        raise exception.NovaException(
            _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
              "max %(max_size)d bytes") %
            {'vdi_size': vdi_size, 'max_size': max_size})

    vdi_ref = create_vdi(session, sr_ref, instance, name_label,
                         image_type_str, vdi_size)
    # From this point we have a VDI on Xen host;
    # If anything goes wrong, we need to remember its uuid.
    try:
        filename = None
        vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)

        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
            _stream_disk(
                session, image.stream_to, image_type, virtual_size, dev)

        if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
            # We need to invoke a plugin for copying the
            # content of the VDI into the proper path.
            LOG.debug("Copying VDI %s to /boot/guest on dom0",
                      vdi_ref, instance=instance)

            args = {}
            args['vdi-ref'] = vdi_ref

            # Let the plugin copy the correct number of bytes.
            args['image-size'] = str(vdi_size)
            if CONF.xenserver.cache_images:
                args['cached-image'] = image_id
            filename = session.call_plugin('kernel', 'copy_vdi', args)

            # Remove the VDI as it is not needed anymore.
            destroy_vdi(session, vdi_ref)
            LOG.debug("Kernel/Ramdisk VDI %s destroyed", vdi_ref,
                      instance=instance)
            vdi_role = ImageType.get_role(image_type)
            return {vdi_role: dict(uuid=None, file=filename)}
        else:
            vdi_role = ImageType.get_role(image_type)
            return {vdi_role: dict(uuid=vdi_uuid, file=None)}
    except (session.XenAPI.Failure, IOError, OSError) as e:
        # We look for XenAPI and OS failures.
        LOG.exception(_LE("Failed to fetch glance image"),
                      instance=instance)
        # Attach the leaked VDI's details so the caller can clean up.
        e.args = e.args + ([dict(type=ImageType.to_string(image_type),
                                 uuid=vdi_uuid,
                                 file=filename)],)
        raise
def determine_disk_image_type(image_meta):
    """Disk Image Types are used to determine where the kernel will reside
    within an image. To figure out which type we're dealing with, we use
    the following rules:

    1. If we're using Glance, we can use the image_type field to
       determine the image_type

    2. If we're not using Glance, then we need to deduce this based on
       whether a kernel_id is specified.
    """
    if not image_meta or 'disk_format' not in image_meta:
        return None

    disk_format_map = {
        'ami': ImageType.DISK,
        'aki': ImageType.KERNEL,
        'ari': ImageType.RAMDISK,
        'raw': ImageType.DISK_RAW,
        'vhd': ImageType.DISK_VHD,
        'iso': ImageType.DISK_ISO,
    }

    disk_format = image_meta['disk_format']
    if disk_format not in disk_format_map:
        raise exception.InvalidDiskFormat(disk_format=disk_format)
    image_type = disk_format_map[disk_format]

    LOG.debug("Detected %(image_type_str)s format for image %(image_ref)s",
              {'image_type_str': ImageType.to_string(image_type),
               'image_ref': image_meta.get('id')})

    return image_type


def determine_vm_mode(instance, disk_image_type):
    """Choose the virtualization mode (PV or HVM) for an instance."""
    current_mode = vm_mode.get_from_instance(instance)
    if current_mode in (vm_mode.XEN, vm_mode.HVM):
        return current_mode

    os_type = instance['os_type']
    if os_type == "linux":
        return vm_mode.XEN
    if os_type == "windows":
        return vm_mode.HVM

    # disk_image_type specific default for backwards compatibility
    if disk_image_type in (ImageType.DISK_VHD, ImageType.DISK):
        return vm_mode.XEN

    # most images run OK as HVM
    return vm_mode.HVM


def set_vm_name_label(session, vm_ref, name_label):
    """Rename a VM."""
    session.call_xenapi("VM.set_name_label", vm_ref, name_label)


def list_vms(session):
    """Yield (ref, record) for every guest VM resident on this host."""
    vms = session.call_xenapi("VM.get_all_records_where",
                              'field "is_control_domain"="false" and '
                              'field "is_a_template"="false" and '
                              'field "resident_on"="%s"' % session.host_ref)
    for vm_ref, vm_rec in vms.items():
        yield vm_ref, vm_rec
def lookup_vm_vdis(session, vm_ref):
    """Look for the VDIs that are attached to the VM.

    Attached volumes (VBDs marked with 'osvol' in other_config) are
    excluded; VBDs whose VDI cannot be resolved are logged and skipped.
    """
    # Firstly we get the VBDs, then the VDIs.
    # TODO(Armando): do we leave the read-only devices?
    vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
    vdi_refs = []
    if vbd_refs:
        for vbd_ref in vbd_refs:
            try:
                vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
                # Test valid VDI
                vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
                LOG.debug('VDI %s is still available', vdi_uuid)
                vbd_other_config = session.call_xenapi("VBD.get_other_config",
                                                       vbd_ref)
                if not vbd_other_config.get('osvol'):
                    # This is not an attached volume
                    vdi_refs.append(vdi_ref)
            except session.XenAPI.Failure:
                # BUG FIX: message previously started with a stray
                # double-quote ('"Look for the VDIs failed').
                LOG.exception(_LE('Look for the VDIs failed'))
    return vdi_refs


def lookup(session, name_label, check_rescue=False):
    """Look the instance up and return it if available.

    :param:check_rescue: if True will return the 'name'-rescue vm if it
                         exists, instead of just 'name'
    """
    if check_rescue:
        result = lookup(session, name_label + '-rescue', False)
        if result:
            return result
    vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
    n = len(vm_refs)
    if n == 0:
        return None
    elif n > 1:
        raise exception.InstanceExists(name=name_label)
    else:
        return vm_refs[0]
def preconfigure_instance(session, instance, vdi_ref, network_info):
    """Makes alterations to the image before launching as part of spawn.
    """
    key = str(instance['key_data'])
    net = netutils.get_injected_network_template(network_info)
    metadata = instance['metadata']

    # As mounting the image VDI is expensive, we only want do it once,
    # if at all, so determine whether it's required first, and then do
    # everything
    mount_required = key or net or metadata
    if not mount_required:
        return

    with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
        _mounted_processing(dev, key, net, metadata)


def lookup_kernel_ramdisk(session, vm):
    """Return the (PV_kernel, PV_ramdisk) pair of a VM, or (None, None)."""
    vm_rec = session.call_xenapi("VM.get_record", vm)
    if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
        return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
    else:
        return (None, None)


def is_snapshot(session, vm):
    """Return True when the VM record is both a template and a snapshot."""
    vm_rec = session.call_xenapi("VM.get_record", vm)
    if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec:
        return vm_rec['is_a_template'] and vm_rec['is_a_snapshot']
    else:
        return False


def get_power_state(session, vm_ref):
    """Map the XenAPI power state of a VM to a nova power state."""
    xapi_state = session.call_xenapi("VM.get_power_state", vm_ref)
    return XENAPI_POWER_STATE[xapi_state]


def compile_info(session, vm_ref):
    """Fill record with VM status information."""
    power_state = get_power_state(session, vm_ref)
    max_mem = session.call_xenapi("VM.get_memory_static_max", vm_ref)
    mem = session.call_xenapi("VM.get_memory_dynamic_max", vm_ref)
    num_cpu = session.call_xenapi("VM.get_VCPUs_max", vm_ref)

    # NOTE(review): long() is Python 2 only — confirm the target
    # interpreter before porting this module to Python 3.
    return hardware.InstanceInfo(state=power_state,
                                 max_mem_kb=long(max_mem) >> 10,
                                 mem_kb=long(mem) >> 10,
                                 num_cpu=num_cpu)


def compile_instance_diagnostics(instance, vm_rec):
    """Build a Diagnostics object from a VM record."""
    vm_power_state_int = XENAPI_POWER_STATE[vm_rec['power_state']]
    vm_power_state = power_state.STATE_MAP[vm_power_state_int]
    config_drive = configdrive.required_by(instance)

    diags = diagnostics.Diagnostics(state=vm_power_state,
                                    driver='xenapi',
                                    config_drive=config_drive)

    for cpu_num in range(0, long(vm_rec['VCPUs_max'])):
        diags.add_cpu()

    for vif in vm_rec['VIFs']:
        diags.add_nic()

    for vbd in vm_rec['VBDs']:
        diags.add_disk()

    # NOTE(review): long() and '/' (true division on py3) here assume
    # Python 2 semantics.
    max_mem_bytes = long(vm_rec['memory_dynamic_max'])
    diags.memory_details.maximum = max_mem_bytes / units.Mi

    return diags


def compile_diagnostics(vm_rec):
    """Compile VM diagnostics data."""
    try:
        keys = []
        diags = {}
        vm_uuid = vm_rec["uuid"]
        xml = _get_rrd(_get_rrd_server(), vm_uuid)
        if xml:
            rrd = minidom.parseString(xml)
            for i, node in enumerate(rrd.firstChild.childNodes):
                # Provide the last update of the information
                if node.localName == 'lastupdate':
                    diags['last_update'] = node.firstChild.data

                # Create a list of the diagnostic keys (in their order)
                if node.localName == 'ds':
                    ref = node.childNodes
                    # Name and Value
                    if len(ref) > 6:
                        keys.append(ref[0].firstChild.data)

                # Read the last row of the first RRA to get the latest info
                if node.localName == 'rra':
                    rows = node.childNodes[4].childNodes
                    last_row = rows[rows.length - 1].childNodes

                    # Pair the last row's values with the keys collected
                    # above, in order.
                    for j, value in enumerate(last_row):
                        diags[keys[j]] = value.firstChild.data
                    break

        return diags
    except expat.ExpatError as e:
        LOG.exception(_LE('Unable to parse rrd of %s'), e)
        return {"Unable to retrieve diagnostics": e}


def fetch_bandwidth(session):
    # Delegate to the dom0 'bandwidth' plugin, which returns all VIF
    # bandwidth counters in a single serialized call.
    bw = session.call_plugin_serialized('bandwidth', 'fetch_all_bandwidth')
    return bw
@utils.synchronized('sr-scan-' + sr_ref) def do_scan(sr_ref): LOG.debug("Scanning SR %s", sr_ref) attempt = 1 while True: try: return session.call_xenapi('SR.scan', sr_ref) except session.XenAPI.Failure as exc: with excutils.save_and_reraise_exception() as ctxt: if exc.details[0] == 'SR_BACKEND_FAILURE_40': if attempt < max_attempts: ctxt.reraise = False LOG.warning(_LW("Retry SR scan due to error: " "%s"), exc) greenthread.sleep(2 ** attempt) attempt += 1 do_scan(sr_ref) def scan_default_sr(session): """Looks for the system default SR and triggers a re-scan.""" sr_ref = safe_find_sr(session) _scan_sr(session, sr_ref) return sr_ref def safe_find_sr(session): """Same as _find_sr except raises a NotFound exception if SR cannot be determined """ sr_ref = _find_sr(session) if sr_ref is None: raise exception.StorageRepositoryNotFound() return sr_ref def _find_sr(session): """Return the storage repository to hold VM images.""" host = session.host_ref try: tokens = CONF.xenserver.sr_matching_filter.split(':') filter_criteria = tokens[0] filter_pattern = tokens[1] except IndexError: # oops, flag is invalid LOG.warning(_LW("Flag sr_matching_filter '%s' does not respect " "formatting convention"), CONF.xenserver.sr_matching_filter) return None if filter_criteria == 'other-config': key, value = filter_pattern.split('=', 1) for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'): if not (key in sr_rec['other_config'] and sr_rec['other_config'][key] == value): continue for pbd_ref in sr_rec['PBDs']: pbd_rec = session.get_rec('PBD', pbd_ref) if pbd_rec and pbd_rec['host'] == host: return sr_ref elif filter_criteria == 'default-sr' and filter_pattern == 'true': pool_ref = session.call_xenapi('pool.get_all')[0] sr_ref = session.call_xenapi('pool.get_default_SR', pool_ref) if sr_ref: return sr_ref # No SR found! LOG.error(_LE("XenAPI is unable to find a Storage Repository to " "install guest instances on. Please check your " "configuration (e.g. 
set a default SR for the pool) " "and/or configure the flag 'sr_matching_filter'.")) return None def _safe_find_iso_sr(session): """Same as _find_iso_sr except raises a NotFound exception if SR cannot be determined """ sr_ref = _find_iso_sr(session) if sr_ref is None: raise exception.NotFound(_('Cannot find SR of content-type ISO')) return sr_ref def _find_iso_sr(session): """Return the storage repository to hold ISO images.""" host = session.host_ref for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'): LOG.debug("ISO: looking at SR %s", sr_rec) if not sr_rec['content_type'] == 'iso': LOG.debug("ISO: not iso content") continue if 'i18n-key' not in sr_rec['other_config']: LOG.debug("ISO: iso content_type, no 'i18n-key' key") continue if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso': LOG.debug("ISO: iso content_type, i18n-key value not " "'local-storage-iso'") continue LOG.debug("ISO: SR MATCHing our criteria") for pbd_ref in sr_rec['PBDs']: LOG.debug("ISO: ISO, looking to see if it is host local") pbd_rec = session.get_rec('PBD', pbd_ref) if not pbd_rec: LOG.debug("ISO: PBD %s disappeared", pbd_ref) continue pbd_rec_host = pbd_rec['host'] LOG.debug("ISO: PBD matching, want %(pbd_rec)s, have %(host)s", {'pbd_rec': pbd_rec, 'host': host}) if pbd_rec_host == host: LOG.debug("ISO: SR with local PBD") return sr_ref return None def _get_rrd_server(): """Return server's scheme and address to use for retrieving RRD XMLs.""" xs_url = urlparse.urlparse(CONF.xenserver.connection_url) return [xs_url.scheme, xs_url.netloc] def _get_rrd(server, vm_uuid): """Return the VM RRD XML as a string.""" try: xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % ( server[0], CONF.xenserver.connection_username, CONF.xenserver.connection_password, server[1], vm_uuid)) return xml.read() except IOError: LOG.exception(_LE('Unable to obtain RRD XML for VM %(vm_uuid)s with ' 'server details: %(server)s.'), {'vm_uuid': vm_uuid, 'server': server}) return None def 
_get_all_vdis_in_sr(session, sr_ref): for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref): vdi_rec = session.get_rec('VDI', vdi_ref) # Check to make sure the record still exists. It may have # been deleted between the get_all call and get_rec call if vdi_rec: yield vdi_ref, vdi_rec def get_instance_vdis_for_sr(session, vm_ref, sr_ref): """Return opaqueRef for all the vdis which live on sr.""" for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref): try: vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref) if sr_ref == session.call_xenapi('VDI.get_SR', vdi_ref): yield vdi_ref except session.XenAPI.Failure: continue def _get_vhd_parent_uuid(session, vdi_ref, vdi_rec=None): if vdi_rec is None: vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) if 'vhd-parent' not in vdi_rec['sm_config']: return None parent_uuid = vdi_rec['sm_config']['vhd-parent'] vdi_uuid = vdi_rec['uuid'] LOG.debug('VHD %(vdi_uuid)s has parent %(parent_uuid)s', {'vdi_uuid': vdi_uuid, 'parent_uuid': parent_uuid}) return parent_uuid def _walk_vdi_chain(session, vdi_uuid): """Yield vdi_recs for each element in a VDI chain.""" scan_default_sr(session) while True: vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid) vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) yield vdi_rec parent_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec) if not parent_uuid: break vdi_uuid = parent_uuid def _is_vdi_a_snapshot(vdi_rec): """Ensure VDI is a snapshot, and not cached image.""" is_a_snapshot = vdi_rec['is_a_snapshot'] image_id = vdi_rec['other_config'].get('image-id') return is_a_snapshot and not image_id def _child_vhds(session, sr_ref, vdi_uuid_list, old_snapshots_only=False): """Return the immediate children of a given VHD. This is not recursive, only the immediate children are returned. 
""" children = set() for ref, rec in _get_all_vdis_in_sr(session, sr_ref): rec_uuid = rec['uuid'] if rec_uuid in vdi_uuid_list: continue parent_uuid = _get_vhd_parent_uuid(session, ref, rec) if parent_uuid not in vdi_uuid_list: continue if old_snapshots_only and not _is_vdi_a_snapshot(rec): continue children.add(rec_uuid) return list(children) def _count_children(session, parent_vdi_uuid, sr_ref): # Search for any other vdi which has the same parent as us to work out # whether we have siblings and therefore if coalesce is possible children = 0 for _ref, rec in _get_all_vdis_in_sr(session, sr_ref): if (rec['sm_config'].get('vhd-parent') == parent_vdi_uuid): children = children + 1 return children def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref, vdi_uuid_list): """Spin until the parent VHD is coalesced into one of the VDIs in the list vdi_uuid_list is a list of acceptable final parent VDIs for vdi_ref; once the parent of vdi_ref is in vdi_uuid_chain we consider the coalesce over. The use case is there are any number of VDIs between those in vdi_uuid_list and vdi_ref that we expect to be coalesced, but any of those in vdi_uuid_list may also be coalesced (except the base UUID - which is guaranteed to remain) """ # If the base disk was a leaf node, there will be no coalescing # after a VDI snapshot. if len(vdi_uuid_list) == 1: LOG.debug("Old chain is single VHD, coalesce not possible.", instance=instance) return # If the parent of the original disk has other children, # there will be no coalesce because of the VDI snapshot. # For example, the first snapshot for an instance that has been # spawned from a cached image, will not coalesce, because of this rule. parent_vdi_uuid = vdi_uuid_list[1] if _count_children(session, parent_vdi_uuid, sr_ref) > 1: LOG.debug("Parent has other children, coalesce is unlikely.", instance=instance) return # When the VDI snapshot is taken, a new parent is created. 
# Assuming it is not one of the above cases, that new parent # can be coalesced, so we need to wait for that to happen. max_attempts = CONF.xenserver.vhd_coalesce_max_attempts # Remove the leaf node from list, to get possible good parents # when the coalesce has completed. # Its possible that other coalesce operation happen, so we need # to consider the full chain, rather than just the most recent parent. good_parent_uuids = vdi_uuid_list[1:] for i in range(max_attempts): # NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config` # matches the underlying VHDs. # This can also kick XenServer into performing a pending coalesce. _scan_sr(session, sr_ref) parent_uuid = _get_vhd_parent_uuid(session, vdi_ref) if parent_uuid and (parent_uuid not in good_parent_uuids): LOG.debug("Parent %(parent_uuid)s not yet in parent list" " %(good_parent_uuids)s, waiting for coalesce...", {'parent_uuid': parent_uuid, 'good_parent_uuids': good_parent_uuids}, instance=instance) else: LOG.debug("Coalesce detected, because parent is: %s" % parent_uuid, instance=instance) return greenthread.sleep(CONF.xenserver.vhd_coalesce_poll_interval) msg = (_("VHD coalesce attempts exceeded (%d)" ", giving up...") % max_attempts) raise exception.NovaException(msg) def _remap_vbd_dev(dev): """Return the appropriate location for a plugged-in VBD device Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be fixed in future versions: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875 For now, we work around it by just doing a string replace. 
""" # NOTE(sirp): This hack can go away when we pull support for Maverick should_remap = CONF.xenserver.remap_vbd_dev if not should_remap: return dev old_prefix = 'xvd' new_prefix = CONF.xenserver.remap_vbd_dev_prefix remapped_dev = dev.replace(old_prefix, new_prefix) return remapped_dev def _wait_for_device(dev): """Wait for device node to appear.""" for i in range(0, CONF.xenserver.block_device_creation_timeout): dev_path = utils.make_dev_path(dev) if os.path.exists(dev_path): return time.sleep(1) raise exception.StorageError( reason=_('Timeout waiting for device %s to be created') % dev) def cleanup_attached_vdis(session): """Unplug any instance VDIs left after an unclean restart.""" this_vm_ref = _get_this_vm_ref(session) vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref) for vbd_ref in vbd_refs: try: vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref) vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref) except session.XenAPI.Failure as e: if e.details[0] != 'HANDLE_INVALID': raise continue if 'nova_instance_uuid' in vdi_rec['other_config']: # Belongs to an instance and probably left over after an # unclean restart LOG.info(_LI('Disconnecting stale VDI %s from compute domU'), vdi_rec['uuid']) unplug_vbd(session, vbd_ref, this_vm_ref) destroy_vbd(session, vbd_ref) @contextlib.contextmanager def vdi_attached_here(session, vdi_ref, read_only=False): this_vm_ref = _get_this_vm_ref(session) vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect', read_only=read_only, bootable=False) try: LOG.debug('Plugging VBD %s ... 
', vbd_ref) session.VBD.plug(vbd_ref, this_vm_ref) try: LOG.debug('Plugging VBD %s done.', vbd_ref) orig_dev = session.call_xenapi("VBD.get_device", vbd_ref) LOG.debug('VBD %(vbd_ref)s plugged as %(orig_dev)s', {'vbd_ref': vbd_ref, 'orig_dev': orig_dev}) dev = _remap_vbd_dev(orig_dev) if dev != orig_dev: LOG.debug('VBD %(vbd_ref)s plugged into wrong dev, ' 'remapping to %(dev)s', {'vbd_ref': vbd_ref, 'dev': dev}) _wait_for_device(dev) yield dev finally: utils.execute('sync', run_as_root=True) LOG.debug('Destroying VBD for VDI %s ... ', vdi_ref) unplug_vbd(session, vbd_ref, this_vm_ref) finally: try: destroy_vbd(session, vbd_ref) except exception.StorageError: # destroy_vbd() will log error pass LOG.debug('Destroying VBD for VDI %s done.', vdi_ref) def _get_sys_hypervisor_uuid(): with file('/sys/hypervisor/uuid') as f: return f.readline().strip() def get_this_vm_uuid(session): if session and session.is_local_connection: # UUID is the control domain running on this host vms = session.call_xenapi("VM.get_all_records_where", 'field "is_control_domain"="true" and ' 'field "resident_on"="%s"' % session.host_ref) return vms[vms.keys()[0]]['uuid'] try: return _get_sys_hypervisor_uuid() except IOError: # Some guest kernels (without 5c13f8067745efc15f6ad0158b58d57c44104c25) # cannot read from uuid after a reboot. Fall back to trying xenstore. 
# See https://bugs.launchpad.net/ubuntu/+source/xen-api/+bug/1081182 domid, _ = utils.execute('xenstore-read', 'domid', run_as_root=True) vm_key, _ = utils.execute('xenstore-read', '/local/domain/%s/vm' % domid.strip(), run_as_root=True) return vm_key.strip()[4:] def _get_this_vm_ref(session): return session.call_xenapi("VM.get_by_uuid", get_this_vm_uuid(session)) def _get_partitions(dev): """Return partition information (num, size, type) for a device.""" dev_path = utils.make_dev_path(dev) out, _err = utils.execute('parted', '--script', '--machine', dev_path, 'unit s', 'print', run_as_root=True) lines = [line for line in out.split('\n') if line] partitions = [] LOG.debug("Partitions:") for line in lines[2:]: line = line.rstrip(';') num, start, end, size, fstype, name, flags = line.split(':') num = int(num) start = int(start.rstrip('s')) end = int(end.rstrip('s')) size = int(size.rstrip('s')) LOG.debug(" %(num)s: %(fstype)s %(size)d sectors", {'num': num, 'fstype': fstype, 'size': size}) partitions.append((num, start, size, fstype, name, flags)) return partitions def _stream_disk(session, image_service_func, image_type, virtual_size, dev): offset = 0 if image_type == ImageType.DISK: offset = MBR_SIZE_BYTES _write_partition(session, virtual_size, dev) dev_path = utils.make_dev_path(dev) with utils.temporary_chown(dev_path): with open(dev_path, 'wb') as f: f.seek(offset) image_service_func(f) def _write_partition(session, virtual_size, dev): dev_path = utils.make_dev_path(dev) primary_first = MBR_SIZE_SECTORS primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 LOG.debug('Writing partition table %(primary_first)d %(primary_last)d' ' to %(dev_path)s...', {'primary_first': primary_first, 'primary_last': primary_last, 'dev_path': dev_path}) _make_partition(session, dev, "%ds" % primary_first, "%ds" % primary_last) LOG.debug('Writing partition table %s done.', dev_path) def _repair_filesystem(partition_path): # Exit Code 1 = File system errors corrected # 2 
= File system errors corrected, system needs a reboot utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True, check_exit_code=[0, 1, 2]) def _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags): """Resize partition and fileystem. This assumes we are dealing with a single primary partition and using ext3 or ext4. """ size = new_sectors - start end = new_sectors - 1 dev_path = utils.make_dev_path(dev) partition_path = utils.make_dev_path(dev, partition=1) # Replay journal if FS wasn't cleanly unmounted _repair_filesystem(partition_path) # Remove ext3 journal (making it ext2) utils.execute('tune2fs', '-O ^has_journal', partition_path, run_as_root=True) if new_sectors < old_sectors: # Resizing down, resize filesystem before partition resize try: utils.execute('resize2fs', partition_path, '%ds' % size, run_as_root=True) except processutils.ProcessExecutionError as exc: LOG.error(six.text_type(exc)) reason = _("Shrinking the filesystem down with resize2fs " "has failed, please check if you have " "enough free space on your disk.") raise exception.ResizeError(reason=reason) utils.execute('parted', '--script', dev_path, 'rm', '1', run_as_root=True) utils.execute('parted', '--script', dev_path, 'mkpart', 'primary', '%ds' % start, '%ds' % end, run_as_root=True) if "boot" in flags.lower(): utils.execute('parted', '--script', dev_path, 'set', '1', 'boot', 'on', run_as_root=True) if new_sectors > old_sectors: # Resizing up, resize filesystem after partition resize utils.execute('resize2fs', partition_path, run_as_root=True) # Add back journal utils.execute('tune2fs', '-j', partition_path, run_as_root=True) def _log_progress_if_required(left, last_log_time, virtual_size): if timeutils.is_older_than(last_log_time, PROGRESS_INTERVAL_SECONDS): last_log_time = timeutils.utcnow() complete_pct = float(virtual_size - left) / virtual_size * 100 LOG.debug("Sparse copy in progress, " "%(complete_pct).2f%% complete. 
" "%(left)s bytes left to copy", {"complete_pct": complete_pct, "left": left}) return last_log_time def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096): """Copy data, skipping long runs of zeros to create a sparse file.""" start_time = last_log_time = timeutils.utcnow() EMPTY_BLOCK = '\0' * block_size bytes_read = 0 skipped_bytes = 0 left = virtual_size LOG.debug("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " "virtual_size=%(virtual_size)d block_size=%(block_size)d", {'src_path': src_path, 'dst_path': dst_path, 'virtual_size': virtual_size, 'block_size': block_size}) # NOTE(sirp): we need read/write access to the devices; since we don't have # the luxury of shelling out to a sudo'd command, we temporarily take # ownership of the devices. with utils.temporary_chown(src_path): with utils.temporary_chown(dst_path): with open(src_path, "r") as src: with open(dst_path, "w") as dst: data = src.read(min(block_size, left)) while data: if data == EMPTY_BLOCK: dst.seek(block_size, os.SEEK_CUR) left -= block_size bytes_read += block_size skipped_bytes += block_size else: dst.write(data) data_len = len(data) left -= data_len bytes_read += data_len if left <= 0: break data = src.read(min(block_size, left)) greenthread.sleep(0) last_log_time = _log_progress_if_required( left, last_log_time, virtual_size) duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) compression_pct = float(skipped_bytes) / bytes_read * 100 LOG.debug("Finished sparse_copy in %(duration).2f secs, " "%(compression_pct).2f%% reduction in size", {'duration': duration, 'compression_pct': compression_pct}) def _copy_partition(session, src_ref, dst_ref, partition, virtual_size): # Part of disk taken up by MBR virtual_size -= MBR_SIZE_BYTES with vdi_attached_here(session, src_ref, read_only=True) as src: src_path = utils.make_dev_path(src, partition=partition) with vdi_attached_here(session, dst_ref, read_only=False) as dst: dst_path = utils.make_dev_path(dst, 
partition=partition) _write_partition(session, virtual_size, dst) if CONF.xenserver.sparse_copy: _sparse_copy(src_path, dst_path, virtual_size) else: num_blocks = virtual_size / SECTOR_SIZE utils.execute('dd', 'if=%s' % src_path, 'of=%s' % dst_path, 'count=%d' % num_blocks, 'iflag=direct,sync', 'oflag=direct,sync', run_as_root=True) def _mount_filesystem(dev_path, dir): """mounts the device specified by dev_path in dir.""" try: _out, err = utils.execute('mount', '-t', 'ext2,ext3,ext4,reiserfs', dev_path, dir, run_as_root=True) except processutils.ProcessExecutionError as e: err = six.text_type(e) return err def _mounted_processing(device, key, net, metadata): """Callback which runs with the image VDI attached.""" # NB: Partition 1 hardcoded dev_path = utils.make_dev_path(device, partition=1) with utils.tempdir() as tmpdir: # Mount only Linux filesystems, to avoid disturbing NTFS images err = _mount_filesystem(dev_path, tmpdir) if not err: try: # This try block ensures that the umount occurs if not agent.find_guest_agent(tmpdir): # TODO(berrange) passing in a None filename is # rather dubious. We shouldn't be re-implementing # the mount/unmount logic here either, when the # VFSLocalFS impl has direct support for mount # and unmount handling if it were passed a # non-None filename vfs = vfsimpl.VFSLocalFS( imgmodel.LocalFileImage(None, imgmodel.FORMAT_RAW), imgdir=tmpdir) LOG.info(_LI('Manipulating interface files directly')) # for xenapi, we don't 'inject' admin_password here, # it's handled at instance startup time, nor do we # support injecting arbitrary files here. disk.inject_data_into_fs(vfs, key, net, metadata, None, None) finally: utils.execute('umount', dev_path, run_as_root=True) else: LOG.info(_LI('Failed to mount filesystem (expected for ' 'non-linux instances): %s'), err) def ensure_correct_host(session): """Ensure we're connected to the host we're running on. This is the required configuration for anything that uses vdi_attached_here. 
""" this_vm_uuid = get_this_vm_uuid(session) try: session.call_xenapi('VM.get_by_uuid', this_vm_uuid) except session.XenAPI.Failure as exc: if exc.details[0] != 'UUID_INVALID': raise raise Exception(_('This domU must be running on the host ' 'specified by connection_url')) def import_all_migrated_disks(session, instance, import_root=True): root_vdi = None if import_root: root_vdi = _import_migrated_root_disk(session, instance) eph_vdis = _import_migrate_ephemeral_disks(session, instance) return {'root': root_vdi, 'ephemerals': eph_vdis} def _import_migrated_root_disk(session, instance): chain_label = instance['uuid'] vdi_label = instance['name'] return _import_migrated_vhds(session, instance, chain_label, "root", vdi_label) def _import_migrate_ephemeral_disks(session, instance): ephemeral_vdis = {} instance_uuid = instance['uuid'] ephemeral_gb = instance["ephemeral_gb"] disk_sizes = get_ephemeral_disk_sizes(ephemeral_gb) for chain_number, _size in enumerate(disk_sizes, start=1): chain_label = instance_uuid + "_ephemeral_%d" % chain_number vdi_label = "%(name)s ephemeral (%(number)d)" % dict( name=instance['name'], number=chain_number) ephemeral_vdi = _import_migrated_vhds(session, instance, chain_label, "ephemeral", vdi_label) userdevice = 3 + chain_number ephemeral_vdis[str(userdevice)] = ephemeral_vdi return ephemeral_vdis def _import_migrated_vhds(session, instance, chain_label, disk_type, vdi_label): """Move and possibly link VHDs via the XAPI plugin.""" # TODO(johngarbutt) tidy up plugin params imported_vhds = session.call_plugin_serialized( 'migration', 'move_vhds_into_sr', instance_uuid=chain_label, sr_path=get_sr_path(session), uuid_stack=_make_uuid_stack()) # Now we rescan the SR so we find the VHDs scan_default_sr(session) vdi_uuid = imported_vhds['root']['uuid'] vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid) # Set name-label so we can find if we need to clean up a failed migration _set_vdi_info(session, vdi_ref, disk_type, vdi_label, 
disk_type, instance) return {'uuid': vdi_uuid, 'ref': vdi_ref} def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num, ephemeral_number=0): LOG.debug("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d", {'vdi_uuid': vdi_uuid, 'seq_num': seq_num}, instance=instance) chain_label = instance['uuid'] if ephemeral_number: chain_label = instance['uuid'] + "_ephemeral_%d" % ephemeral_number try: # TODO(johngarbutt) tidy up plugin params session.call_plugin_serialized('migration', 'transfer_vhd', instance_uuid=chain_label, host=dest, vdi_uuid=vdi_uuid, sr_path=sr_path, seq_num=seq_num) except session.XenAPI.Failure: msg = "Failed to transfer vhd to new host" LOG.debug(msg, instance=instance, exc_info=True) raise exception.MigrationError(reason=msg) def vm_ref_or_raise(session, instance_name): vm_ref = lookup(session, instance_name) if vm_ref is None: raise exception.InstanceNotFound(instance_id=instance_name) return vm_ref def handle_ipxe_iso(session, instance, cd_vdi, network_info): """iPXE ISOs are a mechanism to allow the customer to roll their own image. To use this feature, a service provider needs to configure the appropriate Nova flags, roll an iPXE ISO, then distribute that image to customers via Glance. NOTE: `mkisofs` is not present by default in the Dom0, so the service provider can either add that package manually to Dom0 or include the `mkisofs` binary in the image itself. 
""" boot_menu_url = CONF.xenserver.ipxe_boot_menu_url if not boot_menu_url: LOG.warning(_LW('ipxe_boot_menu_url not set, user will have to' ' enter URL manually...'), instance=instance) return network_name = CONF.xenserver.ipxe_network_name if not network_name: LOG.warning(_LW('ipxe_network_name not set, user will have to' ' enter IP manually...'), instance=instance) return network = None for vif in network_info: if vif['network']['label'] == network_name: network = vif['network'] break if not network: LOG.warning(_LW("Unable to find network matching '%(network_name)s', " "user will have to enter IP manually..."), {'network_name': network_name}, instance=instance) return sr_path = get_sr_path(session) # Unpack IPv4 network info subnet = [sn for sn in network['subnets'] if sn['version'] == 4][0] ip = subnet['ips'][0] ip_address = ip['address'] netmask = network_model.get_netmask(ip, subnet) gateway = subnet['gateway']['address'] dns = subnet['dns'][0]['address'] try: session.call_plugin_serialized("ipxe", "inject", sr_path, cd_vdi['uuid'], boot_menu_url, ip_address, netmask, gateway, dns, CONF.xenserver.ipxe_mkisofs_cmd) except session.XenAPI.Failure as exc: _type, _method, error = exc.details[:3] if error == 'CommandNotFound': LOG.warning(_LW("ISO creation tool '%s' does not exist."), CONF.xenserver.ipxe_mkisofs_cmd, instance=instance) else: raise def set_other_config_pci(session, vm_ref, params): """Set the pci key of other-config parameter to params.""" other_config = session.call_xenapi("VM.get_other_config", vm_ref) other_config['pci'] = params session.call_xenapi("VM.set_other_config", vm_ref, other_config)
Chapna/EnerWat
refs/heads/master
EnerWat/user/migrations/0005_auto_20150730_0257.py
2
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('user', '0004_auto_20150730_0229'), ] operations = [ migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)), ], ), migrations.AddField( model_name='user', name='is_superuser', field=models.BooleanField(default=False), ), migrations.AddField( model_name='post', name='author', field=models.ForeignKey(to=settings.AUTH_USER_MODEL), ), ]
jacky-young/crosswalk-test-suite
refs/heads/master
misc/webdriver-w3c-tests/element_state/method_test.py
5
# -*- mode: python; fill-column: 100; comment-column: 100; -*-

import os
import sys
import unittest

# Make the parent directory importable so the shared base_test module
# (which provides WebDriverBaseTest) can be found.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

import base_test


class GetElementAttributeTest(base_test.WebDriverBaseTest):
    """W3C WebDriver conformance tests for Get Element Attribute.

    Each test loads a static fixture page from the test web server and
    checks the serialization rules for a particular attribute kind.
    self.driver and self.webserver are provided by WebDriverBaseTest.
    """

    def test_get_element_attribute(self):
        self.driver.get(self.webserver.where_is("element_state/res/element-with-id-attribute.html"))
        el = self.driver.find_element_by_css("div")
        self.assertEqual("myId", el.get_attribute("id"))

    def test_style_attribute(self):
        # The style attribute must round-trip verbatim, not normalized.
        self.driver.get(self.webserver.where_is("element_state/res/element-with-style-attribute.html"))
        el = self.driver.find_element_by_tag_name("div")
        expected_style = """ font-family: \"Gill Sans Extrabold\",Helvetica,sans-serif; line-height: 1.2; font-weight: bold; """
        self.assertEqual(expected_style, el.get_attribute("style"))

    def test_color_serialization_of_style_attribute(self):
        # Colors are serialized in rgba() form.
        self.driver.get(self.webserver.where_is("element_state/res/element-with-color-style-attribute.html"))
        el = self.driver.find_element_by_tag_name("div")
        self.assertEqual("color: rgba(255, 0, 0, 1.0);", el.get_attribute("style"))

    def test_true_if_boolean_attribute_present(self):
        # Boolean attributes serialize to the string "true" when present.
        self.driver.get(self.webserver.where_is("element_state/res/input-with-checked-attribute.html"))
        el = self.driver.find_element_by_tag_name("input")
        self.assertEqual("true", el.get_attribute("checked"))

    def test_none_if_boolean_attribute_absent(self):
        # ...and to null/None when absent.
        self.driver.get(self.webserver.where_is("element_state/res/input-without-checked-attribute.html"))
        el = self.driver.find_element_by_tag_name("input")
        self.assertIsNone(el.get_attribute("checked"))

    def test_option_with_attribute_value(self):
        self.driver.get(self.webserver.where_is("element_state/res/option-with-value-attribute.html"))
        el = self.driver.find_element_by_tag_name("option")
        self.assertEqual("value1", el.get_attribute("value"))

    def test_option_without_value_attribute(self):
        # Without a value attribute, an <option>'s value falls back to its text.
        self.driver.get(self.webserver.where_is("element_state/res/option-without-value-attribute.html"))
        el = self.driver.find_element_by_tag_name("option")
        self.assertEqual("Value 1", el.get_attribute("value"))

    def test_a_href_attribute(self):
        # href is returned as an absolute URL, not the literal attribute text.
        self.driver.get(self.webserver.where_is("element_state/res/a-with-href-attribute.html"))
        el = self.driver.find_element_by_tag_name("a")
        self.assertEqual("http://web-platform.test:8000/path#fragment", el.get_attribute("href"))

    def test_img_src_attribute(self):
        # src is likewise resolved to an absolute URL.
        self.driver.get(self.webserver.where_is("element_state/res/img-with-src-attribute.html"))
        el = self.driver.find_element_by_tag_name("img")
        self.assertEqual("http://web-platform.test:8000/images/blue.png", el.get_attribute("src"))

    def test_custom_attribute(self):
        self.driver.get(self.webserver.where_is("element_state/res/element-with-custom-attribute.html"))
        el = self.driver.find_element_by_tag_name("div")
        self.assertEqual("attribute value", el.get_attribute("webdriver-custom-attribute"))

    def test_attribute_not_present(self):
        self.driver.get(self.webserver.where_is("element_state/res/element-without-attribute.html"))
        el = self.driver.find_element_by_tag_name("div")
        self.assertIsNone(el.get_attribute("class"))


if __name__ == "__main__":
    unittest.main()
wallnerryan/quantum_migrate
refs/heads/master
quantum/tests/unit/nec/test_agent_scheduler.py
4
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from quantum.tests.unit.openvswitch import test_agent_scheduler from quantum.tests.unit.nec import test_nec_plugin class NecAgentSchedulerTestCase( test_agent_scheduler.OvsAgentSchedulerTestCase): plugin_str = test_nec_plugin.PLUGIN_NAME class NecDhcpAgentNotifierTestCase( test_agent_scheduler.OvsDhcpAgentNotifierTestCase): plugin_str = test_nec_plugin.PLUGIN_NAME class NecL3AgentNotifierTestCase( test_agent_scheduler.OvsL3AgentNotifierTestCase): plugin_str = test_nec_plugin.PLUGIN_NAME
maxamillion/ansible-modules-extras
refs/heads/devel
network/f5/bigip_sys_db.py
23
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: bigip_sys_db
short_description: Manage BIG-IP system database variables
description:
  - Manage BIG-IP system database variables
version_added: "2.2"
options:
  key:
    description:
      - The database variable to manipulate.
    required: true
  state:
    description:
      - The state of the variable on the system. When C(present), guarantees
        that an existing variable is set to C(value). When C(reset) sets the
        variable back to the default value. At least one of value and state
        C(reset) are required.
    required: false
    default: present
    choices:
      - present
      - reset
  value:
    description:
      - The value to set the key to. At least one of value and state C(reset)
        are required.
    required: false
notes:
  - Requires the f5-sdk Python package on the host. This is as easy as pip
    install f5-sdk.
  - Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
requirements:
  - f5-sdk
author:
  - Tim Rupp (@caphrim007)
'''

EXAMPLES = '''
- name: Set the boot.quiet DB variable on the BIG-IP
  bigip_sys_db:
      user: "admin"
      password: "secret"
      server: "lb.mydomain.com"
      key: "boot.quiet"
      value: "disable"
  delegate_to: localhost

- name: Disable the initial setup screen
  bigip_sys_db:
      user: "admin"
      password: "secret"
      server: "lb.mydomain.com"
      key: "setup.run"
      value: "false"
  delegate_to: localhost

- name: Reset the initial setup screen
  bigip_sys_db:
      user: "admin"
      password: "secret"
      server: "lb.mydomain.com"
      key: "setup.run"
      state: "reset"
  delegate_to: localhost
'''

RETURN = '''
name:
    description: The key in the system database that was specified
    returned: changed and success
    type: string
    sample: "setup.run"
default_value:
    description: The default value of the key
    returned: changed and success
    type: string
    sample: "true"
value:
    description: The value that you set the key to
    returned: changed and success
    type: string
    sample: "false"
'''

try:
    from f5.bigip import ManagementRoot
    HAS_F5SDK = True
except ImportError:
    HAS_F5SDK = False


class BigIpSysDb(object):
    """Thin wrapper around the f5-sdk sys/db endpoint for one variable.

    Holds the module parameters and an authenticated ManagementRoot
    connection; all REST interaction goes through ``self.api``.
    """

    def __init__(self, *args, **kwargs):
        if not HAS_F5SDK:
            # F5ModuleError comes from ansible.module_utils.f5 (star import
            # at the bottom of the file, per Ansible module convention).
            raise F5ModuleError("The python f5-sdk module is required")

        self.params = kwargs
        self.api = ManagementRoot(kwargs['server'],
                                  kwargs['user'],
                                  kwargs['password'],
                                  port=kwargs['server_port'])

    def flush(self):
        """Apply the requested state and return the module result dict.

        Raises F5ModuleError if state=present was requested without a
        value, or if the device rejects the change.
        """
        result = dict()
        state = self.params['state']
        value = self.params['value']

        if not state == 'reset' and not value:
            raise F5ModuleError(
                "When setting a key, a value must be supplied"
            )

        current = self.read()

        if self.params['check_mode']:
            # BUGFIX: this previously compared the requested string value
            # against the resource *object* (``value == current``), which is
            # never equal, so check mode always reported changed=True. We now
            # compare against the variable's actual value, and evaluate
            # state=reset against the device default.
            if state == "reset":
                changed = current.value != current.defaultValue
            else:
                changed = value != current.value
        else:
            if state == "present":
                changed = self.present()
            elif state == "reset":
                changed = self.reset()
            # Re-read so the reported values reflect the applied change.
            current = self.read()

        result.update(
            name=current.name,
            default_value=current.defaultValue,
            value=current.value
        )
        result.update(dict(changed=changed))
        return result

    def read(self):
        """Load and return the db variable resource named by ``key``."""
        dbs = self.api.tm.sys.dbs.db.load(
            name=self.params['key']
        )
        return dbs

    def present(self):
        """Ensure the variable equals the requested value.

        Returns True if a change was made, False if already in sync.
        Raises F5ModuleError if the device did not accept the new value.
        """
        current = self.read()

        if current.value == self.params['value']:
            return False

        current.update(value=self.params['value'])
        # refresh() re-reads the resource so the verification below sees
        # what the device actually stored.
        current.refresh()

        if current.value != self.params['value']:
            raise F5ModuleError(
                "Failed to set the DB variable"
            )
        return True

    def reset(self):
        """Set the variable back to its factory default value.

        Returns True if a change was made, False if already at default.
        Raises F5ModuleError if the device did not accept the reset.
        """
        current = self.read()

        default = current.defaultValue
        if current.value == default:
            return False

        current.update(value=default)
        current.refresh()

        if current.value != current.defaultValue:
            raise F5ModuleError(
                "Failed to reset the DB variable"
            )
        return True


def main():
    """Ansible entry point: build the arg spec, run, and report."""
    argument_spec = f5_argument_spec()

    meta_args = dict(
        key=dict(required=True),
        state=dict(default='present', choices=['present', 'reset']),
        value=dict(required=False, default=None)
    )
    argument_spec.update(meta_args)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    try:
        obj = BigIpSysDb(check_mode=module.check_mode, **module.params)
        result = obj.flush()
        module.exit_json(**result)
    except F5ModuleError as e:
        module.fail_json(msg=str(e))

# Bottom-of-file star imports are the historical Ansible module pattern;
# they provide AnsibleModule, f5_argument_spec and F5ModuleError.
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *

if __name__ == '__main__':
    main()
40023154/final0627
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/traceback.py
492
import sys def print_exc(file=sys.stderr): exc = __BRYTHON__.exception_stack[-1] file.write(exc.info) if isinstance(exc, SyntaxError): offset = exc.args[1][2] file.write('\n '+offset*' '+'^') file.write('\n'+exc.__name__) if exc.args: file.write(': '+exc.args[0]) file.write('\n') def format_exc(limit=None,chain=True): exc = __BRYTHON__.exception_stack[-1] res = exc.info+'\n'+exc.__name__ if exc.args: res += ': '+exc.args[0] return res+'\n' def format_exception(_type, value, tb, limit=None, chain=True): return ['%s\n' %_type,'%s\n' %value]
ddico/odoo
refs/heads/master
addons/stock_account/wizard/stock_valuation_layer_revaluation.py
1
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import _, api, fields, models from odoo.exceptions import UserError from odoo.tools import float_is_zero class StockValuationLayerRevaluation(models.TransientModel): _name = 'stock.valuation.layer.revaluation' _description = "Wizard model to reavaluate a stock inventory for a product" _check_company_auto = True @api.model def default_get(self, default_fields): res = super().default_get(default_fields) if res.get('product_id'): product = self.env['product.product'].browse(res['product_id']) accounts = product.product_tmpl_id.get_product_accounts() if product.categ_id.property_cost_method == 'standard': raise UserError(_("You cannot revalue a product with a standard cost method.")) if product.quantity_svl <= 0: raise UserError(_("You cannot revalue a product with an empty or negative stock.")) if 'account_journal_id' not in res and 'account_journal_id' in default_fields: res['account_journal_id'] = accounts['stock_journal'].id return res company_id = fields.Many2one('res.company', "Company", readonly=True, required=True) currency_id = fields.Many2one('res.currency', "Currency", related='company_id.currency_id', required=True) product_id = fields.Many2one('product.product', "Related product", required=True, check_company=True) property_valuation = fields.Selection(related='product_id.categ_id.property_valuation') product_uom_name = fields.Char("Unit of Measure", related='product_id.uom_id.name') current_value_svl = fields.Float("Current Value", related="product_id.value_svl") current_quantity_svl = fields.Float("Current Quantity", related="product_id.quantity_svl") added_value = fields.Monetary("Added value", required=True) new_value = fields.Monetary("New value", compute='_compute_new_value') new_value_by_qty = fields.Monetary("New value by quantity", compute='_compute_new_value') reason = fields.Char("Reason", help="Reason of the revaluation") 
account_journal_id = fields.Many2one('account.journal', "Journal", required=True, check_company=True) account_id = fields.Many2one('account.account', "Counterpart Account", domain=[('deprecated', '=', False)], required=True, check_company=True) date = fields.Date("Accounting Date") @api.depends('current_value_svl', 'current_quantity_svl', 'added_value') def _compute_new_value(self): for reval in self: reval.new_value = reval.current_value_svl + reval.added_value if not float_is_zero(reval.current_quantity_svl, precision_rounding=self.product_id.uom_id.rounding): reval.new_value_by_qty = reval.new_value / reval.current_quantity_svl else: reval.new_value_by_qty = 0.0 def action_validate_revaluation(self): """ Revaluate the stock for `self.product_id` in `self.company_id`. - Change the stardard price with the new valuation by product unit. - Create a manual stock valuation layer with the `added_value` of `self`. - Distribute the `added_value` on the the remaining_value of layers still in stock (with a remaining quantity) - If the Inventory Valuation of the product category is automated, create related account move. 
""" self.ensure_one() if self.currency_id.is_zero(self.added_value): raise UserError(_("The added value doesn't have any impact on the stock valuation")) product_id = self.product_id.with_company(self.company_id) remaining_svls = self.env['stock.valuation.layer'].search([ ('product_id', '=', product_id.id), ('remaining_qty', '>', 0), ('company_id', '=', self.company_id.id), ]) # Create a manual stock valuation layer if self.reason: description = _("Manual Stock Valuation: %s.", self.reason) else: description = _("Manual Stock Valuation: No Reason Given.") if product_id.categ_id.property_cost_method == 'average': description += _( " Product cost updated from %(previous)s to %(new_cost)s.", previous=product_id.standard_price, new_cost=product_id.standard_price + self.added_value / self.current_quantity_svl ) revaluation_svl_vals = { 'company_id': self.company_id.id, 'product_id': product_id.id, 'description': description, 'value': self.added_value, 'quantity': 0, } remaining_qty = sum(remaining_svls.mapped('remaining_qty')) remaining_value = self.added_value remaining_value_unit_cost = self.currency_id.round(remaining_value / remaining_qty) for svl in remaining_svls: if float_is_zero(svl.remaining_qty - remaining_qty, precision_rounding=self.product_id.uom_id.rounding): svl.remaining_value += remaining_value else: taken_remaining_value = remaining_value_unit_cost * svl.remaining_qty svl.remaining_value += taken_remaining_value remaining_value -= taken_remaining_value remaining_qty -= svl.remaining_qty revaluation_svl = self.env['stock.valuation.layer'].create(revaluation_svl_vals) # Update the stardard price in case of AVCO if product_id.categ_id.property_cost_method == 'average': product_id.with_context(disable_auto_svl=True).standard_price += self.added_value / self.current_quantity_svl # If the Inventory Valuation of the product category is automated, create related account move. 
if self.property_valuation != 'real_time': return True accounts = product_id.product_tmpl_id.get_product_accounts() if self.added_value < 0: debit_account_id = self.account_id.id credit_account_id = accounts.get('stock_valuation') and accounts['stock_valuation'].id else: debit_account_id = accounts.get('stock_valuation') and accounts['stock_valuation'].id credit_account_id = self.account_id.id move_vals = { 'journal_id': self.account_journal_id.id or accounts['stock_journal'].id, 'company_id': self.company_id.id, 'ref': _("Revaluation of %s", product_id.display_name), 'stock_valuation_layer_ids': [(6, None, [revaluation_svl.id])], 'date': self.date or fields.Date.today(), 'move_type': 'entry', 'line_ids': [(0, 0, { 'name': _('%(user)s changed stock valuation from %(previous)s to %(new_value)s - %(product)s', user=self.env.user.name, previous=self.current_value_svl, new_value=self.current_value_svl + self.added_value, product=product_id.display_name, ), 'account_id': debit_account_id, 'debit': abs(self.added_value), 'credit': 0, 'product_id': product_id.id, }), (0, 0, { 'name': _('%(user)s changed stock valuation from %(previous)s to %(new_value)s - %(product)s', user=self.env.user.name, previous=self.current_value_svl, new_value=self.current_value_svl + self.added_value, product=product_id.display_name, ), 'account_id': credit_account_id, 'debit': 0, 'credit': abs(self.added_value), 'product_id': product_id.id, })], } account_move = self.env['account.move'].create(move_vals) account_move.post() return True
SPKian/Testing2
refs/heads/master
erpnext/patches/v5_1/default_bom.py
103
from __future__ import unicode_literals import frappe def execute(): frappe.db.sql("""Update `tabItem` as item set default_bom = NULL where not exists(select name from `tabBOM` as bom where item.default_bom = bom.name and bom.docstatus =1 )""")
aszlig/LastWatch
refs/heads/master
setup.py
1
#!/usr/bin/env python import sys import subprocess from distutils.core import setup, Command class Test(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): errno = subprocess.call([sys.executable, 'tests.py']) raise SystemExit(errno) setup( name='LastWatch', version='0.4.1', description='Inotify scrobbler for last.fm', author='aszlig', author_email='"^[0-9]+$"@redmoonstudios.de', url='https://redmoonstudios.org/~aszlig/lastfm/', py_modules = ['lastwatch'], scripts=['bin/lastwatch'], requires=['pyinotify', 'pylast', 'mutagen'], cmdclass={'test': Test}, )
ravibhure/ansible
refs/heads/devel
lib/ansible/plugins/action/sros_config.py
79
# # Copyright 2016 Peter Sprygada <psprygada@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import glob import os import re import time from ansible.module_utils.six.moves.urllib.parse import urlsplit from ansible.module_utils._text import to_text from ansible.plugins.action.sros import ActionModule as _ActionModule from ansible.utils.vars import merge_hash PRIVATE_KEYS_RE = re.compile('__.+__') class ActionModule(_ActionModule): def run(self, tmp=None, task_vars=None): if self._task.args.get('src'): try: self._handle_template() except ValueError as exc: return dict(failed=True, msg=exc.message) result = super(ActionModule, self).run(tmp, task_vars) if self._task.args.get('backup') and result.get('__backup__'): # User requested backup and no error occurred in module. # NOTE: If there is a parameter error, _backup key may not be in results. 
filepath = self._write_backup(task_vars['inventory_hostname'], result['__backup__']) result['backup_path'] = filepath # strip out any keys that have two leading and two trailing # underscore characters for key in result.keys(): if PRIVATE_KEYS_RE.match(key): del result[key] return result def _get_working_path(self): cwd = self._loader.get_basedir() if self._task._role is not None: cwd = self._task._role._role_path return cwd def _write_backup(self, host, contents): backup_path = self._get_working_path() + '/backup' if not os.path.exists(backup_path): os.mkdir(backup_path) for fn in glob.glob('%s/%s*' % (backup_path, host)): os.remove(fn) tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time())) filename = '%s/%s_config.%s' % (backup_path, host, tstamp) open(filename, 'w').write(contents) return filename def _handle_template(self): src = self._task.args.get('src') working_path = self._get_working_path() if os.path.isabs(src) or urlsplit('src').scheme: source = src else: source = self._loader.path_dwim_relative(working_path, 'templates', src) if not source: source = self._loader.path_dwim_relative(working_path, src) if not os.path.exists(source): raise ValueError('path specified in src not found') try: with open(source, 'r') as f: template_data = to_text(f.read()) except IOError: return dict(failed=True, msg='unable to load src file') # Create a template search path in the following order: # [working_path, self_role_path, dependent_role_paths, dirname(source)] searchpath = [working_path] if self._task._role is not None: searchpath.append(self._task._role._role_path) if hasattr(self._task, "_block:"): dep_chain = self._task._block.get_dep_chain() if dep_chain is not None: for role in dep_chain: searchpath.append(role._role_path) searchpath.append(os.path.dirname(source)) self._templar.environment.loader.searchpath = searchpath self._task.args['src'] = self._templar.template(template_data)
elelianghh/sqlalchemy
refs/heads/master
test/sql/test_insert.py
8
#! coding:utf-8 from sqlalchemy import Column, Integer, MetaData, String, Table,\ bindparam, exc, func, insert, select, column, text from sqlalchemy.dialects import mysql, postgresql from sqlalchemy.engine import default from sqlalchemy.testing import AssertsCompiledSQL,\ assert_raises_message, fixtures, eq_ from sqlalchemy.sql import crud class _InsertTestBase(object): @classmethod def define_tables(cls, metadata): Table('mytable', metadata, Column('myid', Integer), Column('name', String(30)), Column('description', String(30))) Table('myothertable', metadata, Column('otherid', Integer, primary_key=True), Column('othername', String(30))) Table('table_w_defaults', metadata, Column('id', Integer, primary_key=True), Column('x', Integer, default=10), Column('y', Integer, server_default=text('5')), Column('z', Integer, default=lambda: 10) ) class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_generic_insert_bind_params_all_columns(self): table1 = self.tables.mytable self.assert_compile(insert(table1), 'INSERT INTO mytable (myid, name, description) ' 'VALUES (:myid, :name, :description)') def test_insert_with_values_dict(self): table1 = self.tables.mytable checkparams = { 'myid': 3, 'name': 'jack' } self.assert_compile( insert( table1, dict( myid=3, name='jack')), 'INSERT INTO mytable (myid, name) VALUES (:myid, :name)', checkparams=checkparams) def test_insert_with_values_tuple(self): table1 = self.tables.mytable checkparams = { 'myid': 3, 'name': 'jack', 'description': 'mydescription' } self.assert_compile(insert(table1, (3, 'jack', 'mydescription')), 'INSERT INTO mytable (myid, name, description) ' 'VALUES (:myid, :name, :description)', checkparams=checkparams) def test_insert_with_values_func(self): table1 = self.tables.mytable self.assert_compile(insert(table1, values=dict(myid=func.lala())), 'INSERT INTO mytable (myid) VALUES (lala())') def test_insert_with_user_supplied_bind_params(self): table1 = 
self.tables.mytable values = { table1.c.myid: bindparam('userid'), table1.c.name: bindparam('username') } self.assert_compile( insert( table1, values), 'INSERT INTO mytable (myid, name) VALUES (:userid, :username)') def test_insert_values(self): table1 = self.tables.mytable values1 = {table1.c.myid: bindparam('userid')} values2 = {table1.c.name: bindparam('username')} self.assert_compile( insert( table1, values=values1).values(values2), 'INSERT INTO mytable (myid, name) VALUES (:userid, :username)') def test_prefix_with(self): table1 = self.tables.mytable stmt = table1.insert().\ prefix_with('A', 'B', dialect='mysql').\ prefix_with('C', 'D') self.assert_compile( stmt, 'INSERT C D INTO mytable (myid, name, description) ' 'VALUES (:myid, :name, :description)') self.assert_compile( stmt, 'INSERT A B C D INTO mytable (myid, name, description) ' 'VALUES (%s, %s, %s)', dialect=mysql.dialect()) def test_inline_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=func.foobar())) self.assert_compile(table.insert(values={}, inline=True), 'INSERT INTO sometable (foo) VALUES (foobar())') self.assert_compile( table.insert( inline=True), 'INSERT INTO sometable (foo) VALUES (foobar())', params={}) def test_insert_returning_not_in_default(self): table1 = self.tables.mytable stmt = table1.insert().returning(table1.c.myid) assert_raises_message( exc.CompileError, "RETURNING is not supported by this dialect's statement compiler.", stmt.compile, dialect=default.DefaultDialect() ) def test_insert_from_select_returning(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel).returning( self.tables.myothertable.c.otherid ) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE 
mytable.name = %(name_1)s RETURNING myothertable.otherid", checkparams={"name_1": "foo"}, dialect="postgresql" ) def test_insert_from_select_select(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_from_select_cte_one(self): table1 = self.tables.mytable cte = select([table1.c.name]).where(table1.c.name == 'bar').cte() sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == cte.c.name) ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) WITH anon_1 AS " "(SELECT mytable.name AS name FROM mytable " "WHERE mytable.name = :name_1) " "SELECT mytable.myid, mytable.name FROM mytable, anon_1 " "WHERE mytable.name = anon_1.name", checkparams={"name_1": "bar"} ) def test_insert_from_select_cte_two(self): table1 = self.tables.mytable cte = table1.select().cte("c") stmt = cte.select() ins = table1.insert().from_select(table1.c, stmt) self.assert_compile( ins, "INSERT INTO mytable (myid, name, description) " "WITH c AS (SELECT mytable.myid AS myid, mytable.name AS name, " "mytable.description AS description FROM mytable) " "SELECT c.myid, c.name, c.description FROM c" ) def test_insert_from_select_select_alt_ordering(self): table1 = self.tables.mytable sel = select([table1.c.name, table1.c.myid]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("othername", "otherid"), sel) self.assert_compile( ins, "INSERT INTO myothertable (othername, otherid) " "SELECT mytable.name, mytable.myid FROM mytable " "WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def 
test_insert_from_select_no_defaults(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=func.foobar())) table1 = self.tables.mytable sel = select([table1.c.myid]).where(table1.c.name == 'foo') ins = table.insert().\ from_select(["id"], sel, include_defaults=False) self.assert_compile( ins, "INSERT INTO sometable (id) SELECT mytable.myid " "FROM mytable WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_from_select_with_sql_defaults(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=func.foobar())) table1 = self.tables.mytable sel = select([table1.c.myid]).where(table1.c.name == 'foo') ins = table.insert().\ from_select(["id"], sel) self.assert_compile( ins, "INSERT INTO sometable (id, foo) SELECT " "mytable.myid, foobar() AS foobar_1 " "FROM mytable WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_from_select_with_python_defaults(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=12)) table1 = self.tables.mytable sel = select([table1.c.myid]).where(table1.c.name == 'foo') ins = table.insert().\ from_select(["id"], sel) self.assert_compile( ins, "INSERT INTO sometable (id, foo) SELECT " "mytable.myid, :foo AS anon_1 " "FROM mytable WHERE mytable.name = :name_1", # value filled in at execution time checkparams={"name_1": "foo", "foo": None} ) def test_insert_from_select_override_defaults(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=12)) table1 = self.tables.mytable sel = select( [table1.c.myid, table1.c.myid.label('q')]).where( table1.c.name == 'foo') ins = table.insert().\ from_select(["id", "foo"], sel) self.assert_compile( ins, "INSERT INTO sometable (id, foo) 
SELECT " "mytable.myid, mytable.myid AS q " "FROM mytable WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_from_select_fn_defaults(self): metadata = MetaData() def foo(ctx): return 12 table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=foo)) table1 = self.tables.mytable sel = select( [table1.c.myid]).where( table1.c.name == 'foo') ins = table.insert().\ from_select(["id"], sel) self.assert_compile( ins, "INSERT INTO sometable (id, foo) SELECT " "mytable.myid, :foo AS anon_1 " "FROM mytable WHERE mytable.name = :name_1", # value filled in at execution time checkparams={"name_1": "foo", "foo": None} ) def test_insert_mix_select_values_exception(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) assert_raises_message( exc.InvalidRequestError, "This construct already inserts from a SELECT", ins.values, othername="5" ) def test_insert_mix_values_select_exception(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().values(othername="5") assert_raises_message( exc.InvalidRequestError, "This construct already inserts value expressions", ins.from_select, ("otherid", "othername"), sel ) def test_insert_from_select_table(self): table1 = self.tables.mytable ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), table1) # note we aren't checking the number of columns right now self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable", checkparams={} ) def test_insert_from_select_union(self): mytable = self.tables.mytable name = column('name') description = column('desc') sel = select( [name, mytable.c.description], ).union( select([name, 
description]) ) ins = mytable.insert().\ from_select( [mytable.c.name, mytable.c.description], sel) self.assert_compile( ins, "INSERT INTO mytable (name, description) " "SELECT name, mytable.description FROM mytable " 'UNION SELECT name, "desc"' ) def test_insert_from_select_col_values(self): table1 = self.tables.mytable table2 = self.tables.myothertable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = table2.insert().\ from_select((table2.c.otherid, table2.c.othername), sel) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) class InsertImplicitReturningTest( _InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = postgresql.dialect(implicit_returning=True) def test_insert_select(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = %(name_1)s", checkparams={"name_1": "foo"} ) def test_insert_select_return_defaults(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel).\ return_defaults(self.tables.myothertable.c.otherid) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = %(name_1)s", checkparams={"name_1": "foo"} ) def test_insert_multiple_values(self): ins = self.tables.myothertable.insert().values([ {"othername": "foo"}, {"othername": "bar"}, ]) self.assert_compile( ins, "INSERT INTO myothertable (othername) " "VALUES (%(othername_0)s), " "(%(othername_1)s)", 
checkparams={ 'othername_1': 'bar', 'othername_0': 'foo'} ) def test_insert_multiple_values_return_defaults(self): # TODO: not sure if this should raise an # error or what ins = self.tables.myothertable.insert().values([ {"othername": "foo"}, {"othername": "bar"}, ]).return_defaults(self.tables.myothertable.c.otherid) self.assert_compile( ins, "INSERT INTO myothertable (othername) " "VALUES (%(othername_0)s), " "(%(othername_1)s)", checkparams={ 'othername_1': 'bar', 'othername_0': 'foo'} ) def test_insert_single_list_values(self): ins = self.tables.myothertable.insert().values([ {"othername": "foo"}, ]) self.assert_compile( ins, "INSERT INTO myothertable (othername) " "VALUES (%(othername_0)s)", checkparams={'othername_0': 'foo'} ) def test_insert_single_element_values(self): ins = self.tables.myothertable.insert().values( {"othername": "foo"}, ) self.assert_compile( ins, "INSERT INTO myothertable (othername) " "VALUES (%(othername)s) RETURNING myothertable.otherid", checkparams={'othername': 'foo'} ) class EmptyTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_empty_insert_default(self): table1 = self.tables.mytable stmt = table1.insert().values({}) # hide from 2to3 self.assert_compile(stmt, 'INSERT INTO mytable () VALUES ()') def test_supports_empty_insert_true(self): table1 = self.tables.mytable dialect = default.DefaultDialect() dialect.supports_empty_insert = dialect.supports_default_values = True stmt = table1.insert().values({}) # hide from 2to3 self.assert_compile(stmt, 'INSERT INTO mytable DEFAULT VALUES', dialect=dialect) def test_supports_empty_insert_false(self): table1 = self.tables.mytable dialect = default.DefaultDialect() dialect.supports_empty_insert = dialect.supports_default_values = False stmt = table1.insert().values({}) # hide from 2to3 assert_raises_message( exc.CompileError, "The 'default' dialect with current database version " "settings does not support empty inserts.", stmt.compile, 
dialect=dialect) def _test_insert_with_empty_collection_values(self, collection): table1 = self.tables.mytable ins = table1.insert().values(collection) self.assert_compile(ins, 'INSERT INTO mytable () VALUES ()', checkparams={}) # empty dict populates on next values call self.assert_compile(ins.values(myid=3), 'INSERT INTO mytable (myid) VALUES (:myid)', checkparams={'myid': 3}) def test_insert_with_empty_list_values(self): self._test_insert_with_empty_collection_values([]) def test_insert_with_empty_dict_values(self): self._test_insert_with_empty_collection_values({}) def test_insert_with_empty_tuple_values(self): self._test_insert_with_empty_collection_values(()) class MultirowTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_not_supported(self): table1 = self.tables.mytable dialect = default.DefaultDialect() stmt = table1.insert().values([{'myid': 1}, {'myid': 2}]) assert_raises_message( exc.CompileError, "The 'default' dialect with current database version settings " "does not support in-place multirow inserts.", stmt.compile, dialect=dialect) def test_named(self): table1 = self.tables.mytable values = [ {'myid': 1, 'name': 'a', 'description': 'b'}, {'myid': 2, 'name': 'c', 'description': 'd'}, {'myid': 3, 'name': 'e', 'description': 'f'} ] checkparams = { 'myid_0': 1, 'myid_1': 2, 'myid_2': 3, 'name_0': 'a', 'name_1': 'c', 'name_2': 'e', 'description_0': 'b', 'description_1': 'd', 'description_2': 'f', } dialect = default.DefaultDialect() dialect.supports_multivalues_insert = True self.assert_compile( table1.insert().values(values), 'INSERT INTO mytable (myid, name, description) VALUES ' '(:myid_0, :name_0, :description_0), ' '(:myid_1, :name_1, :description_1), ' '(:myid_2, :name_2, :description_2)', checkparams=checkparams, dialect=dialect) def test_positional(self): table1 = self.tables.mytable values = [ {'myid': 1, 'name': 'a', 'description': 'b'}, {'myid': 2, 'name': 'c', 'description': 'd'}, {'myid': 3, 
'name': 'e', 'description': 'f'} ] checkpositional = (1, 'a', 'b', 2, 'c', 'd', 3, 'e', 'f') dialect = default.DefaultDialect() dialect.supports_multivalues_insert = True dialect.paramstyle = 'format' dialect.positional = True self.assert_compile( table1.insert().values(values), 'INSERT INTO mytable (myid, name, description) VALUES ' '(%s, %s, %s), (%s, %s, %s), (%s, %s, %s)', checkpositional=checkpositional, dialect=dialect) def test_positional_w_defaults(self): table1 = self.tables.table_w_defaults values = [ {'id': 1}, {'id': 2}, {'id': 3} ] checkpositional = (1, None, None, 2, None, None, 3, None, None) dialect = default.DefaultDialect() dialect.supports_multivalues_insert = True dialect.paramstyle = 'format' dialect.positional = True self.assert_compile( table1.insert().values(values), "INSERT INTO table_w_defaults (id, x, z) VALUES " "(%s, %s, %s), (%s, %s, %s), (%s, %s, %s)", checkpositional=checkpositional, check_prefetch=[ table1.c.x, table1.c.z, crud._multiparam_column(table1.c.x, 0), crud._multiparam_column(table1.c.z, 0), crud._multiparam_column(table1.c.x, 1), crud._multiparam_column(table1.c.z, 1) ], dialect=dialect) def test_inline_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, default=func.foobar())) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 'plainfoo'}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', 'foo_1': 'plainfoo', } self.assert_compile( table.insert().values(values), 'INSERT INTO sometable (id, data, foo) VALUES ' '(%(id_0)s, %(data_0)s, foobar()), ' '(%(id_1)s, %(data_1)s, %(foo_1)s), ' '(%(id_2)s, %(data_2)s, foobar())', checkparams=checkparams, dialect=postgresql.dialect()) def test_python_scalar_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), 
Column('data', String), Column('foo', Integer, default=10)) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 15}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', 'foo': None, # evaluated later 'foo_1': 15, 'foo_2': None # evaluated later } stmt = table.insert().values(values) eq_( dict([ (k, v.type._type_affinity) for (k, v) in stmt.compile(dialect=postgresql.dialect()).binds.items()]), { 'foo': Integer, 'data_2': String, 'id_0': Integer, 'id_2': Integer, 'foo_1': Integer, 'data_1': String, 'id_1': Integer, 'foo_2': Integer, 'data_0': String} ) self.assert_compile( stmt, 'INSERT INTO sometable (id, data, foo) VALUES ' '(%(id_0)s, %(data_0)s, %(foo)s), ' '(%(id_1)s, %(data_1)s, %(foo_1)s), ' '(%(id_2)s, %(data_2)s, %(foo_2)s)', checkparams=checkparams, dialect=postgresql.dialect()) def test_python_fn_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, default=lambda: 10)) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 15}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', 'foo': None, # evaluated later 'foo_1': 15, 'foo_2': None, # evaluated later } stmt = table.insert().values(values) eq_( dict([ (k, v.type._type_affinity) for (k, v) in stmt.compile(dialect=postgresql.dialect()).binds.items()]), { 'foo': Integer, 'data_2': String, 'id_0': Integer, 'id_2': Integer, 'foo_1': Integer, 'data_1': String, 'id_1': Integer, 'foo_2': Integer, 'data_0': String} ) self.assert_compile( stmt, "INSERT INTO sometable (id, data, foo) VALUES " "(%(id_0)s, %(data_0)s, %(foo)s), " "(%(id_1)s, %(data_1)s, %(foo_1)s), " "(%(id_2)s, %(data_2)s, %(foo_2)s)", checkparams=checkparams, dialect=postgresql.dialect()) def test_sql_functions(self): metadata = 
MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer)) values = [ {"id": 1, "data": "foo", "foo": func.foob()}, {"id": 2, "data": "bar", "foo": func.foob()}, {"id": 3, "data": "bar", "foo": func.bar()}, {"id": 4, "data": "bar", "foo": 15}, {"id": 5, "data": "bar", "foo": func.foob()}, ] checkparams = { 'id_0': 1, 'data_0': 'foo', 'id_1': 2, 'data_1': 'bar', 'id_2': 3, 'data_2': 'bar', 'id_3': 4, 'data_3': 'bar', 'foo_3': 15, 'id_4': 5, 'data_4': 'bar' } self.assert_compile( table.insert().values(values), "INSERT INTO sometable (id, data, foo) VALUES " "(%(id_0)s, %(data_0)s, foob()), " "(%(id_1)s, %(data_1)s, foob()), " "(%(id_2)s, %(data_2)s, bar()), " "(%(id_3)s, %(data_3)s, %(foo_3)s), " "(%(id_4)s, %(data_4)s, foob())", checkparams=checkparams, dialect=postgresql.dialect()) def test_server_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, server_default=func.foobar())) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 'plainfoo'}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', } self.assert_compile( table.insert().values(values), 'INSERT INTO sometable (id, data) VALUES ' '(%(id_0)s, %(data_0)s), ' '(%(id_1)s, %(data_1)s), ' '(%(id_2)s, %(data_2)s)', checkparams=checkparams, dialect=postgresql.dialect()) def test_server_default_absent_value(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, server_default=func.foobar())) values = [ {'id': 1, 'data': 'data1', 'foo': 'plainfoo'}, {'id': 2, 'data': 'data2'}, {'id': 3, 'data': 'data3', 'foo': 'otherfoo'}, ] assert_raises_message( exc.CompileError, "INSERT value for column sometable.foo is explicitly rendered " "as 
a boundparameter in the VALUES clause; a Python-side value or " "SQL expression is required", table.insert().values(values).compile )
devs1991/test_edx_docmode
refs/heads/master
common/djangoapps/util/memcache.py
251
""" This module provides a KEY_FUNCTION suitable for use with a memcache backend so that we can cache any keys, not just ones that memcache would ordinarily accept """ from django.utils.encoding import smart_str import hashlib import urllib def fasthash(string): """ Hashes `string` into a string representation of a 128-bit digest. """ md4 = hashlib.new("md4") md4.update(string) return md4.hexdigest() def cleaned_string(val): """ Converts `val` to unicode and URL-encodes special characters (including quotes and spaces) """ return urllib.quote_plus(smart_str(val)) def safe_key(key, key_prefix, version): """ Given a `key`, `key_prefix`, and `version`, return a key that is safe to use with memcache. `key`, `key_prefix`, and `version` can be numbers, strings, or unicode. """ # Clean for whitespace and control characters, which # cause memcache to raise an exception key = cleaned_string(key) key_prefix = cleaned_string(key_prefix) version = cleaned_string(version) # Attempt to combine the prefix, version, and key combined = ":".join([key_prefix, version, key]) # If the total length is too long for memcache, hash it if len(combined) > 250: combined = fasthash(combined) # Return the result return combined
lj2007331/lnmp
refs/heads/master
include/check_port.py
7
#!/usr/bin/env python import sys,os,socket def IsOpen(ip,port): socket.setdefaulttimeout(5) s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) try: s.connect((ip,int(port))) s.shutdown(2) print True except: print False if __name__ == '__main__': IsOpen(sys.argv[1],int(sys.argv[2]))
gmr/apiary
refs/heads/master
apiary/mappers/__init__.py
1
""" Mappers are SQLAlchemy Data Objects """ from sqlalchemy.ext.declarative import declarative_base Base = declarative_base()
femmerling/DirMaker
refs/heads/master
box/lib/python2.7/site-packages/migrate/versioning/util/__init__.py
58
#!/usr/bin/env python # -*- coding: utf-8 -*- """.. currentmodule:: migrate.versioning.util""" import warnings import logging from decorator import decorator from pkg_resources import EntryPoint from sqlalchemy import create_engine from sqlalchemy.engine import Engine from sqlalchemy.pool import StaticPool from migrate import exceptions from migrate.versioning.util.keyedinstance import KeyedInstance from migrate.versioning.util.importpath import import_path log = logging.getLogger(__name__) def load_model(dotted_name): """Import module and use module-level variable". :param dotted_name: path to model in form of string: ``some.python.module:Class`` .. versionchanged:: 0.5.4 """ if isinstance(dotted_name, basestring): if ':' not in dotted_name: # backwards compatibility warnings.warn('model should be in form of module.model:User ' 'and not module.model.User', exceptions.MigrateDeprecationWarning) dotted_name = ':'.join(dotted_name.rsplit('.', 1)) return EntryPoint.parse('x=%s' % dotted_name).load(False) else: # Assume it's already loaded. return dotted_name def asbool(obj): """Do everything to use object as bool""" if isinstance(obj, basestring): obj = obj.strip().lower() if obj in ['true', 'yes', 'on', 'y', 't', '1']: return True elif obj in ['false', 'no', 'off', 'n', 'f', '0']: return False else: raise ValueError("String is not true/false: %r" % obj) if obj in (True, False): return bool(obj) else: raise ValueError("String is not true/false: %r" % obj) def guess_obj_type(obj): """Do everything to guess object type from string Tries to convert to `int`, `bool` and finally returns if not succeded. .. versionadded: 0.5.4 """ result = None try: result = int(obj) except: pass if result is None: try: result = asbool(obj) except: pass if result is not None: return result else: return obj @decorator def catch_known_errors(f, *a, **kw): """Decorator that catches known api errors .. 
versionadded: 0.5.4 """ try: return f(*a, **kw) except exceptions.PathFoundError, e: raise exceptions.KnownError("The path %s already exists" % e.args[0]) def construct_engine(engine, **opts): """.. versionadded:: 0.5.4 Constructs and returns SQLAlchemy engine. Currently, there are 2 ways to pass create_engine options to :mod:`migrate.versioning.api` functions: :param engine: connection string or a existing engine :param engine_dict: python dictionary of options to pass to `create_engine` :param engine_arg_*: keyword parameters to pass to `create_engine` (evaluated with :func:`migrate.versioning.util.guess_obj_type`) :type engine_dict: dict :type engine: string or Engine instance :type engine_arg_*: string :returns: SQLAlchemy Engine .. note:: keyword parameters override ``engine_dict`` values. """ if isinstance(engine, Engine): return engine elif not isinstance(engine, basestring): raise ValueError("you need to pass either an existing engine or a database uri") # get options for create_engine if opts.get('engine_dict') and isinstance(opts['engine_dict'], dict): kwargs = opts['engine_dict'] else: kwargs = dict() # DEPRECATED: handle echo the old way echo = asbool(opts.get('echo', False)) if echo: warnings.warn('echo=True parameter is deprecated, pass ' 'engine_arg_echo=True or engine_dict={"echo": True}', exceptions.MigrateDeprecationWarning) kwargs['echo'] = echo # parse keyword arguments for key, value in opts.iteritems(): if key.startswith('engine_arg_'): kwargs[key[11:]] = guess_obj_type(value) log.debug('Constructing engine') # TODO: return create_engine(engine, poolclass=StaticPool, **kwargs) # seems like 0.5.x branch does not work with engine.dispose and staticpool return create_engine(engine, **kwargs) @decorator def with_engine(f, *a, **kw): """Decorator for :mod:`migrate.versioning.api` functions to safely close resources after function usage. Passes engine parameters to :func:`construct_engine` and resulting parameter is available as kw['engine']. 
Engine is disposed after wrapped function is executed. .. versionadded: 0.6.0 """ url = a[0] engine = construct_engine(url, **kw) try: kw['engine'] = engine return f(*a, **kw) finally: if isinstance(engine, Engine): log.debug('Disposing SQLAlchemy engine %s', engine) engine.dispose() class Memoize: """Memoize(fn) - an instance which acts like fn but memoizes its arguments Will only work on functions with non-mutable arguments ActiveState Code 52201 """ def __init__(self, fn): self.fn = fn self.memo = {} def __call__(self, *args): if not self.memo.has_key(args): self.memo[args] = self.fn(*args) return self.memo[args]
xczswt1993/redis-py
refs/heads/master
docs/conf.py
48
# -*- coding: utf-8 -*- # # redis-py documentation build configuration file, created by # sphinx-quickstart on Fri Feb 8 00:47:08 2013. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'redis-py' copyright = u'2013, Andy McCurdy, Mahdi Yusuf' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2.7.2' # The full version, including alpha/beta/rc tags. release = '2.7.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'redis-pydoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 
#'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'redis-py.tex', u'redis-py Documentation', u'Andy McCurdy, Mahdi Yusuf', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'redis-py', u'redis-py Documentation', [u'Andy McCurdy, Mahdi Yusuf'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'redis-py', u'redis-py Documentation', u'Andy McCurdy, Mahdi Yusuf', 'redis-py', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
vvuk/angle
refs/heads/master
src/tests/deqp_support/generate_case_lists.py
4
#!/usr/bin/python # # Copyright 2015 The ANGLE Project Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # # generate_case_lists.py: # Helper script for updating the dEQP case list files, stored in the repo. # Generally only used when the dEQP config changes, or when we roll dEQP. import subprocess import sys import os import shutil import gzip # TODO(jmadill): other platforms os_suffix = '.exe' build_dir = os.path.join('build', 'Debug_x64') def run_deqp(deqp_exe): subprocess.call([deqp_exe, '--deqp-runmode=txt-caselist', '--deqp-gl-context-type=null']) # This stuff is all hard-coded for now. If we need more versatility we can # make some options into command line arguments with default values. script_dir = os.path.dirname(sys.argv[0]) path_to_deqp_exe = os.path.join('..', '..', build_dir) deqp_data_path = os.path.join('third_party', 'deqp', 'data') os.chdir(os.path.join(script_dir, '..')) run_deqp(os.path.join(path_to_deqp_exe, 'angle_deqp_gles2_tests' + os_suffix)) run_deqp(os.path.join(path_to_deqp_exe, 'angle_deqp_gles3_tests' + os_suffix)) run_deqp(os.path.join(path_to_deqp_exe, 'angle_deqp_egl_tests' + os_suffix)) def compress_case_list(case_file): with open(os.path.join(deqp_data_path, case_file + '.txt')) as in_handle: data = in_handle.read() in_handle.close() with gzip.open(os.path.join('deqp_support', case_file + '.txt.gz'), 'wb') as out_handle: out_handle.write(data) out_handle.close() compress_case_list('dEQP-GLES2-cases') compress_case_list('dEQP-GLES3-cases') compress_case_list('dEQP-EGL-cases')
SravanthiSinha/edx-platform
refs/heads/master
common/lib/xmodule/xmodule/library_tools.py
154
""" XBlock runtime services for LibraryContentModule """ from django.core.exceptions import PermissionDenied from opaque_keys.edx.locator import LibraryLocator, LibraryUsageLocator from search.search_engine_base import SearchEngine from xmodule.library_content_module import ANY_CAPA_TYPE_VALUE from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.exceptions import ItemNotFoundError from xmodule.capa_module import CapaDescriptor def normalize_key_for_search(library_key): """ Normalizes library key for use with search indexing """ return library_key.replace(version_guid=None, branch=None) class LibraryToolsService(object): """ Service that allows LibraryContentModule to interact with libraries in the modulestore. """ def __init__(self, modulestore): self.store = modulestore def _get_library(self, library_key): """ Given a library key like "library-v1:ProblemX+PR0B", return the 'library' XBlock with meta-information about the library. A specific version may be specified. Returns None on error. """ if not isinstance(library_key, LibraryLocator): library_key = LibraryLocator.from_string(library_key) try: return self.store.get_library( library_key, remove_version=False, remove_branch=False, head_validation=False ) except ItemNotFoundError: return None def get_library_version(self, lib_key): """ Get the version (an ObjectID) of the given library. Returns None if the library does not exist. """ library = self._get_library(lib_key) if library: # We need to know the library's version so ensure it's set in library.location.library_key.version_guid assert library.location.library_key.version_guid is not None return library.location.library_key.version_guid return None def create_block_analytics_summary(self, course_key, block_keys): """ Given a CourseKey and a list of (block_type, block_id) pairs, prepare the JSON-ready metadata needed for analytics logging. 
This is [ {"usage_key": x, "original_usage_key": y, "original_usage_version": z, "descendants": [...]} ] where the main list contains all top-level blocks, and descendants contains a *flat* list of all descendants of the top level blocks, if any. """ def summarize_block(usage_key): """ Basic information about the given block """ orig_key, orig_version = self.store.get_block_original_usage(usage_key) return { "usage_key": unicode(usage_key), "original_usage_key": unicode(orig_key) if orig_key else None, "original_usage_version": unicode(orig_version) if orig_version else None, } result_json = [] for block_key in block_keys: key = course_key.make_usage_key(*block_key) info = summarize_block(key) info['descendants'] = [] try: block = self.store.get_item(key, depth=None) # Load the item and all descendants children = list(getattr(block, "children", [])) while children: child_key = children.pop() child = self.store.get_item(child_key) info['descendants'].append(summarize_block(child_key)) children.extend(getattr(child, "children", [])) except ItemNotFoundError: pass # The block has been deleted result_json.append(info) return result_json def _problem_type_filter(self, library, capa_type): """ Filters library children by capa type""" search_engine = SearchEngine.get_search_engine(index="library_index") if search_engine: filter_clause = { "library": unicode(normalize_key_for_search(library.location.library_key)), "content_type": CapaDescriptor.INDEX_CONTENT_TYPE, "problem_types": capa_type } search_result = search_engine.search(field_dictionary=filter_clause) results = search_result.get('results', []) return [LibraryUsageLocator.from_string(item['data']['id']) for item in results] else: return [key for key in library.children if self._filter_child(key, capa_type)] def _filter_child(self, usage_key, capa_type): """ Filters children by CAPA problem type, if configured """ if usage_key.block_type != "problem": return False descriptor = self.store.get_item(usage_key, depth=0) 
assert isinstance(descriptor, CapaDescriptor) return capa_type in descriptor.problem_types def can_use_library_content(self, block): """ Determines whether a modulestore holding a course_id supports libraries. """ return self.store.check_supports(block.location.course_key, 'copy_from_template') def update_children(self, dest_block, user_id, user_perms=None, version=None): """ This method is to be used when the library that a LibraryContentModule references has been updated. It will re-fetch all matching blocks from the libraries, and copy them as children of dest_block. The children will be given new block_ids, but the definition ID used should be the exact same definition ID used in the library. This method will update dest_block's 'source_library_version' field to store the version number of the libraries used, so we easily determine if dest_block is up to date or not. """ if user_perms and not user_perms.can_write(dest_block.location.course_key): raise PermissionDenied() if not dest_block.source_library_id: dest_block.source_library_version = "" return source_blocks = [] library_key = dest_block.source_library_key if version: library_key = library_key.replace(branch=ModuleStoreEnum.BranchName.library, version_guid=version) library = self._get_library(library_key) if library is None: raise ValueError("Requested library not found.") if user_perms and not user_perms.can_read(library_key): raise PermissionDenied() filter_children = (dest_block.capa_type != ANY_CAPA_TYPE_VALUE) if filter_children: # Apply simple filtering based on CAPA problem types: source_blocks.extend(self._problem_type_filter(library, dest_block.capa_type)) else: source_blocks.extend(library.children) with self.store.bulk_operations(dest_block.location.course_key): dest_block.source_library_version = unicode(library.location.library_key.version_guid) self.store.update_item(dest_block, user_id) head_validation = not version dest_block.children = self.store.copy_from_template( source_blocks, 
dest_block.location, user_id, head_validation=head_validation ) # ^-- copy_from_template updates the children in the DB # but we must also set .children here to avoid overwriting the DB again def list_available_libraries(self): """ List all known libraries. Returns tuples of (LibraryLocator, display_name) """ return [ (lib.location.library_key.replace(version_guid=None, branch=None), lib.display_name) for lib in self.store.get_libraries() ]
prakxys/flask
refs/heads/master
Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/flask/testsuite/test_apps/lib/python2.5/site-packages/site_package/__init__.py
1799
import flask app = flask.Flask(__name__)
EternalDeiwos/Hivemind
refs/heads/master
settings.py
2
""" Django settings for Hivemind project. Generated by 'django-admin startproject' using Django 1.8.5. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '7w@1h+&r#a54w)%gi*f$k0hinhnk8z%-ccix8a@j46^335(&&e' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'Hivemind.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'Hivemind.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases 
DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/'
Acidburn0zzz/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/third_party/h2/test/test_config.py
39
# -*- coding: utf-8 -*- """ test_config ~~~~~~~~~~~ Test the configuration object. """ import logging import pytest import h2.config class TestH2Config(object): """ Tests of the H2 config object. """ def test_defaults(self): """ The default values of the HTTP/2 config object are sensible. """ config = h2.config.H2Configuration() assert config.client_side assert config.header_encoding is None assert isinstance(config.logger, h2.config.DummyLogger) boolean_config_options = [ 'client_side', 'validate_outbound_headers', 'normalize_outbound_headers', 'validate_inbound_headers', 'normalize_inbound_headers', ] @pytest.mark.parametrize('option_name', boolean_config_options) @pytest.mark.parametrize('value', [None, 'False', 1]) def test_boolean_config_options_reject_non_bools_init( self, option_name, value ): """ The boolean config options raise an error if you try to set a value that isn't a boolean via the initializer. """ with pytest.raises(ValueError): h2.config.H2Configuration(**{option_name: value}) @pytest.mark.parametrize('option_name', boolean_config_options) @pytest.mark.parametrize('value', [None, 'False', 1]) def test_boolean_config_options_reject_non_bools_attr( self, option_name, value ): """ The boolean config options raise an error if you try to set a value that isn't a boolean via attribute setter. """ config = h2.config.H2Configuration() with pytest.raises(ValueError): setattr(config, option_name, value) @pytest.mark.parametrize('option_name', boolean_config_options) @pytest.mark.parametrize('value', [True, False]) def test_boolean_config_option_is_reflected_init(self, option_name, value): """ The value of the boolean config options, when set, is reflected in the value via the initializer. 
""" config = h2.config.H2Configuration(**{option_name: value}) assert getattr(config, option_name) == value @pytest.mark.parametrize('option_name', boolean_config_options) @pytest.mark.parametrize('value', [True, False]) def test_boolean_config_option_is_reflected_attr(self, option_name, value): """ The value of the boolean config options, when set, is reflected in the value via attribute setter. """ config = h2.config.H2Configuration() setattr(config, option_name, value) assert getattr(config, option_name) == value @pytest.mark.parametrize('header_encoding', [True, 1, object()]) def test_header_encoding_must_be_false_str_none_init( self, header_encoding ): """ The value of the ``header_encoding`` setting must be False, a string, or None via the initializer. """ with pytest.raises(ValueError): h2.config.H2Configuration(header_encoding=header_encoding) @pytest.mark.parametrize('header_encoding', [True, 1, object()]) def test_header_encoding_must_be_false_str_none_attr( self, header_encoding ): """ The value of the ``header_encoding`` setting must be False, a string, or None via attribute setter. """ config = h2.config.H2Configuration() with pytest.raises(ValueError): config.header_encoding = header_encoding @pytest.mark.parametrize('header_encoding', [False, 'ascii', None]) def test_header_encoding_is_reflected_init(self, header_encoding): """ The value of ``header_encoding``, when set, is reflected in the value via the initializer. """ config = h2.config.H2Configuration(header_encoding=header_encoding) assert config.header_encoding == header_encoding @pytest.mark.parametrize('header_encoding', [False, 'ascii', None]) def test_header_encoding_is_reflected_attr(self, header_encoding): """ The value of ``header_encoding``, when set, is reflected in the value via the attribute setter. 
""" config = h2.config.H2Configuration() config.header_encoding = header_encoding assert config.header_encoding == header_encoding def test_logger_instance_is_reflected(self): """ The value of ``logger``, when set, is reflected in the value. """ logger = logging.Logger('hyper-h2.test') config = h2.config.H2Configuration() config.logger = logger assert config.logger is logger
mwgamera/u413
refs/heads/master
login.py
2
'''u413 - an open-source BBS/terminal/PI-themed forum Copyright (C) 2012 PiMaster Copyright (C) 2012 EnKrypt This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.''' import command import database as db import user def login_func(args,u413): #check for special cases if u413.user.name!="Guest": u413.type("You are logged in as "+u413.user.name+'.') return params=args.split(' ') #LOGIN already requested continuation if "step" in u413.cmddata: if args=="": u413.type("Action cancelled.") u413.set_context("") #USERNAME> if u413.cmddata["step"]==1: u413.cmddata["username"]=params[0] u413.cmddata["step"]=2 u413.type("Enter your password:") u413.set_context("PASSWORD") u413.use_password() u413.continue_cmd() #PASSWORD> elif u413.cmddata["step"]==2: if u413.user.login(u413.cmddata["username"],params[0]): u413.type("You are now logged in as "+u413.user.name+'.') else: u413.type("Invalid username/password.") u413.set_context("") #else left out because it's impossible #First use of LOGIN else: #LOGIN if len(args)==0: u413.cmddata["step"]=1 u413.type("Enter your username:") u413.set_context("USERNAME") u413.continue_cmd() #LOGIN username elif len(params)==1: u413.cmddata["step"]=2 u413.cmddata["username"]=params[0] u413.type("Enter your password:") u413.set_context("PASSWORD") u413.use_password() u413.continue_cmd() #LOGIN username password [ignored args] else: if u413.user.login(params[0],params[1]): 
u413.type("You are now logged in as "+u413.user.name+'.') else: u413.type("Invalid username/password.") command.Command("LOGIN","[username] [password]",{"username":"Your username","password":"Your password"},"Logs a user onto U413",login_func,0)
with-git/tensorflow
refs/heads/master
tensorflow/python/util/deprecation_test.py
21
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Deprecation tests.""" # pylint: disable=unused-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import deprecation class DeprecationTest(test.TestCase): @test.mock.patch.object(logging, "warning", autospec=True) def test_silence(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(): pass _fn() self.assertEqual(1, mock_warning.call_count) with deprecation.silence(): _fn() self.assertEqual(1, mock_warning.call_count) _fn() self.assertEqual(2, mock_warning.call_count) def _assert_subset(self, expected_subset, actual_set): self.assertTrue( actual_set.issuperset(expected_subset), msg="%s is not a superset of %s." % (actual_set, expected_subset)) def test_deprecated_illegal_args(self): instructions = "This is how you update..." 
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated("", instructions) with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated("07-04-2016", instructions) date = "2016-07-04" with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated(date, None) with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated(date, "") @test.mock.patch.object(logging, "warning", autospec=True) def test_no_date(self, mock_warning): date = None instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(arg0, arg1): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. Returns: Sum of args. """ return arg0 + arg1 self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed in a future version." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n" "\nReturns:" "\n Sum of args." % instructions, _fn.__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches( args[0], r"deprecated and will be removed") self._assert_subset(set(["in a future version", instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(arg0, arg1): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. Returns: Sum of args. """ return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n" "\nReturns:" "\n Sum of args." 
% (date, instructions), _fn.__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(arg0, arg1): """fn doc.""" return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(arg0, arg1): return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "DEPRECATED FUNCTION" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn issues log warning. 
self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_instance_fn_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. Returns: Sum of args. """ return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n" "\nReturns:" "\n Sum of args." % (date, instructions), getattr(_Object, "_fn").__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _Object()._fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_instance_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): """fn doc.""" return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), getattr(_Object, "_fn").__doc__) # Assert calling new fn issues log warning. 
self.assertEqual(3, _Object()._fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_instance_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual( "DEPRECATED FUNCTION" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), getattr(_Object, "_fn").__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _Object()._fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) def test_prop_wrong_order(self): with self.assertRaisesRegexp( ValueError, "make sure @property appears before @deprecated in your source code"): # pylint: disable=unused-variable class _Object(object): def __init(self): pass @deprecation.deprecated("2016-07-04", "Instructions.") @property def _prop(self): return "prop_wrong_order" @test.mock.patch.object(logging, "warning", autospec=True) def test_prop_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @property @deprecation.deprecated(date, instructions) def _prop(self): """prop doc. Returns: String. """ return "prop_with_doc" # Assert function docs are properly updated. self.assertEqual( "prop doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." 
"\nInstructions for updating:" "\n%s" "\n" "\nReturns:" "\n String." % (date, instructions), getattr(_Object, "_prop").__doc__) # Assert calling new fn issues log warning. self.assertEqual("prop_with_doc", _Object()._prop) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_prop_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @property @deprecation.deprecated(date, instructions) def _prop(self): return "prop_no_doc" # Assert function docs are properly updated. self.assertEqual( "DEPRECATED FUNCTION" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), getattr(_Object, "_prop").__doc__) # Assert calling new fn issues log warning. self.assertEqual("prop_no_doc", _Object()._prop) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) class DeprecatedArgsTest(test.TestCase): def _assert_subset(self, expected_subset, actual_set): self.assertTrue( actual_set.issuperset(expected_subset), msg="%s is not a superset of %s." % (actual_set, expected_subset)) def test_deprecated_illegal_args(self): instructions = "This is how you update..." 
date = "2016-07-04" with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated_args("", instructions, "deprecated") with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated_args("07-04-2016", instructions, "deprecated") with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated_args(date, None, "deprecated") with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated_args(date, "", "deprecated") with self.assertRaisesRegexp(ValueError, "argument"): deprecation.deprecated_args(date, instructions) def test_deprecated_missing_args(self): date = "2016-07-04" instructions = "This is how you update..." def _fn(arg0, arg1, deprecated=None): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert calls without the deprecated argument log nothing. with self.assertRaisesRegexp(ValueError, "not present.*\\['missing'\\]"): deprecation.deprecated_args(date, instructions, "missing")(_fn) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, deprecated=True): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. deprecated: Deprecated! Returns: Sum of args. """ return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated arguments)" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n deprecated: Deprecated!" "\n" "\nReturns:" "\n Sum of args." % (date, instructions), _fn.__doc__) # Assert calls without the deprecated argument log nothing. self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. 
self.assertEqual(3, _fn(1, 2, True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, deprecated=True): """fn doc.""" return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated arguments)" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__) # Assert calls without the deprecated argument log nothing. self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. self.assertEqual(3, _fn(1, 2, True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, deprecated=True): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "DEPRECATED FUNCTION ARGUMENTS" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), _fn.__doc__) # Assert calls without the deprecated argument log nothing. 
self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. self.assertEqual(3, _fn(1, 2, True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_varargs(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, *deprecated): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert calls without the deprecated argument log nothing. self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. self.assertEqual(3, _fn(1, 2, True, False)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_kwargs(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, **deprecated): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert calls without the deprecated argument log nothing. self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. 
self.assertEqual(3, _fn(1, 2, a=True, b=False)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_positional_and_named(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "d1", "d2") def _fn(arg0, d1=None, arg1=2, d2=None): return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1 # Assert calls without the deprecated arguments log nothing. self.assertEqual(2, _fn(1, arg1=2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated arguments log warnings. self.assertEqual(2, _fn(1, None, 2, d2=False)) self.assertEqual(2, mock_warning.call_count) (args1, _) = mock_warning.call_args_list[0] self.assertRegexpMatches(args1[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions, "d1"]), set(args1[1:])) (args2, _) = mock_warning.call_args_list[1] self.assertRegexpMatches(args2[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions, "d2"]), set(args2[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_positional_and_named_with_ok_vals(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, ("d1", None), ("d2", "my_ok_val")) def _fn(arg0, d1=None, arg1=2, d2=None): return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1 # Assert calls without the deprecated arguments log nothing. self.assertEqual(2, _fn(1, arg1=2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated arguments log warnings. 
self.assertEqual(2, _fn(1, False, 2, d2=False)) self.assertEqual(2, mock_warning.call_count) (args1, _) = mock_warning.call_args_list[0] self.assertRegexpMatches(args1[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions, "d1"]), set(args1[1:])) (args2, _) = mock_warning.call_args_list[1] self.assertRegexpMatches(args2[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions, "d2"]), set(args2[1:])) # Assert calls with the deprecated arguments don't log warnings if # the value matches the 'ok_val'. mock_warning.reset_mock() self.assertEqual(3, _fn(1, None, 2, d2="my_ok_val")) self.assertEqual(0, mock_warning.call_count) class DeprecatedArgValuesTest(test.TestCase): def _assert_subset(self, expected_subset, actual_set): self.assertTrue( actual_set.issuperset(expected_subset), msg="%s is not a superset of %s." % (actual_set, expected_subset)) def test_deprecated_illegal_args(self): instructions = "This is how you update..." with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated_arg_values("", instructions, deprecated=True) with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated_arg_values( "07-04-2016", instructions, deprecated=True) date = "2016-07-04" with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated_arg_values(date, None, deprecated=True) with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated_arg_values(date, "", deprecated=True) with self.assertRaisesRegexp(ValueError, "argument", deprecated=True): deprecation.deprecated_arg_values(date, instructions) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. 
deprecated: Deprecated! Returns: Sum of args. """ return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated arguments)" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n deprecated: Deprecated!" "\n" "\nReturns:" "\n Sum of args." % (date, instructions), _fn.__doc__) # Assert calling new fn with non-deprecated value logs nothing. self.assertEqual(3, _fn(1, 2, deprecated=False)) self.assertEqual(0, mock_warning.call_count) # Assert calling new fn with deprecated value issues log warning. self.assertEqual(3, _fn(1, 2, deprecated=True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) # Assert calling new fn with default deprecated value issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(2, mock_warning.call_count) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): """fn doc.""" return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated arguments)" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn with non-deprecated value logs nothing. self.assertEqual(3, _fn(1, 2, deprecated=False)) self.assertEqual(0, mock_warning.call_count) # Assert calling new fn with deprecated value issues log warning. 
self.assertEqual(3, _fn(1, 2, deprecated=True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) # Assert calling new fn with default deprecated value issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(2, mock_warning.call_count) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "DEPRECATED FUNCTION ARGUMENTS" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn with non-deprecated value logs nothing. self.assertEqual(3, _fn(1, 2, deprecated=False)) self.assertEqual(0, mock_warning.call_count) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2, deprecated=True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) # Assert calling new fn with default deprecated value issues log warning. 
self.assertEqual(3, _fn(1, 2)) self.assertEqual(2, mock_warning.call_count) class DeprecationArgumentsTest(test.TestCase): def testDeprecatedArgumentLookup(self): good_value = 3 self.assertEqual( deprecation.deprecated_argument_lookup("val_new", good_value, "val_old", None), good_value) self.assertEqual( deprecation.deprecated_argument_lookup("val_new", None, "val_old", good_value), good_value) with self.assertRaisesRegexp(ValueError, "Cannot specify both 'val_old' and 'val_new'"): self.assertEqual( deprecation.deprecated_argument_lookup("val_new", good_value, "val_old", good_value), good_value) def testRewriteArgumentDocstring(self): docs = """Add `a` and `b` Args: a: first arg b: second arg """ new_docs = deprecation.rewrite_argument_docstring( deprecation.rewrite_argument_docstring(docs, "a", "left"), "b", "right") new_docs_ref = """Add `left` and `right` Args: left: first arg right: second arg """ self.assertEqual(new_docs, new_docs_ref) if __name__ == "__main__": test.main()
srvg/ansible
refs/heads/devel
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py
47
# Copyright: (c) 2015, Ansible Inc,
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function

__metaclass__ = type

import copy

from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionBase):
    """Platform-agnostic dispatcher action.

    Resolves which concrete, platform-specific module implements the current
    task for the detected ``network_os`` and delegates the whole task to that
    module's action plugin.
    """

    def run(self, tmp=None, task_vars=None):
        """Resolve the implementation module and run its action plugin.

        Returns either the delegated action's result dict, or a result dict
        with ``failed``/``msg`` set when no implementation module was found.
        """
        del tmp  # tmp no longer has any effect
        result = {}
        # Work on a copy so the resolved network_os does not leak into the
        # shared play context.
        play_context = copy.deepcopy(self._play_context)
        play_context.network_os = self._get_network_os(task_vars)
        new_task = self._task.copy()

        module = self._get_implementation_module(
            play_context.network_os, self._task.action
        )

        if not module:
            # NOTE(review): a missing "fail_on_missing_module" key would raise
            # KeyError here — presumably callers always supply it; confirm.
            if self._task.args["fail_on_missing_module"]:
                result["failed"] = True
            else:
                result["failed"] = False

            result["msg"] = (
                "Could not find implementation module %s for %s"
                % (self._task.action, play_context.network_os)
            )
            return result

        # Rewrite the task to point at the concrete module, then hand it to
        # the action plugin registered for this network_os.
        new_task.action = module

        action = self._shared_loader_obj.action_loader.get(
            play_context.network_os,
            task=new_task,
            connection=self._connection,
            play_context=play_context,
            loader=self._loader,
            templar=self._templar,
            shared_loader_obj=self._shared_loader_obj,
        )
        display.vvvv("Running implementation module %s" % module)
        return action.run(task_vars=task_vars)

    def _get_network_os(self, task_vars):
        """Determine network_os: task arg, then inventory, then gathered fact.

        Raises AnsibleError when no source provides a value.
        """
        if "network_os" in self._task.args and self._task.args["network_os"]:
            display.vvvv("Getting network OS from task argument")
            network_os = self._task.args["network_os"]
        elif self._play_context.network_os:
            display.vvvv("Getting network OS from inventory")
            network_os = self._play_context.network_os
        elif (
            "network_os" in task_vars.get("ansible_facts", {})
            and task_vars["ansible_facts"]["network_os"]
        ):
            display.vvvv("Getting network OS from fact")
            network_os = task_vars["ansible_facts"]["network_os"]
        else:
            raise AnsibleError(
                "ansible_network_os must be specified on this host to use platform agnostic modules"
            )
        return network_os

    def _get_implementation_module(self, network_os, platform_agnostic_module):
        """Map (network_os, agnostic module name) to a concrete module name.

        e.g. network_os "cisco.ios.ios" + "net_interfaces" ->
        "cisco.ios.ios_interfaces".  Returns None when the resulting name is
        not known to the module loader.
        """
        module_name = (
            network_os.split(".")[-1]
            + "_"
            + platform_agnostic_module.partition("_")[2]
        )
        if "." in network_os:
            # network_os was given as a FQCN: keep the collection prefix.
            fqcn_module = ".".join(network_os.split(".")[0:-1])
            implementation_module = fqcn_module + "." + module_name
        else:
            implementation_module = module_name

        if implementation_module not in self._shared_loader_obj.module_loader:
            implementation_module = None

        return implementation_module
ingydotnet/pyyaml-mirror
refs/heads/master
lib/yaml/events.py
986
# Event classes for the YAML event stream: the parser produces a flat
# sequence of these objects and the emitter consumes one.  They carry no
# tree structure themselves, only the payload attached to each event.

# Abstract classes.

class Event(object):
    """Base class for every event in a YAML event stream."""

    def __init__(self, start_mark=None, end_mark=None):
        # Source positions; None for programmatically constructed events.
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Render only the payload attributes this subclass actually defines,
        # always in a fixed canonical order.
        parts = []
        for key in ['anchor', 'tag', 'implicit', 'value']:
            if hasattr(self, key):
                parts.append('%s=%r' % (key, getattr(self, key)))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))

class NodeEvent(Event):
    """An event introducing a node; may carry an anchor name."""

    def __init__(self, anchor, start_mark=None, end_mark=None):
        self.anchor = anchor
        self.start_mark = start_mark
        self.end_mark = end_mark

class CollectionStartEvent(NodeEvent):
    """Opens a sequence or mapping node."""

    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
            flow_style=None):
        self.anchor = anchor
        self.tag = tag
        self.implicit = implicit
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.flow_style = flow_style

class CollectionEndEvent(Event):
    """Closes the most recently opened collection."""

# Implementations.

class StreamStartEvent(Event):
    """First event of any stream; optionally records the encoding."""

    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.encoding = encoding

class StreamEndEvent(Event):
    """Last event of any stream."""

class DocumentStartEvent(Event):
    """Opens a document; records directives and whether '---' was explicit."""

    def __init__(self, start_mark=None, end_mark=None,
            explicit=None, version=None, tags=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.explicit = explicit
        self.version = version
        self.tags = tags

class DocumentEndEvent(Event):
    """Closes a document; records whether '...' was explicit."""

    def __init__(self, start_mark=None, end_mark=None, explicit=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.explicit = explicit

class AliasEvent(NodeEvent):
    """A reference to a previously anchored node (``*anchor``)."""

class ScalarEvent(NodeEvent):
    """A single scalar value with its tag/implicit resolution flags."""

    def __init__(self, anchor, tag, implicit, value,
            start_mark=None, end_mark=None, style=None):
        self.anchor = anchor
        self.tag = tag
        self.implicit = implicit
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.style = style

class SequenceStartEvent(CollectionStartEvent):
    """Opens a sequence node."""

class SequenceEndEvent(CollectionEndEvent):
    """Closes a sequence node."""

class MappingStartEvent(CollectionStartEvent):
    """Opens a mapping node."""

class MappingEndEvent(CollectionEndEvent):
    """Closes a mapping node."""
gandarez/wakatime
refs/heads/master
wakatime/projects/__init__.py
12133432
mattray/stackalytics
refs/heads/master
tests/__init__.py
12133432
olemke/pyatmlab
refs/heads/master
tests/__init__.py
12133432
noushadali/androguard
refs/heads/master
androguard/decompiler/dad/dataflow.py
34
# This file is part of Androguard. # # Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com> # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from collections import defaultdict from androguard.decompiler.dad.instruction import (Variable, ThisParam, Param) from androguard.decompiler.dad.util import build_path, common_dom from androguard.decompiler.dad.node import Node logger = logging.getLogger('dad.control_flow') class BasicReachDef(object): def __init__(self, graph, params): self.g = graph self.A = defaultdict(set) self.R = defaultdict(set) self.DB = defaultdict(set) self.defs = defaultdict(lambda: defaultdict(set)) self.def_to_loc = defaultdict(set) # Deal with special entry node entry = graph.entry self.A[entry] = range(-1, -len(params) - 1, -1) for loc, param in enumerate(params, 1): self.defs[entry][param].add(-loc) self.def_to_loc[param].add(-loc) # Deal with the other nodes for node in graph.rpo: for i, ins in node.get_loc_with_ins(): kill = ins.get_lhs() if kill is not None: self.defs[node][kill].add(i) self.def_to_loc[kill].add(i) for defs, values in self.defs[node].items(): self.DB[node].add(max(values)) def run(self): nodes = self.g.rpo[:] while nodes: node = nodes.pop(0) newR = set() for pred in self.g.all_preds(node): newR.update(self.A[pred]) if newR and newR != self.R[node]: self.R[node] = newR for suc in self.g.all_sucs(node): if suc not in nodes: nodes.append(suc) killed_locs = set() for reg in self.defs[node]: 
killed_locs.update(self.def_to_loc[reg]) A = set() for loc in self.R[node]: if loc not in killed_locs: A.add(loc) newA = A.union(self.DB[node]) if newA != self.A[node]: self.A[node] = newA for suc in self.g.all_sucs(node): if suc not in nodes: nodes.append(suc) def update_chain(graph, loc, du, ud): ''' Updates the DU chain of the instruction located at loc such that there is no more reference to it so that we can remove it. When an instruction is found to be dead (i.e it has no side effect, and the register defined is not used) we have to update the DU chain of all the variables that may me used by the dead instruction. ''' ins = graph.get_ins_from_loc(loc) for var in ins.get_used_vars(): # We get the definition points of the current variable for def_loc in set(ud[(var, loc)]): # We remove the use of the variable at loc from the DU chain of # the variable definition located at def_loc du[(var, def_loc)].remove(loc) ud[(var, loc)].remove(def_loc) if not ud.get((var, loc)): ud.pop((var, loc)) # If the DU chain of the defined variable is now empty, this means # that we may have created a new dead instruction, so we check that # the instruction has no side effect and we update the DU chain of # the new dead instruction, and we delete it. # We also make sure that def_loc is not < 0. This is the case when # the current variable is a method parameter. if def_loc >= 0 and not du[(var, def_loc)]: du.pop((var, def_loc)) def_ins = graph.get_ins_from_loc(def_loc) if def_ins.is_call(): def_ins.remove_defined_var() elif def_ins.has_side_effect(): continue else: update_chain(graph, def_loc, du, ud) graph.remove_ins(def_loc) def dead_code_elimination(graph, du, ud): ''' Run a dead code elimination pass. Instructions are checked to be dead. If it is the case, we remove them and we update the DU & UD chains of its variables to check for further dead instructions. 
''' for node in graph.rpo: for i, ins in node.get_loc_with_ins()[:]: reg = ins.get_lhs() if reg is not None: # If the definition is not used, we check that the instruction # has no side effect. If there is one and this is a call, we # remove only the unused defined variable. else, this is # something like an array access, so we do nothing. # Otherwise (no side effect) we can remove the instruction from # the node. if (reg, i) not in du: if ins.is_call(): ins.remove_defined_var() elif ins.has_side_effect(): continue else: # We can delete the instruction. First update the DU # chain of the variables used by the instruction to # `let them know` that they are not used anymore by the # deleted instruction. # Then remove the instruction. update_chain(graph, i, du, ud) graph.remove_ins(i) def clear_path_node(graph, reg, loc1, loc2): for loc in xrange(loc1, loc2): ins = graph.get_ins_from_loc(loc) logger.debug(' treat loc: %d, ins: %s', loc, ins) if ins is None: continue logger.debug(' LHS: %s, side_effect: %s', ins.get_lhs(), ins.has_side_effect()) if ins.get_lhs() == reg or ins.has_side_effect(): return False return True def clear_path(graph, reg, loc1, loc2): ''' Check that the path from loc1 to loc2 is clear. We have to check that there is no side effect between the two location points. We also have to check that the variable `reg` is not redefined along one of the possible pathes from loc1 to loc2. ''' logger.debug('clear_path: reg(%s), loc1(%s), loc2(%s)', reg, loc1, loc2) node1 = graph.get_node_from_loc(loc1) node2 = graph.get_node_from_loc(loc2) # If both instructions are in the same node, we only have to check that the # path is clear inside the node if node1 is node2: return clear_path_node(graph, reg, loc1 + 1, loc2) # If instructions are in different nodes, we also have to check the nodes # in the path between the two locations. 
if not clear_path_node(graph, reg, loc1 + 1, node1.ins_range[1]): return False path = build_path(graph, node1, node2) for node in path: locs = node.ins_range end_loc = loc2 if (locs[0] <= loc2 <= locs[1]) else locs[1] if not clear_path_node(graph, reg, locs[0], end_loc): return False return True def register_propagation(graph, du, ud): ''' Propagate the temporary registers between instructions and remove them if necessary. We process the nodes of the graph in reverse post order. For each instruction in the node, we look at the variables that it uses. For each of these variables we look where it is defined and if we can replace it with its definition. We have to be careful to the side effects some instructions may have. To do the propagation, we use the computed DU and UD chains. ''' change = True while change: change = False for node in graph.rpo: for i, ins in node.get_loc_with_ins()[:]: logger.debug('Treating instruction %d: %s', i, ins) # We make sure the ins has not been deleted since the start of # the iteration if ins not in node.get_ins(): logger.debug(' => skip instruction (deleted)') continue logger.debug(' Used vars: %s', ins.get_used_vars()) for var in ins.get_used_vars(): # Get the list of locations this variable is defined at. locs = ud[(var, i)] logger.debug(' var %s defined in lines %s', var, locs) # If the variable is uniquely defined for this instruction # it may be eligible for propagation. if len(locs) != 1: continue loc = locs[0] # Methods parameters are defined with a location < 0. if loc < 0: continue orig_ins = graph.get_ins_from_loc(loc) logger.debug(' -> %s', orig_ins) logger.debug(' -> DU(%s, %s) = %s', var, loc, du[(var, loc)]) # We defined some instructions as not propagable. 
# Actually this is the case only for array creation # (new foo[x]) if not orig_ins.is_propagable(): logger.debug(' %s not propagable...', orig_ins) continue if not orig_ins.get_rhs().is_const(): # We only try to propagate constants and definition # points which are used at only one location. if len(du[(var, loc)]) > 1: logger.debug(' => variable has multiple uses' ' and is not const => skip') continue # We check that the propagation is safe for all the # variables that are used in the instruction. # The propagation is not safe if there is a side effect # along the path from the definition of the variable # to its use in the instruction, or if the variable may # be redifined along this path. safe = True orig_ins_used_vars = orig_ins.get_used_vars() logger.debug(' variables used by the original ' 'instruction: %s', orig_ins_used_vars) for var2 in orig_ins_used_vars: # loc is the location of the defined variable # i is the location of the current instruction if not clear_path(graph, var2, loc, i): safe = False break if not safe: logger.debug('Propagation NOT SAFE') continue # We also check that the instruction itself is # propagable. If the instruction has a side effect it # cannot be propagated if there is another side effect # along the path if orig_ins.has_side_effect(): if not clear_path(graph, None, loc, i): logger.debug(' %s has side effect and the ' 'path is not clear !', orig_ins) continue logger.debug(' => Modification of the instruction!') logger.debug(' - BEFORE: %s', ins) ins.replace(var, orig_ins.get_rhs()) logger.debug(' -> AFTER: %s', ins) logger.debug('\t UD(%s, %s) : %s', var, i, ud[(var, i)]) ud[(var, i)].remove(loc) logger.debug('\t -> %s', ud[(var, i)]) if len(ud[(var, i)]) == 0: ud.pop((var, i)) for var2 in orig_ins.get_used_vars(): # We update the UD chain of the variables we # propagate. We also have to take the # definition points of all the variables used # by the instruction and update the DU chain # with this information. 
old_ud = ud.get((var2, loc)) logger.debug('\t ud(%s, %s) = %s', var2, loc, old_ud) # If the instruction use the same variable # multiple times, the second+ time the ud chain # will be None because already treated. if old_ud is None: continue ud[(var2, i)].extend(old_ud) logger.debug('\t - ud(%s, %s) = %s', var2, i, ud[(var2, i)]) ud.pop((var2, loc)) for def_loc in old_ud: du[(var2, def_loc)].remove(loc) du[(var2, def_loc)].append(i) new_du = du[(var, loc)] logger.debug('\t new_du(%s, %s): %s', var, loc, new_du) new_du.remove(i) logger.debug('\t -> %s', new_du) if not new_du: logger.debug('\t REMOVING INS %d', loc) du.pop((var, loc)) graph.remove_ins(loc) change = True class DummyNode(Node): def __init__(self, name): super(DummyNode, self).__init__(name) def get_loc_with_ins(self): return [] def __repr__(self): return '%s-dumnode' % self.name def __str__(self): return '%s-dummynode' % self.name def split_variables(graph, lvars, DU, UD): treated = defaultdict(list) variables = defaultdict(list) for var, loc in sorted(DU): if var not in lvars: continue if loc in treated[var]: continue defs = [loc] uses = set(DU[(var, loc)]) change = True while change: change = False for use in uses: ldefs = UD[(var, use)] for ldef in ldefs: if ldef not in defs: defs.append(ldef) change = True for ldef in defs[1:]: luses = set(DU[(var, ldef)]) for use in luses: if use not in uses: uses.add(use) change = True treated[var].extend(defs) variables[var].append((defs, list(uses))) if lvars: nb_vars = max(lvars) + 1 else: nb_vars = 0 for var, versions in variables.iteritems(): nversions = len(versions) if nversions == 1: continue orig_var = lvars.pop(var) for i, (defs, uses) in enumerate(versions): if min(defs) < 0: # Param if orig_var.this: new_version = ThisParam(var, orig_var.type) else: new_version = Param(var, orig_var.type) lvars[var] = new_version else: new_version = Variable(nb_vars) new_version.type = orig_var.type lvars[nb_vars] = new_version # add new version to variables nb_vars 
+= 1 new_version.name = '%d_%d' % (var, i) for loc in defs: if loc < 0: continue ins = graph.get_ins_from_loc(loc) ins.replace_lhs(new_version) DU[(new_version.value(), loc)] = DU.pop((var, loc)) for loc in uses: ins = graph.get_ins_from_loc(loc) ins.replace_var(var, new_version) UD[(new_version.value(), loc)] = UD.pop((var, loc)) def build_def_use(graph, lparams): ''' Builds the Def-Use and Use-Def (DU/UD) chains of the variables of the method. ''' # We insert two special nodes : entry & exit, to the graph. # This is done to simplify the reaching definition analysis. old_entry = graph.entry old_exit = graph.exit new_entry = DummyNode('entry') graph.add_node(new_entry) graph.add_edge(new_entry, old_entry) graph.entry = new_entry if old_exit: new_exit = DummyNode('exit') graph.add_node(new_exit) graph.add_edge(old_exit, new_exit) graph.rpo.append(new_exit) analysis = BasicReachDef(graph, set(lparams)) analysis.run() # The analysis is done, We can now remove the two special nodes. graph.remove_node(new_entry) if old_exit: graph.remove_node(new_exit) graph.entry = old_entry UD = defaultdict(list) for node in graph.rpo: for i, ins in node.get_loc_with_ins(): for var in ins.get_used_vars(): # var not in analysis.def_to_loc: test that the register # exists. It is possible that it is not the case, when a # variable is of a type which is stored on multiple registers # e.g: a 'double' stored in v3 is also present in v4, so a call # to foo(v3), will in fact call foo(v3, v4). 
if var not in analysis.def_to_loc: continue ldefs = analysis.defs[node] prior_def = -1 for v in ldefs.get(var, set()): if prior_def < v < i: prior_def = v if prior_def >= 0: UD[(var, i)].append(prior_def) else: intersect = analysis.def_to_loc[var].intersection( analysis.R[node]) UD[(var, i)].extend(intersect) DU = defaultdict(list) for var_loc, defs_loc in UD.items(): var, loc = var_loc for def_loc in defs_loc: DU[(var, def_loc)].append(loc) return UD, DU def place_declarations(graph, dvars, du, ud): idom = graph.immediate_dominators() for node in graph.rpo: for loc, ins in node.get_loc_with_ins(): for var in ins.get_used_vars(): if (not isinstance(dvars[var], Variable) or isinstance(dvars[var], Param)): continue var_defs_locs = ud[(var, loc)] def_nodes = set() for def_loc in var_defs_locs: def_node = graph.get_node_from_loc(def_loc) # TODO: place declarations in catch if needed if def_node.in_catch: continue def_nodes.add(def_node) if not def_nodes: continue common_dominator = def_nodes.pop() for def_node in def_nodes: common_dominator = common_dom( idom,common_dominator, def_node) if any(var in range(*common_dominator.ins_range) for var in ud[(var, loc)]): continue common_dominator.add_variable_declaration(dvars[var])
aminghadersohi/airflow
refs/heads/master
airflow/ti_deps/__init__.py
1049
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
molebot/brython
refs/heads/master
www/src/Lib/test/test_range.py
7
# Python test set -- built-in functions import unittest from test import support import sys import pickle import itertools # pure Python implementations (3 args only), for comparison def pyrange(start, stop, step): if (start - stop) // step < 0: # replace stop with next element in the sequence of integers # that are congruent to start modulo step. stop += (start - stop) % step while start != stop: yield start start += step def pyrange_reversed(start, stop, step): stop += (start - stop) % step return pyrange(stop - step, start - step, -step) class RangeTest(unittest.TestCase): def assert_iterators_equal(self, xs, ys, test_id, limit=None): # check that an iterator xs matches the expected results ys, # up to a given limit. if limit is not None: xs = itertools.islice(xs, limit) ys = itertools.islice(ys, limit) sentinel = object() pairs = itertools.zip_longest(xs, ys, fillvalue=sentinel) for i, (x, y) in enumerate(pairs): if x == y: continue elif x == sentinel: self.fail('{}: iterator ended unexpectedly ' 'at position {}; expected {}'.format(test_id, i, y)) elif y == sentinel: self.fail('{}: unexpected excess element {} at ' 'position {}'.format(test_id, x, i)) else: self.fail('{}: wrong element at position {};' 'expected {}, got {}'.format(test_id, i, y, x)) def test_range(self): self.assertEqual(list(range(3)), [0, 1, 2]) self.assertEqual(list(range(1, 5)), [1, 2, 3, 4]) self.assertEqual(list(range(0)), []) self.assertEqual(list(range(-3)), []) self.assertEqual(list(range(1, 10, 3)), [1, 4, 7]) self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4]) a = 10 b = 100 c = 50 self.assertEqual(list(range(a, a+2)), [a, a+1]) self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1]) self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2]) seq = list(range(a, b, c)) self.assertIn(a, seq) self.assertNotIn(b, seq) self.assertEqual(len(seq), 2) seq = list(range(b, a, -c)) self.assertIn(b, seq) self.assertNotIn(a, seq) self.assertEqual(len(seq), 2) seq = list(range(-a, -b, -c)) 
self.assertIn(-a, seq) self.assertNotIn(-b, seq) self.assertEqual(len(seq), 2) self.assertRaises(TypeError, range) self.assertRaises(TypeError, range, 1, 2, 3, 4) self.assertRaises(ValueError, range, 1, 2, 0) self.assertRaises(TypeError, range, 0.0, 2, 1) self.assertRaises(TypeError, range, 1, 2.0, 1) self.assertRaises(TypeError, range, 1, 2, 1.0) self.assertRaises(TypeError, range, 1e100, 1e101, 1e101) self.assertRaises(TypeError, range, 0, "spam") self.assertRaises(TypeError, range, 0, 42, "spam") self.assertEqual(len(range(0, sys.maxsize, sys.maxsize-1)), 2) r = range(-sys.maxsize, sys.maxsize, 2) self.assertEqual(len(r), sys.maxsize) def test_large_operands(self): x = range(10**20, 10**20+10, 3) self.assertEqual(len(x), 4) self.assertEqual(len(list(x)), 4) x = range(10**20+10, 10**20, 3) self.assertEqual(len(x), 0) self.assertEqual(len(list(x)), 0) x = range(10**20, 10**20+10, -3) self.assertEqual(len(x), 0) self.assertEqual(len(list(x)), 0) x = range(10**20+10, 10**20, -3) self.assertEqual(len(x), 4) self.assertEqual(len(list(x)), 4) # Now test range() with longs self.assertEqual(list(range(-2**100)), []) self.assertEqual(list(range(0, -2**100)), []) self.assertEqual(list(range(0, 2**100, -1)), []) self.assertEqual(list(range(0, 2**100, -1)), []) a = int(10 * sys.maxsize) b = int(100 * sys.maxsize) c = int(50 * sys.maxsize) self.assertEqual(list(range(a, a+2)), [a, a+1]) self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1]) self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2]) seq = list(range(a, b, c)) self.assertIn(a, seq) self.assertNotIn(b, seq) self.assertEqual(len(seq), 2) self.assertEqual(seq[0], a) self.assertEqual(seq[-1], a+c) seq = list(range(b, a, -c)) self.assertIn(b, seq) self.assertNotIn(a, seq) self.assertEqual(len(seq), 2) self.assertEqual(seq[0], b) self.assertEqual(seq[-1], b-c) seq = list(range(-a, -b, -c)) self.assertIn(-a, seq) self.assertNotIn(-b, seq) self.assertEqual(len(seq), 2) self.assertEqual(seq[0], -a) self.assertEqual(seq[-1], 
-a-c) def test_large_range(self): # Check long ranges (len > sys.maxsize) # len() is expected to fail due to limitations of the __len__ protocol def _range_len(x): try: length = len(x) except OverflowError: step = x[1] - x[0] length = 1 + ((x[-1] - x[0]) // step) return length a = -sys.maxsize b = sys.maxsize expected_len = b - a x = range(a, b) self.assertIn(a, x) self.assertNotIn(b, x) #self.assertRaises(OverflowError, len, x) self.assertEqual(_range_len(x), expected_len) self.assertEqual(x[0], a) idx = sys.maxsize+1 self.assertEqual(x[idx], a+idx) self.assertEqual(x[idx:idx+1][0], a+idx) with self.assertRaises(IndexError): x[-expected_len-1] with self.assertRaises(IndexError): x[expected_len] a = 0 b = 2 * sys.maxsize expected_len = b - a x = range(a, b) self.assertIn(a, x) self.assertNotIn(b, x) #self.assertRaises(OverflowError, len, x) self.assertEqual(_range_len(x), expected_len) self.assertEqual(x[0], a) idx = sys.maxsize+1 self.assertEqual(x[idx], a+idx) self.assertEqual(x[idx:idx+1][0], a+idx) with self.assertRaises(IndexError): x[-expected_len-1] with self.assertRaises(IndexError): x[expected_len] a = 0 b = sys.maxsize**10 c = 2*sys.maxsize expected_len = 1 + (b - a) // c x = range(a, b, c) self.assertIn(a, x) self.assertNotIn(b, x) #self.assertRaises(OverflowError, len, x) self.assertEqual(_range_len(x), expected_len) self.assertEqual(x[0], a) idx = sys.maxsize+1 self.assertEqual(x[idx], a+(idx*c)) self.assertEqual(x[idx:idx+1][0], a+(idx*c)) with self.assertRaises(IndexError): x[-expected_len-1] with self.assertRaises(IndexError): x[expected_len] a = sys.maxsize**10 b = 0 c = -2*sys.maxsize expected_len = 1 + (b - a) // c x = range(a, b, c) self.assertIn(a, x) self.assertNotIn(b, x) #self.assertRaises(OverflowError, len, x) self.assertEqual(_range_len(x), expected_len) self.assertEqual(x[0], a) idx = sys.maxsize+1 self.assertEqual(x[idx], a+(idx*c)) self.assertEqual(x[idx:idx+1][0], a+(idx*c)) with self.assertRaises(IndexError): x[-expected_len-1] with 
self.assertRaises(IndexError): x[expected_len] def test_invalid_invocation(self): self.assertRaises(TypeError, range) self.assertRaises(TypeError, range, 1, 2, 3, 4) self.assertRaises(ValueError, range, 1, 2, 0) a = int(10 * sys.maxsize) self.assertRaises(ValueError, range, a, a + 1, int(0)) self.assertRaises(TypeError, range, 1., 1., 1.) self.assertRaises(TypeError, range, 1e100, 1e101, 1e101) self.assertRaises(TypeError, range, 0, "spam") self.assertRaises(TypeError, range, 0, 42, "spam") # Exercise various combinations of bad arguments, to check # refcounting logic self.assertRaises(TypeError, range, 0.0) self.assertRaises(TypeError, range, 0, 0.0) self.assertRaises(TypeError, range, 0.0, 0) self.assertRaises(TypeError, range, 0.0, 0.0) self.assertRaises(TypeError, range, 0, 0, 1.0) self.assertRaises(TypeError, range, 0, 0.0, 1) self.assertRaises(TypeError, range, 0, 0.0, 1.0) self.assertRaises(TypeError, range, 0.0, 0, 1) self.assertRaises(TypeError, range, 0.0, 0, 1.0) self.assertRaises(TypeError, range, 0.0, 0.0, 1) self.assertRaises(TypeError, range, 0.0, 0.0, 1.0) def test_index(self): u = range(2) self.assertEqual(u.index(0), 0) self.assertEqual(u.index(1), 1) self.assertRaises(ValueError, u.index, 2) u = range(-2, 3) self.assertEqual(u.count(0), 1) self.assertEqual(u.index(0), 2) self.assertRaises(TypeError, u.index) class BadExc(Exception): pass class BadCmp: def __eq__(self, other): if other == 2: raise BadExc() return False a = range(4) self.assertRaises(BadExc, a.index, BadCmp()) a = range(-2, 3) self.assertEqual(a.index(0), 2) self.assertEqual(range(1, 10, 3).index(4), 1) self.assertEqual(range(1, -10, -3).index(-5), 2) self.assertEqual(range(10**20).index(1), 1) self.assertEqual(range(10**20).index(10**20 - 1), 10**20 - 1) self.assertRaises(ValueError, range(1, 2**100, 2).index, 2**87) self.assertEqual(range(1, 2**100, 2).index(2**87+1), 2**86) class AlwaysEqual(object): def __eq__(self, other): return True always_equal = AlwaysEqual() 
self.assertEqual(range(10).index(always_equal), 0) def test_user_index_method(self): bignum = 2*sys.maxsize smallnum = 42 # User-defined class with an __index__ method class I: def __init__(self, n): self.n = int(n) def __index__(self): return self.n self.assertEqual(list(range(I(bignum), I(bignum + 1))), [bignum]) self.assertEqual(list(range(I(smallnum), I(smallnum + 1))), [smallnum]) # User-defined class with a failing __index__ method class IX: def __index__(self): raise RuntimeError self.assertRaises(RuntimeError, range, IX()) # User-defined class with an invalid __index__ method class IN: def __index__(self): return "not a number" self.assertRaises(TypeError, range, IN()) # Test use of user-defined classes in slice indices. self.assertEqual(list(range(10)[:I(5)]), list(range(5))) with self.assertRaises(RuntimeError): range(0, 10)[:IX()] with self.assertRaises(TypeError): range(0, 10)[:IN()] def test_count(self): self.assertEqual(range(3).count(-1), 0) self.assertEqual(range(3).count(0), 1) self.assertEqual(range(3).count(1), 1) self.assertEqual(range(3).count(2), 1) self.assertEqual(range(3).count(3), 0) self.assertIs(type(range(3).count(-1)), int) self.assertIs(type(range(3).count(1)), int) self.assertEqual(range(10**20).count(1), 1) self.assertEqual(range(10**20).count(10**20), 0) self.assertEqual(range(3).index(1), 1) self.assertEqual(range(1, 2**100, 2).count(2**87), 0) self.assertEqual(range(1, 2**100, 2).count(2**87+1), 1) class AlwaysEqual(object): def __eq__(self, other): return True always_equal = AlwaysEqual() self.assertEqual(range(10).count(always_equal), 10) self.assertEqual(len(range(sys.maxsize, sys.maxsize+10)), 10) def test_repr(self): self.assertEqual(repr(range(1)), 'range(0, 1)') self.assertEqual(repr(range(1, 2)), 'range(1, 2)') self.assertEqual(repr(range(1, 2, 3)), 'range(1, 2, 3)') def _test_pickling(self): testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1), (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)] for proto in 
range(pickle.HIGHEST_PROTOCOL + 1): for t in testcases: r = range(*t) self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))), list(r)) def _test_iterator_pickling(self): testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1), (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)] for proto in range(pickle.HIGHEST_PROTOCOL + 1): for t in testcases: it = itorg = iter(range(*t)) data = list(range(*t)) d = pickle.dumps(it) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(list(it), data) it = pickle.loads(d) try: next(it) except StopIteration: continue d = pickle.dumps(it) it = pickle.loads(d) self.assertEqual(list(it), data[1:]) def test_odd_bug(self): # This used to raise a "SystemError: NULL result without error" # because the range validation step was eating the exception # before NULL was returned. with self.assertRaises(TypeError): range([], 1, -1) def test_types(self): # Non-integer objects *equal* to any of the range's items are supposed # to be contained in the range. self.assertIn(1.0, range(3)) self.assertIn(True, range(3)) self.assertIn(1+0j, range(3)) class C1: def __eq__(self, other): return True self.assertIn(C1(), range(3)) # Objects are never coerced into other types for comparison. class C2: def __int__(self): return 1 def __index__(self): return 1 self.assertNotIn(C2(), range(3)) # ..except if explicitly told so. self.assertIn(int(C2()), range(3)) # Check that the range.__contains__ optimization is only # used for ints, not for instances of subclasses of int. 
class C3(int): def __eq__(self, other): return True self.assertIn(C3(11), range(10)) self.assertIn(C3(11), list(range(10))) def test_strided_limits(self): r = range(0, 101, 2) self.assertIn(0, r) self.assertNotIn(1, r) self.assertIn(2, r) self.assertNotIn(99, r) self.assertIn(100, r) self.assertNotIn(101, r) r = range(0, -20, -1) self.assertIn(0, r) self.assertIn(-1, r) self.assertIn(-19, r) self.assertNotIn(-20, r) r = range(0, -20, -2) self.assertIn(-18, r) self.assertNotIn(-19, r) self.assertNotIn(-20, r) def test_empty(self): r = range(0) self.assertNotIn(0, r) self.assertNotIn(1, r) r = range(0, -10) self.assertNotIn(0, r) self.assertNotIn(-1, r) self.assertNotIn(1, r) def test_range_iterators(self): # exercise 'fast' iterators, that use a rangeiterobject internally. # see issue 7298 limits = [base + jiggle for M in (2**32, 2**64) for base in (-M, -M//2, 0, M//2, M) for jiggle in (-2, -1, 0, 1, 2)] test_ranges = [(start, end, step) for start in limits for end in limits for step in (-2**63, -2**31, -2, -1, 1, 2)] for start, end, step in test_ranges: iter1 = range(start, end, step) iter2 = pyrange(start, end, step) test_id = "range({}, {}, {})".format(start, end, step) # check first 100 entries self.assert_iterators_equal(iter1, iter2, test_id, limit=100) iter1 = reversed(range(start, end, step)) iter2 = pyrange_reversed(start, end, step) test_id = "reversed(range({}, {}, {}))".format(start, end, step) self.assert_iterators_equal(iter1, iter2, test_id, limit=100) def test_slice(self): def check(start, stop, step=None): i = slice(start, stop, step) self.assertEqual(list(r[i]), list(r)[i]) self.assertEqual(len(r[i]), len(list(r)[i])) for r in [range(10), range(0), range(1, 9, 3), range(8, 0, -3), range(sys.maxsize+1, sys.maxsize+10), ]: check(0, 2) check(0, 20) check(1, 2) check(20, 30) check(-30, -20) check(-1, 100, 2) check(0, -1) check(-1, -3, -1) def test_contains(self): r = range(10) self.assertIn(0, r) self.assertIn(1, r) self.assertIn(5.0, r) 
self.assertNotIn(5.1, r) self.assertNotIn(-1, r) self.assertNotIn(10, r) self.assertNotIn("", r) r = range(9, -1, -1) self.assertIn(0, r) self.assertIn(1, r) self.assertIn(5.0, r) self.assertNotIn(5.1, r) self.assertNotIn(-1, r) self.assertNotIn(10, r) self.assertNotIn("", r) r = range(0, 10, 2) self.assertIn(0, r) self.assertNotIn(1, r) self.assertNotIn(5.0, r) self.assertNotIn(5.1, r) self.assertNotIn(-1, r) self.assertNotIn(10, r) self.assertNotIn("", r) r = range(9, -1, -2) self.assertNotIn(0, r) self.assertIn(1, r) self.assertIn(5.0, r) self.assertNotIn(5.1, r) self.assertNotIn(-1, r) self.assertNotIn(10, r) self.assertNotIn("", r) def test_reverse_iteration(self): for r in [range(10), range(0), range(1, 9, 3), range(8, 0, -3), range(sys.maxsize+1, sys.maxsize+10), ]: self.assertEqual(list(reversed(r)), list(r)[::-1]) def test_issue11845(self): r = range(*slice(1, 18, 2).indices(20)) values = {None, 0, 1, -1, 2, -2, 5, -5, 19, -19, 20, -20, 21, -21, 30, -30, 99, -99} for i in values: for j in values: for k in values - {0}: r[i:j:k] def test_comparison(self): test_ranges = [range(0), range(0, -1), range(1, 1, 3), range(1), range(5, 6), range(5, 6, 2), range(5, 7, 2), range(2), range(0, 4, 2), range(0, 5, 2), range(0, 6, 2)] test_tuples = list(map(tuple, test_ranges)) # Check that equality of ranges matches equality of the corresponding # tuples for each pair from the test lists above. ranges_eq = [a == b for a in test_ranges for b in test_ranges] tuples_eq = [a == b for a in test_tuples for b in test_tuples] self.assertEqual(ranges_eq, tuples_eq) # Check that != correctly gives the logical negation of == ranges_ne = [a != b for a in test_ranges for b in test_ranges] self.assertEqual(ranges_ne, [not x for x in ranges_eq]) # Equal ranges should have equal hashes. 
for a in test_ranges: for b in test_ranges: if a == b: self.assertEqual(hash(a), hash(b)) # Ranges are unequal to other types (even sequence types) self.assertIs(range(0) == (), False) self.assertIs(() == range(0), False) self.assertIs(range(2) == [0, 1], False) # Huge integers aren't a problem. self.assertEqual(range(0, 2**100 - 1, 2), range(0, 2**100, 2)) self.assertEqual(hash(range(0, 2**100 - 1, 2)), hash(range(0, 2**100, 2))) self.assertNotEqual(range(0, 2**100, 2), range(0, 2**100 + 1, 2)) self.assertEqual(range(2**200, 2**201 - 2**99, 2**100), range(2**200, 2**201, 2**100)) self.assertEqual(hash(range(2**200, 2**201 - 2**99, 2**100)), hash(range(2**200, 2**201, 2**100))) self.assertNotEqual(range(2**200, 2**201, 2**100), range(2**200, 2**201 + 1, 2**100)) # Order comparisons are not implemented for ranges. with self.assertRaises(TypeError): range(0) < range(0) with self.assertRaises(TypeError): range(0) > range(0) with self.assertRaises(TypeError): range(0) <= range(0) with self.assertRaises(TypeError): range(0) >= range(0) def test_attributes(self): # test the start, stop and step attributes of range objects self.assert_attrs(range(0), 0, 0, 1) self.assert_attrs(range(10), 0, 10, 1) self.assert_attrs(range(-10), 0, -10, 1) self.assert_attrs(range(0, 10, 1), 0, 10, 1) self.assert_attrs(range(0, 10, 3), 0, 10, 3) self.assert_attrs(range(10, 0, -1), 10, 0, -1) self.assert_attrs(range(10, 0, -3), 10, 0, -3) def assert_attrs(self, rangeobj, start, stop, step): self.assertEqual(rangeobj.start, start) self.assertEqual(rangeobj.stop, stop) self.assertEqual(rangeobj.step, step) with self.assertRaises(AttributeError): rangeobj.start = 0 with self.assertRaises(AttributeError): rangeobj.stop = 10 with self.assertRaises(AttributeError): rangeobj.step = 1 with self.assertRaises(AttributeError): del rangeobj.start with self.assertRaises(AttributeError): del rangeobj.stop with self.assertRaises(AttributeError): del rangeobj.step def test_main(): 
support.run_unittest(RangeTest) if __name__ == "__main__": test_main()
nacc/cobbler
refs/heads/master
cobbler/action_acl.py
2
""" Configures acls for various users/groups so they can access the cobbler command line as non-root. Now that CLI is largely remoted (XMLRPC) this is largely just useful for not having to log in (access to shared-secret) file but also grants access to hand-edit various config files and other useful things. Copyright 2006-2009, Red Hat, Inc and Others Michael DeHaan <michael.dehaan AT gmail> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """ import os import os.path import shutil import sys import glob import traceback import errno import utils from cexceptions import * from utils import _ import clogger class AclConfig: def __init__(self,config,logger=None): """ Constructor """ self.config = config self.api = config.api self.settings = config.settings() if logger is None: logger = clogger.Logger() self.logger = logger def run(self,adduser=None,addgroup=None,removeuser=None,removegroup=None): """ Automate setfacl commands """ ok = False if adduser: ok = True self.modacl(True,True,adduser) if addgroup: ok = True self.modacl(True,False,addgroup) if removeuser: ok = True self.modacl(False,True,removeuser) if removegroup: ok = True self.modacl(False,False,removegroup) if not ok: raise CX("no arguments specified, nothing to do") def modacl(self,isadd,isuser,who): webdir = self.settings.webdir snipdir = self.settings.snippetsdir tftpboot = 
utils.tftpboot_location() PROCESS_DIRS = { "/var/log/cobbler" : "rwx", "/var/log/cobbler/tasks" : "rwx", "/var/lib/cobbler" : "rwx", "/etc/cobbler" : "rwx", tftpboot : "rwx", "/var/lib/cobbler/triggers" : "rwx" } if not snipdir.startswith("/var/lib/cobbler/"): PROCESS_DIRS[snipdir] = "r" cmd = "-R" if isadd: cmd = "%s -m" % cmd else: cmd = "%s -x" % cmd if isuser: cmd = "%s u:%s" % (cmd,who) else: cmd = "%s g:%s" % (cmd,who) for d in PROCESS_DIRS: how = PROCESS_DIRS[d] if isadd: cmd2 = "%s:%s" % (cmd,how) else: cmd2 = cmd cmd2 = "%s %s" % (cmd2,d) rc = utils.subprocess_call(self.logger,"setfacl -d %s" % cmd2,shell=True) if not rc == 0: utils.die(self.logger,"command failed") rc = utils.subprocess_call(self.logger,"setfacl %s" % cmd2,shell=True) if not rc == 0: utils.die(self.logger,"command failed")
redhat-openstack/glance
refs/heads/f22-patches
glance/db/metadata.py
1
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2013 OpenStack Foundation # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Metadata setup commands.""" from glance.common import utils from glance.db.sqlalchemy import api as db_api IMPL = utils.LazyPluggable( 'backend', config_group='database', sqlalchemy='glance.db.sqlalchemy.metadata') def load_metadefs(): """Read metadefinition files and insert data into the database""" return IMPL.db_load_metadefs(engine=db_api.get_engine(), metadata_path=None) def unload_metadefs(): """Unload metadefinitions from database""" return IMPL.db_unload_metadefs(engine=db_api.get_engine()) def export_metadefs(): """Export metadefinitions from database to files""" return IMPL.db_export_metadefs(engine=db_api.get_engine(), metadata_path=None)
GuessWhatGame/guesswhat
refs/heads/master
src/guesswhat/data_provider/oracle_batchifier.py
1
import numpy as np
import collections
from PIL import Image

from generic.data_provider.batchifier import AbstractBatchifier
from generic.data_provider.image_preprocessors import get_spatial_feat, resize_image
from generic.data_provider.nlp_utils import padder

# One-hot encodings of the three possible oracle answers.
answer_dict = \
    {'Yes': np.array([1, 0, 0], dtype=np.int32),
     'No': np.array([0, 1, 0], dtype=np.int32),
     'N/A': np.array([0, 0, 1], dtype=np.int32)
     }


class OracleBatchifier(AbstractBatchifier):
    """Assemble GuessWhat?! games into batch dicts for the Oracle model.

    Only the inputs listed in ``sources`` are materialized, so batches stay
    small when heavy inputs (image, crop, mask) are not needed.
    """

    def __init__(self, tokenizer, sources, status=None):
        """
        :param tokenizer: tokenizer used to encode question strings
        :param sources: iterable of input names to build per batch; subset of
            {question, answer, category, spatial, crop, image, mask}
        :param status: game statuses to keep; empty/None keeps every game.
            BUG FIX: the default used to be the mutable ``list()``; a
            ``None`` sentinel avoids the shared-mutable-default pitfall
            while preserving behavior for all existing callers.
        """
        self.tokenizer = tokenizer
        self.sources = sources
        self.status = status if status is not None else []

    def filter(self, games):
        """Keep only games whose status is listed in ``self.status`` (if any)."""
        if len(self.status) > 0:
            return [g for g in games if g.status in self.status]
        else:
            return games

    def apply(self, games):
        """Build one batch (a dict of parallel lists) from ``games``."""
        sources = self.sources
        tokenizer = self.tokenizer
        batch = collections.defaultdict(list)

        for game in games:
            batch['raw'].append(game)

            image = game.image

            if 'question' in sources:
                # The oracle answers exactly one question per (split) game.
                assert len(game.questions) == 1
                batch['question'].append(tokenizer.apply(game.questions[0]))

            if 'answer' in sources:
                assert len(game.answers) == 1
                batch['answer'].append(answer_dict[game.answers[0]])

            if 'category' in sources:
                batch['category'].append(game.object.category_id)

            if 'spatial' in sources:
                spat_feat = get_spatial_feat(game.object.bbox, image.width, image.height)
                batch['spatial'].append(spat_feat)

            if 'crop' in sources:
                batch['crop'].append(game.object.get_crop())

            if 'image' in sources:
                batch['image'].append(image.get_image())

            if 'mask' in sources:
                # BUG FIX: the original asserted `"image" in batch['image']`,
                # i.e. membership of the *string* "image" in the list of image
                # arrays, which can never express the intended check. The mask
                # is resized to the feature map appended just above, so the
                # real requirement is that the image source is enabled.
                assert 'image' in sources, "mask input requires the image source"
                mask = game.object.get_mask()

                # Use the image feature size (not the original img size).
                ft_width, ft_height = batch['image'][-1].shape[1], \
                                      batch['image'][-1].shape[2]

                mask = resize_image(Image.fromarray(mask), height=ft_height, width=ft_width)
                batch['mask'].append(mask)

        # Pad the (variable-length) questions into a dense matrix.
        if 'question' in sources:
            batch['question'], batch['seq_length'] = padder(batch['question'],
                                                            padding_symbol=tokenizer.word2i['<padding>'])

        return batch
synicalsyntax/zulip
refs/heads/master
zerver/webhooks/updown/__init__.py
12133432
jalavik/invenio
refs/heads/master
invenio/modules/accounts/upgrades/__init__.py
12133432
oinopion/django
refs/heads/master
tests/test_client_regress/__init__.py
12133432
webmasterraj/GaSiProMo
refs/heads/master
flask/lib/python2.7/site-packages/gunicorn/management/__init__.py
12133432
xiangel/hue
refs/heads/master
desktop/core/src/desktop/require_login_test.py
37
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test for RequireLoginEverywhereMiddleware in middleware.py
#
# This test uses "nose"-style testing (no need for a TestCase),
# and nose-style assertions.

from nose.tools import *
from django.test.client import Client
import django


def test_require_login():
    """Anonymous requests are redirected to login; authenticated ones pass."""
    client = Client()

    # Anonymous: the middleware must answer with a redirect to the login view.
    response = client.get('/profile')
    assert_true(isinstance(response, django.http.HttpResponseRedirect),
                "Expected redirect")
    assert_equal("http://testserver/accounts/login/?next=/profile",
                 response["Location"],
                 "Expected redirection to login page")

    # Authenticate; AllowAllBackend accepts these credentials.
    client.login(username="test", password="test")

    # Authenticated: the same request is now served without a redirect.
    response = client.get('/', follow=True)
    assert_equal(200, response.status_code)


def test_ajax_require_login():
    """AJAX requests receive the magic header instead of an HTML redirect."""
    client = Client()
    response = client.get('/profile', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    assert_equal("LOGIN_REQUIRED",
                 response["X-Hue-Middleware-Response"],
                 "Expected magic header from middleware")
dennisobrien/bokeh
refs/heads/master
scripts/issues.py
4
#!/usr/bin/env python
from __future__ import print_function

import argparse
import datetime
import dateutil.parser
import dateutil.tz
import gzip
import json
import logging
import os
import pickle
import sys

from collections import OrderedDict
from functools import partial
from itertools import count, groupby

from six.moves.urllib.request import urlopen, Request

logging.basicConfig(level=logging.INFO)

API_PARAMS = {
    'base_url': 'https://api.github.com/repos',
    'owner': 'bokeh',
    'repo': 'bokeh',
}

IGNORE_ISSUE_TYPE = {
    'type: discussion',
    'type: tracker',
}

LOG_SECTION = OrderedDict([  # issue type label -> log section heading
    ('type: bug', 'bugfixes'),
    ('type: feature', 'features'),
    ('type: task', 'tasks'),
])

# Sort issues by section order first, then by issue number within a section.
ISSUES_SORT_KEY = lambda issue: (issue_section_order(issue), int(issue['number']))
ISSUES_BY_SECTION = lambda issue: issue_section(issue)


#######################################
# Object Storage
#######################################

def save_object(filename, obj):
    """Compresses and pickles given object to the given filename."""
    logging.info('saving {}...'.format(filename))
    try:
        with gzip.GzipFile(filename, 'wb') as f:
            f.write(pickle.dumps(obj, 1))
    except Exception as e:
        logging.error('save failure: {}'.format(e))
        raise


def load_object(filename):
    """Unpickles and decompresses the given filename and returns the created object."""
    logging.info('loading {}...'.format(filename))
    try:
        with gzip.GzipFile(filename, 'rb') as f:
            # BUG FIX: GzipFile.read() returns *bytes*; the original
            # accumulated into a str and compared against '', which raises
            # TypeError on Python 3 even though this file otherwise targets
            # 2/3 compatibility (print_function, six).
            buf = b''
            while True:
                data = f.read()
                if not data:
                    break
                buf += data
            return pickle.loads(buf)
    except Exception as e:
        logging.error('load failure: {}'.format(e))
        raise


#######################################
# Issues
#######################################

def issue_section_order(issue):
    """Returns the section order for the given issue (-1 if it has no section)."""
    try:
        # BUG FIX: dict.values() is a view on Python 3 with no .index();
        # materialize it first. Also catch only ValueError (item not found)
        # instead of a bare except that would hide real bugs.
        return list(LOG_SECTION.values()).index(issue_section(issue))
    except ValueError:
        return -1


def issue_completed(issue):
    """Returns True iff this issue has been resolved as completed."""
    labels = issue.get('labels', [])
    return any(label['name'] == 'reso: completed' for label in labels)


def issue_section(issue):
    """Returns the section heading for the issue, or None if this issue should be ignored."""
    labels = issue.get('labels', [])
    for label in labels:
        if not label['name'].startswith('type: '):
            continue
        if label['name'] in LOG_SECTION:
            return LOG_SECTION[label['name']]
        elif label['name'] in IGNORE_ISSUE_TYPE:
            return None
        else:
            logging.warning('unknown issue type: "{}" for: {}'.format(label['name'], issue_line(issue)))
    return None


def issue_tags(issue):
    """Returns list of tags for this issue."""
    labels = issue.get('labels', [])
    return [label['name'].replace('tag: ', '') for label in labels if label['name'].startswith('tag: ')]


def closed_issue(issue, after=None):
    """Returns True iff this issue was closed after given date. If after not given, only checks if issue is closed."""
    if issue['state'] == 'closed':
        if after is None or parse_timestamp(issue['closed_at']) > after:
            return True
    return False


def relevent_issue(issue, after):
    """Returns True iff this issue is something we should show in the changelog."""
    return (closed_issue(issue, after) and
            issue_completed(issue) and
            issue_section(issue))


def relevant_issues(issues, after):
    """Yields relevant closed issues (closed after a given datetime) given a list of issues."""
    logging.info('finding relevant issues after {}...'.format(after))
    seen = set()
    for issue in issues:
        if relevent_issue(issue, after) and issue['title'] not in seen:
            seen.add(issue['title'])
            yield issue


def closed_issues(issues, after):
    """Yields closed issues (closed after a given datetime) given a list of issues."""
    logging.info('finding closed issues after {}...'.format(after))
    seen = set()
    for issue in issues:
        if closed_issue(issue, after) and issue['title'] not in seen:
            seen.add(issue['title'])
            yield issue


def all_issues(issues):
    """Yields unique set of issues given a list of issues."""
    logging.info('finding issues...')
    seen = set()
    for issue in issues:
        if issue['title'] not in seen:
            seen.add(issue['title'])
            yield issue


#######################################
# GitHub API
#######################################

def get_labels_url():
    """Returns github API URL for querying labels."""
    return '{base_url}/{owner}/{repo}/labels'.format(**API_PARAMS)


def get_issues_url(page, after):
    """Returns github API URL for querying closed issues since a date."""
    # (docstring fixed: this queries issues, not tags)
    template = '{base_url}/{owner}/{repo}/issues?state=closed&per_page=100&page={page}&since={after}'
    return template.format(page=page, after=after.isoformat(), **API_PARAMS)


def get_tags_url():
    """Returns github API URL for querying tags."""
    return '{base_url}/{owner}/{repo}/tags'.format(**API_PARAMS)


def parse_timestamp(timestamp):
    """Parse ISO8601 timestamps given by github API."""
    dt = dateutil.parser.parse(timestamp)
    return dt.astimezone(dateutil.tz.tzutc())


def read_url(url):
    """Reads given URL as JSON and returns data as loaded python object."""
    logging.debug('reading {url} ...'.format(url=url))
    token = os.environ.get("BOKEH_GITHUB_API_TOKEN")
    headers = {}
    if token:
        headers['Authorization'] = 'token %s' % token
    request = Request(url, headers=headers)
    response = urlopen(request).read()
    return json.loads(response.decode("UTF-8"))


def query_tags():
    """Hits the github API for repository tags and returns the data."""
    return read_url(get_tags_url())


def query_issues(page, after):
    """Hits the github API for a single page of closed issues and returns the data."""
    return read_url(get_issues_url(page, after))


def query_all_issues(after):
    """Hits the github API for all closed issues after the given date, returns the data."""
    page = count(1)
    data = []
    while True:
        page_data = query_issues(next(page), after)
        if not page_data:
            break
        data.extend(page_data)
    return data


def dateof(tag_name, tags):
    """Given a list of tags, returns the datetime of the tag with the given name; Otherwise None."""
    for tag in tags:
        if tag['name'] == tag_name:
            commit = read_url(tag['commit']['url'])
            return parse_timestamp(commit['commit']['committer']['date'])
    return None


def get_data(query_func, load_data=False, save_data=False):
    """Gets data from query_func, optionally saving that data to a file; or loads data from a file."""
    if hasattr(query_func, '__name__'):
        func_name = query_func.__name__
    elif hasattr(query_func, 'func'):
        # functools.partial objects expose the wrapped callable as .func
        func_name = query_func.func.__name__
    else:
        # BUG FIX: func_name was previously left unbound here, producing a
        # confusing NameError; fail with an explicit message instead.
        raise ValueError('cannot determine a cache file name for query_func')

    pickle_file = '{}.pickle'.format(func_name)

    if load_data:
        data = load_object(pickle_file)
    else:
        data = query_func()
        if save_data:
            save_object(pickle_file, data)
    return data


#######################################
# Validation
#######################################

def check_issue(issue, after):
    """Checks one issue/PR for BEP 1 label compliance; returns True if it warned."""
    have_warnings = False
    labels = issue.get('labels', [])
    if 'pull_request' in issue:
        if not any(label['name'].startswith('status: ') for label in labels):
            logging.warning('pull request without status label: {}'.format(issue_line(issue)))
            have_warnings = True
    else:
        if not any(label['name'].startswith('type: ') for label in labels):
            if not any(label['name'] == "reso: duplicate" for label in labels):
                logging.warning('issue with no type label: {}'.format(issue_line(issue)))
                have_warnings = True

        if closed_issue(issue, after):
            if not any(label['name'].startswith('reso: ') for label in labels):
                if not any(label['name'] in IGNORE_ISSUE_TYPE for label in labels):
                    logging.warning('closed issue with no reso label: {}'.format(issue_line(issue)))
                    have_warnings = True
    return have_warnings


def check_issues(issues, after=None):
    """Checks issues for BEP 1 compliance."""
    issues = closed_issues(issues, after) if after else all_issues(issues)
    issues = sorted(issues, key=ISSUES_SORT_KEY)

    have_warnings = False
    for section, issue_group in groupby(issues, key=ISSUES_BY_SECTION):
        for issue in issue_group:
            have_warnings |= check_issue(issue, after)
    return have_warnings


#######################################
# Changelog
#######################################

def issue_line(issue):
    """Returns log line for given issue."""
    template = '#{number} {tags}{title}'
    tags = issue_tags(issue)
    params = {
        'title': issue['title'].capitalize().rstrip('.'),
        'number': issue['number'],
        'tags': ' '.join('[{}]'.format(tag) for tag in tags) + (' ' if tags else '')
    }
    return template.format(**params)


def generate_changelog(issues, after, heading, rtag=False):
    """Prints out changelog (or prepends it to ../CHANGELOG when rtag is given)."""
    relevent = relevant_issues(issues, after)
    relevent = sorted(relevent, key=ISSUES_BY_SECTION)

    def write(func, endofline="", append=""):
        func(heading + '\n' + '-' * 20 + endofline)
        for section, issue_group in groupby(relevent, key=ISSUES_BY_SECTION):
            func(' * {}:'.format(section) + endofline)
            for issue in reversed(list(issue_group)):
                func(' - {}'.format(issue_line(issue)) + endofline)
        func(endofline + append)

    if rtag is not False:
        # Prepend the new section to the existing CHANGELOG contents.
        with open("../CHANGELOG", "r+") as f:
            content = f.read()
            f.seek(0)
            write(f.write, '\n', content)
    else:
        write(print)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Creates a bokeh changelog using the github API.')

    limit_group = parser.add_mutually_exclusive_group(required=True)
    limit_group.add_argument('-d', '--since-date', metavar='DATE',
                             help='select issues that occurred after the given ISO8601 date')
    limit_group.add_argument('-p', '--since-tag', metavar='TAG',
                             help='select issues that occurred after the given git tag')

    parser.add_argument('-c', '--check', action='store_true', default=False,
                        help='check closed issues for BEP 1 compliance')
    parser.add_argument('-r', '--release-tag', metavar='RELEASE',
                        help='the proposed new release tag.\n'
                             'NOTE: this will automatically write the output to the CHANGELOG')

    data_group = parser.add_mutually_exclusive_group()
    data_group.add_argument('-s', '--save-data', action='store_true', default=False,
                            help='save api query result data; useful for testing')
    data_group.add_argument('-l', '--load-data', action='store_true', default=False,
                            help='load api data from previously saved data; useful for testing')

    args = parser.parse_args()

    if args.since_tag:
        tags = get_data(query_tags, load_data=args.load_data, save_data=args.save_data)
        after = dateof(args.since_tag, tags)
        heading = 'Since {:>14}:'.format(args.since_tag)
    elif args.since_date:
        after = dateutil.parser.parse(args.since_date)
        after = after.replace(tzinfo=dateutil.tz.tzlocal())
        heading = 'Since {:>14}:'.format(after.date().isoformat())

    issues = get_data(partial(query_all_issues, after), load_data=args.load_data, save_data=args.save_data)

    if args.check:
        have_warnings = check_issues(issues)
        if have_warnings:
            sys.exit(1)
        sys.exit(0)

    if args.release_tag:
        heading = '{} {:>8}:'.format(str(datetime.date.today()), args.release_tag)
        generate_changelog(issues, after, heading, args.release_tag)
    else:
        generate_changelog(issues, after, heading)
vipulkanade/EventbriteDjango
refs/heads/master
lib/python2.7/site-packages/django/contrib/syndication/__init__.py
808
# Tells Django's app registry which AppConfig class to use when this app is
# listed in INSTALLED_APPS without an explicit config path.
default_app_config = 'django.contrib.syndication.apps.SyndicationConfig'
Venturi/cms
refs/heads/master
env/lib/python2.7/site-packages/djangocms_video/south_migrations/0001_initial.py
6
# -*- coding: utf-8 -*-
# Initial South migration for djangocms_video.
#
# The plugin table historically existed under two other names
# ('cmsplugin_video' from django-cms plugin conventions, 'video_video' from an
# older app layout); this migration renames whichever one is present instead
# of creating a duplicate table, and only creates the table from scratch on a
# fresh database.
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, connection


class Migration(SchemaMigration):
    """Create (or adopt by rename) the djangocms_video_video table."""

    def forwards(self, orm):
        """Apply the migration: rename a legacy table or create a new one."""
        table_names = connection.introspection.table_names()
        if 'cmsplugin_video' in table_names:
            # Legacy django-cms plugin table name: adopt it via rename.
            db.rename_table('cmsplugin_video', 'djangocms_video_video')
        elif 'video_video' in table_names:
            # Even older app-label-based table name: adopt it via rename.
            db.rename_table('video_video', 'djangocms_video_video')
        else:
            # Adding model 'Video'
            db.create_table(u'djangocms_video_video', (
                (u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
                ('movie', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
                ('movie_url', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
                ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
                ('width', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
                ('height', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
                ('auto_play', self.gf('django.db.models.fields.BooleanField')(default=False)),
                ('auto_hide', self.gf('django.db.models.fields.BooleanField')(default=False)),
                ('fullscreen', self.gf('django.db.models.fields.BooleanField')(default=True)),
                ('loop', self.gf('django.db.models.fields.BooleanField')(default=False)),
                # The six-hex-digit CharFields below store player colors.
                ('bgcolor', self.gf('django.db.models.fields.CharField')(default='000000', max_length=6)),
                ('textcolor', self.gf('django.db.models.fields.CharField')(default='FFFFFF', max_length=6)),
                ('seekbarcolor', self.gf('django.db.models.fields.CharField')(default='13ABEC', max_length=6)),
                ('seekbarbgcolor', self.gf('django.db.models.fields.CharField')(default='333333', max_length=6)),
                ('loadingbarcolor', self.gf('django.db.models.fields.CharField')(default='828282', max_length=6)),
                ('buttonoutcolor', self.gf('django.db.models.fields.CharField')(default='333333', max_length=6)),
                ('buttonovercolor', self.gf('django.db.models.fields.CharField')(default='000000', max_length=6)),
                ('buttonhighlightcolor', self.gf('django.db.models.fields.CharField')(default='FFFFFF', max_length=6)),
            ))
            db.send_create_signal(u'djangocms_video', ['Video'])

    def backwards(self, orm):
        """Reverse the migration.

        NOTE(review): this always drops the table, even when ``forwards``
        only renamed a legacy table -- rolling back loses the original name.
        Frozen migrations are intentionally left untouched.
        """
        # Deleting model 'Video'
        db.delete_table(u'djangocms_video_video')

    # Frozen ORM state captured by South at the time this migration was
    # generated; do not edit by hand.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'djangocms_video.video': {
            'Meta': {'object_name': 'Video', '_ormbases': ['cms.CMSPlugin']},
            'auto_hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'auto_play': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'bgcolor': ('django.db.models.fields.CharField', [], {'default': "'000000'", 'max_length': '6'}),
            'buttonhighlightcolor': ('django.db.models.fields.CharField', [], {'default': "'FFFFFF'", 'max_length': '6'}),
            'buttonoutcolor': ('django.db.models.fields.CharField', [], {'default': "'333333'", 'max_length': '6'}),
            'buttonovercolor': ('django.db.models.fields.CharField', [], {'default': "'000000'", 'max_length': '6'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'height': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'loadingbarcolor': ('django.db.models.fields.CharField', [], {'default': "'828282'", 'max_length': '6'}),
            'loop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'movie': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'movie_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'seekbarbgcolor': ('django.db.models.fields.CharField', [], {'default': "'333333'", 'max_length': '6'}),
            'seekbarcolor': ('django.db.models.fields.CharField', [], {'default': "'13ABEC'", 'max_length': '6'}),
            'textcolor': ('django.db.models.fields.CharField', [], {'default': "'FFFFFF'", 'max_length': '6'}),
            'width': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
        }
    }

    complete_apps = ['djangocms_video']
acshan/odoo
refs/heads/8.0
addons/project/wizard/__init__.py
381
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import project_task_delegate # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
sloria/osf.io
refs/heads/develop
api/nodes/utils.py
4
# -*- coding: utf-8 -*- from django.db.models import Q from rest_framework.exceptions import PermissionDenied, NotFound from rest_framework.status import is_server_error import requests from addons.osfstorage.models import OsfStorageFile, OsfStorageFolder from api.base.exceptions import ServiceUnavailableError from api.base.utils import get_object_or_error, waterbutler_api_url_for def get_file_object(node, path, provider, request): # Don't bother going to waterbutler for osfstorage if provider == 'osfstorage': # Kinda like /me for a user # The one odd case where path is not really path if path == '/': obj = node.get_addon('osfstorage').get_root() else: if path.endswith('/'): model = OsfStorageFolder else: model = OsfStorageFile obj = get_object_or_error(model, Q(node=node.pk, _id=path.strip('/')), request) return obj if not node.get_addon(provider) or not node.get_addon(provider).configured: raise NotFound('The {} provider is not configured for this project.'.format(provider)) view_only = request.query_params.get('view_only', default=None) url = waterbutler_api_url_for(node._id, provider, path, _internal=True, meta=True, view_only=view_only) waterbutler_request = requests.get( url, cookies=request.COOKIES, headers={'Authorization': request.META.get('HTTP_AUTHORIZATION')}, ) if waterbutler_request.status_code == 401: raise PermissionDenied if waterbutler_request.status_code == 404: raise NotFound if is_server_error(waterbutler_request.status_code): raise ServiceUnavailableError(detail='Could not retrieve files information at this time.') try: return waterbutler_request.json()['data'] except KeyError: raise ServiceUnavailableError(detail='Could not retrieve files information at this time.')
prior/webinars
refs/heads/master
webinars_web/webinars/models/snapshots/hubspot_registrant.py
1
from django.db import models
from sanetime.dj import SaneTimeField


class HubSpotRegistrantSnapshot(models.Model):
    """Point-in-time copy of a HubSpot registrant's state for one event.

    NOTE(review): presumably written during sync so later runs can diff
    against the previous snapshot -- confirm against the sync code.
    """

    class Meta:
        app_label = 'webinars'

    # Event this registrant snapshot belongs to.
    event = models.ForeignKey('Event')
    # Integer hash over the registrant fields; presumably used for cheap
    # change detection between syncs -- TODO confirm against the writer.
    hashcode = models.IntegerField(null=False)
    email = models.CharField(max_length=64, null=False)
    first_name = models.CharField(null=True, max_length=64)
    last_name = models.CharField(null=True, max_length=64)
    # 36 chars matches a canonical GUID/UUID string representation.
    lead_guid = models.CharField(null=True, max_length=36)
    initial_form_guid = models.CharField(null=True, max_length=36)
    # Tri-state flags: True/False once known, NULL while undetermined.
    registered_any = models.NullBooleanField(null=True)
    registered_this = models.NullBooleanField(null=True)
    attended_any = models.NullBooleanField(null=True)
    attended_this = models.NullBooleanField(null=True)
    # Timezone-aware timestamps (sanetime) bounding the registrant's session.
    started_at = SaneTimeField(null=True)
    stopped_at = SaneTimeField(null=True)
yongshengwang/builthue
refs/heads/master
desktop/core/ext-py/Django-1.4.5/tests/regressiontests/pagination_regress/tests.py
36
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.unittest import TestCase


class PaginatorTests(TestCase):
    """
    Tests for the Paginator and Page classes.
    """

    def check_paginator(self, params, output):
        """
        Helper method that instantiates a Paginator object from the passed
        params and then checks that its attributes match the passed output.
        """
        # params mirrors Paginator's constructor signature:
        # (object_list, per_page, orphans, allow_empty_first_page).
        count, num_pages, page_range = output
        paginator = Paginator(*params)
        self.check_attribute('count', paginator, count, params)
        self.check_attribute('num_pages', paginator, num_pages, params)
        self.check_attribute('page_range', paginator, page_range, params)

    def check_attribute(self, name, paginator, expected, params):
        """
        Helper method that checks a single attribute and gives a nice error
        message upon test failure.
        """
        got = getattr(paginator, name)
        self.assertEqual(expected, got,
            "For '%s', expected %s but got %s. Paginator parameters were: %s"
            % (name, expected, got, params))

    def test_invalid_page_number(self):
        """
        Tests that invalid page numbers result in the correct exception being
        raised.
        """
        paginator = Paginator([1, 2, 3], 2)
        self.assertRaises(PageNotAnInteger, paginator.validate_number, None)
        self.assertRaises(PageNotAnInteger, paginator.validate_number, 'x')

    def test_paginator(self):
        """
        Tests the paginator attributes using varying inputs.
        """
        nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        ten = nine + [10]
        eleven = ten + [11]
        tests = (
            # Each item is two tuples:
            #     First tuple is Paginator parameters - object_list, per_page,
            #         orphans, and allow_empty_first_page.
            #     Second tuple is resulting Paginator attributes - count,
            #         num_pages, and page_range.
            # Ten items, varying orphans, no empty first page.
            ((ten, 4, 0, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, False), (10, 2, [1, 2])),
            ((ten, 4, 5, False), (10, 2, [1, 2])),
            ((ten, 4, 6, False), (10, 1, [1])),
            # Ten items, varying orphans, allow empty first page.
            ((ten, 4, 0, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, True), (10, 2, [1, 2])),
            ((ten, 4, 5, True), (10, 2, [1, 2])),
            ((ten, 4, 6, True), (10, 1, [1])),
            # One item, varying orphans, no empty first page.
            (([1], 4, 0, False), (1, 1, [1])),
            (([1], 4, 1, False), (1, 1, [1])),
            (([1], 4, 2, False), (1, 1, [1])),
            # One item, varying orphans, allow empty first page.
            (([1], 4, 0, True), (1, 1, [1])),
            (([1], 4, 1, True), (1, 1, [1])),
            (([1], 4, 2, True), (1, 1, [1])),
            # Zero items, varying orphans, no empty first page.
            (([], 4, 0, False), (0, 0, [])),
            (([], 4, 1, False), (0, 0, [])),
            (([], 4, 2, False), (0, 0, [])),
            # Zero items, varying orphans, allow empty first page.
            (([], 4, 0, True), (0, 1, [1])),
            (([], 4, 1, True), (0, 1, [1])),
            (([], 4, 2, True), (0, 1, [1])),
            # Number of items one less than per_page.
            (([], 1, 0, True), (0, 1, [1])),
            (([], 1, 0, False), (0, 0, [])),
            (([1], 2, 0, True), (1, 1, [1])),
            ((nine, 10, 0, True), (9, 1, [1])),
            # Number of items equal to per_page.
            (([1], 1, 0, True), (1, 1, [1])),
            (([1, 2], 2, 0, True), (2, 1, [1])),
            ((ten, 10, 0, True), (10, 1, [1])),
            # Number of items one more than per_page.
            (([1, 2], 1, 0, True), (2, 2, [1, 2])),
            (([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
            ((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number of items one more than per_page with one orphan.
            (([1, 2], 1, 1, True), (2, 1, [1])),
            (([1, 2, 3], 2, 1, True), (3, 1, [1])),
            ((eleven, 10, 1, True), (11, 1, [1])),
            # Non-integer inputs (per_page/orphans given as strings are
            # coerced by the Paginator).
            ((ten, '4', 1, False), (10, 3, [1, 2, 3])),
            ((ten, u'4', 1, False), (10, 3, [1, 2, 3])),
            ((ten, 4, '1', False), (10, 3, [1, 2, 3])),
            ((ten, 4, u'1', False), (10, 3, [1, 2, 3])),
        )
        for params, output in tests:
            self.check_paginator(params, output)

    def check_indexes(self, params, page_num, indexes):
        """
        Helper method that instantiates a Paginator object from the passed
        params and then checks that the start and end indexes of the passed
        page_num match those given as a 2-tuple in indexes.
        """
        paginator = Paginator(*params)
        # 'first'/'last' are symbolic page numbers resolved here.
        if page_num == 'first':
            page_num = 1
        elif page_num == 'last':
            page_num = paginator.num_pages
        page = paginator.page(page_num)
        start, end = indexes
        msg = ("For %s of page %s, expected %s but got %s."
               " Paginator parameters were: %s")
        self.assertEqual(start, page.start_index(),
            msg % ('start index', page_num, start, page.start_index(), params))
        self.assertEqual(end, page.end_index(),
            msg % ('end index', page_num, end, page.end_index(), params))

    def test_page_indexes(self):
        """
        Tests that paginator pages have the correct start and end indexes.
        """
        ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        tests = (
            # Each item is three tuples:
            #     First tuple is Paginator parameters - object_list, per_page,
            #         orphans, and allow_empty_first_page.
            #     Second tuple is the start and end indexes of the first page.
            #     Third tuple is the start and end indexes of the last page.
            # Ten items, varying per_page, no orphans.
            ((ten, 1, 0, True), (1, 1), (10, 10)),
            ((ten, 2, 0, True), (1, 2), (9, 10)),
            ((ten, 3, 0, True), (1, 3), (10, 10)),
            ((ten, 5, 0, True), (1, 5), (6, 10)),
            # Ten items, varying per_page, with orphans.
            ((ten, 1, 1, True), (1, 1), (9, 10)),
            ((ten, 1, 2, True), (1, 1), (8, 10)),
            ((ten, 3, 1, True), (1, 3), (7, 10)),
            ((ten, 3, 2, True), (1, 3), (7, 10)),
            ((ten, 3, 4, True), (1, 3), (4, 10)),
            ((ten, 5, 1, True), (1, 5), (6, 10)),
            ((ten, 5, 2, True), (1, 5), (6, 10)),
            ((ten, 5, 5, True), (1, 10), (1, 10)),
            # One item, varying orphans, no empty first page.
            (([1], 4, 0, False), (1, 1), (1, 1)),
            (([1], 4, 1, False), (1, 1), (1, 1)),
            (([1], 4, 2, False), (1, 1), (1, 1)),
            # One item, varying orphans, allow empty first page.
            (([1], 4, 0, True), (1, 1), (1, 1)),
            (([1], 4, 1, True), (1, 1), (1, 1)),
            (([1], 4, 2, True), (1, 1), (1, 1)),
            # Zero items, varying orphans, allow empty first page.
            (([], 4, 0, True), (0, 0), (0, 0)),
            (([], 4, 1, True), (0, 0), (0, 0)),
            (([], 4, 2, True), (0, 0), (0, 0)),
        )
        for params, first, last in tests:
            self.check_indexes(params, 'first', first)
            self.check_indexes(params, 'last', last)

        # When no items and no empty first page, we should get EmptyPage error.
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 0, False), 1, None)
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 1, False), 1, None)
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 2, False), 1, None)

    def test_page_sequence(self):
        """
        Tests that a paginator page acts like a standard sequence.
        """
        # Page 2 of 'abcdefghijk' with per_page=5, orphans=1 absorbs the
        # would-be orphan 'k', hence length 6.
        eleven = 'abcdefghijk'
        page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
        self.assertEqual(len(page2), 6)
        self.assertTrue('k' in page2)
        self.assertFalse('a' in page2)
        self.assertEqual(''.join(page2), 'fghijk')
        self.assertEqual(''.join(reversed(page2)), 'kjihgf')
int19h/PTVS
refs/heads/master
Python/Tests/TestData/TestDiscoverer/BasicPytest/test_pt.py
3
def test_pt_pass(): assert True def test_pt_fail(): assert False class TestClassPT(object): def test_method_pass(self): pass
davy39/eric
refs/heads/master
DebugClients/Python/AsyncFile.py
1
# -*- coding: utf-8 -*- # Copyright (c) 2002 - 2014 Detlev Offenbach <detlev@die-offenbachs.de> # """ Module implementing an asynchronous file like socket interface for the debugger. """ import socket from DebugProtocol import EOT def AsyncPendingWrite(file): """ Module function to check for data to be written. @param file The file object to be checked (file) @return Flag indicating if there is data wating (int) """ try: pending = file.pendingWrite() except: pending = 0 return pending class AsyncFile(object): """ Class wrapping a socket object with a file interface. """ maxtries = 10 maxbuffersize = 1024 * 1024 * 4 def __init__(self, sock, mode, name): """ Constructor @param sock the socket object being wrapped @param mode mode of this file (string) @param name name of this file (string) """ # Initialise the attributes. self.closed = 0 self.sock = sock self.mode = mode self.name = name self.nWriteErrors = 0 self.encoding = "utf-8" self.wpending = u'' def __checkMode(self, mode): """ Private method to check the mode. This method checks, if an operation is permitted according to the mode of the file. If it is not, an IOError is raised. @param mode the mode to be checked (string) @exception IOError raised to indicate a bad file descriptor """ if mode != self.mode: raise IOError('[Errno 9] Bad file descriptor') def __nWrite(self, n): """ Private method to write a specific number of pending bytes. @param n the number of bytes to be written (int) """ if n: try: buf = "%s%s" % (self.wpending[:n], EOT) try: buf = buf.encode('utf-8') except (UnicodeEncodeError, UnicodeDecodeError): pass self.sock.sendall(buf) self.wpending = self.wpending[n:] self.nWriteErrors = 0 except socket.error: self.nWriteErrors += 1 if self.nWriteErrors > self.maxtries: self.wpending = u'' # delete all output def pendingWrite(self): """ Public method that returns the number of bytes waiting to be written. 
@return the number of bytes to be written (int) """ return self.wpending.rfind('\n') + 1 def close(self, closeit=0): """ Public method to close the file. @param closeit flag to indicate a close ordered by the debugger code (boolean) """ if closeit and not self.closed: self.flush() self.sock.close() self.closed = 1 def flush(self): """ Public method to write all pending bytes. """ self.__nWrite(len(self.wpending)) def isatty(self): """ Public method to indicate whether a tty interface is supported. @return always false """ return 0 def fileno(self): """ Public method returning the file number. @return file number (int) """ try: return self.sock.fileno() except socket.error: return -1 def read_p(self, size=-1): """ Public method to read bytes from this file. @param size maximum number of bytes to be read (int) @return the bytes read (any) """ self.__checkMode('r') if size < 0: size = 20000 return self.sock.recv(size).decode('utf8') def read(self, size=-1): """ Public method to read bytes from this file. @param size maximum number of bytes to be read (int) @return the bytes read (any) """ self.__checkMode('r') buf = raw_input() if size >= 0: buf = buf[:size] return buf def readline_p(self, size=-1): """ Public method to read a line from this file. <b>Note</b>: This method will not block and may return only a part of a line if that is all that is available. @param size maximum number of bytes to be read (int) @return one line of text up to size bytes (string) """ self.__checkMode('r') if size < 0: size = 20000 # The integration of the debugger client event loop and the connection # to the debugger relies on the two lines of the debugger command being # delivered as two separate events. Therefore we make sure we only # read a line at a time. line = self.sock.recv(size, socket.MSG_PEEK) eol = line.find('\n') if eol >= 0: size = eol + 1 else: size = len(line) # Now we know how big the line is, read it for real. 
return self.sock.recv(size).decode('utf8') def readlines(self, sizehint=-1): """ Public method to read all lines from this file. @param sizehint hint of the numbers of bytes to be read (int) @return list of lines read (list of strings) """ self.__checkMode('r') lines = [] room = sizehint line = self.readline_p(room) linelen = len(line) while linelen > 0: lines.append(line) if sizehint >= 0: room = room - linelen if room <= 0: break line = self.readline_p(room) linelen = len(line) return lines def readline(self, sizehint=-1): """ Public method to read one line from this file. @param sizehint hint of the numbers of bytes to be read (int) @return one line read (string) """ self.__checkMode('r') line = raw_input() + '\n' if sizehint >= 0: line = line[:sizehint] return line def seek(self, offset, whence=0): """ Public method to move the filepointer. @param offset offset to seek for @param whence where to seek from @exception IOError This method is not supported and always raises an IOError. """ raise IOError('[Errno 29] Illegal seek') def tell(self): """ Public method to get the filepointer position. @exception IOError This method is not supported and always raises an IOError. """ raise IOError('[Errno 29] Illegal seek') def truncate(self, size=-1): """ Public method to truncate the file. @param size size to truncate to (integer) @exception IOError This method is not supported and always raises an IOError. """ raise IOError('[Errno 29] Illegal seek') def write(self, s): """ Public method to write a string to the file. 
@param s bytes to be written (string) @exception socket.error raised to indicate too many send attempts """ self.__checkMode('w') tries = 0 if not self.wpending: self.wpending = s elif type(self.wpending) != type(s) or \ len(self.wpending) + len(s) > self.maxbuffersize: # flush wpending so that different string types are not # concatenated while self.wpending: # if we have a persistent error in sending the data, an # exception will be raised in __nWrite self.flush() tries += 1 if tries > self.maxtries: raise socket.error("Too many attempts to send data") self.wpending = s else: self.wpending += s self.__nWrite(self.pendingWrite()) def writelines(self, list): """ Public method to write a list of strings to the file. @param list the list to be written (list of string) """ map(self.write, list) # # eflag: FileType = Python2
django-nonrel/django-nonrel
refs/heads/develop
django/contrib/redirects/admin.py
663
from django.contrib import admin from django.contrib.redirects.models import Redirect class RedirectAdmin(admin.ModelAdmin): list_display = ('old_path', 'new_path') list_filter = ('site',) search_fields = ('old_path', 'new_path') radio_fields = {'site': admin.VERTICAL} admin.site.register(Redirect, RedirectAdmin)
blockstack/blockstack-server
refs/heads/master
integration_tests/blockstack_integration_tests/scenarios/name_pre_reg_up_xfer_up_xfer_xfer_up.py
1
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Blockstack ~~~~~ copyright: (c) 2014-2015 by Halfmoon Labs, Inc. copyright: (c) 2016 by Blockstack.org This file is part of Blockstack Blockstack is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Blockstack is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Blockstack. If not, see <http://www.gnu.org/licenses/>. """ import testlib import virtualchain import json wallets = [ testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ), testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ), testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 100000000000 ), testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 100000000000 ), testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 100000000000 ) ] debug = False consensus = "17ac43c1d8549c3181b200f1bf97eb7d" def scenario( wallets, **kw ): global debug, consensus resp = testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey ) if debug or 'error' in resp: print json.dumps( resp, indent=4 ) testlib.next_block( **kw ) resp = testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 52595, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey ) if debug or 'error' in resp: print json.dumps( resp, indent=4 ) testlib.next_block( **kw ) resp = testlib.blockstack_namespace_ready( "test", wallets[1].privkey ) if debug or 'error' in resp: print json.dumps( resp, indent=4 ) testlib.next_block( **kw ) resp = 
testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr ) if debug or 'error' in resp: print json.dumps( resp, indent=4 ) testlib.next_block( **kw ) resp = testlib.blockstack_name_register( "foo.test", wallets[2].privkey, wallets[3].addr ) if debug or 'error' in resp: print json.dumps( resp, indent=4 ) testlib.next_block( **kw ) resp = testlib.blockstack_name_update( "foo.test", "11" * 20, wallets[3].privkey ) if debug or 'error' in resp: print json.dumps( resp, indent=4 ) testlib.next_block( **kw ) resp = testlib.blockstack_name_transfer( "foo.test", wallets[4].addr, True, wallets[3].privkey ) if debug or 'error' in resp: print json.dumps( resp, indent=4 ) testlib.next_block( **kw ) resp = testlib.blockstack_name_transfer( "foo.test", wallets[3].addr, True, wallets[4].privkey ) if debug or 'error' in resp: print json.dumps( resp, indent=4 ) testlib.next_block( **kw ) resp = testlib.blockstack_name_update( "foo.test", "33" * 20, wallets[3].privkey ) if debug or 'error' in resp: print json.dumps( resp, indent=4 ) consensus = testlib.get_consensus_at( testlib.get_current_block(**kw), **kw ) testlib.next_block( **kw ) def check( state_engine ): global consensus # not revealed, but ready ns = state_engine.get_namespace_reveal( "test" ) if ns is not None: print "'test' not revealed" return False ns = state_engine.get_namespace( "test" ) if ns is None: print "'test' not found" return False if ns['namespace_id'] != 'test': print "'test' not returned" return False # not preordered preorder = state_engine.get_name_preorder( "foo.test", virtualchain.make_payment_script(wallets[2].addr), wallets[3].addr ) if preorder is not None: print "'foo.test' still preordered" return False # registered name_rec = state_engine.get_name( "foo.test" ) if name_rec is None: print "'foo.test' not registered" return False # updated, and data is preserved if name_rec['value_hash'] != '33' * 20: print "'foo.test' invalid value hash" return False # transferred if 
name_rec['address'] != wallets[3].addr or name_rec['sender'] != virtualchain.make_payment_script(wallets[3].addr): print "'foo.test' invalid owner" return False # consensus from NAME_UPDATE if name_rec['consensus_hash'] != consensus: print "quirk not preserved: consensus hash %s != %s" % (name_rec['consensus_hash'], consensus) return False return True
xiaoshaozi52/ansible
refs/heads/devel
v1/ansible/inventory/ini.py
111
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# import ansible.constants as C from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import expand_hostname_range from ansible import errors from ansible import utils import shlex import re import ast class InventoryParser(object): """ Host inventory for ansible. """ def __init__(self, filename=C.DEFAULT_HOST_LIST): with open(filename) as fh: self.filename = filename self.lines = fh.readlines() self.groups = {} self.hosts = {} self._parse() def _parse(self): self._parse_base_groups() self._parse_group_children() self._add_allgroup_children() self._parse_group_variables() return self.groups @staticmethod def _parse_value(v): if "#" not in v: try: ret = ast.literal_eval(v) if not isinstance(ret, float): # Do not trim floats. Eg: "1.20" to 1.2 return ret # Using explicit exceptions. # Likely a string that literal_eval does not like. We wil then just set it. except ValueError: # For some reason this was thought to be malformed. pass except SyntaxError: # Is this a hash with an equals at the end? 
pass return v # [webservers] # alpha # beta:2345 # gamma sudo=True user=root # delta asdf=jkl favcolor=red def _add_allgroup_children(self): for group in self.groups.values(): if group.depth == 0 and group.name != 'all': self.groups['all'].add_child_group(group) def _parse_base_groups(self): # FIXME: refactor ungrouped = Group(name='ungrouped') all = Group(name='all') all.add_child_group(ungrouped) self.groups = dict(all=all, ungrouped=ungrouped) active_group_name = 'ungrouped' for lineno in range(len(self.lines)): line = utils.before_comment(self.lines[lineno]).strip() if line.startswith("[") and line.endswith("]"): active_group_name = line.replace("[","").replace("]","") if ":vars" in line or ":children" in line: active_group_name = active_group_name.rsplit(":", 1)[0] if active_group_name not in self.groups: new_group = self.groups[active_group_name] = Group(name=active_group_name) active_group_name = None elif active_group_name not in self.groups: new_group = self.groups[active_group_name] = Group(name=active_group_name) elif line.startswith(";") or line == '': pass elif active_group_name: tokens = shlex.split(line) if len(tokens) == 0: continue hostname = tokens[0] port = C.DEFAULT_REMOTE_PORT # Three cases to check: # 0. A hostname that contains a range pesudo-code and a port # 1. 
A hostname that contains just a port if hostname.count(":") > 1: # Possible an IPv6 address, or maybe a host line with multiple ranges # IPv6 with Port XXX:XXX::XXX.port # FQDN foo.example.com if hostname.count(".") == 1: (hostname, port) = hostname.rsplit(".", 1) elif ("[" in hostname and "]" in hostname and ":" in hostname and (hostname.rindex("]") < hostname.rindex(":")) or ("]" not in hostname and ":" in hostname)): (hostname, port) = hostname.rsplit(":", 1) hostnames = [] if detect_range(hostname): hostnames = expand_hostname_range(hostname) else: hostnames = [hostname] for hn in hostnames: host = None if hn in self.hosts: host = self.hosts[hn] else: host = Host(name=hn, port=port) self.hosts[hn] = host if len(tokens) > 1: for t in tokens[1:]: if t.startswith('#'): break try: (k,v) = t.split("=", 1) except ValueError, e: raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e))) host.set_variable(k, self._parse_value(v)) self.groups[active_group_name].add_host(host) # [southeast:children] # atlanta # raleigh def _parse_group_children(self): group = None for lineno in range(len(self.lines)): line = self.lines[lineno].strip() if line is None or line == '': continue if line.startswith("[") and ":children]" in line: line = line.replace("[","").replace(":children]","") group = self.groups.get(line, None) if group is None: group = self.groups[line] = Group(name=line) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): group = None elif group: kid_group = self.groups.get(line, None) if kid_group is None: raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line)) else: group.add_child_group(kid_group) # [webservers:vars] # http_port=1234 # maxRequestsPerChild=200 def _parse_group_variables(self): group = None for lineno in range(len(self.lines)): line = self.lines[lineno].strip() if line.startswith("[") and ":vars]" in line: line = 
line.replace("[","").replace(":vars]","") group = self.groups.get(line, None) if group is None: raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line)) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): group = None elif line == '': pass elif group: if "=" not in line: raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1)) else: (k, v) = [e.strip() for e in line.split("=", 1)] group.set_variable(k, self._parse_value(v)) def get_host_variables(self, host): return {}
partofthething/home-assistant
refs/heads/dev
tests/components/hue/test_sensor_base.py
6
"""Philips Hue sensors platform tests.""" import asyncio from unittest.mock import Mock import aiohue from homeassistant.components.hue.hue_event import CONF_HUE_EVENT from .conftest import create_mock_bridge, setup_bridge_for_sensors as setup_bridge PRESENCE_SENSOR_1_PRESENT = { "state": {"presence": True, "lastupdated": "2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "sensitivity": 2, "sensitivitymax": 2, "pending": [], }, "name": "Living room sensor", "type": "ZLLPresence", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue motion sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:77-02-0406", "capabilities": {"certified": True}, } LIGHT_LEVEL_SENSOR_1 = { "state": { "lightlevel": 1, "dark": True, "daylight": True, "lastupdated": "2019-01-01T01:00:00", }, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "tholddark": 12467, "tholdoffset": 7000, "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue ambient light sensor 1", "type": "ZLLLightLevel", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue ambient light sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:77-02-0400", "capabilities": {"certified": True}, } TEMPERATURE_SENSOR_1 = { "state": {"temperature": 1775, "lastupdated": "2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue temperature sensor 1", "type": "ZLLTemperature", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue temperature sensor", "swversion": "6.1.1.27575", 
"uniqueid": "00:11:22:33:44:55:66:77-02-0402", "capabilities": {"certified": True}, } PRESENCE_SENSOR_2_NOT_PRESENT = { "state": {"presence": False, "lastupdated": "2019-01-01T00:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "sensitivity": 2, "sensitivitymax": 2, "pending": [], }, "name": "Kitchen sensor", "type": "ZLLPresence", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue motion sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:88-02-0406", "capabilities": {"certified": True}, } LIGHT_LEVEL_SENSOR_2 = { "state": { "lightlevel": 10001, "dark": True, "daylight": True, "lastupdated": "2019-01-01T01:00:00", }, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "tholddark": 12467, "tholdoffset": 7000, "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue ambient light sensor 2", "type": "ZLLLightLevel", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue ambient light sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:88-02-0400", "capabilities": {"certified": True}, } TEMPERATURE_SENSOR_2 = { "state": {"temperature": 1875, "lastupdated": "2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue temperature sensor 2", "type": "ZLLTemperature", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue temperature sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:88-02-0402", "capabilities": {"certified": True}, } PRESENCE_SENSOR_3_PRESENT = { "state": {"presence": True, "lastupdated": 
"2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "sensitivity": 2, "sensitivitymax": 2, "pending": [], }, "name": "Bedroom sensor", "type": "ZLLPresence", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue motion sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:99-02-0406", "capabilities": {"certified": True}, } LIGHT_LEVEL_SENSOR_3 = { "state": { "lightlevel": 1, "dark": True, "daylight": True, "lastupdated": "2019-01-01T01:00:00", }, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "tholddark": 12467, "tholdoffset": 7000, "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue ambient light sensor 3", "type": "ZLLLightLevel", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue ambient light sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:99-02-0400", "capabilities": {"certified": True}, } TEMPERATURE_SENSOR_3 = { "state": {"temperature": 1775, "lastupdated": "2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue temperature sensor 3", "type": "ZLLTemperature", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue temperature sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:99-02-0402", "capabilities": {"certified": True}, } UNSUPPORTED_SENSOR = { "state": {"status": 0, "lastupdated": "2019-01-01T01:00:00"}, "config": {"on": True, "reachable": True}, "name": "Unsupported sensor", "type": "CLIPGenericStatus", "modelid": "PHWA01", "manufacturername": "Philips", 
"swversion": "1.0", "uniqueid": "arbitrary", "recycle": True, } HUE_TAP_REMOTE_1 = { "state": {"buttonevent": 17, "lastupdated": "2019-06-22T14:43:50"}, "swupdate": {"state": "notupdatable", "lastinstall": None}, "config": {"on": True}, "name": "Hue Tap", "type": "ZGPSwitch", "modelid": "ZGPSWITCH", "manufacturername": "Philips", "productname": "Hue tap switch", "diversityid": "d8cde5d5-0eef-4b95-b0f0-71ddd2952af4", "uniqueid": "00:00:00:00:00:44:23:08-f2", "capabilities": {"certified": True, "primary": True, "inputs": []}, } HUE_DIMMER_REMOTE_1 = { "state": {"buttonevent": 4002, "lastupdated": "2019-12-28T21:58:02"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-10-13T13:16:15"}, "config": {"on": True, "battery": 100, "reachable": True, "pending": []}, "name": "Hue dimmer switch 1", "type": "ZLLSwitch", "modelid": "RWL021", "manufacturername": "Philips", "productname": "Hue dimmer switch", "diversityid": "73bbabea-3420-499a-9856-46bf437e119b", "swversion": "6.1.1.28573", "uniqueid": "00:17:88:01:10:3e:3a:dc-02-fc00", "capabilities": {"certified": True, "primary": True, "inputs": []}, } SENSOR_RESPONSE = { "1": PRESENCE_SENSOR_1_PRESENT, "2": LIGHT_LEVEL_SENSOR_1, "3": TEMPERATURE_SENSOR_1, "4": PRESENCE_SENSOR_2_NOT_PRESENT, "5": LIGHT_LEVEL_SENSOR_2, "6": TEMPERATURE_SENSOR_2, "7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1, } async def test_no_sensors(hass, mock_bridge): """Test the update_items function when no sensors are found.""" mock_bridge.allow_groups = True mock_bridge.mock_sensor_responses.append({}) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 0 async def test_sensors_with_multiple_bridges(hass, mock_bridge): """Test the update_items function with some sensors.""" mock_bridge_2 = create_mock_bridge(hass) mock_bridge_2.mock_sensor_responses.append( { "1": PRESENCE_SENSOR_3_PRESENT, "2": LIGHT_LEVEL_SENSOR_3, "3": TEMPERATURE_SENSOR_3, } ) 
mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE) await setup_bridge(hass, mock_bridge) await setup_bridge(hass, mock_bridge_2, hostname="mock-bridge-2") assert len(mock_bridge.mock_requests) == 1 assert len(mock_bridge_2.mock_requests) == 1 # 3 "physical" sensors with 3 virtual sensors each + 1 battery sensor assert len(hass.states.async_all()) == 10 async def test_sensors(hass, mock_bridge): """Test the update_items function with some sensors.""" mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 "physical" sensors with 3 virtual sensors each assert len(hass.states.async_all()) == 7 presence_sensor_1 = hass.states.get("binary_sensor.living_room_sensor_motion") light_level_sensor_1 = hass.states.get("sensor.living_room_sensor_light_level") temperature_sensor_1 = hass.states.get("sensor.living_room_sensor_temperature") assert presence_sensor_1 is not None assert presence_sensor_1.state == "on" assert light_level_sensor_1 is not None assert light_level_sensor_1.state == "1.0" assert light_level_sensor_1.name == "Living room sensor light level" assert temperature_sensor_1 is not None assert temperature_sensor_1.state == "17.75" assert temperature_sensor_1.name == "Living room sensor temperature" presence_sensor_2 = hass.states.get("binary_sensor.kitchen_sensor_motion") light_level_sensor_2 = hass.states.get("sensor.kitchen_sensor_light_level") temperature_sensor_2 = hass.states.get("sensor.kitchen_sensor_temperature") assert presence_sensor_2 is not None assert presence_sensor_2.state == "off" assert light_level_sensor_2 is not None assert light_level_sensor_2.state == "10.0" assert light_level_sensor_2.name == "Kitchen sensor light level" assert temperature_sensor_2 is not None assert temperature_sensor_2.state == "18.75" assert temperature_sensor_2.name == "Kitchen sensor temperature" battery_remote_1 = hass.states.get("sensor.hue_dimmer_switch_1_battery_level") 
assert battery_remote_1 is not None assert battery_remote_1.state == "100" assert battery_remote_1.name == "Hue dimmer switch 1 battery level" async def test_unsupported_sensors(hass, mock_bridge): """Test that unsupported sensors don't get added and don't fail.""" response_with_unsupported = dict(SENSOR_RESPONSE) response_with_unsupported["7"] = UNSUPPORTED_SENSOR mock_bridge.mock_sensor_responses.append(response_with_unsupported) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 "physical" sensors with 3 virtual sensors each + 1 battery sensor assert len(hass.states.async_all()) == 7 async def test_new_sensor_discovered(hass, mock_bridge): """Test if 2nd update has a new sensor.""" mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 7 new_sensor_response = dict(SENSOR_RESPONSE) new_sensor_response.update( { "9": PRESENCE_SENSOR_3_PRESENT, "10": LIGHT_LEVEL_SENSOR_3, "11": TEMPERATURE_SENSOR_3, } ) mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(hass.states.async_all()) == 10 presence = hass.states.get("binary_sensor.bedroom_sensor_motion") assert presence is not None assert presence.state == "on" temperature = hass.states.get("sensor.bedroom_sensor_temperature") assert temperature is not None assert temperature.state == "17.75" async def test_sensor_removed(hass, mock_bridge): """Test if 2nd update has removed sensor.""" mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 7 mock_bridge.mock_sensor_responses.clear() keys = ("1", "2", "3") mock_bridge.mock_sensor_responses.append({k: 
SENSOR_RESPONSE[k] for k in keys}) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() # To flush out the service call to update the group await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(hass.states.async_all()) == 3 sensor = hass.states.get("binary_sensor.living_room_sensor_motion") assert sensor is not None removed_sensor = hass.states.get("binary_sensor.kitchen_sensor_motion") assert removed_sensor is None async def test_update_timeout(hass, mock_bridge): """Test bridge marked as not available if timeout error during update.""" mock_bridge.api.sensors.update = Mock(side_effect=asyncio.TimeoutError) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 0 assert len(hass.states.async_all()) == 0 async def test_update_unauthorized(hass, mock_bridge): """Test bridge marked as not authorized if unauthorized during update.""" mock_bridge.api.sensors.update = Mock(side_effect=aiohue.Unauthorized) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 0 assert len(hass.states.async_all()) == 0 assert len(mock_bridge.handle_unauthorized_error.mock_calls) == 1 async def test_hue_events(hass, mock_bridge): """Test that hue remotes fire events when pressed.""" mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE) mock_listener = Mock() unsub = hass.bus.async_listen(CONF_HUE_EVENT, mock_listener) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 7 assert len(mock_listener.mock_calls) == 0 new_sensor_response = dict(SENSOR_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert 
len(hass.states.async_all()) == 7 assert len(mock_listener.mock_calls) == 1 assert mock_listener.mock_calls[0][1][0].data == { "id": "hue_tap", "unique_id": "00:00:00:00:00:44:23:08-f2", "event": 18, "last_updated": "2019-12-28T22:58:02", } new_sensor_response = dict(new_sensor_response) new_sensor_response["8"]["state"] = { "buttonevent": 3002, "lastupdated": "2019-12-28T22:58:01", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(hass.states.async_all()) == 7 assert len(mock_listener.mock_calls) == 2 assert mock_listener.mock_calls[1][1][0].data == { "id": "hue_dimmer_switch_1", "unique_id": "00:17:88:01:10:3e:3a:dc-02-fc00", "event": 3002, "last_updated": "2019-12-28T22:58:01", } # Add a new remote. In discovery the new event is registered **but not fired** new_sensor_response = dict(new_sensor_response) new_sensor_response["21"] = { "state": { "rotaryevent": 2, "expectedrotation": 208, "expectedeventduration": 400, "lastupdated": "2020-01-31T15:56:19", }, "swupdate": {"state": "noupdates", "lastinstall": "2019-11-26T03:35:21"}, "config": {"on": True, "battery": 100, "reachable": True, "pending": []}, "name": "Lutron Aurora 1", "type": "ZLLRelativeRotary", "modelid": "Z3-1BRL", "manufacturername": "Lutron", "productname": "Lutron Aurora", "diversityid": "2c3a75ff-55c4-4e4d-8c44-82d330b8eb9b", "swversion": "3.4", "uniqueid": "ff:ff:00:0f:e7:fd:bc:b7-01-fc00-0014", "capabilities": { "certified": True, "primary": True, "inputs": [ { "repeatintervals": [400], "events": [ {"rotaryevent": 1, "eventtype": "start"}, {"rotaryevent": 2, "eventtype": "repeat"}, ], } ], }, } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert 
len(mock_bridge.mock_requests) == 4 assert len(hass.states.async_all()) == 8 assert len(mock_listener.mock_calls) == 2 # A new press fires the event new_sensor_response["21"]["state"]["lastupdated"] = "2020-01-31T15:57:19" mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 5 assert len(hass.states.async_all()) == 8 assert len(mock_listener.mock_calls) == 3 assert mock_listener.mock_calls[2][1][0].data == { "id": "lutron_aurora_1", "unique_id": "ff:ff:00:0f:e7:fd:bc:b7-01-fc00-0014", "event": 2, "last_updated": "2020-01-31T15:57:19", } unsub()
dslomov/intellij-community
refs/heads/master
python/testData/inspections/AugmentAssignmentWithContext.py
83
class A: x = 3 a = A() <weak_warning descr="Assignment can be replaced with augmented assignment">a.x = a.x +<caret> 1</weak_warning>
tymiles003/openwebrtc
refs/heads/master
bindings/java/c_generator.py
31
# Copyright (c) 2014-2015, Ericsson AB. All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this # list of conditions and the following disclaimer in the documentation and/or other # materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. 
import config from functools import partial from collections import defaultdict from itertools import imap from java_type_signatures import type_signatures from base_generator import * C = BaseGenerator( default_line_prefix=config.C_INDENTATION, ) def jni_param(param): if param.jni_type: return param.jni_type + ' ' + param.jni_name return () def c_param(param): if param.c_type: return param.c_type + ' ' + param.c_name return () def c_arg(param): if param.c_type: return param.c_name return () def jni_arg(param): if param.jni_type: return param.jni_name return () @add_to(C) class Log(C.Lines): def __init__(self, level, msg, *args): self.msg = msg self.args = args self.level = level def _make_logfunc(level): @classmethod def logfunc(cls, msg, *args): return cls(level, msg, *args) return logfunc error = _make_logfunc('error') warning = _make_logfunc('warning') debug = _make_logfunc('debug') info = _make_logfunc('info') verbose = _make_logfunc('verbose') def __iter__(self): yield C.Call('log_' + self.level, quot(self.msg), *self.args) @add_to(C) class Assert(C.Lines): def __init__(self, val): self.val = val def __iter__(self): yield semi('g_assert(' + flatjoin(self.val, '') + ')') @add_to(C) class Throw(C.Lines): def __init__(self, *args): self.args = args def __iter__(self): yield 'THROW(' + flatjoin(self.args, '') + ');' @add_to(C) class ExceptionCheck(C.Lines): def __init__(self, value): self.value = value def __iter__(self): yield C.If(C.Env('ExceptionCheck'), C.Log('warning', 'exception at %s:%d', '__FILE__', '__LINE__'), C.Env('ExceptionDescribe'), C.Return(self.value), ) @classmethod def default(cls, value): return cls(value.parent.return_value.default_value) @add_to(C) class CommentHeader(C.Comment): def __iter__(self): l = len(self.text) yield '/**' + l * '*' + '**/' yield '/* ' + self.text + ' */' yield '/**' + l * '*' + '**/' @add_to(C) class Function(C.FunctionBlock): modifiers = ['static'] def __init__(self, name, return_type='void', params=None, **kwargs): 
super(Function, self).__init__(**kwargs) self.name = name self.return_type = return_type self.params = params or [] @property def start(self): return [self.definition, '{'] @staticmethod def callback(callback, body=None, **kwargs): args = { 'return_type': callback.params.return_value.c_type, 'name': 'callback_' + callback.value.gir_type, 'params': map(c_param, callback.params), 'body': [TypeConversions.params_to_jni(callback.params, body=body or [], push_frame=True)], } if callback.params.return_value.name is not None: args['body'] += [C.Return(callback.params.return_value.c_name)] args.update(kwargs) return C.Function(**args) @add_to(C) class JniExport(C.FunctionBlock): modifiers = ['JNIEXPORT'] def __init__(self, package=None, clazz=None, subclass=None, method_name=None, return_type='void', params=None, **kwargs): super(JniExport, self).__init__(**kwargs) self.package = package self.clazz = clazz self.subclass = subclass self.method_name = method_name self.return_type = return_type self.java_params = params or [] @property def name(self): return '_'.join(prune_empty('Java', self.package.replace('.', '_'), self.clazz, self.subclass, self.method_name, )) @property def params(self): return ['JNIEnv* env'] + self.java_params @property def start(self): return [self.definition, '{'] @staticmethod def default(function, body=[], **kwargs): params = map(jni_param, function.params.java_params) if function.params.instance_param is None: params = ['jclass jclazz'] + params else: params = [jni_param(function.params.instance_param)] + params args = { 'return_type': function.params.return_value.jni_type, 'method_name': function.name, 'params': params, 'body': [C.TypeConversions.params_to_c(function.params, body=body, get_env=False)], } if function.params.return_value.name is not None: args['body'] += [C.Return(function.params.return_value.jni_name)] args.update(kwargs) return JniExport(**args) @add_to(C) class Helper(C.Call): helper_functions = {} used_helpers = [] def 
__init__(self, name, *args): super(Helper, self).__init__(name, *args) func = self.helper_functions.pop(name, None) if func is not None: self.used_helpers.append(func) @classmethod def add_helper(cls, name, func): cls.helper_functions[name] = func @classmethod def enumerate_used_helpers(cls): return cls.used_helpers @add_to(C) class Cache(C.Lines): cached_classes = defaultdict(partial(defaultdict, dict)) def __init__(self, *args): self.args = list(args) def __iter__(self): yield 'cache_' + flatjoin(self.args, '_') @classmethod def clazz(cls, *args): classname = flatjoin(args, '$') cls.cached_classes[type_signatures[classname]['_path']] return cls(*args) def _make_cacher(func): @classmethod def cacher(cls, *args): methodname = args[-1] signatures = type_signatures[flatjoin(args[:-1], '$')] cls.cached_classes[signatures['_path']][func][methodname] = signatures[methodname] return cls(*args) return cacher method = _make_cacher('GetMethodID') static_method = _make_cacher('GetStaticMethodID') field = _make_cacher('GetFieldID') static_field = _make_cacher('GetStaticFieldID') @classmethod def default_class(cls, clazz): cls.cached_classes[clazz.java_class_path] return cls(clazz.java_type) @classmethod def default_method(cls, func): val = func.value args = None if hasattr(val, 'outer_java_type'): args = [val.outer_java_type, val.java_type, func.name] else: args = [val.java_type, func.name] cls.cached_classes[val.java_class_path]['GetMethodID'][func.name] = func.method_signature return cls(*args) @classmethod def default_enum_member(cls, enum, member): typ = enum.type if hasattr(enum.type, 'inner_type'): typ = enum.type.inner_type cls.cached_classes[typ.java_class_path]['GetStaticFieldID'][member.name] = typ.java_signature return cls(enum.name, member.name) @classmethod def enumerate_cached_classes(cls): cache_declarations = [] jni_onload_cache = [] for classpath, clazz in Cache.cached_classes.items(): classname = classpath[classpath.rfind('/')+1:] to_cache_var = lambda 
*args: '_'.join(['cache'] + classname.split('$') + list(args)) classvar = to_cache_var() cache_declarations += [C.Decl('static jclass', classvar)] jni_onload_cache += [ C.Assign(classvar, C.Env('FindClass', quot(classpath))), C.ExceptionCheck('0'), C.Assign(classvar, C.Env('NewGlobalRef', classvar)), C.ExceptionCheck('0'), ] for getfunc, method in clazz.items(): var_type = 'jmethodID' if 'Method' in getfunc else 'jfieldID' for methodname, signature in method.items(): methodvar = to_cache_var(methodname) if methodname == '_constructor': methodname = '<init>' cache_declarations += [C.Decl('static ' + var_type, methodvar)] jni_onload_cache += [ C.Log('debug', 'getting %s.%s', quot(classname), quot(methodname)), C.Assign(methodvar, C.Env(getfunc, classvar, quot(methodname), quot(signature))), C.ExceptionCheck('0'), ] cache_declarations.append('') jni_onload_cache.append('') return cache_declarations[:-1], jni_onload_cache[:-1] @add_to(C) class Env(C.Lines): return_type_table = { 'V': 'Void', ';': 'Object', 'Z': 'Boolean', 'B': 'Byte', 'C': 'Char', 'S': 'Short', 'I': 'Int', 'J': 'Long', 'F': 'Float', 'D': 'Double', } def __init__(self, name, *args): self.name = name self.args = args @staticmethod def tuple_to_type(args): clazz = type_signatures[flatjoin(args[:-1], '$')] method = clazz[args[-1]] return Env.return_type_table[method[-1]] @classmethod def method(cls, name, method_tuple, *args): return cls('Call' + Env.tuple_to_type(method_tuple) + 'Method', name, C.Cache.method(*method_tuple), *args) @classmethod def static_method(cls, method_tuple, *args): return cls('CallStatic' + Env.tuple_to_type(method_tuple) + 'Method', C.Cache.clazz(method_tuple[:-1]), C.Cache.static_method(*method_tuple), *args) @classmethod def field(cls, name, field_tuple): return cls('Get' + Env.tuple_to_type(field_tuple) + 'Field', name, C.Cache.field(*field_tuple)) @classmethod def new(cls, clazz, *args): return cls('NewObject', C.Cache.clazz(clazz), C.Cache.method(clazz, '_constructor'), 
*args) @classmethod def throw(cls, clazz, msg): return cls('ThrowNew', C.Cache.clazz(clazz), msg) @classmethod def callback(cls, callback): type = Env.return_type_table[callback.params.return_value.java_signature[-1]] cached = None if hasattr(callback.value, 'outer_java_type'): cached = (callback.value.outer_java_type, callback.value.java_type, callback.name) else: cached = (callback.value.java_type, callback.name) return cls('Call' + type + 'Method', map(jni_arg, callback.params.closure_params), C.Cache.default_method(callback), *map(jni_arg, callback.params.java_params) ) def __iter__(self): yield semi('(*env)->{name}({args})'.format( name=self.name, args=flatjoin(['env'] + list(flatten(self.args)), ', '), )) @add_to(C) class TypeConversions(C.Lines): def __init__(self, conversions, return_conversion, body=None, get_env=True, push_frame=False, **kwargs): super(TypeConversions, self).__init__(**kwargs) self.conversions = list(conversions) self.return_conversion = return_conversion self.body = body or [] self.get_env = get_env self.push_frame = push_frame def __iter__(self): conversion = [ prune_empty([p.declarations for p in self.conversions] + [self.get_env and C.Decl('JNIEnv*', 'env')]), self.get_env and C.Assign('env', C.Call('get_jni_env')), C.If(Env('PushLocalFrame', str(config.LOCAL_FRAME_SIZE)), C.Log('warning', 'failed to push local frame at %s:%d', '__FILE__', '__LINE__') ) if self.push_frame else [], prune_empty([p.conversion for p in self.conversions]), self.body, prune_empty(p.cleanup for p in reversed(self.conversions)), Env('PopLocalFrame', 'NULL') if self.push_frame else [], ] if self.return_conversion is not None: conversion = [self.return_conversion.declarations] + conversion + [ self.return_conversion.conversion, self.return_conversion.cleanup, ] return iter(intersperse(prune_empty(conversion), '')) @staticmethod def params_to_c(params, **kwargs): ret = params.return_value return TypeConversions([param.transform_to_c() for param in params], 
ret.transform_to_jni() if ret.name is not None else None, **kwargs) @staticmethod def params_to_jni(params, **kwargs): ret = params.return_value return TypeConversions([param.transform_to_jni() for param in params], ret.transform_to_c() if ret.name is not None else None, **kwargs) def make_function_gen(package, classname): def gen(function): call = C.Call(function.c_name, map(c_arg, function.params)) ret = function.params.return_value if ret.name is not None: call = C.Assign(ret.c_name, call) out = JniExport.default(function, package=package, clazz=classname, body=call) if ret.name is not None: out.body = [C.Decl(ret.c_type, ret.c_name)] + out.body return out return gen def make_callback_gen(package, classname): def gen(callback): call = C.Env.callback(callback) ret = callback.params.return_value if ret.name is not None: call = C.Assign(ret.jni_name, call) out = C.Function.callback(callback, package=package, clazz=classname, body=call) if ret.name is not None: out.body = [C.Decl(ret.jni_type, ret.jni_name)] + out.body return out return gen def make_signal_accessors_gen(package, classname): def gen(signal): connect_args = map(c_arg, signal.add_listener.params) connect_args[0] = 'G_OBJECT(' + connect_args[0] + ')' connect_args.insert(1, quot(signal.signal_name)) connect_args += [C.Helper('jobject_wrapper_closure_notify').name, '0'] ret = signal.add_listener.params.return_value connecter = C.JniExport.default(signal.add_listener, package=package, clazz=classname, body=[C.Assign(ret.c_name, C.Call('g_signal_connect_data', connect_args))], ) connecter.body = [C.Decl(ret.c_type, ret.c_name)] + connecter.body disconnect_args = map(c_arg, signal.remove_listener.params) disconnect_args[0] = 'G_OBJECT(' + disconnect_args[0] + ')' disconnecter = C.JniExport.default(signal.remove_listener, package=package, clazz=classname, body=C.Call('g_signal_handler_disconnect', disconnect_args), ) return [connecter, disconnecter] return gen def gen_class(package, clazz): body = 
[C.CommentHeader(clazz.name)] gen_signal_accessors = make_signal_accessors_gen(package, clazz.name) for attr in ['constructors', 'functions', 'methods']: body += [C.Comment(attr) if getattr(clazz, attr) else None] body += map(make_function_gen(package, clazz.name), getattr(clazz, attr)) for interface in clazz.interfaces: body += map(make_function_gen(package, clazz.name), interface.methods) body += [C.Comment('signals') if clazz.signals else None] body += map(make_callback_gen(package, clazz.name), clazz.signals) body += map(gen_signal_accessors, clazz.signals) body += [C.Comment('properties') if clazz.properties else None] for prop in clazz.properties: body += [C.Comment(prop.name)] if prop.readable: # getter ret = prop.getter.params.return_value get_params = map(c_arg, prop.getter.params) + [quot(prop.name), '&' + ret.c_name, 'NULL'] func = C.JniExport.default(prop.getter, package=package, clazz=clazz.name, body=[ C.Call('g_object_get', get_params), ]) if ret.name is not None: func.body = [C.Decl(ret.c_type, ret.c_name)] + func.body body.append(func) # change listener transform = ret.transform_to_jni() func = C.Function( package=package, clazz=clazz.name, name='callback_' + prop.signal.value.gir_type, return_type=prop.signal.params.return_value.c_type, params=map(c_param, prop.signal.params), body=[TypeConversions([p.transform_to_jni() for p in prop.signal.params.params], None, push_frame=True, body=[ '(void) c_pspec;', C.Call('g_object_get', get_params), transform.conversion, C.Env.callback(prop.signal), transform.cleanup, ])], ) func.body = [ C.Decl(ret.c_type, ret.c_name), transform.declarations, ] + func.body body.append(func) body += gen_signal_accessors(prop.signal) if prop.writable: # setter ret = prop.setter.params.return_value params = map(c_arg, prop.setter.params) params.insert(1, quot(prop.name)) params.append('NULL') func = C.JniExport.default(prop.setter, package=package, clazz=clazz.name, body=[ C.Call('g_object_set', params) ]) body += [func] 
return intersperse(prune_empty(body), '') def gen_namespace(namespace, package): body = [] package = package + '.' + namespace.symbol_prefix body += map(make_callback_gen(package, namespace.identifier_prefix), namespace.callbacks) body += map(make_function_gen(package, namespace.identifier_prefix), namespace.functions) body += map(partial(gen_class, package), namespace.classes) return body def add_helpers(namespace): for enum in namespace.enums: C.Helper.add_helper(enum.name + '_to_java_enum', C.Function(enum.name + '_to_java_enum', return_type='jobject', params=['JNIEnv* env', enum.type.c_type + ' value'], body=[ C.Decl('jfieldID', 'fieldId'), C.Decl('jobject', 'result'), '', C.Switch('value', cases=[ (member.c_name, C.Assign('fieldId', C.Cache.default_enum_member(enum, member))) for member in enum.members ]), '', C.Assert('fieldId'), C.Assign('result', Env('GetStaticObjectField', C.Cache(enum.name), 'fieldId')), C.ExceptionCheck('NULL'), C.Return('result'), ] ) ) def gen_source(namespaces, include_headers): body = [] package = config.PACKAGE_ROOT for namespace in namespaces: add_helpers(namespace) for namespace in namespaces: body += gen_namespace(namespace, package) jobject_wrapper_struct = C.Block( _start = 'typedef union {', body = [ C.Decl('jobject', 'obj'), C.Decl('jweak', 'weak'), ], _end = '} JObjectWrapper;', ) jobject_callback_wrapper_struct = C.Block( _start = 'typedef struct {', body = [ C.Decl('JObjectWrapper', '*wrapper'), C.Decl('gboolean', 'should_destroy'), ], _end = '} JObjectCallbackWrapper;', ) native_destructor = [C.JniExport( package=package, clazz='NativeInstance', method_name='nativeDestructor', return_type='void', params=['jclass clazz', 'jlong instance_pointer'], body=[ C.Decl('GWeakRef*', 'ref'), C.Decl('GObject*', 'gobj'), C.Decl('JObjectWrapper*', 'wrapper'), '(void) clazz;', '', C.Assign('ref', 'instance_pointer', cast='GWeakRef*'), C.Assign('gobj', C.Call('g_weak_ref_get', 'ref')), C.Call('g_weak_ref_clear', 'ref'), C.Call('g_free', 
'ref'), '', C.If('!gobj', C.Env.throw('IllegalStateException', '"GObject ref was NULL at finalization"'), C.Return()), C.Log('debug', 'unrefing GObject[%p]', 'gobj'), C.Assign('wrapper', C.Call('g_object_get_data', 'gobj', '"java_instance"'), cast='JObjectWrapper*'), C.If('wrapper', [ C.Call('g_object_set_data', 'gobj', '"java_instance"', 'NULL'), C.Helper('jobject_wrapper_destroy', 'wrapper', 'TRUE'), ]), C.Call('g_object_unref', 'gobj'), ]), ] helper_functions = Helper.enumerate_used_helpers() gobject_class_cache = [ C.Call('g_hash_table_insert', 'gobject_to_java_class_map', C.Call(clazz.glib_get_type), Cache.default_class(clazz.value)) for clazz in namespace.classes for namespace in namespaces]; # cached classes need to be enumerated last cache_declarations, jni_onload_cache = C.Cache.enumerate_cached_classes() jni_onload = Function( name='JNI_OnLoad', return_type='jint', params=['JavaVM* vm', 'void* reserved'], modifiers=[], body=[ C.Decl('JNIEnv*', 'env'), '', C.Assign('jvm', 'vm'), C.Assign('env', C.Call('get_jni_env')), '', jni_onload_cache, '', C.Assign('gobject_to_java_class_map', C.Call('g_hash_table_new', 'g_direct_hash', 'g_direct_equal')), '', gobject_class_cache, '', C.Return('JNI_VERSION_1_6'), ] ) include_headers = ['jni.h', 'android/log.h'] + include_headers includes = '\n'.join('#include <' + h + '>' for h in include_headers) body = [ includes, HEADER, cache_declarations, C.Decl('static GHashTable*', 'gobject_to_java_class_map'), GET_JNI_ENV, jni_onload, jobject_wrapper_struct, jobject_callback_wrapper_struct, ] + helper_functions + [native_destructor] + body body = intersperse(prune_empty(body), '') return flatjoin(body, '\n') HEADER = """ #define android_assert(st) if (!(st)) {{ __android_log_write(ANDROID_LOG_ERROR, "OpenWebRTC", "Assertion failed at "G_STRINGIFY(__LINE__));}} #undef g_assert #define g_assert android_assert #define log_verbose(st, ...) 
__android_log_print(ANDROID_LOG_VERBOSE, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__); #define log_debug(st, ...) __android_log_print(ANDROID_LOG_DEBUG, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__); #define log_info(st, ...) __android_log_print(ANDROID_LOG_INFO, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__); #define log_warning(st, ...) __android_log_print(ANDROID_LOG_WARN, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__); #define log_error(st, ...) __android_log_print(ANDROID_LOG_ERROR, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__); """.format(config.LOG_TAG) GET_JNI_ENV = [ C.Decl('static JavaVM*', 'jvm'), C.Decl('static pthread_key_t', 'pthread_detach_key = 0'), '', C.Function('detach_current_thread', params=['void* pthread_key'], body=[ C.Decl('(void)', 'pthread_key'), C.Call('g_return_if_fail', 'jvm'), '', C.Log.debug('JNI: detaching current thread from Java VM: %ld', C.Call('pthread_self')), '', C.Call('(*jvm)->DetachCurrentThread', 'jvm'), C.Call('pthread_setspecific', 'pthread_detach_key', 'NULL'), ] ), '', C.Function('get_jni_env', return_type='JNIEnv*', params=[], body=[ C.Decl('JNIEnv*', 'env'), C.Decl('int', 'ret'), '', C.Assign('env', 'NULL'), C.Assign('ret', C.Call('(*jvm)->GetEnv', 'jvm', '(void**)&env', 'JNI_VERSION_1_6')), '', C.IfElse(ifs=['ret == JNI_EDETACHED', 'ret == JNI_EVERSION'], bodies=[ C.IfElse(ifs=['(*jvm)->AttachCurrentThread(jvm, (JNIEnv**) &env, NULL) != 0'], bodies=[ C.Log.error('JNI: failed to attach thread'), [ C.Log.info('JNI: successfully attached to thread'), C.If(C.Call('pthread_key_create', '&pthread_detach_key', 'detach_current_thread'), C.Log.error('JNI: failed to set detach callback')), C.Call('pthread_setspecific', 'pthread_detach_key', 'jvm'), ] ]), C.Log.error('JNI: version not supported'), ] ), '', C.Assert('env'), C.Return('env'), ] ), ]
NESCent/feedingdb
refs/heads/master
src/feeddb/feed/forms.py
1
from haystack.forms import FacetedSearchForm from haystack.inputs import AutoQuery, Exact, Clean from haystack.query import RelatedSearchQuerySet from inspector_panel import debug from django import forms from django.forms.fields import ChoiceField, BooleanField from django.forms.widgets import RadioSelect from django.core.urlresolvers import reverse from django.forms.models import model_to_dict, fields_for_model from collections import OrderedDict from django.contrib.auth.models import User from feeddb.feed.models import Trial, Session, Experiment, Subject, Study, FeedUserProfile from faceted_search.searcher import Searcher import logging # Get an instance of a logger logger = logging.getLogger(__name__) my_facet_config = { 'fields': { 'analoc': { 'label': 'Anatomical Location' }, 'behaviorowl_primary_ancestors': { 'label': 'Primary Behavior' }, 'taxon': { 'label': 'Species' }, 'food_type': { 'label': 'Food Type' }, 'techniques': { 'label': 'Sensor Type' }, } } class UserOwnProfileForm(forms.ModelForm): _user_fields = ('email', 'first_name', 'last_name') _field_order = ('first_name', 'last_name', 'email', 'institutional_affiliation') def __init__(self, initial=None, instance=None, *args, **kwargs): _user_initial = model_to_dict(instance.user, self._user_fields) if instance is not None else {} initial.update(_user_initial) super(UserOwnProfileForm, self).__init__(initial=initial, instance=instance, *args, **kwargs) self.fields.update(fields_for_model(User, self._user_fields)) # reorder fields according to order above self.fields = OrderedDict((k, self.fields[k]) for k in self._field_order) class Meta: model = FeedUserProfile fields = '__all__' def save(self, *args, **kwargs): u = self.instance.user for field in self._user_fields: setattr(u, field, self.cleaned_data[field]) u.save() profile = super(UserOwnProfileForm, self).save(*args, **kwargs) return profile class ModelCloneForm(forms.Form): source = forms.ModelChoiceField(queryset=None) recurse = 
forms.BooleanField(required=False) def __init__(self, container=None, *args, **kwargs): self.clone_subject = kwargs.pop('clone_subject', False) super(ModelCloneForm, self).__init__(*args, **kwargs) ContainerModel = type(container) if ContainerModel == Session: qs = Trial.objects.filter(session=container) elif ContainerModel == Experiment: qs = Session.objects.filter(experiment=container) elif ContainerModel == Study: if self.clone_subject: qs = Subject.objects.filter(study=container) else: qs = Experiment.objects.filter(study=container) elif container == None: qs = Study.objects.all() else: raise ValueError("ModelCloneForm does not support container model type %s" % ContainerModel) self.fields['source'].queryset = qs self.feed_source_count = len(qs) self.container = container def action_url(self): "Get url for action attribute of form tag. See ../urls.py" if self.container == None: return reverse('clone_study') elif self.clone_subject: kwargs = { 'container_pk': self.container.pk, } return reverse('clone_subject_from_study', kwargs=kwargs) else: kwargs = { 'container_type': type(self.container).__name__.lower(), 'container_pk': self.container.pk, } return reverse('clone_from_container', kwargs=kwargs) @classmethod def factory(cls, modeladmin, request): # fake context so I can re-use get_current_containers() context = { 'opts': modeladmin.model._meta, 'request': request } from feeddb.feed.templatetags.upload_status import get_current_containers containers = get_current_containers(context) try: # move down the tree until they don't exist anymore container = None container = containers['study'] container = containers['experiment'] container = containers['session'] except KeyError: pass # Special case for subject, because its container is the same as when # cloning an experiment. 
if context['opts'].model_name == 'subject': return cls(container=container, clone_subject=True, prefix='clone') else: return cls(container=container, prefix='clone') class FeedSearchForm(FacetedSearchForm): per_page = ChoiceField( choices=[(n, n) for n in (10, 30, 50, 100, 200)], initial=10, widget=RadioSelect(), ) def __init__(self, GET, *args, **kwargs): # Save all GET parameters in case they match facets and not form fields self.filters = GET super(FeedSearchForm, self).__init__(GET, *args, **kwargs) self.searcher = Searcher(model=Trial, facets=my_facet_config, queryset=RelatedSearchQuerySet()) def no_query_found(self): """ Override no_query_found behavior to return all results when no keyword search is provided """ return self.searchqueryset.all() def search(self): # Get keywords from form, defaulting to empty string try: q = self.cleaned_data['q'] except (AttributeError, KeyError): # if self.cleaned_data doesn't exist q = '' sqs = self.searcher.search(filters=self.filters, keywords=q) sqs = sqs.order_by('taxon') sqs = sqs.load_all() return sqs
amaozhao/algorithms
refs/heads/master
tests/test_stack.py
2
from algorithms.stack import ( first_is_consecutive, second_is_consecutive, is_sorted, remove_min, first_stutter, second_stutter, first_switch_pairs, second_switch_pairs, is_valid, simplify_path, ArrayStack, LinkedListStack, OrderedStack ) import unittest class TestSuite(unittest.TestCase): def test_is_consecutive(self): self.assertTrue(first_is_consecutive([3, 4, 5, 6, 7])) self.assertFalse(first_is_consecutive([3, 4, 6, 7])) self.assertFalse(first_is_consecutive([3, 2, 1])) self.assertTrue(second_is_consecutive([3, 4, 5, 6, 7])) self.assertFalse(second_is_consecutive([3, 4, 6, 7])) self.assertFalse(second_is_consecutive([3, 2, 1])) def test_is_sorted(self): # Test case: bottom [6, 3, 5, 1, 2, 4] top self.assertFalse(is_sorted([6, 3, 5, 1, 2, 4])) self.assertTrue(is_sorted([1, 2, 3, 4, 5, 6])) self.assertFalse(is_sorted([3, 4, 7, 8, 5, 6])) def test_remove_min(self): # Test case: bottom [2, 8, 3, -6, 7, 3] top self.assertEqual([2, 8, 3, 7, 3], remove_min([2, 8, 3, -6, 7, 3])) # Test case: 2 smallest value [2, 8, 3, 7, 3] self.assertEqual([4, 8, 7], remove_min([4, 8, 3, 7, 3])) def test_stutter(self): # Test case: bottom [3, 7, 1, 14, 9] top self.assertEqual([3, 3, 7, 7, 1, 1, 14, 14, 9, 9], first_stutter([3, 7, 1, 14, 9])) self.assertEqual([3, 3, 7, 7, 1, 1, 14, 14, 9, 9], second_stutter([3, 7, 1, 14, 9])) def test_switch_pairs(self): # Test case: even number of values in stack # bottom [3, 8, 17, 9, 1, 10] top self.assertEqual([8, 3, 9, 17, 10, 1], first_switch_pairs([3, 8, 17, 9, 1, 10])) self.assertEqual([8, 3, 9, 17, 10, 1], second_switch_pairs([3, 8, 17, 9, 1, 10])) # Test case: odd number of values in stack # bottom [3, 8, 17, 9, 1] top self.assertEqual([8, 3, 9, 17, 1], first_switch_pairs([3, 8, 17, 9, 1])) self.assertEqual([8, 3, 9, 17, 1], second_switch_pairs([3, 8, 17, 9, 1])) def test_is_valid_parenthesis(self): self.assertTrue(is_valid("[]")) self.assertTrue(is_valid("[]()[]")) self.assertFalse(is_valid("[[[]]")) self.assertTrue(is_valid("{([])}")) 
self.assertFalse(is_valid("(}")) def test_simplify_path(self): p = '/my/name/is/..//keon' self.assertEqual('/my/name/keon', simplify_path(p)) class TestStack(unittest.TestCase): def test_ArrayStack(self): stack = ArrayStack() stack.push(1) stack.push(2) stack.push(3) # test __iter__() it = iter(stack) self.assertEqual(3, next(it)) self.assertEqual(2, next(it)) self.assertEqual(1, next(it)) self.assertRaises(StopIteration, next, it) # test __len__() self.assertEqual(3, len(stack)) # test __str__() self.assertEqual(str(stack), "Top-> 3 2 1") # test is_empty() self.assertFalse(stack.is_empty()) # test peek() self.assertEqual(3, stack.peek()) # test pop() self.assertEqual(3, stack.pop()) self.assertEqual(2, stack.pop()) self.assertEqual(1, stack.pop()) self.assertTrue(stack.is_empty()) def test_LinkedListStack(self): stack = LinkedListStack() stack.push(1) stack.push(2) stack.push(3) # test __iter__() it = iter(stack) self.assertEqual(3, next(it)) self.assertEqual(2, next(it)) self.assertEqual(1, next(it)) self.assertRaises(StopIteration, next, it) # test __len__() self.assertEqual(3, len(stack)) # test __str__() self.assertEqual(str(stack), "Top-> 3 2 1") # test is_empty() self.assertFalse(stack.is_empty()) # test peek() self.assertEqual(3, stack.peek()) # test pop() self.assertEqual(3, stack.pop()) self.assertEqual(2, stack.pop()) self.assertEqual(1, stack.pop()) self.assertTrue(stack.is_empty()) class TestOrderedStack(unittest.TestCase): def test_OrderedStack(self): stack = OrderedStack() self.assertTrue(stack.is_empty()) stack.push(1) stack.push(4) stack.push(3) stack.push(6) "bottom - > 1 3 4 6 " self.assertEqual(6, stack.pop()) self.assertEqual(4, stack.peek()) self.assertEqual(3, stack.size()) if __name__ == "__main__": unittest.main()
lgarren/spack
refs/heads/develop
var/spack/repos/builtin/packages/xphelloworld/package.py
3
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Xphelloworld(AutotoolsPackage): """Xprint sample applications.""" homepage = "http://cgit.freedesktop.org/xorg/app/xphelloworld" url = "https://www.x.org/archive/individual/app/xphelloworld-1.0.1.tar.gz" version('1.0.1', 'b1851337a8e850d5c8e5a5ca5e3033da') depends_on('libx11') depends_on('libxaw') depends_on('libxprintapputil') depends_on('libxprintutil') depends_on('libxp') depends_on('libxt') # FIXME: xphelloworld requires libxaw8, but libxaw only provides 6 and 7. # It looks like xprint support was removed from libxaw at some point. # But even the oldest version of libxaw doesn't build libxaw8. depends_on('pkg-config@0.9.0:', type='build') depends_on('util-macros', type='build')
westinedu/wrgroups
refs/heads/master
django/contrib/localflavor/sk/sk_districts.py
543
""" Slovak districts according to http://sk.wikipedia.org/wiki/Administrat%C3%ADvne_%C4%8Dlenenie_Slovenska """ from django.utils.translation import ugettext_lazy as _ DISTRICT_CHOICES = ( ('BB', _('Banska Bystrica')), ('BS', _('Banska Stiavnica')), ('BJ', _('Bardejov')), ('BN', _('Banovce nad Bebravou')), ('BR', _('Brezno')), ('BA1', _('Bratislava I')), ('BA2', _('Bratislava II')), ('BA3', _('Bratislava III')), ('BA4', _('Bratislava IV')), ('BA5', _('Bratislava V')), ('BY', _('Bytca')), ('CA', _('Cadca')), ('DT', _('Detva')), ('DK', _('Dolny Kubin')), ('DS', _('Dunajska Streda')), ('GA', _('Galanta')), ('GL', _('Gelnica')), ('HC', _('Hlohovec')), ('HE', _('Humenne')), ('IL', _('Ilava')), ('KK', _('Kezmarok')), ('KN', _('Komarno')), ('KE1', _('Kosice I')), ('KE2', _('Kosice II')), ('KE3', _('Kosice III')), ('KE4', _('Kosice IV')), ('KEO', _('Kosice - okolie')), ('KA', _('Krupina')), ('KM', _('Kysucke Nove Mesto')), ('LV', _('Levice')), ('LE', _('Levoca')), ('LM', _('Liptovsky Mikulas')), ('LC', _('Lucenec')), ('MA', _('Malacky')), ('MT', _('Martin')), ('ML', _('Medzilaborce')), ('MI', _('Michalovce')), ('MY', _('Myjava')), ('NO', _('Namestovo')), ('NR', _('Nitra')), ('NM', _('Nove Mesto nad Vahom')), ('NZ', _('Nove Zamky')), ('PE', _('Partizanske')), ('PK', _('Pezinok')), ('PN', _('Piestany')), ('PT', _('Poltar')), ('PP', _('Poprad')), ('PB', _('Povazska Bystrica')), ('PO', _('Presov')), ('PD', _('Prievidza')), ('PU', _('Puchov')), ('RA', _('Revuca')), ('RS', _('Rimavska Sobota')), ('RV', _('Roznava')), ('RK', _('Ruzomberok')), ('SB', _('Sabinov')), ('SC', _('Senec')), ('SE', _('Senica')), ('SI', _('Skalica')), ('SV', _('Snina')), ('SO', _('Sobrance')), ('SN', _('Spisska Nova Ves')), ('SL', _('Stara Lubovna')), ('SP', _('Stropkov')), ('SK', _('Svidnik')), ('SA', _('Sala')), ('TO', _('Topolcany')), ('TV', _('Trebisov')), ('TN', _('Trencin')), ('TT', _('Trnava')), ('TR', _('Turcianske Teplice')), ('TS', _('Tvrdosin')), ('VK', _('Velky Krtis')), ('VT', _('Vranov nad 
Toplou')), ('ZM', _('Zlate Moravce')), ('ZV', _('Zvolen')), ('ZC', _('Zarnovica')), ('ZH', _('Ziar nad Hronom')), ('ZA', _('Zilina')), )
Thraxis/SickRage
refs/heads/master
sickbeard/providers/shazbat.py
1
# Author: Nic Wolfe <nic@wolfeden.ca> # URL: http://code.google.com/p/sickbeard/ # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. from sickbeard import logger from sickbeard import tvcache from sickrage.helper.exceptions import AuthException from sickrage.providers.TorrentProvider import TorrentProvider class ShazbatProvider(TorrentProvider): def __init__(self): TorrentProvider.__init__(self, "Shazbat.tv") self.supports_backlog = False self.passkey = None self.ratio = None self.options = None self.cache = ShazbatCache(self) self.urls = {'base_url': u'http://www.shazbat.tv/', 'website': u'http://www.shazbat.tv/login',} self.url = self.urls['website'] def _check_auth(self): if not self.passkey: raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def _checkAuthFromData(self, data): if not self.passkey: self._check_auth() elif not (data['entries'] and data['feed']): logger.log(u"Invalid username or password. 
Check your settings", logger.WARNING) return True def seed_ratio(self): return self.ratio class ShazbatCache(tvcache.TVCache): def __init__(self, provider_obj): tvcache.TVCache.__init__(self, provider_obj) # only poll Shazbat feed every 15 minutes max self.minTime = 15 def _getRSSData(self): rss_url = self.provider.urls['base_url'] + 'rss/recent?passkey=' + provider.passkey + '&fname=true' logger.log(u"Cache update URL: %s" % rss_url, logger.DEBUG) return self.getRSSFeed(rss_url) def _checkAuth(self, data): return self.provider._checkAuthFromData(data) provider = ShazbatProvider()
alexpap/exareme
refs/heads/mip
exareme-tools/madis/src/functions/vtable/rc2db.py
4
import os.path import sys import functions import os from itertools import izip import cPickle import cStringIO import vtbase import struct import vtbase import os import gc import re import zlib ### Classic stream iterator registered=True BLOCK_SIZE = 200000000 import apsw import time import marshal class RC2DB(vtbase.VT): def VTiter(self, *args,**formatArgs): import bz2 import msgpack serializer = msgpack largs, dictargs = self.full_parse(args) where = None mode = 'row' input = cStringIO.StringIO() if 'file' in dictargs: where=dictargs['file'] else: raise functions.OperatorError(__name__.rsplit('.')[-1],"No destination provided") col = 0 filename, ext=os.path.splitext(os.path.basename(where)) if 'cols' in dictargs: a = re.split(' |,| , |, | ,' , dictargs['cols']) column = [x for x in a if x != ''] else: col = 1 start = 0 end = sys.maxint-1 if 'start' in dictargs: start = int(dictargs['start']) if 'end' in dictargs: end = int(dictargs['end']) fullpath = str(os.path.abspath(os.path.expandvars(os.path.expanduser(os.path.normcase(where))))) fileIterlist = [] for x in xrange(start,end+1): try: fileIterlist.append(open(fullpath+"."+str(x), "rb")) except: break if fileIterlist == []: try: fileIterlist = [open(where, "rb")] except : raise functions.OperatorError(__name__.rsplit('.')[-1],"No such file") cursor = [] for filenum,fileObject in enumerate(fileIterlist): b = struct.unpack('!B',fileObject.read(1)) schema = cPickle.load(fileObject) colnum = len(schema) readtype = '!'+'i'*colnum readsize = 4 * colnum if filenum == 0: yield schema def createdb(where, tname, schema, page_size=16384): c=apsw.Connection(where) cursor=c.cursor() list(cursor.execute('pragma page_size='+str(page_size)+';pragma cache_size=-1000;pragma legacy_file_format=false;pragma synchronous=0;pragma journal_mode=OFF;PRAGMA locking_mode = EXCLUSIVE')) create_schema='create table '+tname+' (' create_schema+='`'+unicode(schema[0][0])+'`'+ (' '+unicode(schema[0][1]) if schema[0][1]!=None else '') for 
colname, coltype in schema[1:]: create_schema+=',`'+unicode(colname)+'`'+ (' '+unicode(coltype) if coltype!=None else '') create_schema+='); begin exclusive;' list(cursor.execute(create_schema)) insertquery="insert into "+tname+' values('+','.join(['?']*len(schema))+')' return c, cursor, insertquery c, cursor, insertquery=createdb(where+".db", filename, schema) while True: try: b = struct.unpack('!B',fileObject.read(1)) except : break if b[0]: input.truncate(0) ind = struct.unpack(readtype,fileObject.read(readsize)) input.write(fileObject.read(sum(ind))) input.seek(0) gc.disable() cursor.executemany(insertquery, izip(*tuple(serializer.loads(zlib.decompress(input.read(ind[col]))) for col in xrange(colnum)))) gc.enable() elif not b[0]: schema = cPickle.load(fileObject) list(cursor.execute('commit')) c.close() try: for fileObject in fileIterlist: fileObject.close() except NameError: pass def Source(): return vtbase.VTGenerator(RC2DB) if not ('.' in __name__): """ This is needed to be able to test the function, put it at the end of every new function you create """ import sys import setpath from functions import * testfunction() if __name__ == "__main__": reload(sys) sys.setdefaultencoding('utf-8') import doctest doctest.testmod()
andreif/django
refs/heads/master
tests/messages_tests/test_cookie.py
299
import json from django.contrib.messages import constants from django.contrib.messages.storage.base import Message from django.contrib.messages.storage.cookie import ( CookieStorage, MessageDecoder, MessageEncoder, ) from django.test import SimpleTestCase, override_settings from django.utils.safestring import SafeData, mark_safe from .base import BaseTests def set_cookie_data(storage, messages, invalid=False, encode_empty=False): """ Sets ``request.COOKIES`` with the encoded data and removes the storage backend's loaded data cache. """ encoded_data = storage._encode(messages, encode_empty=encode_empty) if invalid: # Truncate the first character so that the hash is invalid. encoded_data = encoded_data[1:] storage.request.COOKIES = {CookieStorage.cookie_name: encoded_data} if hasattr(storage, '_loaded_data'): del storage._loaded_data def stored_cookie_messages_count(storage, response): """ Returns an integer containing the number of messages stored. """ # Get a list of cookies, excluding ones with a max-age of 0 (because # they have been marked for deletion). cookie = response.cookies.get(storage.cookie_name) if not cookie or cookie['max-age'] == 0: return 0 data = storage._decode(cookie.value) if not data: return 0 if data[-1] == CookieStorage.not_finished: data.pop() return len(data) @override_settings(SESSION_COOKIE_DOMAIN='.example.com', SESSION_COOKIE_SECURE=True, SESSION_COOKIE_HTTPONLY=True) class CookieTest(BaseTests, SimpleTestCase): storage_class = CookieStorage def stored_messages_count(self, storage, response): return stored_cookie_messages_count(storage, response) def test_get(self): storage = self.storage_class(self.get_request()) # Set initial data. example_messages = ['test', 'me'] set_cookie_data(storage, example_messages) # Test that the message actually contains what we expect. 
self.assertEqual(list(storage), example_messages) def test_cookie_setings(self): """ Ensure that CookieStorage honors SESSION_COOKIE_DOMAIN, SESSION_COOKIE_SECURE and SESSION_COOKIE_HTTPONLY Refs #15618 and #20972. """ # Test before the messages have been consumed storage = self.get_storage() response = self.get_response() storage.add(constants.INFO, 'test') storage.update(response) self.assertIn('test', response.cookies['messages'].value) self.assertEqual(response.cookies['messages']['domain'], '.example.com') self.assertEqual(response.cookies['messages']['expires'], '') self.assertEqual(response.cookies['messages']['secure'], True) self.assertEqual(response.cookies['messages']['httponly'], True) # Test deletion of the cookie (storing with an empty value) after the messages have been consumed storage = self.get_storage() response = self.get_response() storage.add(constants.INFO, 'test') for m in storage: pass # Iterate through the storage to simulate consumption of messages. storage.update(response) self.assertEqual(response.cookies['messages'].value, '') self.assertEqual(response.cookies['messages']['domain'], '.example.com') self.assertEqual(response.cookies['messages']['expires'], 'Thu, 01-Jan-1970 00:00:00 GMT') def test_get_bad_cookie(self): request = self.get_request() storage = self.storage_class(request) # Set initial (invalid) data. example_messages = ['test', 'me'] set_cookie_data(storage, example_messages, invalid=True) # Test that the message actually contains what we expect. self.assertEqual(list(storage), []) def test_max_cookie_length(self): """ Tests that, if the data exceeds what is allowed in a cookie, older messages are removed before saving (and returned by the ``update`` method). """ storage = self.get_storage() response = self.get_response() # When storing as a cookie, the cookie has constant overhead of approx # 54 chars, and each message has a constant overhead of about 37 chars # and a variable overhead of zero in the best case. 
We aim for a message # size which will fit 4 messages into the cookie, but not 5. # See also FallbackTest.test_session_fallback msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37) for i in range(5): storage.add(constants.INFO, str(i) * msg_size) unstored_messages = storage.update(response) cookie_storing = self.stored_messages_count(storage, response) self.assertEqual(cookie_storing, 4) self.assertEqual(len(unstored_messages), 1) self.assertEqual(unstored_messages[0].message, '0' * msg_size) def test_json_encoder_decoder(self): """ Tests that a complex nested data structure containing Message instances is properly encoded/decoded by the custom JSON encoder/decoder classes. """ messages = [ { 'message': Message(constants.INFO, 'Test message'), 'message_list': [Message(constants.INFO, 'message %s') for x in range(5)] + [{'another-message': Message(constants.ERROR, 'error')}], }, Message(constants.INFO, 'message %s'), ] encoder = MessageEncoder(separators=(',', ':')) value = encoder.encode(messages) decoded_messages = json.loads(value, cls=MessageDecoder) self.assertEqual(messages, decoded_messages) def test_safedata(self): """ Tests that a message containing SafeData is keeping its safe status when retrieved from the message storage. """ def encode_decode(data): message = Message(constants.DEBUG, data) encoded = storage._encode(message) decoded = storage._decode(encoded) return decoded.message storage = self.get_storage() self.assertIsInstance( encode_decode(mark_safe("<b>Hello Django!</b>")), SafeData) self.assertNotIsInstance( encode_decode("<b>Hello Django!</b>"), SafeData) def test_pre_1_5_message_format(self): """ For ticket #22426. Tests whether messages that were set in the cookie before the addition of is_safedata are decoded correctly. """ # Encode the messages using the current encoder. 
messages = [Message(constants.INFO, 'message %s') for x in range(5)] encoder = MessageEncoder(separators=(',', ':')) encoded_messages = encoder.encode(messages) # Remove the is_safedata flag from the messages in order to imitate # the behavior of before 1.5 (monkey patching). encoded_messages = json.loads(encoded_messages) for obj in encoded_messages: obj.pop(1) encoded_messages = json.dumps(encoded_messages, separators=(',', ':')) # Decode the messages in the old format (without is_safedata) decoded_messages = json.loads(encoded_messages, cls=MessageDecoder) self.assertEqual(messages, decoded_messages)
arju88nair/projectCulminate
refs/heads/master
venv/lib/python3.5/site-packages/pip/commands/completion.py
343
from __future__ import absolute_import import sys from pip.basecommand import Command BASE_COMPLETION = """ # pip %(shell)s completion start%(script)s# pip %(shell)s completion end """ COMPLETION_SCRIPTS = { 'bash': """ _pip_completion() { COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\ COMP_CWORD=$COMP_CWORD \\ PIP_AUTO_COMPLETE=1 $1 ) ) } complete -o default -F _pip_completion pip """, 'zsh': """ function _pip_completion { local words cword read -Ac words read -cn cword reply=( $( COMP_WORDS="$words[*]" \\ COMP_CWORD=$(( cword-1 )) \\ PIP_AUTO_COMPLETE=1 $words[1] ) ) } compctl -K _pip_completion pip """, 'fish': """ function __fish_complete_pip set -lx COMP_WORDS (commandline -o) "" set -lx COMP_CWORD (math (contains -i -- (commandline -t) $COMP_WORDS)-1) set -lx PIP_AUTO_COMPLETE 1 string split \ -- (eval $COMP_WORDS[1]) end complete -fa "(__fish_complete_pip)" -c pip """} class CompletionCommand(Command): """A helper command to be used for command completion.""" name = 'completion' summary = 'A helper command used for command completion.' def __init__(self, *args, **kw): super(CompletionCommand, self).__init__(*args, **kw) cmd_opts = self.cmd_opts cmd_opts.add_option( '--bash', '-b', action='store_const', const='bash', dest='shell', help='Emit completion code for bash') cmd_opts.add_option( '--zsh', '-z', action='store_const', const='zsh', dest='shell', help='Emit completion code for zsh') cmd_opts.add_option( '--fish', '-f', action='store_const', const='fish', dest='shell', help='Emit completion code for fish') self.parser.insert_option_group(0, cmd_opts) def run(self, options, args): """Prints the completion code of the given shell""" shells = COMPLETION_SCRIPTS.keys() shell_options = ['--' + shell for shell in sorted(shells)] if options.shell in shells: script = COMPLETION_SCRIPTS.get(options.shell, '') print(BASE_COMPLETION % {'script': script, 'shell': options.shell}) else: sys.stderr.write( 'ERROR: You must pass %s\n' % ' or '.join(shell_options) )
hoangt/tpzsimul.gem5
refs/heads/master
ext/ply/example/BASIC/basiclog.py
166
# An implementation of Dartmouth BASIC (1964) # import sys sys.path.insert(0,"../..") if sys.version_info[0] >= 3: raw_input = input import logging logging.basicConfig( level = logging.INFO, filename = "parselog.txt", filemode = "w" ) log = logging.getLogger() import basiclex import basparse import basinterp # If a filename has been specified, we try to run it. # If a runtime error occurs, we bail out and enter # interactive mode below if len(sys.argv) == 2: data = open(sys.argv[1]).read() prog = basparse.parse(data,debug=log) if not prog: raise SystemExit b = basinterp.BasicInterpreter(prog) try: b.run() raise SystemExit except RuntimeError: pass else: b = basinterp.BasicInterpreter({}) # Interactive mode. This incrementally adds/deletes statements # from the program stored in the BasicInterpreter object. In # addition, special commands 'NEW','LIST',and 'RUN' are added. # Specifying a line number with no code deletes that line from # the program. while 1: try: line = raw_input("[BASIC] ") except EOFError: raise SystemExit if not line: continue line += "\n" prog = basparse.parse(line,debug=log) if not prog: continue keys = list(prog) if keys[0] > 0: b.add_statements(prog) else: stat = prog[keys[0]] if stat[0] == 'RUN': try: b.run() except RuntimeError: pass elif stat[0] == 'LIST': b.list() elif stat[0] == 'BLANK': b.del_line(stat[1]) elif stat[0] == 'NEW': b.new()
Tejal011089/digitales_frappe
refs/heads/develop
frappe/translate.py
14
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals """ Contributing: 1. Add the .csv file 2. Run import 3. Then run translate """ # loading # doctype, page, report # boot(startup) # frappe.require # frappe._ import frappe, os, re, codecs, json from frappe.utils.jinja import render_include from jinja2 import TemplateError def guess_language_from_http_header(lang): """set frappe.local.lang from HTTP headers at beginning of request""" if not lang: return frappe.local.lang guess = None lang_list = get_all_languages() or [] if ";" in lang: # not considering weightage lang = lang.split(";")[0] if "," in lang: lang = lang.split(",") else: lang = [lang] for l in lang: code = l.strip() if code in lang_list: guess = code break # check if parent language (pt) is setup, if variant (pt-BR) if "-" in code: code = code.split("-")[0] if code in lang_list: guess = code break return guess or frappe.local.lang def get_user_lang(user=None): """set frappe.local.lang from user preferences on session beginning or resumption""" if not user: user = frappe.session.user # via cache lang = frappe.cache().get_value("lang:" + user) if not lang: # if defined in user profile user_lang = frappe.db.get_value("User", user, "language") if user_lang and user_lang!="Loading...": lang = get_lang_dict().get(user_lang) else: default_lang = frappe.db.get_default("lang") lang = default_lang or frappe.local.lang frappe.cache().set_value("lang:" + user, lang or "en") return lang def set_default_language(language): lang = get_lang_dict()[language] frappe.db.set_default("lang", lang) frappe.local.lang = lang def get_all_languages(): return [a.split()[0] for a in get_lang_info()] def get_lang_dict(): return dict([[a[1], a[0]] for a in [a.split(None, 1) for a in get_lang_info()]]) def get_lang_info(): return frappe.cache().get_value("langinfo", lambda:frappe.get_file_items(os.path.join(frappe.local.sites_path, 
"languages.txt"))) def get_dict(fortype, name=None): fortype = fortype.lower() cache = frappe.cache() cache_key = "translation_assets:" + frappe.local.lang asset_key = fortype + ":" + (name or "-") translation_assets = cache.get_value(cache_key) or {} if not asset_key in translation_assets: if fortype=="doctype": messages = get_messages_from_doctype(name) elif fortype=="page": messages = get_messages_from_page(name) elif fortype=="report": messages = get_messages_from_report(name) elif fortype=="include": messages = get_messages_from_include_files() elif fortype=="jsfile": messages = get_messages_from_file(name) elif fortype=="boot": messages = get_messages_from_include_files() messages += frappe.db.sql_list("select name from tabDocType") messages += frappe.db.sql_list("select name from tabRole") messages += frappe.db.sql_list("select name from `tabModule Def`") translation_assets[asset_key] = make_dict_from_messages(messages) cache.set_value(cache_key, translation_assets) return translation_assets[asset_key] def add_lang_dict(code): messages = extract_messages_from_code(code) code += "\n\n$.extend(frappe._messages, %s)" % json.dumps(make_dict_from_messages(messages)) return code def make_dict_from_messages(messages, full_dict=None): out = {} if full_dict==None: full_dict = get_full_dict(frappe.local.lang) for m in messages: if m in full_dict: out[m] = full_dict[m] return out def get_lang_js(fortype, name): return "\n\n$.extend(frappe._messages, %s)" % json.dumps(get_dict(fortype, name)) def get_full_dict(lang): if lang == "en": return {} return frappe.cache().get_value("lang:" + lang, lambda:load_lang(lang)) def load_lang(lang, apps=None): out = {} for app in (apps or frappe.get_all_apps(True)): path = os.path.join(frappe.get_pymodule_path(app), "translations", lang + ".csv") if os.path.exists(path): cleaned = dict([item for item in dict(read_csv_file(path)).iteritems() if item[1]]) out.update(cleaned) return out def clear_cache(): cache = frappe.cache() 
cache.delete_value("langinfo") for lang in get_all_languages(): cache.delete_value("lang:" + lang) cache.delete_value("translation_assets:" + lang) def get_messages_for_app(app): messages = [] modules = ", ".join(['"{}"'.format(m.title().replace("_", " ")) \ for m in frappe.local.app_modules[app]]) # doctypes if modules: for name in frappe.db.sql_list("""select name from tabDocType where module in ({})""".format(modules)): messages.extend(get_messages_from_doctype(name)) # pages for name, title in frappe.db.sql("""select name, title from tabPage where module in ({})""".format(modules)): messages.append(title or name) messages.extend(get_messages_from_page(name)) # reports for name in frappe.db.sql_list("""select tabReport.name from tabDocType, tabReport where tabReport.ref_doctype = tabDocType.name and tabDocType.module in ({})""".format(modules)): messages.append(name) messages.extend(get_messages_from_report(name)) # app_include_files messages.extend(get_messages_from_include_files(app)) # server_messages messages.extend(get_server_messages(app)) return list(set(messages)) def get_messages_from_doctype(name): messages = [] meta = frappe.get_meta(name) messages = [meta.name, meta.module] if meta.description: messages.append(meta.description) # translations of field labels, description and options for d in meta.get("fields"): messages.extend([d.label, d.description]) if d.fieldtype=='Select' and d.options \ and not d.options.startswith("attach_files:"): options = d.options.split('\n') if not "icon" in options[0]: messages.extend(options) # translations of roles for d in meta.get("permissions"): if d.role: messages.append(d.role) # extract from js, py files doctype_file_path = frappe.get_module_path(meta.module, "doctype", meta.name, meta.name) messages.extend(get_messages_from_file(doctype_file_path + ".js")) messages.extend(get_messages_from_file(doctype_file_path + "_list.js")) messages.extend(get_messages_from_file(doctype_file_path + "_list.html")) 
messages.extend(get_messages_from_file(doctype_file_path + "_calendar.js")) return clean(messages) def get_messages_from_page(name): return get_messages_from_page_or_report("Page", name) def get_messages_from_report(name): report = frappe.get_doc("Report", name) messages = get_messages_from_page_or_report("Report", name, frappe.db.get_value("DocType", report.ref_doctype, "module")) if report.query: messages.extend(re.findall('"([^:,^"]*):', report.query)) messages.append(report.report_name) return clean(messages) def get_messages_from_page_or_report(doctype, name, module=None): if not module: module = frappe.db.get_value(doctype, name, "module") file_path = frappe.get_module_path(module, doctype, name, name) messages = get_messages_from_file(file_path + ".js") messages += get_messages_from_file(file_path + ".html") messages += get_messages_from_file(file_path + ".py") return clean(messages) def get_server_messages(app): messages = [] for basepath, folders, files in os.walk(frappe.get_pymodule_path(app)): for dontwalk in (".git", "public", "locale"): if dontwalk in folders: folders.remove(dontwalk) for f in files: if f.endswith(".py") or f.endswith(".html") or f.endswith(".js"): messages.extend(get_messages_from_file(os.path.join(basepath, f))) return clean(messages) def get_messages_from_include_files(app_name=None): messages = [] for file in (frappe.get_hooks("app_include_js", app_name=app_name) or []) + (frappe.get_hooks("web_include_js", app_name=app_name) or []): messages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file))) return clean(messages) def get_messages_from_file(path): """get list of messages from a code file""" if os.path.exists(path): with open(path, 'r') as sourcefile: return extract_messages_from_code(sourcefile.read(), path.endswith(".py")) else: return [] def extract_messages_from_code(code, is_py=False): try: code = render_include(code) except TemplateError: # Exception will occur when it encounters John Resig's 
microtemplating code pass messages = [] messages += re.findall('_\("([^"]*)"', code) messages += re.findall("_\('([^']*)'", code) if is_py: messages += re.findall('_\("{3}([^"]*)"{3}.*\)', code, re.S) return clean(messages) def clean(messages): l = [] messages = list(set(messages)) for m in messages: if m: if re.search("[a-z]", m) and not m.startswith("icon-") and not m.endswith("px") and not m.startswith("eval:"): l.append(m) return l def read_csv_file(path): from csv import reader with codecs.open(path, 'r', 'utf-8') as msgfile: data = msgfile.read() data = data.replace(chr(28), "").replace(chr(29), "") data = reader([r.encode('utf-8') for r in data.splitlines()]) newdata = [[unicode(val, 'utf-8') for val in row] for row in data] return newdata def write_csv_file(path, app_messages, lang_dict): app_messages.sort() from csv import writer with open(path, 'w') as msgfile: w = writer(msgfile) for m in app_messages: t = lang_dict.get(m, '') # strip whitespaces t = re.sub('{\s?([0-9]+)\s?}', "{\g<1>}", t) w.writerow([m.encode('utf-8'), t.encode('utf-8')]) def get_untranslated(lang, untranslated_file, get_all=False): """translate objects using Google API. 
Add you own API key for translation""" clear_cache() apps = frappe.get_all_apps(True) messages = [] untranslated = [] for app in apps: messages.extend(get_messages_for_app(app)) messages = list(set(messages)) def escape_newlines(s): return (s.replace("\\\n", "|||||") .replace("\\n", "||||") .replace("\n", "|||")) if get_all: print str(len(messages)) + " messages" with open(untranslated_file, "w") as f: for m in messages: # replace \n with ||| so that internal linebreaks don't get split f.write((escape_newlines(m) + os.linesep).encode("utf-8")) else: full_dict = get_full_dict(lang) for m in messages: if not full_dict.get(m): untranslated.append(m) if untranslated: print str(len(untranslated)) + " missing translations of " + str(len(messages)) with open(untranslated_file, "w") as f: for m in untranslated: # replace \n with ||| so that internal linebreaks don't get split f.write((escape_newlines(m) + os.linesep).encode("utf-8")) else: print "all translated!" def update_translations(lang, untranslated_file, translated_file): clear_cache() full_dict = get_full_dict(lang) def restore_newlines(s): return (s.replace("|||||", "\\\n") .replace("| | | | |", "\\\n") .replace("||||", "\\n") .replace("| | | |", "\\n") .replace("|||", "\n") .replace("| | |", "\n")) translation_dict = {} for key, value in zip(frappe.get_file_items(untranslated_file, ignore_empty_lines=False), frappe.get_file_items(translated_file, ignore_empty_lines=False)): # undo hack in get_untranslated translation_dict[restore_newlines(key)] = restore_newlines(value) full_dict.update(translation_dict) for app in frappe.get_all_apps(True): write_translations_file(app, lang, full_dict) def rebuild_all_translation_files(): for lang in get_all_languages(): for app in frappe.get_all_apps(): write_translations_file(app, lang) def write_translations_file(app, lang, full_dict=None, app_messages=None): if not app_messages: app_messages = get_messages_for_app(app) if not app_messages: return tpath = 
frappe.get_pymodule_path(app, "translations") frappe.create_folder(tpath) write_csv_file(os.path.join(tpath, lang + ".csv"), app_messages, full_dict or get_full_dict(lang)) def send_translations(translation_dict): """send these translations in response""" if "__messages" not in frappe.local.response: frappe.local.response["__messages"] = {} frappe.local.response["__messages"].update(translation_dict)
tony/kivy
refs/heads/master
kivy/uix/relativelayout.py
18
''' Relative Layout =============== .. versionadded:: 1.4.0 This layout allows you to set relative coordinates for children. If you want absolute positioning, use the :class:`~kivy.uix.floatlayout.FloatLayout`. The :class:`RelativeLayout` class behaves just like the regular :class:`FloatLayout` except that its child widgets are positioned relative to the layout. When a widget with position = (0,0) is added to a RelativeLayout, the child widget will also move when the position of the RelativeLayout is changed. The child widgets coordinates remain (0,0) as they are always relative to the parent layout. Coordinate Systems ------------------ Window coordinates ~~~~~~~~~~~~~~~~~~ By default, there's only one coordinate system that defines the position of widgets and touch events dispatched to them: the window coordinate system, which places (0, 0) at the bottom left corner of the window. Although there are other coordinate systems defined, e.g. local and parent coordinates, these coordinate systems are identical to the window coordinate system as long as a relative layout type widget is not in the widget's parent stack. When widget.pos is read or a touch is received, the coordinate values are in parent coordinates, but as mentioned, these are identical to window coordinates, even in complex widget stacks. For example: .. code-block:: kv BoxLayout: Label: text: 'Left' Button: text: 'Middle' on_touch_down: print('Middle: {}'.format(args[1].pos)) BoxLayout: on_touch_down: print('Box: {}'.format(args[1].pos)) Button: text: 'Right' on_touch_down: print('Right: {}'.format(args[1].pos)) When the middle button is clicked and the touch propagates through the different parent coordinate systems, it prints the following:: >>> Box: (430.0, 282.0) >>> Right: (430.0, 282.0) >>> Middle: (430.0, 282.0) As claimed, the touch has identical coordinates to the window coordinates in every coordinate system. 
:meth:`~kivy.uix.widget.Widget.collide_point` for example, takes the point in window coordinates. Parent coordinates ~~~~~~~~~~~~~~~~~~ Other :class:`RelativeLayout` type widgets are :class:`~kivy.uix.scatter.Scatter`, :class:`~kivy.uix.scatterlayout.ScatterLayout`, and :class:`~kivy.uix.scrollview.ScrollView`. If such a special widget is in the parent stack, only then does the parent and local coordinate system diverge from the window coordinate system. For each such widget in the stack, a coordinate system with (0, 0) of that coordinate system being at the bottom left corner of that widget is created. **Position and touch coordinates received and read by a widget are in the coordinate system of the most recent special widget in its parent stack (not including itself) or in window coordinates if there are none** (as in the first example). We call these coordinates parent coordinates. For example: .. code-block:: kv BoxLayout: Label: text: 'Left' Button: text: 'Middle' on_touch_down: print('Middle: {}'.format(args[1].pos)) RelativeLayout: on_touch_down: print('Relative: {}'.format(args[1].pos)) Button: text: 'Right' on_touch_down: print('Right: {}'.format(args[1].pos)) Clicking on the middle button prints:: >>> Relative: (396.0, 298.0) >>> Right: (-137.33, 298.0) >>> Middle: (396.0, 298.0) As the touch propagates through the widgets, for each widget, the touch is received in parent coordinates. Because both the relative and middle widgets don't have these special widgets in their parent stack, the touch is the same as window coordinates. Only the right widget, which has a RelativeLayout in its parent stack, receives the touch in coordinates relative to that RelativeLayout which is different than window coordinates. Local and Widget coordinates ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When expressed in parent coordinates, the position is expressed in the coordinates of the most recent special widget in its parent stack, not including itself. 
When expressed in local or widget coordinates, the widgets themselves are also included. Changing the above example to transform the parent coordinates into local coordinates: .. code-block:: kv BoxLayout: Label: text: 'Left' Button: text: 'Middle' on_touch_down: print('Middle: {}'.format(\ self.to_local(*args[1].pos))) RelativeLayout: on_touch_down: print('Relative: {}'.format(\ self.to_local(*args[1].pos))) Button: text: 'Right' on_touch_down: print('Right: {}'.format(\ self.to_local(*args[1].pos))) Now, clicking on the middle button prints:: >>> Relative: (-135.33, 301.0) >>> Right: (-135.33, 301.0) >>> Middle: (398.0, 301.0) This is because now the relative widget also expresses the coordinates relative to itself. Coordinate transformations ~~~~~~~~~~~~~~~~~~~~~~~~~~ :class:`~kivy.uix.widget.Widget` provides 4 functions to transform coordinates between the various coordinate systems. For now, we assume that the `relative` keyword of these functions is `False`. :meth:`~kivy.uix.widget.Widget.to_widget` takes the coordinates expressed in window coordinates and returns them in local (widget) coordinates. :meth:`~kivy.uix.widget.Widget.to_window` takes the coordinates expressed in local coordinates and returns them in window coordinates. :meth:`~kivy.uix.widget.Widget.to_parent` takes the coordinates expressed in local coordinates and returns them in parent coordinates. :meth:`~kivy.uix.widget.Widget.to_local` takes the coordinates expressed in parent coordinates and returns them in local coordinates. Each of the 4 transformation functions take a `relative` parameter. When the relative parameter is True, the coordinates are returned or originate in true relative coordinates - relative to a coordinate system with its (0, 0) at the bottom left corner of the widget in question. .. 
_kivy-uix-relativelayout-common-pitfalls: Common Pitfalls --------------- As all positions within a :class:`RelativeLayout` are relative to the position of the layout itself, the position of the layout should never be used in determining the position of sub-widgets or the layout's :attr:`canvas`. Take the following kv code for example: .. container:: align-right .. figure:: images/relativelayout-fixedposition.png :scale: 50% expected result .. figure:: images/relativelayout-doubleposition.png :scale: 50% actual result .. code-block:: kv FloatLayout: Widget: size_hint: None, None size: 200, 200 pos: 200, 200 canvas: Color: rgba: 1, 1, 1, 1 Rectangle: pos: self.pos size: self.size RelativeLayout: size_hint: None, None size: 200, 200 pos: 200, 200 canvas: Color: rgba: 1, 0, 0, 0.5 Rectangle: pos: self.pos # incorrect size: self.size You might expect this to render a single pink rectangle; however, the content of the :class:`RelativeLayout` is already transformed, so the use of `pos: self.pos` will double that transformation. In this case, using `pos: 0, 0` or omitting `pos` completely will provide the expected result. This also applies to the position of sub-widgets. Instead of positioning a :class:`~kivy.uix.widget.Widget` based on the layout's own position: .. code-block:: kv RelativeLayout: Widget: pos: self.parent.pos Widget: center: self.parent.center use the :attr:`pos_hint` property: .. code-block:: kv RelativeLayout: Widget: pos_hint: {'x': 0, 'y': 0} Widget: pos_hint: {'center_x': 0.5, 'center_y': 0.5} .. versionchanged:: 1.7.0 Prior to version 1.7.0, the :class:`RelativeLayout` was implemented as a :class:`~kivy.uix.floatlayout.FloatLayout` inside a :class:`~kivy.uix.scatter.Scatter`. This behaviour/widget has been renamed to `ScatterLayout`. The :class:`RelativeLayout` now only supports relative positions (and can't be rotated, scaled or translated on a multitouch system using two or more fingers). 
This was done so that the implementation could be optimized and avoid the heavier calculations of :class:`Scatter` (e.g. inverse matrix, recalculating multiple properties etc.) ''' __all__ = ('RelativeLayout', ) from kivy.uix.floatlayout import FloatLayout class RelativeLayout(FloatLayout): '''RelativeLayout class, see module documentation for more information. ''' def __init__(self, **kw): super(RelativeLayout, self).__init__(**kw) funbind = self.funbind trigger = self._trigger_layout funbind('pos', trigger) funbind('pos_hint', trigger) def do_layout(self, *args): super(RelativeLayout, self).do_layout(pos=(0, 0)) def to_parent(self, x, y, **k): return (x + self.x, y + self.y) def to_local(self, x, y, **k): return (x - self.x, y - self.y) def _apply_transform(self, m, pos=None): m.translate(self.x, self.y, 0) return super(RelativeLayout, self)._apply_transform(m, (0, 0)) def on_touch_down(self, touch): x, y = touch.x, touch.y touch.push() touch.apply_transform_2d(self.to_local) ret = super(RelativeLayout, self).on_touch_down(touch) touch.pop() return ret def on_touch_move(self, touch): x, y = touch.x, touch.y touch.push() touch.apply_transform_2d(self.to_local) ret = super(RelativeLayout, self).on_touch_move(touch) touch.pop() return ret def on_touch_up(self, touch): x, y = touch.x, touch.y touch.push() touch.apply_transform_2d(self.to_local) ret = super(RelativeLayout, self).on_touch_up(touch) touch.pop() return ret
shingonoide/odoo
refs/heads/deverp_8.0
addons/website_mail_group/models/mail_group.py
321
# -*- coding: utf-8 -*- from openerp.osv import osv from openerp import tools from openerp.tools.translate import _ from openerp.tools.safe_eval import safe_eval as eval from openerp.addons.website.models.website import slug class MailGroup(osv.Model): _inherit = 'mail.group' def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None): res = super(MailGroup, self).message_get_email_values(cr, uid, id, notif_mail=notif_mail, context=context) group = self.browse(cr, uid, id, context=context) base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url') headers = {} if res.get('headers'): try: headers = eval(res['headers']) except Exception: pass headers.update({ 'List-Archive': '<%s/groups/%s>' % (base_url, slug(group)), 'List-Subscribe': '<%s/groups>' % (base_url), 'List-Unsubscribe': '<%s/groups?unsubscribe>' % (base_url,), }) res['headers'] = repr(headers) return res class MailMail(osv.Model): _inherit = 'mail.mail' def send_get_mail_body(self, cr, uid, mail, partner=None, context=None): """ Short-circuit parent method for mail groups, replace the default footer with one appropriate for mailing-lists.""" if mail.model == 'mail.group' and mail.res_id: # no super() call on purpose, no private links that could be quoted! 
group = self.pool['mail.group'].browse(cr, uid, mail.res_id, context=context) base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url') vals = { 'maillist': _('Mailing-List'), 'post_to': _('Post to'), 'unsub': _('Unsubscribe'), 'mailto': 'mailto:%s@%s' % (group.alias_name, group.alias_domain), 'group_url': '%s/groups/%s' % (base_url, slug(group)), 'unsub_url': '%s/groups?unsubscribe' % (base_url,), } footer = """_______________________________________________ %(maillist)s: %(group_url)s %(post_to)s: %(mailto)s %(unsub)s: %(unsub_url)s """ % vals body = tools.append_content_to_html(mail.body, footer, container_tag='div') return body else: return super(MailMail, self).send_get_mail_body(cr, uid, mail, partner=partner, context=context)
jefflyn/buddha
refs/heads/master
src/mlia/Ch02/EXTRAS/createDist.py
5
''' Created on Oct 6, 2010 @author: Peter ''' from numpy import * import matplotlib import matplotlib.pyplot as plt from matplotlib.patches import Rectangle n = 1000 #number of points to create xcord = zeros((n)) ycord = zeros((n)) markers =[] colors =[] fw = open('testSet.txt','w') for i in range(n): [r0,r1] = random.standard_normal(2) myClass = random.uniform(0,1) if (myClass <= 0.16): fFlyer = random.uniform(22000, 60000) tats = 3 + 1.6*r1 markers.append(20) colors.append(2.1) classLabel = 1 #'didntLike' print ("%d, %f, class1") % (fFlyer, tats) elif ((myClass > 0.16) and (myClass <= 0.33)): fFlyer = 6000*r0 + 70000 tats = 10 + 3*r1 + 2*r0 markers.append(20) colors.append(1.1) classLabel = 1 #'didntLike' print ("%d, %f, class1") % (fFlyer, tats) elif ((myClass > 0.33) and (myClass <= 0.66)): fFlyer = 5000*r0 + 10000 tats = 3 + 2.8*r1 markers.append(30) colors.append(1.1) classLabel = 2 #'smallDoses' print ("%d, %f, class2") % (fFlyer, tats) else: fFlyer = 10000*r0 + 35000 tats = 10 + 2.0*r1 markers.append(50) colors.append(0.1) classLabel = 3 #'largeDoses' print ("%d, %f, class3") % (fFlyer, tats) if (tats < 0): tats =0 if (fFlyer < 0): fFlyer =0 xcord[i] = fFlyer; ycord[i]=tats fw.write("%d\t%f\t%f\t%d\n" % (fFlyer, tats, random.uniform(0.0, 1.7), classLabel)) fw.close() fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(xcord,ycord, c=colors, s=markers) type1 = ax.scatter([-10], [-10], s=20, c='red') type2 = ax.scatter([-10], [-15], s=30, c='green') type3 = ax.scatter([-10], [-20], s=50, c='blue') ax.legend([type1, type2, type3], ["Class 1", "Class 2", "Class 3"], loc=2) #ax.axis([-5000,100000,-2,25]) plt.xlabel('Frequent Flyier Miles Earned Per Year') plt.ylabel('Percentage of Body Covered By Tatoos') plt.show()
jwlawson/tensorflow
refs/heads/master
tensorflow/contrib/labeled_tensor/python/ops/core_test.py
112
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import operator import re import textwrap import numpy as np from six.moves import range # pylint: disable=redefined-builtin from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.contrib.labeled_tensor.python.ops import test_util from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test as test_lib class AxisTest(test_lib.TestCase): def setUp(self): d_7 = tensor_shape.Dimension(7) p_rgb = ['red', 'green', 'blue'] self.i_7 = core.Axis('7', d_7) self.i_7p = core.Axis('7prime', d_7) self.i_rgb = core.Axis('rgb', p_rgb) self.i_range = core.Axis('range', range(7)) self.i_unknown = core.Axis('unknown', None) def test_equality(self): axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown] for i, axis_0 in enumerate(axes): for j, axis_1 in enumerate(axes): if i == j: self.assertEqual(axis_0, axis_1) else: 
self.assertNotEqual(axis_0, axis_1) def test_axis_value(self): self.assertEqual(self.i_7.value, tensor_shape.Dimension(7)) self.assertTrue(self.i_range.value == tuple(range(7))) def test_axis_input(self): axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown] for axis in axes: self.assertEqual(axis, core.Axis(axis.name, axis.value)) def test_axis_value_input(self): axis = self.i_range for value in [range(7), list(range(7)), np.arange(7)]: self.assertEqual(axis, core.Axis(axis.name, value)) def test_size(self): self.assertEqual(len(self.i_7), 7) self.assertEqual(len(self.i_rgb), 3) self.assertEqual(len(self.i_range), 7) self.assertEqual(self.i_unknown.size, None) def test_concat_single(self): red = core.Axis('rgb', ['red']) self.assertEqual(core.concat_axes([red]), red) def test_concat_many(self): red = core.Axis('rgb', ['red']) green = core.Axis('rgb', ['green']) blue = core.Axis('rgb', ['blue']) red_green_blue = core.Axis('rgb', ['red', 'green', 'blue']) self.assertEqual(core.concat_axes([red, green, blue]), red_green_blue) def test_concat_different_names(self): red = core.Axis('red', ['red']) green = core.Axis('green', ['red']) with self.assertRaises(ValueError): core.concat_axes([red, green]) def test_concat_unknown(self): red = core.Axis('rgb', None) green = core.Axis('rgb', None) self.assertEqual(core.concat_axes([red, green]), red) def test_repr(self): self.assertEqual("Axis('7', Dimension(7))", repr(self.i_7)) def test_invalid_input(self): with self.assertRaises(TypeError): core.Axis('foo', [{}]) with self.assertRaises(ValueError): core.Axis('foo', [1, 2, 3, 1]) red = core.Axis('foo', ['red']) with self.assertRaises(tc.Error): core.concat_axes([red, 1]) def test_as_axis(self): self.assertEqual(self.i_7, core.as_axis(('7', 7))) self.assertEqual(self.i_7, core.as_axis(self.i_7)) class AxesTest(test_lib.TestCase): def setUp(self): d_7 = tensor_shape.Dimension(7) d_8 = tensor_shape.Dimension(8) p_rgb = ['red', 'green', 'blue'] p_range = range(7) 
self.i_8 = core.Axis('8', d_8) self.a0 = core.Axes([('d7', d_7)]) self.a1 = core.Axes([('d7', d_7)]) self.a2 = core.Axes([('d7', d_7), ('rgb', p_rgb)]) self.a3 = core.Axes([('8', d_8), ('range', p_range)]) def test_equality(self): self.assertEqual(self.a0, self.a0) self.assertEqual(self.a0, self.a1) self.assertNotEqual(self.a0, self.a2) def test_repr(self): self.assertEqual("Axes([('d7', Dimension(7))])", repr(self.a0)) def test_remove(self): a = self.a3.remove('range') self.assertEqual(a, core.Axes([self.i_8])) with self.assertRaises(KeyError): self.a3.remove('foobar') def test_typecheck_error_message(self): pattern = ('List(Union(labeled_tensor.Axis, Tuple(..., ' 'Union(Union(numpy.ndarray, %s, list, tuple), ' 'Optional(Union(tensorflow.Dimension, int))))))' % range.__name__) regexp = re.escape(pattern).replace(re.escape('...'), '.*') with self.assertRaisesRegexp(tc.Error, 'allowed type ' + regexp): core.Axes(None) class LabeledTensorTest(test_util.Base): def setUp(self): tensor = array_ops.ones([7, 3, 8, 1]) a0 = ('x', range(7)) a1 = ('channel', ['red', 'green', 'blue']) a2 = ('y', 8) a3 = ('z', tensor_shape.Dimension(1)) self.lt = core.LabeledTensor(tensor, [a0, a1, a2, a3]) def test_repr(self): pattern = textwrap.dedent("""\ <LabeledTensor '...' 
shape=(7, 3, 8, 1) dtype=float32 axes=[('x', ...), ('channel', ...), ('y', Dimension(8)), ('z', Dimension(1))]>""") regexp = re.escape(pattern).replace(re.escape('...'), '.*') self.assertRegexpMatches(repr(self.lt), regexp) def test_reuse_existing_axes(self): alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes) self.assertLabeledTensorsEqual(alt_lt, self.lt) def test_reuse_existing_axis_objects(self): alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes.values()) self.assertLabeledTensorsEqual(alt_lt, self.lt) def test_indexing_scalars(self): actual = self.lt[:, :, :, 0] expected = core.LabeledTensor(self.lt.tensor[:, :, :, 0], list(self.lt.axes.values())[:-1]) self.assertLabeledTensorsEqual(actual, expected) actual = self.lt[1, :, :, 0] expected = core.LabeledTensor(self.lt.tensor[1, :, :, 0], list(self.lt.axes.values())[1:-1]) self.assertLabeledTensorsEqual(actual, expected) actual = self.lt[1, 2, :, 0] expected = core.LabeledTensor(self.lt.tensor[1, 2, :, 0], list(self.lt.axes.values())[2:-1]) self.assertLabeledTensorsEqual(actual, expected) def test_indexing_1d(self): lt_1d = self.lt[1, 2, :, 0] actual = lt_1d[3] expected = core.LabeledTensor(lt_1d.tensor[3], []) self.assertLabeledTensorsEqual(actual, expected) def test_indexing_slices(self): actual = self.lt[:3, :, :, :] axes = [('x', range(3))] + list(self.lt.axes.values())[1:] expected = core.LabeledTensor(self.lt.tensor[:3, :, :, :], axes) self.assertLabeledTensorsEqual(actual, expected) def test_invalid_indexing(self): with self.assertRaises(ValueError): self.lt[0] # pylint: disable=pointless-statement with self.assertRaises(ValueError): self.lt[:, :, :, :, 0] # pylint: disable=pointless-statement def test_unknown_size(self): tensor = array_ops.placeholder(dtypes.string, [None]) actual = core.LabeledTensor(tensor, ['x']) self.assertIsNone(actual.axes['x'].size) self.assertIs(actual.axes['x'].value, tensor.get_shape()[0]) def test_eq(self): self.assertEqual(self.lt, self.lt) 
self.assertNotEqual(self.lt, self.lt.tensor) self.assertNotEqual(self.lt.tensor, self.lt) def test_hash(self): lt1 = self.lt lt2 = core.LabeledTensor(self.lt.tensor, self.lt.axes) self.assertEqual(lt1, lt2) self.assertEqual(hash(lt1), hash(lt2)) def test_name(self): self.assertEqual(self.lt.name, self.lt.tensor.name) def test_dtype(self): self.assertEqual(self.lt.dtype, self.lt.tensor.dtype) def test_get_shape(self): self.assertEqual(self.lt.get_shape(), self.lt.tensor.get_shape()) def test_convert_to_tensor(self): expected = self.lt.tensor actual = ops.convert_to_tensor(self.lt) self.assertIs(expected, actual) class Base(test_util.Base): def setUp(self): self.x_size = 7 self.channel_size = 3 self.z_size = 4 self.probs_size = 11 tensor = math_ops.range(0, self.x_size * self.channel_size * self.z_size * self.probs_size) tensor = array_ops.reshape( tensor, [self.x_size, self.channel_size, self.z_size, self.probs_size]) a0 = ('x', range(self.x_size)) a1 = ('channel', ['red', 'green', 'blue']) a2 = 'z' a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size)) self.tensor = tensor self.a0 = a0 self.a1 = a1 self.a2 = a2 self.a3 = a3 self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3]) self.x_probs_lt = core.slice_function(self.original_lt, {'z': 0, 'channel': 0}) self.channel_probs_lt = core.slice_function(self.original_lt, {'x': 3, 'z': 0}) class IdentityTest(Base): def test_name(self): identity_lt = core.identity(self.original_lt) self.assertIn('lt_identity', identity_lt.name) class SliceFunctionTest(Base): def test_name(self): select_lt = core.slice_function(self.original_lt, {'channel': 1}) self.assertIn('lt_slice', select_lt.name) def test_scalar(self): select_lt = core.slice_function(self.original_lt, {'channel': 1}) golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :], [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slice(self): select_lt = core.slice_function(self.original_lt, {'channel': slice(0, 2)}) 
a1_sliced = ('channel', ['red', 'green']) golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :], [self.a0, a1_sliced, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slices(self): select_lt = core.slice_function( self.original_lt, {'x': slice(1, 5), 'channel': slice(1, None)}) a0_sliced = ('x', range(1, 5)) a1_sliced = ('channel', ['green', 'blue']) golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :], [a0_sliced, a1_sliced, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slice_unlabeled(self): select_lt = core.slice_function(self.original_lt, {'z': slice(1, 3)}) a2_sliced = 'z' golden_lt = core.LabeledTensor(self.tensor[:, :, 1:3, :], [self.a0, self.a1, a2_sliced, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slice_unknown_shape(self): lt = core.LabeledTensor( array_ops.placeholder(dtypes.float32, [None, 1]), ['x', 'y']) sliced_lt = core.slice_function(lt, {'y': 0}) self.assertEqual(list(sliced_lt.axes.values()), [lt.axes['x']]) class TransposeTest(Base): def test_name(self): transpose_lt = core.transpose(self.original_lt, self.original_lt.axes.keys()) self.assertIn('lt_transpose', transpose_lt.name) def test_identity(self): transpose_lt = core.transpose(self.original_lt, self.original_lt.axes.keys()) golden_lt = self.original_lt self.assertLabeledTensorsEqual(transpose_lt, golden_lt) def test(self): transpose_lt = core.transpose(self.original_lt, ['z', 'channel', 'x', 'probs']) golden_lt = core.LabeledTensor( array_ops.transpose(self.tensor, [2, 1, 0, 3]), [self.a2, self.a1, self.a0, self.a3]) self.assertLabeledTensorsEqual(transpose_lt, golden_lt) def test_default_axis_order(self): transpose_lt = core.transpose(self.original_lt) golden_lt = core.LabeledTensor( array_ops.transpose(self.tensor, [3, 2, 1, 0]), list(reversed(list(self.original_lt.axes.values())))) self.assertLabeledTensorsEqual(transpose_lt, golden_lt) def test_invalid_input(self): with 
self.assertRaises(ValueError): core.transpose(self.original_lt, ['channel', 'x', 'probs']) with self.assertRaises(ValueError): core.transpose(self.original_lt, ['z', 'foo', 'x', 'probs']) class ExpandDimsTest(Base): def test_name(self): expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys()) self.assertIn('lt_expand', expand_lt.name) def test_identity(self): expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys()) golden_lt = self.original_lt self.assertLabeledTensorsEqual(expand_lt, golden_lt) def test(self): expand_lt = core.expand_dims( self.original_lt, ['foo', 'x', 'bar', 'channel', 'z', 'probs', 'grok']) golden_lt = core.LabeledTensor( array_ops.reshape(self.tensor, [ 1, self.x_size, 1, self.channel_size, self.z_size, self.probs_size, 1 ]), ['foo', self.a0, 'bar', self.a1, self.a2, self.a3, 'grok']) self.assertLabeledTensorsEqual(expand_lt, golden_lt) def test_label(self): expand_lt = core.expand_dims(self.original_lt, [ 'x', 'channel', ('foo', 'bar'), 'z', 'probs', ]) golden_lt = core.LabeledTensor( array_ops.reshape( self.tensor, [self.x_size, self.channel_size, 1, self.z_size, self.probs_size]), [self.a0, self.a1, ('foo', ['bar']), self.a2, self.a3]) self.assertLabeledTensorsEqual(expand_lt, golden_lt) def test_unknown_dimension(self): orig_lt = core.LabeledTensor( array_ops.placeholder(dtypes.float32, [None]), ['x']) expand_lt = core.expand_dims(orig_lt, ['x', 'y']) self.assertEqual(expand_lt.axes, core.Axes([('x', None), ('y', 1)])) def test_invalid_input(self): with self.assertRaises(core.AxisOrderError): core.expand_dims(self.original_lt, ['foo', 'not_x', 'bar', 'channel', 'z', 'probs', 'grok']) with self.assertRaises(core.AxisOrderError): core.expand_dims(self.original_lt, ['foo', 'z', 'bar', 'channel', 'x', 'probs', 'grok']) class AxisOrderScopeTest(Base): def test(self): xyz = ['x', 'y', 'z'] abc = ['a', 'b', 'c'] self.assertIsNone(core.get_axis_order()) with core.axis_order_scope(xyz): 
self.assertEqual(core.get_axis_order(), xyz) with core.axis_order_scope(): self.assertIsNone(core.get_axis_order()) with core.axis_order_scope(abc): self.assertEqual(core.get_axis_order(), abc) self.assertIsNone(core.get_axis_order()) self.assertEqual(core.get_axis_order(), xyz) self.assertIsNone(core.get_axis_order()) class CheckAxisOrderTest(Base): def test_passes(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order) core.check_axis_order(lt, axis_order) lt = core.LabeledTensor(array_ops.ones((1, 1, 1)), axis_order[1:]) core.check_axis_order(lt, axis_order) lt = core.LabeledTensor(array_ops.ones((1, 1, 1)), axis_order[:-1]) core.check_axis_order(lt, axis_order) def test_invalid(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order) with self.assertRaises(core.AxisOrderError): core.check_axis_order(lt) with self.assertRaises(core.AxisOrderError): core.check_axis_order(lt, axis_order[:-1]) with self.assertRaises(core.AxisOrderError): core.check_axis_order(lt, axis_order[::-1]) def test_scope(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order) with core.axis_order_scope(axis_order): core.check_axis_order(lt) class ImposeAxisOrderTest(Base): def test_identity(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor( array_ops.reshape(math_ops.range(24), (1, 2, 3, 4)), axis_order) actual = core.impose_axis_order(lt, axis_order) self.assertLabeledTensorsEqual(lt, actual) lt = core.LabeledTensor( array_ops.reshape(math_ops.range(6), (1, 2, 3)), axis_order[:3]) actual = core.impose_axis_order(lt, axis_order) self.assertLabeledTensorsEqual(lt, actual) def test_reverse(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor( array_ops.reshape(math_ops.range(24), (1, 2, 3, 4)), axis_order) actual = core.impose_axis_order(lt, axis_order[::-1]) expected = core.transpose(lt, axis_order[::-1]) 
self.assertLabeledTensorsEqual(expected, actual) lt = core.LabeledTensor( array_ops.reshape(math_ops.range(6), (1, 2, 3)), axis_order[:3]) actual = core.impose_axis_order(lt, axis_order[::-1]) expected = core.transpose(lt, ['y', 'x', 'w']) self.assertLabeledTensorsEqual(expected, actual) def test_scope(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor( array_ops.reshape(math_ops.range(24), (1, 2, 3, 4)), axis_order) expected = core.transpose(lt, axis_order[::-1]) with core.axis_order_scope(axis_order[::-1]): actual = core.impose_axis_order(lt) self.assertLabeledTensorsEqual(expected, actual) def test_invalid(self): lt = core.LabeledTensor( array_ops.reshape(math_ops.range(2), (1, 2)), ['x', 'y']) with self.assertRaises(ValueError): core.impose_axis_order(lt) with self.assertRaises(ValueError): core.impose_axis_order(lt, ['x']) class FindConsistentOrderingTest(Base): def test(self): cases = [ ([], [], []), (['x'], [], ['x']), ([], ['x'], ['x']), (['x'], ['x'], ['x']), (['x'], ['y'], ['x', 'y']), (['y'], ['x'], ['y', 'x']), (['x', 'y'], ['x', 'y'], ['x', 'y']), (['x', 'y'], ['y', 'x'], None), (['x', 'y'], ['y', 'z'], ['x', 'y', 'z']), (['x', 'z'], ['y', 'z'], ['x', 'y', 'z']), (['x', 'y'], ['x', 'z'], ['x', 'y', 'z']), (['w', 'x'], ['y', 'z'], ['w', 'x', 'y', 'z']), (['x', 'y', 'z'], ['z', 'x'], None), (['x', 'y', 'z'], ['x'], ['x', 'y', 'z']), ([], ['x', 'y', 'z'], ['x', 'y', 'z']), ] for a, b, expected in cases: actual = core._find_consistent_ordering(a, b) msg = ('unexpected ordering between %r and %r:\nexpected: %r\nactual: %r' % (a, b, expected, actual)) self.assertEqual(expected, actual, msg=msg) class AlignTest(Base): def test_name(self): align_lt_0, align_lt_1, _ = core.align(self.original_lt, self.original_lt) self.assertIn('lt_align', align_lt_0.name) self.assertIn('/0', align_lt_0.name) self.assertIn('lt_align', align_lt_1.name) self.assertIn('/1', align_lt_1.name) def test_identical_shaped_inputs(self): offset_tensor = 
self.original_lt.tensor + 1 offset_lt = core.LabeledTensor(offset_tensor, self.original_lt.axes) align_lt, align_offset_lt, broadcast_axes = core.align(self.original_lt, offset_lt) self.assertLabeledTensorsEqual(align_lt, self.original_lt) self.assertLabeledTensorsEqual(align_offset_lt, offset_lt) self.assertEqual(broadcast_axes, self.original_lt.axes) def test_different_inputs(self): # The correct axis ordering is ['x', 'channel', 'probs']. align_x_probs_lt, align_channel_probs_lt, broadcast_axes = core.align( self.x_probs_lt, self.channel_probs_lt) x_probs_golden_lt = core.LabeledTensor( array_ops.reshape(self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size]), [self.a0, 'channel', self.a3]) self.assertLabeledTensorsEqual(align_x_probs_lt, x_probs_golden_lt) channel_probs_golden_lt = core.LabeledTensor( array_ops.reshape(self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size]), ['x', self.a1, self.a3]) self.assertLabeledTensorsEqual(align_channel_probs_lt, channel_probs_golden_lt) self.assertEqual(broadcast_axes, core.Axes([self.a0, self.a1, self.a3])) def test_axis_order_scope(self): xz_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'z']) yz_lt = core.LabeledTensor(array_ops.ones((4, 3)), ['y', 'z']) _, _, broadcast_axes = core.align(xz_lt, yz_lt) self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z']) _, _, broadcast_axes = core.align(yz_lt, xz_lt) self.assertEqual(list(broadcast_axes.keys()), ['y', 'x', 'z']) with core.axis_order_scope(['x', 'y', 'z']): _, _, broadcast_axes = core.align(yz_lt, xz_lt) self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z']) with core.axis_order_scope(['x', 'y']): with self.assertRaises(core.AxisOrderError): core.align(xz_lt, yz_lt) with self.assertRaises(core.AxisOrderError): core.align(yz_lt, xz_lt) def test_invalid_input(self): lt_0 = core.LabeledTensor(array_ops.zeros([5]), [('a', range(5))]) lt_1 = core.LabeledTensor(array_ops.zeros([5]), [('a', range(1, 6))]) with 
self.assertRaises(ValueError): core.align(lt_0, lt_1) class ConvertToLabeledTensorTest(Base): # TODO(shoyer): Simplify these tests once we can reuse labeled tensors in # assertLabeledTensorsEqual. def test_labeled_tensor(self): actual = core.convert_to_labeled_tensor(self.original_lt) self.assertLabeledTensorsEqual(actual, self.original_lt) def test_python_scalar(self): actual = core.convert_to_labeled_tensor(42) golden_lt = core.LabeledTensor(ops.convert_to_tensor(42), []) self.assertLabeledTensorsEqual(actual, golden_lt) def test_numpy_array(self): actual = core.convert_to_labeled_tensor(np.array(42)) golden_lt = core.LabeledTensor(ops.convert_to_tensor(42), []) self.assertLabeledTensorsEqual(actual, golden_lt) def test_tensor(self): actual = core.convert_to_labeled_tensor(constant_op.constant(42)) golden_lt = core.LabeledTensor(ops.convert_to_tensor(42), []) self.assertLabeledTensorsEqual(actual, golden_lt) def test_invalid_input(self): with self.assertRaises(ValueError): core.convert_to_labeled_tensor(math_ops.range(5)) with self.assertRaises(ValueError): core.convert_to_labeled_tensor(np.array([1, 2])) class DocStringCheckMixin(object): # requires self.ops to be defined def test_function_docstring_and_name(self): for op_name, _, _, lt_op in self.ops: if lt_op is not None: self.assertIn('tf.%s' % op_name, lt_op.__doc__) self.assertEqual(op_name, lt_op.__name__) class UnaryOpsTestsMixin(object): # requires self.ops and self.test_lt to be defined def test_core_op(self): for op_name, _, tf_op, lt_op in self.ops: if tf_op is not None: golden_lt = core.LabeledTensor( tf_op(self.test_lt.tensor), self.test_lt.axes) actual_lt = lt_op(self.test_lt) self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(golden_lt, actual_lt) def test_infix(self): for op_name, infix_op, _, _ in self.ops: if infix_op is not None: expected_lt = core.LabeledTensor( infix_op(self.test_lt.tensor), self.test_lt.axes) actual_lt = infix_op(self.test_lt) self.assertIn(op_name, 
actual_lt.name) self.assertLabeledTensorsEqual(expected_lt, actual_lt) class CoreUnaryOpsTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin): def setUp(self): super(CoreUnaryOpsTest, self).setUp() self.ops = [ ('abs', operator.abs, math_ops.abs, core.abs_function), ('neg', operator.neg, math_ops.negative, core.neg), # TODO(shoyer): add unary + to core TensorFlow ('pos', None, None, None), ('sign', None, math_ops.sign, core.sign), ('reciprocal', None, math_ops.reciprocal, core.reciprocal), ('square', None, math_ops.square, core.square), ('round', None, math_ops.round, core.round_function), ('sqrt', None, math_ops.sqrt, core.sqrt), ('rsqrt', None, math_ops.rsqrt, core.rsqrt), ('log', None, math_ops.log, core.log), ('exp', None, math_ops.exp, core.exp), ('log', None, math_ops.log, core.log), ('ceil', None, math_ops.ceil, core.ceil), ('floor', None, math_ops.floor, core.floor), ('cos', None, math_ops.cos, core.cos), ('sin', None, math_ops.sin, core.sin), ('tan', None, math_ops.tan, core.tan), ('acos', None, math_ops.acos, core.acos), ('asin', None, math_ops.asin, core.asin), ('atan', None, math_ops.atan, core.atan), ('lgamma', None, math_ops.lgamma, core.lgamma), ('digamma', None, math_ops.digamma, core.digamma), ('erf', None, math_ops.erf, core.erf), ('erfc', None, math_ops.erfc, core.erfc), ('lgamma', None, math_ops.lgamma, core.lgamma), ] total_size = np.prod([v.size for v in self.original_lt.axes.values()]) self.test_lt = core.LabeledTensor( math_ops.cast(self.original_lt, dtypes.float32) / total_size, self.original_lt.axes) class LogicalNotTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin): def setUp(self): super(LogicalNotTest, self).setUp() self.ops = [('logical_not', operator.invert, math_ops.logical_not, core.logical_not),] self.test_lt = self.original_lt < 10 class BinaryOpsTestsMixin(object): # requires self.ops, self.test_lt_1, self.test_lt_2, self.test_lt_1_broadcast # and self.test_lt_2_broadcast to be defined def test_core_op(self): for op_name, _, 
tf_op, lt_op in self.ops: golden_tensor = tf_op(self.test_lt_1_broadcast, self.test_lt_2_broadcast) golden_lt = core.LabeledTensor(golden_tensor, self.broadcast_axes) actual_lt = lt_op(self.test_lt_1, self.test_lt_2) self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(golden_lt, actual_lt) def test_infix(self): for op_name, infix_op, _, lt_op in self.ops: if infix_op is not None: expected_lt = lt_op(self.test_lt_1, self.test_lt_2) actual_lt = infix_op(self.test_lt_1, self.test_lt_2) self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(expected_lt, actual_lt) class CoreBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin): def setUp(self): super(CoreBinaryOpsTest, self).setUp() self.x_probs_broadcast_tensor = array_ops.reshape( self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size]) self.channel_probs_broadcast_tensor = array_ops.reshape( self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size]) # == and != are not element-wise for tf.Tensor, so they shouldn't be # elementwise for LabeledTensor, either. 
self.ops = [ ('add', operator.add, math_ops.add, core.add), ('sub', operator.sub, math_ops.subtract, core.sub), ('mul', operator.mul, math_ops.multiply, core.mul), ('div', operator.truediv, math_ops.div, core.div), ('mod', operator.mod, math_ops.mod, core.mod), ('pow', operator.pow, math_ops.pow, core.pow_function), ('equal', None, math_ops.equal, core.equal), ('less', operator.lt, math_ops.less, core.less), ('less_equal', operator.le, math_ops.less_equal, core.less_equal), ('not_equal', None, math_ops.not_equal, core.not_equal), ('greater', operator.gt, math_ops.greater, core.greater), ('greater_equal', operator.ge, math_ops.greater_equal, core.greater_equal), ] self.test_lt_1 = self.x_probs_lt self.test_lt_2 = self.channel_probs_lt self.test_lt_1_broadcast = self.x_probs_broadcast_tensor self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor self.broadcast_axes = [self.a0, self.a1, self.a3] def test_reflexive(self): labeled_tensor = self.x_probs_lt + 1 # all elements must be >0 for division for op_name, infix_op, _, lt_op in self.ops: if infix_op is not None: expected_lt = lt_op(2, labeled_tensor) actual_lt = infix_op(2, labeled_tensor) # Python uses greater for the reflexive version of less (and vise-versa) if 'less' in op_name: op_name = op_name.replace('less', 'greater') elif 'greater' in op_name: op_name = op_name.replace('greater', 'less') self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(expected_lt, actual_lt) class LogicalBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin): def setUp(self): super(LogicalBinaryOpsTest, self).setUp() self.ops = [ ('logical_and', operator.and_, math_ops.logical_and, core.logical_and), ('logical_or', operator.or_, math_ops.logical_or, core.logical_or), ('logical_xor', operator.xor, math_ops.logical_xor, core.logical_xor), ] self.test_lt_1 = self.original_lt < 10 self.test_lt_2 = self.original_lt < 5 self.test_lt_1_broadcast = self.test_lt_1.tensor self.test_lt_2_broadcast = 
self.test_lt_2.tensor self.broadcast_axes = self.test_lt_1.axes class FloatBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin): def setUp(self): super(FloatBinaryOpsTest, self).setUp() self.ops = [ ('igamma', None, math_ops.igamma, core.igamma), ('igammac', None, math_ops.igammac, core.igammac), ('zeta', None, math_ops.zeta, core.zeta), ('polygamma', None, math_ops.polygamma, core.polygamma), ('maximum', None, math_ops.maximum, core.maximum), ('minimum', None, math_ops.minimum, core.minimum), ('squared_difference', None, math_ops.squared_difference, core.squared_difference), ] total_size = np.prod([v.size for v in self.original_lt.axes.values()]) test_lt = core.LabeledTensor( math_ops.cast(self.original_lt, dtypes.float32) / total_size, self.original_lt.axes) self.test_lt_1 = test_lt self.test_lt_2 = 1.0 - test_lt self.test_lt_1_broadcast = self.test_lt_1.tensor self.test_lt_2_broadcast = self.test_lt_2.tensor self.broadcast_axes = self.test_lt_1.axes if __name__ == '__main__': test_lib.main()
ashaarunkumar/spark-tk
refs/heads/master
python/sparktk/frame/ops/filter.py
14
# vim: set encoding=utf-8

#  Copyright (c) 2016 Intel Corporation 
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

from sparktk.frame.row import Row


def filter(self, predicate):
    """
    Select all rows which satisfy a predicate.

    Modifies the current frame to save defined rows and delete everything else.

    Parameters
    ----------

    :param predicate: (UDF) Function which evaluates a row to a boolean; rows that answer False
                      are dropped from the frame.

    Examples
    --------

        >>> frame = tc.frame.create([['Fred',39,16,'555-1234'],
        ...                          ['Susan',33,3,'555-0202'],
        ...                          ['Thurston',65,26,'555-4510'],
        ...                          ['Judy',44,14,'555-2183']],
        ...                         schema=[('name', str), ('age', int), ('tenure', int), ('phone', str)])

        >>> frame.inspect()
        [#]  name      age  tenure  phone
        ====================================
        [0]  Fred       39      16  555-1234
        [1]  Susan      33       3  555-0202
        [2]  Thurston   65      26  555-4510
        [3]  Judy       44      14  555-2183

        >>> frame.filter(lambda row: row.tenure >= 15)  # keep only people with 15 or more years tenure

        >>> frame.inspect()
        [#]  name      age  tenure  phone
        ====================================
        [0]  Fred       39      16  555-1234
        [1]  Thurston   65      26  555-4510

    More information on a |UDF| can be found at :doc:`/ds_apir`.
    """
    # One Row wrapper is created up front and reused for every raw Spark row;
    # it carries the frame's schema so the user's predicate can address
    # columns by name (e.g. row.tenure).
    wrapper = Row(self.schema)

    def apply_predicate(raw_row):
        # Point the shared wrapper at the incoming raw data, then delegate
        # the keep/drop decision to the user-supplied predicate.
        wrapper._set_data(raw_row)
        return predicate(wrapper)

    # Swap in the filtered RDD — the frame is mutated in place.
    self._python.rdd = self._python.rdd.filter(apply_predicate)
thebarbershopper/Empire
refs/heads/master
lib/modules/situational_awareness/host/dnsserver.py
22
from lib.common import helpers


class Module:
    """Empire module wrapper for the Get-SystemDNSServer PowerShell function.

    Enumerates the DNS servers used by the target system by walking the .NET
    NetworkInterface list on the agent side (from DarkOperator's Posh-SecMod).
    """

    def __init__(self, mainMenu, params=None):
        """
        :param mainMenu: the Empire main menu object, saved so the module can
                         access external functionality like listeners/agent
                         handlers/etc.
        :param params:   optional list of [Name, Value] pairs used to pre-set
                         module options.  Defaults to None instead of a
                         mutable [] so the default list is not shared between
                         Module instances.
        """

        # metadata displayed by the framework when listing/inspecting modules
        self.info = {
            'Name': 'Get-SystemDNSServer',

            'Author': ['DarkOperator'],

            'Description': ('Enumerates the DNS Servers used by a system.'),

            'Background' : False,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : True,

            'MinPSVersion' : '2',

            'Comments': [
                'https://github.com/darkoperator/Posh-SecMod/blob/master/Discovery/Discovery.psm1'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # apply any caller-supplied option overrides;
        # parameter format is [Name, Value]
        for param in (params or []):
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Build and return the PowerShell script to task to the agent.

        Any option other than 'Agent' with a non-empty value is appended to
        the trailing function invocation, either as a bare switch (value
        "true") or as '-Name value'.
        """
        script = """
function Get-SystemDNSServer
{
<#
.Synopsis
   Enumerates the DNS Servers used by a system
   Part of Posh-SecMod (https://github.com/darkoperator/Posh-SecMod/)
   Author: darkoperator

.DESCRIPTION
   Enumerates the DNS Servers used by a system returning an IP Address .Net object for each.

.EXAMPLE
C:\> Get-SystemDNSServer
Address            : 16885952
AddressFamily      : InterNetwork
ScopeId            :
IsIPv6Multicast    : False
IsIPv6LinkLocal    : False
IsIPv6SiteLocal    : False
IsIPv6Teredo       : False
IsIPv4MappedToIPv6 : False
IPAddressToString  : 192.168.1.1
#>
    $DNSServerAddresses = @()
    $interfaces = [System.Net.NetworkInformation.NetworkInterface]::GetAllNetworkInterfaces()
    foreach($interface in $interfaces)
    {
        if($interface.OperationalStatus -eq "Up")
        {
            $DNSConfig = $interface.GetIPProperties().DnsAddresses
            if (!$DNSConfig.IsIPv6SiteLocal)
            {
                $DNSServerAddresses += $DNSConfig
            }
        }
    }
    $DNSServerAddresses
}
Get-SystemDNSServer"""

        # .items() instead of the Python-2-only .iteritems(): identical
        # behavior on Python 2, and keeps the module importable on Python 3.
        for option, values in self.options.items():
            if option.lower() != "agent":
                if values['Value'] and values['Value'] != '':
                    if values['Value'].lower() == "true":
                        # if we're just adding a switch
                        script += " -" + str(option)
                    else:
                        script += " -" + str(option) + " " + str(values['Value'])

        return script
blooparksystems/odoo
refs/heads/9.0
openerp/api.py
1
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. """ This module provides the elements for managing two different API styles, namely the "traditional" and "record" styles. In the "traditional" style, parameters like the database cursor, user id, context dictionary and record ids (usually denoted as ``cr``, ``uid``, ``context``, ``ids``) are passed explicitly to all methods. In the "record" style, those parameters are hidden into model instances, which gives it a more object-oriented feel. For instance, the statements:: model = self.pool.get(MODEL) ids = model.search(cr, uid, DOMAIN, context=context) for rec in model.browse(cr, uid, ids, context=context): print rec.name model.write(cr, uid, ids, VALUES, context=context) may also be written as:: env = Environment(cr, uid, context) # cr, uid, context wrapped in env model = env[MODEL] # retrieve an instance of MODEL recs = model.search(DOMAIN) # search returns a recordset for rec in recs: # iterate over the records print rec.name recs.write(VALUES) # update all records in recs Methods written in the "traditional" style are automatically decorated, following some heuristics based on parameter names. 
""" __all__ = [ 'Environment', 'Meta', 'guess', 'noguess', 'model', 'multi', 'one', 'cr', 'cr_context', 'cr_uid', 'cr_uid_context', 'cr_uid_id', 'cr_uid_id_context', 'cr_uid_ids', 'cr_uid_ids_context', 'constrains', 'depends', 'onchange', 'returns', ] import logging import operator from inspect import currentframe, getargspec from collections import defaultdict, MutableMapping from contextlib import contextmanager from pprint import pformat from weakref import WeakSet from werkzeug.local import Local, release_local from openerp.tools import frozendict, classproperty _logger = logging.getLogger(__name__) # The following attributes are used, and reflected on wrapping methods: # - method._constrains: set by @constrains, specifies constraint dependencies # - method._depends: set by @depends, specifies compute dependencies # - method._returns: set by @returns, specifies return model # - method._onchange: set by @onchange, specifies onchange fields # - method.clear_cache: set by @ormcache, used to clear the cache # # On wrapping method only: # - method._api: decorator function, used for re-applying decorator # - method._orig: original method # WRAPPED_ATTRS = ('__module__', '__name__', '__doc__', '_constrains', '_depends', '_onchange', '_returns', 'clear_cache') INHERITED_ATTRS = ('_returns',) class Meta(type): """ Metaclass that automatically decorates traditional-style methods by guessing their API. It also implements the inheritance of the :func:`returns` decorators. 
""" def __new__(meta, name, bases, attrs): # dummy parent class to catch overridden methods decorated with 'returns' parent = type.__new__(meta, name, bases, {}) for key, value in attrs.items(): if not key.startswith('__') and callable(value): # make the method inherit from decorators value = propagate(getattr(parent, key, None), value) # guess calling convention if none is given if not hasattr(value, '_api'): try: value = guess(value) except TypeError: pass attrs[key] = value return type.__new__(meta, name, bases, attrs) identity = lambda x: x def decorate(method, attr, value): """ Decorate ``method`` or its original method. """ if getattr(method, '_api', False): # decorate the original method, and re-apply the api decorator setattr(method._orig, attr, value) return method._api(method._orig) else: # simply decorate the method itself setattr(method, attr, value) return method def propagate(from_method, to_method): """ Propagate decorators from ``from_method`` to ``to_method``, and return the resulting method. """ if from_method: for attr in INHERITED_ATTRS: if hasattr(from_method, attr) and not hasattr(to_method, attr): to_method = decorate(to_method, attr, getattr(from_method, attr)) return to_method def constrains(*args): """ Decorates a constraint checker. Each argument must be a field name used in the check:: @api.one @api.constrains('name', 'description') def _check_description(self): if self.name == self.description: raise ValidationError("Fields name and description must be different") Invoked on the records on which one of the named fields has been modified. Should raise :class:`~openerp.exceptions.ValidationError` if the validation failed. """ return lambda method: decorate(method, '_constrains', args) def onchange(*args): """ Return a decorator to decorate an onchange method for given fields. 
Each argument must be a field name:: @api.onchange('partner_id') def _onchange_partner(self): self.message = "Dear %s" % (self.partner_id.name or "") In the form views where the field appears, the method will be called when one of the given fields is modified. The method is invoked on a pseudo-record that contains the values present in the form. Field assignments on that record are automatically sent back to the client. """ return lambda method: decorate(method, '_onchange', args) def depends(*args): """ Return a decorator that specifies the field dependencies of a "compute" method (for new-style function fields). Each argument must be a string that consists in a dot-separated sequence of field names:: pname = fields.Char(compute='_compute_pname') @api.one @api.depends('partner_id.name', 'partner_id.is_company') def _compute_pname(self): if self.partner_id.is_company: self.pname = (self.partner_id.name or "").upper() else: self.pname = self.partner_id.name One may also pass a single function as argument. In that case, the dependencies are given by calling the function with the field's model. """ if args and callable(args[0]): args = args[0] elif any('id' in arg.split('.') for arg in args): raise NotImplementedError("Compute method cannot depend on field 'id'.") return lambda method: decorate(method, '_depends', args) def returns(model, downgrade=None): """ Return a decorator for methods that return instances of ``model``. :param model: a model name, or ``'self'`` for the current model :param downgrade: a function ``downgrade(value)`` to convert the record-style ``value`` to a traditional-style output The decorator adapts the method output to the api style: ``id``, ``ids`` or ``False`` for the traditional style, and recordset for the record style:: @model @returns('res.partner') def find_partner(self, arg): ... 
# return some record # output depends on call style: traditional vs record style partner_id = model.find_partner(cr, uid, arg, context=context) # recs = model.browse(cr, uid, ids, context) partner_record = recs.find_partner(arg) Note that the decorated method must satisfy that convention. Those decorators are automatically *inherited*: a method that overrides a decorated existing method will be decorated with the same ``@returns(model)``. """ return lambda method: decorate(method, '_returns', (model, downgrade)) def make_wrapper(decorator, method, old_api, new_api): """ Return a wrapper method for ``method``. """ def wrapper(self, *args, **kwargs): # avoid hasattr(self, '_ids') because __getattr__() is overridden if '_ids' in self.__dict__: return new_api(self, *args, **kwargs) else: return old_api(self, *args, **kwargs) # propagate specific openerp attributes from method to wrapper for attr in WRAPPED_ATTRS: if hasattr(method, attr): setattr(wrapper, attr, getattr(method, attr)) wrapper._api = decorator wrapper._orig = method return wrapper def get_downgrade(method): """ Return a function `downgrade(value)` that adapts ``value`` from record-style to traditional-style, following the convention of ``method``. """ spec = getattr(method, '_returns', None) if spec: model, downgrade = spec return downgrade or (lambda value: value.ids) else: return lambda value: value def get_upgrade(method): """ Return a function `upgrade(self, value)` that adapts ``value`` from traditional-style to record-style, following the convention of ``method``. """ spec = getattr(method, '_returns', None) if spec: model, downgrade = spec if model == 'self': return lambda self, value: self.browse(value) else: return lambda self, value: self.env[model].browse(value) else: return lambda self, value: value def get_aggregate(method): """ Return a function `aggregate(self, value)` that aggregates record-style ``value`` for a method decorated with ``@one``. 
""" spec = getattr(method, '_returns', None) if spec: # value is a list of instances, concatenate them model, downgrade = spec if model == 'self': return lambda self, value: sum(value, self.browse()) else: return lambda self, value: sum(value, self.env[model].browse()) else: return lambda self, value: value def get_context_split(method): """ Return a function ``split`` that extracts the context from a pair of positional and keyword arguments:: context, args, kwargs = split(args, kwargs) """ pos = len(getargspec(method).args) - 1 def split(args, kwargs): if pos < len(args): return args[pos], args[:pos], kwargs else: return kwargs.pop('context', None), args, kwargs return split def model(method): """ Decorate a record-style method where ``self`` is a recordset, but its contents is not relevant, only the model is. Such a method:: @api.model def method(self, args): ... may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, uid, args, context=context) Notice that no ``ids`` are passed to the method in the traditional style. """ split = get_context_split(method) downgrade = get_downgrade(method) def old_api(self, cr, uid, *args, **kwargs): context, args, kwargs = split(args, kwargs) recs = self.browse(cr, uid, [], context) result = method(recs, *args, **kwargs) return downgrade(result) return make_wrapper(model, method, old_api, method) def multi(method): """ Decorate a record-style method where ``self`` is a recordset. The method typically defines an operation on records. Such a method:: @api.multi def method(self, args): ... 
may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, uid, ids, args, context=context) """ split = get_context_split(method) downgrade = get_downgrade(method) def old_api(self, cr, uid, ids, *args, **kwargs): context, args, kwargs = split(args, kwargs) recs = self.browse(cr, uid, ids, context) result = method(recs, *args, **kwargs) return downgrade(result) return make_wrapper(multi, method, old_api, method) def one(method): """ Decorate a record-style method where ``self`` is expected to be a singleton instance. The decorated method automatically loops on records, and makes a list with the results. In case the method is decorated with :func:`returns`, it concatenates the resulting instances. Such a method:: @api.one def method(self, args): return self.name may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) names = recs.method(args) names = model.method(cr, uid, ids, args, context=context) """ split = get_context_split(method) downgrade = get_downgrade(method) aggregate = get_aggregate(method) def old_api(self, cr, uid, ids, *args, **kwargs): context, args, kwargs = split(args, kwargs) recs = self.browse(cr, uid, ids, context) result = new_api(recs, *args, **kwargs) return downgrade(result) def new_api(self, *args, **kwargs): result = [method(rec, *args, **kwargs) for rec in self] return aggregate(self, result) return make_wrapper(one, method, old_api, new_api) def cr(method): """ Decorate a traditional-style method that takes ``cr`` as a parameter. 
Such a method may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, args) """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args result = method(self._model, cr, *args, **kwargs) return upgrade(self, result) return make_wrapper(cr, method, method, new_api) def cr_context(method): """ Decorate a traditional-style method that takes ``cr``, ``context`` as parameters. """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args kwargs['context'] = context result = method(self._model, cr, *args, **kwargs) return upgrade(self, result) return make_wrapper(cr_context, method, method, new_api) def cr_uid(method): """ Decorate a traditional-style method that takes ``cr``, ``uid`` as parameters. """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args result = method(self._model, cr, uid, *args, **kwargs) return upgrade(self, result) return make_wrapper(cr_uid, method, method, new_api) def cr_uid_context(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``context`` as parameters. Such a method may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, uid, args, context=context) """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args kwargs['context'] = context result = method(self._model, cr, uid, *args, **kwargs) return upgrade(self, result) return make_wrapper(cr_uid_context, method, method, new_api) def cr_uid_id(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``id`` as parameters. Such a method may be called in both record and traditional styles. In the record style, the method automatically loops on records. 
""" upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args result = [method(self._model, cr, uid, id, *args, **kwargs) for id in self.ids] return upgrade(self, result) return make_wrapper(cr_uid_id, method, method, new_api) def cr_uid_id_context(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``id``, ``context`` as parameters. Such a method:: @api.cr_uid_id def method(self, cr, uid, id, args, context=None): ... may be called in both record and traditional styles, like:: # rec = model.browse(cr, uid, id, context) rec.method(args) model.method(cr, uid, id, args, context=context) """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args kwargs['context'] = context result = [method(self._model, cr, uid, id, *args, **kwargs) for id in self.ids] return upgrade(self, result) return make_wrapper(cr_uid_id_context, method, method, new_api) def cr_uid_ids(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``ids`` as parameters. Such a method may be called in both record and traditional styles. """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args result = method(self._model, cr, uid, self.ids, *args, **kwargs) return upgrade(self, result) return make_wrapper(cr_uid_ids, method, method, new_api) def cr_uid_ids_context(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``ids``, ``context`` as parameters. Such a method:: @api.cr_uid_ids_context def method(self, cr, uid, ids, args, context=None): ... may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, uid, ids, args, context=context) It is generally not necessary, see :func:`guess`. 
""" upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args kwargs['context'] = context result = method(self._model, cr, uid, self.ids, *args, **kwargs) return upgrade(self, result) return make_wrapper(cr_uid_ids_context, method, method, new_api) def v7(method_v7): """ Decorate a method that supports the old-style api only. A new-style api may be provided by redefining a method with the same name and decorated with :func:`~.v8`:: @api.v7 def foo(self, cr, uid, ids, context=None): ... @api.v8 def foo(self): ... Note that the wrapper method uses the docstring of the first method. """ # retrieve method_v8 from the caller's frame frame = currentframe().f_back method = frame.f_locals.get(method_v7.__name__) method_v8 = getattr(method, '_v8', method) wrapper = make_wrapper(v7, method_v7, method_v7, method_v8) wrapper._v7 = method_v7 wrapper._v8 = method_v8 return wrapper def v8(method_v8): """ Decorate a method that supports the new-style api only. An old-style api may be provided by redefining a method with the same name and decorated with :func:`~.v7`:: @api.v8 def foo(self): ... @api.v7 def foo(self, cr, uid, ids, context=None): ... Note that the wrapper method uses the docstring of the first method. """ # retrieve method_v7 from the caller's frame frame = currentframe().f_back method = frame.f_locals.get(method_v8.__name__) method_v7 = getattr(method, '_v7', method) wrapper = make_wrapper(v8, method_v8, method_v7, method_v8) wrapper._v7 = method_v7 wrapper._v8 = method_v8 return wrapper def noguess(method): """ Decorate a method to prevent any effect from :func:`guess`. """ method._api = False return method def guess(method): """ Decorate ``method`` to make it callable in both traditional and record styles. This decorator is applied automatically by the model's metaclass, and has no effect on already-decorated methods. 
The API style is determined by heuristics on the parameter names: ``cr`` or ``cursor`` for the cursor, ``uid`` or ``user`` for the user id, ``id`` or ``ids`` for a list of record ids, and ``context`` for the context dictionary. If a traditional API is recognized, one of the decorators :func:`cr`, :func:`cr_context`, :func:`cr_uid`, :func:`cr_uid_context`, :func:`cr_uid_id`, :func:`cr_uid_id_context`, :func:`cr_uid_ids`, :func:`cr_uid_ids_context` is applied on the method. Method calls are considered traditional style when their first parameter is a database cursor. """ if hasattr(method, '_api'): return method # introspection on argument names to determine api style args, vname, kwname, defaults = getargspec(method) names = tuple(args) + (None,) * 4 if names[0] == 'self': if names[1] in ('cr', 'cursor'): if names[2] in ('uid', 'user'): if names[3] == 'ids': if 'context' in names or kwname: return cr_uid_ids_context(method) else: return cr_uid_ids(method) elif names[3] == 'id' or names[3] == 'res_id': if 'context' in names or kwname: return cr_uid_id_context(method) else: return cr_uid_id(method) elif 'context' in names or kwname: return cr_uid_context(method) else: return cr_uid(method) elif 'context' in names: return cr_context(method) else: return cr(method) # no wrapping by default return noguess(method) def expected(decorator, func): """ Decorate ``func`` with ``decorator`` if ``func`` is not wrapped yet. """ return decorator(func) if not hasattr(func, '_api') else func class Environment(object): """ An environment wraps data for ORM records: - :attr:`cr`, the current database cursor; - :attr:`uid`, the current user id; - :attr:`context`, the current context dictionary. It provides access to the registry by implementing a mapping from model names to new api models. It also holds a cache for records, and a data structure to manage recomputations. 
""" _local = Local() @classproperty def envs(cls): return cls._local.environments @classmethod @contextmanager def manage(cls): """ Context manager for a set of environments. """ if hasattr(cls._local, 'environments'): yield else: try: cls._local.environments = Environments() yield finally: release_local(cls._local) @classmethod def reset(cls): """ Clear the set of environments. This may be useful when recreating a registry inside a transaction. """ cls._local.environments = Environments() def __new__(cls, cr, uid, context): assert context is not None args = (cr, uid, context) # if env already exists, return it env, envs = None, cls.envs for env in envs: if env.args == args: return env # otherwise create environment, and add it in the set self = object.__new__(cls) self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context)) self.registry = RegistryManager.get(cr.dbname) self.cache = defaultdict(dict) # {field: {id: value, ...}, ...} self.prefetch = defaultdict(set) # {model_name: set(id), ...} self.computed = defaultdict(set) # {field: set(id), ...} self.dirty = defaultdict(set) # {record: set(field_name), ...} self.all = envs envs.add(self) return self def __contains__(self, model_name): """ Test whether the given model exists. """ return model_name in self.registry def __getitem__(self, model_name): """ Return an empty recordset from the given model. """ return self.registry[model_name]._browse(self, ()) def __iter__(self): """ Return an iterator on model names. """ return iter(self.registry) def __len__(self): """ Return the size of the model registry. """ return len(self.registry) def __call__(self, cr=None, user=None, context=None): """ Return an environment based on ``self`` with modified parameters. 
:param cr: optional database cursor to change the current cursor :param user: optional user/user id to change the current user :param context: optional context dictionary to change the current context """ cr = self.cr if cr is None else cr uid = self.uid if user is None else int(user) context = self.context if context is None else context return Environment(cr, uid, context) def ref(self, xml_id, raise_if_not_found=True): """ return the record corresponding to the given ``xml_id`` """ return self['ir.model.data'].xmlid_to_object(xml_id, raise_if_not_found=raise_if_not_found) @property def user(self): """ return the current user (as an instance) """ return self(user=SUPERUSER_ID)['res.users'].browse(self.uid) @property def lang(self): """ return the current language code """ return self.context.get('lang') @contextmanager def _do_in_mode(self, mode): if self.all.mode: yield else: try: self.all.mode = mode yield finally: self.all.mode = False self.dirty.clear() def do_in_draft(self): """ Context-switch to draft mode, where all field updates are done in cache only. """ return self._do_in_mode(True) @property def in_draft(self): """ Return whether we are in draft mode. """ return bool(self.all.mode) def do_in_onchange(self): """ Context-switch to 'onchange' draft mode, which is a specialized draft mode used during execution of onchange methods. """ return self._do_in_mode('onchange') @property def in_onchange(self): """ Return whether we are in 'onchange' draft mode. """ return self.all.mode == 'onchange' def invalidate(self, spec): """ Invalidate some fields for some records in the cache of all environments. :param spec: what to invalidate, a list of `(field, ids)` pair, where ``field`` is a field object, and ``ids`` is a list of record ids or ``None`` (to invalidate all records). 
""" if not spec: return for env in list(self.all): c = env.cache for field, ids in spec: if ids is None: if field in c: del c[field] else: field_cache = c[field] for id in ids: field_cache.pop(id, None) def invalidate_all(self): """ Clear the cache of all environments. """ for env in list(self.all): env.cache.clear() env.prefetch.clear() env.computed.clear() env.dirty.clear() def clear(self): """ Clear all record caches, and discard all fields to recompute. This may be useful when recovering from a failed ORM operation. """ self.invalidate_all() self.all.todo.clear() @contextmanager def clear_upon_failure(self): """ Context manager that clears the environments (caches and fields to recompute) upon exception. """ try: yield except Exception: self.clear() raise def field_todo(self, field): """ Return a recordset with all records to recompute for ``field``. """ ids = {rid for recs in self.all.todo.get(field, ()) for rid in recs.ids} return self[field.model_name].browse(ids) def check_todo(self, field, record): """ Check whether ``field`` must be recomputed on ``record``, and if so, return the corresponding recordset to recompute. """ for recs in self.all.todo.get(field, []): if recs & record: return recs def add_todo(self, field, records): """ Mark ``field`` to be recomputed on ``records``. """ recs_list = self.all.todo.setdefault(field, []) for i, recs in enumerate(recs_list): if recs.env == records.env: recs_list[i] |= records break else: recs_list.append(records) def remove_todo(self, field, records): """ Mark ``field`` as recomputed on ``records``. """ recs_list = [recs - records for recs in self.all.todo.pop(field, [])] recs_list = filter(None, recs_list) if recs_list: self.all.todo[field] = recs_list def has_todo(self): """ Return whether some fields must be recomputed. """ return bool(self.all.todo) def get_todo(self): """ Return a pair `(field, records)` to recompute. 
""" for field, recs_list in self.all.todo.iteritems(): return field, recs_list[0] def check_cache(self): """ Check the cache consistency. """ # make a full copy of the cache, and invalidate it cache_dump = dict( (field, dict(field_cache)) for field, field_cache in self.cache.iteritems() ) self.invalidate_all() # re-fetch the records, and compare with their former cache invalids = [] for field, field_dump in cache_dump.iteritems(): ids = filter(None, field_dump) records = self[field.model_name].browse(ids) for record in records: try: cached = field_dump[record.id] fetched = record[field.name] if fetched != cached: info = {'cached': cached, 'fetched': fetched} invalids.append((field, record, info)) except (AccessError, MissingError): pass if invalids: raise UserError('Invalid cache for fields\n' + pformat(invalids)) @property def recompute(self): return self.all.recompute @contextmanager def norecompute(self): tmp = self.all.recompute self.all.recompute = False try: yield finally: self.all.recompute = tmp @property def recompute_old(self): return self.all.recompute_old def clear_recompute_old(self): del self.all.recompute_old[:] class Environments(object): """ A common object for all environments in a request. """ def __init__(self): self.envs = WeakSet() # weak set of environments self.todo = {} # recomputations {field: [records]} self.mode = False # flag for draft/onchange self.recompute = True self.recompute_old = [] # list of old api compute fields to recompute def add(self, env): """ Add the environment ``env``. """ self.envs.add(env) def __iter__(self): """ Iterate over environments. """ return iter(self.envs) # keep those imports here in order to handle cyclic dependencies correctly from openerp import SUPERUSER_ID from openerp.exceptions import UserError, AccessError, MissingError from openerp.modules.registry import RegistryManager
IshankGulati/scikit-learn
refs/heads/master
sklearn/utils/__init__.py
13
""" The :mod:`sklearn.utils` module includes various utilities. """ from collections import Sequence import numpy as np from scipy.sparse import issparse import warnings from .murmurhash import murmurhash3_32 from .validation import (as_float_array, assert_all_finite, check_random_state, column_or_1d, check_array, check_consistent_length, check_X_y, indexable, check_symmetric) from .class_weight import compute_class_weight, compute_sample_weight from ..externals.joblib import cpu_count from ..exceptions import DataConversionWarning from .deprecation import deprecated __all__ = ["murmurhash3_32", "as_float_array", "assert_all_finite", "check_array", "check_random_state", "compute_class_weight", "compute_sample_weight", "column_or_1d", "safe_indexing", "check_consistent_length", "check_X_y", 'indexable', "check_symmetric", "indices_to_mask", "deprecated"] def safe_mask(X, mask): """Return a mask which is safe to use on X. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask : array Mask to be used on X. Returns ------- mask """ mask = np.asarray(mask) if np.issubdtype(mask.dtype, np.int): return mask if hasattr(X, "toarray"): ind = np.arange(mask.shape[0]) mask = ind[mask] return mask def axis0_safe_slice(X, mask, len_mask): """ This mask is safer than safe_mask since it returns an empty array, when a sparse matrix is sliced with a boolean mask with all False, instead of raising an unhelpful error in older versions of SciPy. See: https://github.com/scipy/scipy/issues/5361 Also note that we can avoid doing the dot product by checking if the len_mask is not zero in _huber_loss_and_gradient but this is not going to be the bottleneck, since the number of outliers and non_outliers are typically non-zero and it makes the code tougher to follow. """ if len_mask != 0: return X[safe_mask(X, mask), :] return np.zeros(shape=(0, X.shape[1])) def safe_indexing(X, indices): """Return items or rows from X using indices. 
Allows simple indexing of lists or arrays. Parameters ---------- X : array-like, sparse-matrix, list. Data from which to sample rows or items. indices : array-like, list Indices according to which X will be subsampled. """ if hasattr(X, "iloc"): # Pandas Dataframes and Series try: return X.iloc[indices] except ValueError: # Cython typed memoryviews internally used in pandas do not support # readonly buffers. warnings.warn("Copying input dataframe for slicing.", DataConversionWarning) return X.copy().iloc[indices] elif hasattr(X, "shape"): if hasattr(X, 'take') and (hasattr(indices, 'dtype') and indices.dtype.kind == 'i'): # This is often substantially faster than X[indices] return X.take(indices, axis=0) else: return X[indices] else: return [X[idx] for idx in indices] def resample(*arrays, **options): """Resample arrays or sparse matrices in a consistent way The default strategy implements one step of the bootstrapping procedure. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. replace : boolean, True by default Implements resampling with replacement. If False, this will implement (sliced) random permutations. n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. If replace is False it should not be larger than the length of arrays. random_state : int or RandomState instance Control the shuffling for reproducible behavior. Returns ------- resampled_arrays : sequence of indexable data-structures Sequence of resampled views of the collections. The original arrays are not impacted. 
Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import resample >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0) >>> X array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 4 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([0, 1, 0]) >>> resample(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.shuffle` """ random_state = check_random_state(options.pop('random_state', None)) replace = options.pop('replace', True) max_n_samples = options.pop('n_samples', None) if options: raise ValueError("Unexpected kw arguments: %r" % options.keys()) if len(arrays) == 0: return None first = arrays[0] n_samples = first.shape[0] if hasattr(first, 'shape') else len(first) if max_n_samples is None: max_n_samples = n_samples elif (max_n_samples > n_samples) and (not replace): raise ValueError("Cannot sample %d out of arrays with dim %d " "when replace is False" % (max_n_samples, n_samples)) check_consistent_length(*arrays) if replace: indices = random_state.randint(0, n_samples, size=(max_n_samples,)) else: indices = np.arange(n_samples) random_state.shuffle(indices) indices = indices[:max_n_samples] # convert sparse matrices to CSR for row-based indexing arrays = [a.tocsr() if issparse(a) else a for a in arrays] resampled_arrays = [safe_indexing(a, indices) for a in arrays] if len(resampled_arrays) == 1: # syntactic sugar for the unit argument case return resampled_arrays[0] else: return resampled_arrays def shuffle(*arrays, **options): """Shuffle arrays or sparse matrices in a consistent way This is a convenience alias to ``resample(*arrays, replace=False)`` to do 
random permutations of the collections. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. random_state : int or RandomState instance Control the shuffling for reproducible behavior. n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. Returns ------- shuffled_arrays : sequence of indexable data-structures Sequence of shuffled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.resample` """ options['replace'] = False return resample(*arrays, **options) def safe_sqr(X, copy=True): """Element wise squaring of array-likes and sparse matrices. Parameters ---------- X : array like, matrix, sparse matrix copy : boolean, optional, default True Whether to create a copy of X and operate on it or to perform inplace computation (default behaviour). 
Returns ------- X ** 2 : element wise square """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False) if issparse(X): if copy: X = X.copy() X.data **= 2 else: if copy: X = X ** 2 else: X **= 2 return X def gen_batches(n, batch_size): """Generator to create slices containing batch_size elements, from 0 to n. The last slice may contain less than batch_size elements, when batch_size does not divide n. Examples -------- >>> from sklearn.utils import gen_batches >>> list(gen_batches(7, 3)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(6, 3)) [slice(0, 3, None), slice(3, 6, None)] >>> list(gen_batches(2, 3)) [slice(0, 2, None)] """ start = 0 for _ in range(int(n // batch_size)): end = start + batch_size yield slice(start, end) start = end if start < n: yield slice(start, n) def gen_even_slices(n, n_packs, n_samples=None): """Generator to create n_packs slices going up to n. Pass n_samples when the slices are to be used for sparse matrix indexing; slicing off-the-end raises an exception, while it works for NumPy arrays. Examples -------- >>> from sklearn.utils import gen_even_slices >>> list(gen_even_slices(10, 1)) [slice(0, 10, None)] >>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)] >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)] >>> list(gen_even_slices(10, 3)) [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)] """ start = 0 if n_packs < 1: raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs) for pack_num in range(n_packs): this_n = n // n_packs if pack_num < n % n_packs: this_n += 1 if this_n > 0: end = start + this_n if n_samples is not None: end = min(n_samples, end) yield slice(start, end, None) start = end def _get_n_jobs(n_jobs): """Get number of jobs for the computation. 
This function reimplements the logic of joblib to determine the actual number of jobs depending on the cpu count. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Parameters ---------- n_jobs : int Number of jobs stated in joblib convention. Returns ------- n_jobs : int The actual number of jobs as positive integer. Examples -------- >>> from sklearn.utils import _get_n_jobs >>> _get_n_jobs(4) 4 >>> jobs = _get_n_jobs(-2) >>> assert jobs == max(cpu_count() - 1, 1) >>> _get_n_jobs(0) Traceback (most recent call last): ... ValueError: Parameter n_jobs == 0 has no meaning. """ if n_jobs < 0: return max(cpu_count() + 1 + n_jobs, 1) elif n_jobs == 0: raise ValueError('Parameter n_jobs == 0 has no meaning.') else: return n_jobs def tosequence(x): """Cast iterable x to a Sequence, avoiding a copy if possible.""" if isinstance(x, np.ndarray): return np.asarray(x) elif isinstance(x, Sequence): return x else: return list(x) def indices_to_mask(indices, mask_length): """Convert list of indices to boolean mask. Parameters ---------- indices : list-like List of integers treated as indices. mask_length : int Length of boolean mask to be generated. Returns ------- mask : 1d boolean nd-array Boolean array that is True where indices are present, else False. """ if mask_length <= np.max(indices): raise ValueError("mask_length must be greater than max(indices)") mask = np.zeros(mask_length, dtype=np.bool) mask[indices] = True return mask
P0cL4bs/WiFi-Pumpkin
refs/heads/master
plugins/external/Responder/servers/IMAP.py
5
#!/usr/bin/env python # This file is part of Responder # Original work by Laurent Gaffie - Trustwave Holdings # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from utils import * from SocketServer import BaseRequestHandler from packets import IMAPGreeting, IMAPCapability, IMAPCapabilityEnd class IMAP(BaseRequestHandler): def handle(self): try: self.request.send(str(IMAPGreeting())) data = self.request.recv(1024) if data[5:15] == "CAPABILITY": RequestTag = data[0:4] self.request.send(str(IMAPCapability())) self.request.send(str(IMAPCapabilityEnd(Tag=RequestTag))) data = self.request.recv(1024) if data[5:10] == "LOGIN": Credentials = data[10:].strip() SaveToDb({ 'module': 'IMAP', 'type': 'Cleartext', 'client': self.client_address[0], 'user': Credentials[0], 'cleartext': Credentials[1], 'fullhash': Credentials[0]+":"+Credentials[1], }) ## FIXME: Close connection properly ## self.request.send(str(ditchthisconnection())) ## data = self.request.recv(1024) except Exception: pass
eSpark/phabricator
refs/heads/master
externals/twilio-php/docs/_themes/flask_theme_support.py
2228
# flasky extensions. flasky pygments style based on tango style from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal class FlaskyStyle(Style): background_color = "#f8f8f8" default_style = "" styles = { # No corresponding class for the following: #Text: "", # class: '' Whitespace: "underline #f8f8f8", # class: 'w' Error: "#a40000 border:#ef2929", # class: 'err' Other: "#000000", # class 'x' Comment: "italic #8f5902", # class: 'c' Comment.Preproc: "noitalic", # class: 'cp' Keyword: "bold #004461", # class: 'k' Keyword.Constant: "bold #004461", # class: 'kc' Keyword.Declaration: "bold #004461", # class: 'kd' Keyword.Namespace: "bold #004461", # class: 'kn' Keyword.Pseudo: "bold #004461", # class: 'kp' Keyword.Reserved: "bold #004461", # class: 'kr' Keyword.Type: "bold #004461", # class: 'kt' Operator: "#582800", # class: 'o' Operator.Word: "bold #004461", # class: 'ow' - like keywords Punctuation: "bold #000000", # class: 'p' # because special names such as Name.Class, Name.Function, etc. # are not recognized as such later in the parsing, we choose them # to look the same as ordinary variables. 
Name: "#000000", # class: 'n' Name.Attribute: "#c4a000", # class: 'na' - to be revised Name.Builtin: "#004461", # class: 'nb' Name.Builtin.Pseudo: "#3465a4", # class: 'bp' Name.Class: "#000000", # class: 'nc' - to be revised Name.Constant: "#000000", # class: 'no' - to be revised Name.Decorator: "#888", # class: 'nd' - to be revised Name.Entity: "#ce5c00", # class: 'ni' Name.Exception: "bold #cc0000", # class: 'ne' Name.Function: "#000000", # class: 'nf' Name.Property: "#000000", # class: 'py' Name.Label: "#f57900", # class: 'nl' Name.Namespace: "#000000", # class: 'nn' - to be revised Name.Other: "#000000", # class: 'nx' Name.Tag: "bold #004461", # class: 'nt' - like a keyword Name.Variable: "#000000", # class: 'nv' - to be revised Name.Variable.Class: "#000000", # class: 'vc' - to be revised Name.Variable.Global: "#000000", # class: 'vg' - to be revised Name.Variable.Instance: "#000000", # class: 'vi' - to be revised Number: "#990000", # class: 'm' Literal: "#000000", # class: 'l' Literal.Date: "#000000", # class: 'ld' String: "#4e9a06", # class: 's' String.Backtick: "#4e9a06", # class: 'sb' String.Char: "#4e9a06", # class: 'sc' String.Doc: "italic #8f5902", # class: 'sd' - like a comment String.Double: "#4e9a06", # class: 's2' String.Escape: "#4e9a06", # class: 'se' String.Heredoc: "#4e9a06", # class: 'sh' String.Interpol: "#4e9a06", # class: 'si' String.Other: "#4e9a06", # class: 'sx' String.Regex: "#4e9a06", # class: 'sr' String.Single: "#4e9a06", # class: 's1' String.Symbol: "#4e9a06", # class: 'ss' Generic: "#000000", # class: 'g' Generic.Deleted: "#a40000", # class: 'gd' Generic.Emph: "italic #000000", # class: 'ge' Generic.Error: "#ef2929", # class: 'gr' Generic.Heading: "bold #000080", # class: 'gh' Generic.Inserted: "#00A000", # class: 'gi' Generic.Output: "#888", # class: 'go' Generic.Prompt: "#745334", # class: 'gp' Generic.Strong: "bold #000000", # class: 'gs' Generic.Subheading: "bold #800080", # class: 'gu' Generic.Traceback: "bold #a40000", # 
class: 'gt' }
mdtraj/mdtraj
refs/heads/master
mdtraj/utils/validation.py
4
############################################################################## # MDTraj: A Python Library for Loading, Saving, and Manipulating # Molecular Dynamics Trajectories. # Copyright 2012-2013 Stanford University and the Authors # # Authors: Robert McGibbon # Contributors: # # MDTraj is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. ############################################################################## ############################################################################## # imports ############################################################################## from __future__ import print_function, division import warnings import numbers import numpy as np import collections from mdtraj.utils.six.moves import zip_longest ############################################################################## # functions / classes ############################################################################## class TypeCastPerformanceWarning(RuntimeWarning): pass def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None, warn_on_cast=True, add_newaxis_on_deficient_ndim=False): """Typecheck the size, shape and dtype of a numpy array, with optional casting. 
Parameters ---------- val : {np.ndaraay, None} The array to check dtype : {nd.dtype, str} The dtype you'd like the array to have ndim : int The number of dimensions you'd like the array to have name : str name of the array. This is used when throwing exceptions, so that we can describe to the user which array is messed up. length : int, optional How long should the array be? can_be_none : bool Is ``val == None`` acceptable? shape : tuple, optional What should be shape of the array be? If the provided tuple has Nones in it, those will be semantically interpreted as matching any length in that dimension. So, for example, using the shape spec ``(None, None, 3)`` will ensure that the last dimension is of length three without constraining the first two dimensions warn_on_cast : bool, default=True Raise a warning when the dtypes don't match and a cast is done. add_newaxis_on_deficient_ndim : bool, default=True Add a new axis to the beginining of the array if the number of dimensions is deficient by one compared to your specification. For instance, if you're trying to get out an array of ``ndim == 3``, but the user provides an array of ``shape == (10, 10)``, a new axis will be created with length 1 in front, so that the return value is of shape ``(1, 10, 10)``. Notes ----- The returned value will always be C-contiguous. Returns ------- typechecked_val : np.ndarray, None If `val=None` and `can_be_none=True`, then this will return None. Otherwise, it will return val (or a copy of val). If the dtype wasn't right, it'll be casted to the right shape. If the array was not C-contiguous, it'll be copied as well. """ if can_be_none and val is None: return None if not isinstance(val, np.ndarray): if isinstance(val, collections.abc.Iterable): # If they give us an iterator, let's try... if isinstance(val, collections.abc.Sequence): # sequences are easy. these are like lists and stuff val = np.array(val, dtype=dtype) else: # this is a generator... 
val = np.array(list(val), dtype=dtype) elif np.isscalar(val) and add_newaxis_on_deficient_ndim and ndim == 1: # special case: if the user is looking for a 1d array, and # they request newaxis upconversion, and provided a scalar # then we should reshape the scalar to be a 1d length-1 array val = np.array([val]) else: raise TypeError(("%s must be numpy array. " " You supplied type %s" % (name, type(val)))) if warn_on_cast and val.dtype != dtype: warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype), TypeCastPerformanceWarning) if not val.ndim == ndim: if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim: val = val[np.newaxis, ...] else: raise ValueError(("%s must be ndim %s. " "You supplied %s" % (name, ndim, val.ndim))) val = np.ascontiguousarray(val, dtype=dtype) if length is not None and len(val) != length: raise ValueError(("%s must be length %s. " "You supplied %s" % (name, length, len(val)))) if shape is not None: # the shape specified given by the user can look like (None, None 3) # which indicates that ANY length is accepted in dimension 0 or # dimension 1 sentenel = object() error = ValueError(("%s must be shape %s. You supplied " "%s" % (name, str(shape).replace('None', 'Any'), val.shape))) for a, b in zip_longest(val.shape, shape, fillvalue=sentenel): if a is sentenel or b is sentenel: # if the sentenel was reached, it means that the ndim didn't # match or something. this really shouldn't happen raise error if b is None: # if the user's shape spec has a None in it, it matches anything continue if a != b: # check for equality raise error return val def cast_indices(indices): """Check that ``indices`` are appropriate for indexing an array Parameters ---------- indices : {None, array_like, slice} If indices is None or slice, it'll just pass through. Otherwise, it'll be converted to a numpy array and checked to make sure it contains unique integers. 
Returns ------- value : {slice, np.ndarray} Either a slice or an array of integers, depending on the input type """ if indices is None or isinstance(indices, slice): return indices if not len(indices) == len(set(indices)): raise ValueError("indices must be unique.") out = np.asarray(indices) if not issubclass(out.dtype.type, np.integer): raise ValueError('indices must be of an integer type. %s is not an integer type' % out.dtype) return out def check_random_state(seed): """Turn seed into a np.random.RandomState instance Parameters ---------- seed : {None, int, RandomState} Seed for a random number generator Returns ------- randomstate : RandomState If seed is None, return the RandomState singleton used by np.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError. """ # This code is direcly from the scikit-learn project (sklearn/utils/validation.py) # Authors: Olivier Grisel and Gael Varoquaux and others (please update me) # License: BSD 3 clause if seed is None or seed is np.random: return np.random.mtrand._rand if isinstance(seed, (numbers.Integral, np.integer)): return np.random.RandomState(seed) if isinstance(seed, np.random.RandomState): return seed raise ValueError('%r cannot be used to seed a numpy.random.RandomState' ' instance' % seed)
inares/edx-platform
refs/heads/inares_sass
lms/djangoapps/verify_student/tests/fake_software_secure.py
73
""" Fake Software Secure page for use in acceptance tests. """ from django.conf import settings from django.contrib.auth.decorators import login_required from django.core.urlresolvers import reverse from django.utils.decorators import method_decorator from django.views.generic.base import View from edxmako.shortcuts import render_to_response from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification class SoftwareSecureFakeView(View): """ Fake SoftwareSecure view for testing different photo verification statuses and email functionality. """ @method_decorator(login_required) def get(self, request): """ Render a fake Software Secure page that will pick the most recent attempt for a given user and pass it to the html page. """ context_dict = self.response_post_params(request.user) return render_to_response("verify_student/test/fake_softwaresecure_response.html", context_dict) @classmethod def response_post_params(cls, user): """ Calculate the POST params we want to send back to the client. """ access_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"] context = { 'receipt_id': None, 'authorization_code': 'SIS {}:0000'.format(access_key), 'results_callback': reverse('verify_student_results_callback') } try: most_recent = SoftwareSecurePhotoVerification.objects.filter(user=user).order_by("-updated_at")[0] context["receipt_id"] = most_recent.receipt_id except: # pylint: disable=bare-except pass return context
robertmattmueller/sdac-compiler
refs/heads/master
sympy/physics/optics/tests/test_waves.py
14
from sympy import (symbols, Symbol, pi, sqrt, cos, sin, Derivative,
                   Function, simplify, C, I, atan2)
from sympy.abc import x, epsilon, mu
from sympy.physics.units import c, m, s
from sympy.physics.optics import TWave


def test_twave():
    """Exercise TWave construction, superposition and its rewrite targets."""
    A1, phi1, A2, phi2, f = symbols('A1, phi1, A2, phi2, f')
    n = Symbol('n')  # Refractive index
    t = Symbol('t')  # Time
    x = Symbol('x')  # Spatial variable (shadows sympy.abc.x -- presumably intentional)
    k = Symbol('k')  # Wave number
    E = Function('E')  # Field as a function of position and time
    # NOTE(review): ``C`` is sympy's legacy class registry (imported above);
    # it was removed in later sympy releases -- confirm the target version.
    exp = C.exp
    w1 = TWave(A1, f, phi1)
    w2 = TWave(A2, f, phi2)
    assert w1.amplitude == A1
    assert w1.frequency == f
    assert w1.phase == phi1
    assert w1.wavelength == c/(f*n)
    assert w1.time_period == 1/f
    # Superposition of two waves of equal frequency.
    w3 = w1 + w2
    assert w3.amplitude == sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2) + A2**2)
    assert w3.frequency == f
    assert w3.wavelength == c/(f*n)
    assert w3.time_period == 1/f
    assert w3.angular_velocity == 2*pi*f
    assert w3.wavenumber == 2*pi*f*n/c
    # 149896229 m/s is 299792458/2, i.e. c/2 in SI units, so
    # pi*f*n*x*s/(149896229*m) equals the phase term 2*pi*f*n*x/c.
    assert simplify(w3.rewrite('sin') - sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2)
        + A2**2)*sin(pi*f*n*x*s/(149896229*m) - 2*pi*f*t
        + atan2(A1*cos(phi1) + A2*cos(phi2), A1*sin(phi1) + A2*sin(phi2))
        + pi/2)) == 0
    assert w3.rewrite('pde') == epsilon*mu*Derivative(E(x, t), t, t) + Derivative(E(x, t), x, x)
    assert w3.rewrite(cos) == sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2)
        + A2**2)*cos(pi*f*n*x*s/(149896229*m) - 2*pi*f*t
        + atan2(A1*cos(phi1) + A2*cos(phi2), A1*sin(phi1) + A2*sin(phi2)))
    assert w3.rewrite('exp') == sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2)
        + A2**2)*exp(I*(pi*f*n*x*s/(149896229*m) - 2*pi*f*t
        + atan2(A1*cos(phi1) + A2*cos(phi2), A1*sin(phi1) + A2*sin(phi2))))
rfk/promise
refs/heads/master
promise/tests/finder.py
2
import promise

# Shared haystack searched by every finder variant below.
items = range(100)


def verify(finder):
    """Check that the given finder function works correctly."""
    assert finder(0)
    assert finder(42)
    assert not finder(101)
    assert not finder(1001)


def finder0(item):
    """Base 'finder' function; is quite stupid and slow."""
    i = 0
    while i < len(items):
        if items[i] == item:
            return True
        i += 1
    return False


@promise.invariant(["items"])
def finder1(item):
    """Finder function storing 'len' in a local variable."""
    # NOTE(review): body is deliberately identical to finder0; only the
    # ``promise`` decorator differs, so the variants can be compared.
    i = 0
    while i < len(items):
        if items[i] == item:
            return True
        i += 1
    return False


@promise.sensible()
def finder2(item):
    """Finder function assumed to have sensible behaviour.

    'items' is considered invariant; 'len', 'True' and 'False' are constant.
    """
    # NOTE(review): same body as finder0/finder1 on purpose (see finder1).
    i = 0
    while i < len(items):
        if items[i] == item:
            return True
        i += 1
    return False
opencloudinfra/orchestrator
refs/heads/master
venv/Lib/site-packages/django/conf/locale/sk/formats.py
504
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j. F Y' TIME_FORMAT = 'G:i' DATETIME_FORMAT = 'j. F Y G:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j. F' SHORT_DATE_FORMAT = 'd.m.Y' SHORT_DATETIME_FORMAT = 'd.m.Y G:i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06' '%y-%m-%d', # '06-10-25' # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006' ] DATETIME_INPUT_FORMATS = [ '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '\xa0' # non-breaking space NUMBER_GROUPING = 3