Columns:
  code        string (length 2 to 1.05M)
  repo_name   string (length 5 to 104)
  path        string (length 4 to 251)
  language    string (1 class)
  license     string (15 classes)
  size        int32 (2 to 1.05M)
from a10sdk.common.A10BaseClass import A10BaseClass class Stats(A10BaseClass): """This class does not support CRUD Operations please use parent. :param dslite_data_session_freed: {"description": "DS-Lite Data Sessions Freed", "format": "counter", "type": "number", "oid": "13", "optional": true, "size": "8"} :param nat64_eim_match: {"description": "NAT64 Endpoint-Independent-Mapping Matched", "format": "counter", "type": "number", "oid": "41", "optional": true, "size": "8"} :param total_icmp_freed: {"description": "Total ICMP Ports Freed", "format": "counter", "type": "number", "oid": "7", "optional": true, "size": "8"} :param nat64_udp_alg_fullcone_created: {"description": "NAT64 UDP ALG Full-Cone Created", "format": "counter", "type": "number", "oid": "31", "optional": true, "size": "8"} :param fullcone_failure: {"description": "Full-Cone Session Creation Failed", "format": "counter", "type": "number", "oid": "39", "optional": true, "size": "8"} :param nat44_eim_match: {"description": "NAT44 Endpoint-Independent-Mapping Matched", "format": "counter", "type": "number", "oid": "40", "optional": true, "size": "8"} :param nat44_tcp_fullcone_created: {"description": "NAT44 TCP Full-Cone Created", "format": "counter", "type": "number", "oid": "21", "optional": true, "size": "8"} :param nat44_eif_match: {"description": "NAT44 Endpoint-Independent-Filtering Matched", "format": "counter", "type": "number", "oid": "43", "optional": true, "size": "8"} :param nat64_data_session_created: {"description": "NAT64 Data Sessions Created", "format": "counter", "type": "number", "oid": "10", "optional": true, "size": "8"} :param nat64_eif_match: {"description": "NAT64 Endpoint-Independent-Filtering Matched", "format": "counter", "type": "number", "oid": "44", "optional": true, "size": "8"} :param nat44_hairpin: {"description": "NAT44 Hairpin Session Created", "format": "counter", "type": "number", "oid": "52", "optional": true, "size": "8"} :param total_icmp_allocated: {"description": "Total ICMP Ports Allocated", "format": "counter", "type": "number", "oid": "6", "optional": true, "size": "8"} :param nat64_udp_fullcone_freed: {"description": "NAT64 UDP Full-Cone Freed", "format": "counter", "type": "number", "oid": "30", "optional": true, "size": "8"} :param total_tcp_freed: {"description": "Total TCP Ports Freed", "format": "counter", "type": "number", "oid": "3", "optional": true, "size": "8"} :param dest_rlist_snat_drop: {"description": "Fixed NAT Dest Rules List Source NAT Drop", "format": "counter", "type": "number", "oid": "60", "optional": true, "size": "8"} :param nat44_udp_alg_fullcone_freed: {"description": "NAT44 UDP ALG Full-Cone Freed", "format": "counter", "type": "number", "oid": "26", "optional": true, "size": "8"} :param dslite_tcp_fullcone_freed: {"description": "DS-Lite TCP Full-Cone Freed", "format": "counter", "type": "number", "oid": "34", "optional": true, "size": "8"} :param 6rd_drop: {"description": "Fixed NAT IPv6 in IPv4 Packet Drop", "format": "counter", "type": "number", "oid": "57", "optional": true, "size": "8"} :param nat64_inbound_filtered: {"description": "NAT64 Endpoint-Dependent Filtering Drop", "format": "counter", "type": "number", "oid": "47", "optional": true, "size": "8"} :param dslite_hairpin: {"description": "DS-Lite Hairpin Session Created", "format": "counter", "type": "number", "oid": "54", "optional": true, "size": "8"} :param nat44_udp_alg_fullcone_created: {"description": "NAT44 UDP ALG Full-Cone Created", "format": "counter", "type": "number", "oid": "25", 
"optional": true, "size": "8"} :param nat_port_unavailable_udp: {"description": "UDP NAT Port Unavailable", "format": "counter", "type": "number", "oid": "15", "optional": true, "size": "8"} :param nat64_tcp_fullcone_created: {"description": "NAT64 TCP Full-Cone Created", "format": "counter", "type": "number", "oid": "27", "optional": true, "size": "8"} :param dest_rlist_pass_through: {"description": "Fixed NAT Dest Rule List Pass-Through", "format": "counter", "type": "number", "oid": "59", "optional": true, "size": "8"} :param nat64_tcp_fullcone_freed: {"description": "NAT64 TCP Full-Cone Freed", "format": "counter", "type": "number", "oid": "28", "optional": true, "size": "8"} :param dslite_udp_fullcone_created: {"description": "DS-Lite UDP Full-Cone Created", "format": "counter", "type": "number", "oid": "35", "optional": true, "size": "8"} :param total_udp_freed: {"description": "Total UDP Ports Freed", "format": "counter", "type": "number", "oid": "5", "optional": true, "size": "8"} :param nat44_eif_limit_exceeded: {"description": "NAT44 Endpoint-Independent-Filtering Limit Exceeded", "format": "counter", "type": "number", "oid": "49", "optional": true, "size": "8"} :param dslite_udp_alg_fullcone_created: {"description": "DS-Lite UDP ALG Full-Cone Created", "format": "counter", "type": "number", "oid": "37", "optional": true, "size": "8"} :param total_tcp_allocated: {"description": "Total TCP Ports Allocated", "format": "counter", "type": "number", "oid": "2", "optional": true, "size": "8"} :param nat64_udp_fullcone_created: {"description": "NAT64 UDP Full-Cone Created", "format": "counter", "type": "number", "oid": "29", "optional": true, "size": "8"} :param nat44_tcp_fullcone_freed: {"description": "NAT44 TCP Full-Cone Freed", "format": "counter", "type": "number", "oid": "22", "optional": true, "size": "8"} :param dslite_tcp_fullcone_created: {"description": "DS-Lite TCP Full-Cone Created", "format": "counter", "type": "number", "oid": "33", "optional": true, "size": "8"} :param dslite_inbound_filtered: {"description": "DS-Lite Endpoint-Dependent Filtering Drop", "format": "counter", "type": "number", "oid": "48", "optional": true, "size": "8"} :param nat44_udp_fullcone_created: {"description": "NAT44 UDP Full-Cone Created", "format": "counter", "type": "number", "oid": "23", "optional": true, "size": "8"} :param nat_port_unavailable_tcp: {"description": "TCP NAT Port Unavailable", "format": "counter", "type": "number", "oid": "14", "optional": true, "size": "8"} :param nat_port_unavailable_icmp: {"description": "ICMP NAT Port Unavailable", "format": "counter", "type": "number", "oid": "16", "optional": true, "size": "8"} :param nat44_data_session_created: {"description": "NAT44 Data Sessions Created", "format": "counter", "type": "number", "oid": "8", "optional": true, "size": "8"} :param nat44_inbound_filtered: {"description": "NAT44 Endpoint-Dependent Filtering Drop", "format": "counter", "type": "number", "oid": "46", "optional": true, "size": "8"} :param nat64_hairpin: {"description": "NAT64 Hairpin Session Created", "format": "counter", "type": "number", "oid": "53", "optional": true, "size": "8"} :param total_nat_in_use: {"description": "Total NAT Addresses in-use", "format": "counter", "type": "number", "oid": "1", "optional": true, "size": "8"} :param dslite_data_session_created: {"description": "DS-Lite Data Sessions Created", "format": "counter", "type": "number", "oid": "12", "optional": true, "size": "8"} :param nat64_eif_limit_exceeded: {"description": "NAT64 
Endpoint-Independent-Filtering Limit Exceeded", "format": "counter", "type": "number", "oid": "50", "optional": true, "size": "8"} :param nat64_data_session_freed: {"description": "NAT64 Data Sessions Freed", "format": "counter", "type": "number", "oid": "11", "optional": true, "size": "8"} :param standby_drop: {"description": "Fixed NAT LID Standby Drop", "format": "counter", "type": "number", "oid": "55", "optional": true, "size": "8"} :param dslite_udp_fullcone_freed: {"description": "DS-Lite UDP Full-Cone Freed", "format": "counter", "type": "number", "oid": "36", "optional": true, "size": "8"} :param fixed_nat_fullcone_self_hairpinning_drop: {"description": "Self-Hairpinning Drop", "format": "counter", "type": "number", "oid": "56", "optional": true, "size": "8"} :param dslite_eim_match: {"description": "DS-Lite Endpoint-Independent-Mapping Matched", "format": "counter", "type": "number", "oid": "42", "optional": true, "size": "8"} :param total_udp_allocated: {"description": "Total UDP Ports Allocated", "format": "counter", "type": "number", "oid": "4", "optional": true, "size": "8"} :param dslite_udp_alg_fullcone_freed: {"description": "DS-Lite UDP ALG Full-Cone Freed", "format": "counter", "type": "number", "oid": "38", "optional": true, "size": "8"} :param dest_rlist_drop: {"description": "Fixed NAT Dest Rule List Drop", "format": "counter", "type": "number", "oid": "58", "optional": true, "size": "8"} :param nat64_udp_alg_fullcone_freed: {"description": "NAT64 UDP ALG Full-Cone Freed", "format": "counter", "type": "number", "oid": "32", "optional": true, "size": "8"} :param session_user_quota_exceeded: {"description": "Sessions User Quota Exceeded", "format": "counter", "type": "number", "oid": "20", "optional": true, "size": "8"} :param dslite_eif_limit_exceeded: {"description": "DS-Lite Endpoint-Independent-Filtering Limit Exceeded", "format": "counter", "type": "number", "oid": "51", "optional": true, "size": "8"} :param dslite_eif_match: {"description": "DS-Lite Endpoint-Independent-Filtering Matched", "format": "counter", "type": "number", "oid": "45", "optional": true, "size": "8"} :param nat44_data_session_freed: {"description": "NAT44 Data Sessions Freed", "format": "counter", "type": "number", "oid": "9", "optional": true, "size": "8"} :param nat44_udp_fullcone_freed: {"description": "NAT44 UDP Full-Cone Freed", "format": "counter", "type": "number", "oid": "24", "optional": true, "size": "8"} :param DeviceProxy: The device proxy for REST operations and session handling. 
Refer to `common/device_proxy.py` """ def __init__(self, **kwargs): self.ERROR_MSG = "" self.b_key = "stats" self.DeviceProxy = "" self.dslite_data_session_freed = "" self.nat64_eim_match = "" self.total_icmp_freed = "" self.nat64_udp_alg_fullcone_created = "" self.fullcone_failure = "" self.nat44_eim_match = "" self.nat44_tcp_fullcone_created = "" self.nat44_eif_match = "" self.nat64_data_session_created = "" self.nat64_eif_match = "" self.nat44_hairpin = "" self.total_icmp_allocated = "" self.nat64_udp_fullcone_freed = "" self.total_tcp_freed = "" self.dest_rlist_snat_drop = "" self.nat44_udp_alg_fullcone_freed = "" self.dslite_tcp_fullcone_freed = "" self.A10WW_6rd_drop = "" self.nat64_inbound_filtered = "" self.dslite_hairpin = "" self.nat44_udp_alg_fullcone_created = "" self.nat_port_unavailable_udp = "" self.nat64_tcp_fullcone_created = "" self.dest_rlist_pass_through = "" self.nat64_tcp_fullcone_freed = "" self.dslite_udp_fullcone_created = "" self.total_udp_freed = "" self.nat44_eif_limit_exceeded = "" self.dslite_udp_alg_fullcone_created = "" self.total_tcp_allocated = "" self.nat64_udp_fullcone_created = "" self.nat44_tcp_fullcone_freed = "" self.dslite_tcp_fullcone_created = "" self.dslite_inbound_filtered = "" self.nat44_udp_fullcone_created = "" self.nat_port_unavailable_tcp = "" self.nat_port_unavailable_icmp = "" self.nat44_data_session_created = "" self.nat44_inbound_filtered = "" self.nat64_hairpin = "" self.total_nat_in_use = "" self.dslite_data_session_created = "" self.nat64_eif_limit_exceeded = "" self.nat64_data_session_freed = "" self.standby_drop = "" self.dslite_udp_fullcone_freed = "" self.fixed_nat_fullcone_self_hairpinning_drop = "" self.dslite_eim_match = "" self.total_udp_allocated = "" self.dslite_udp_alg_fullcone_freed = "" self.dest_rlist_drop = "" self.nat64_udp_alg_fullcone_freed = "" self.session_user_quota_exceeded = "" self.dslite_eif_limit_exceeded = "" self.dslite_eif_match = "" self.nat44_data_session_freed = "" self.nat44_udp_fullcone_freed = "" for keys, value in kwargs.items(): setattr(self,keys, value) class Global(A10BaseClass): """Class Description:: Statistics for the object global. Class global supports CRUD Operations and inherits from `common/A10BaseClass`. This class is the `"PARENT"` class for this module.` :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` URL for this object:: `https://<Hostname|Ip address>//axapi/v3/cgnv6/fixed-nat/global/stats`. """ def __init__(self, **kwargs): self.ERROR_MSG = "" self.required=[] self.b_key = "global" self.a10_url="/axapi/v3/cgnv6/fixed-nat/global/stats" self.DeviceProxy = "" self.stats = {} for keys, value in kwargs.items(): setattr(self,keys, value)
amwelch/a10sdk-python
a10sdk/core/cgnv6/cgnv6_fixed_nat_global_stats.py
Python
apache-2.0
13,296
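A minimal usage sketch for the Global stats wrapper in the record above. Only the behaviour visible in the class itself is used; the device proxy object is a placeholder (the docstring only points at common/device_proxy.py, which is not part of this record).

from a10sdk.core.cgnv6.cgnv6_fixed_nat_global_stats import Global

# Placeholder for a real, already-authenticated DeviceProxy session
# (assumption; see common/device_proxy.py referenced in the docstring).
device_proxy = object()

stats_obj = Global(DeviceProxy=device_proxy)  # kwargs are applied via setattr in __init__
print(stats_obj.a10_url)   # /axapi/v3/cgnv6/fixed-nat/global/stats
print(stats_obj.stats)     # empty dict until the proxy fetches the URL above (assumption)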
# -*- coding: utf-8 -*-

# Copyright 2012-2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from optparse import make_option
from datetime import datetime

import tarfile
import StringIO

from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from django.utils import simplejson

from moocng.courses.models import Course


class Command(BaseCommand):

    option_list = BaseCommand.option_list + (
        make_option('-c', '--course',
                    action='store',
                    dest='course',
                    default="",
                    help='Course slug to export'),
        make_option('-f', '--filename',
                    action='store',
                    dest='filename',
                    default="",
                    help="Filename to save the course (without file extension)"),
    )

    def error(self, message):
        self.stderr.write("%s\n" % message.encode("ascii", "replace"))

    def message(self, message):
        self.stdout.write("%s\n" % message.encode("ascii", "replace"))

    def save_file(self, filefield):
        self.message("Saving file %s" % filefield.name)
        self.tar.add(filefield.file.name, filefield.name)

    def properties_dict(self, obj):
        data = serializers.serialize("json", [obj], use_natural_keys=True)
        _dict = simplejson.loads(data)[0]
        return _dict

    def option_dict(self, option):
        return self.properties_dict(option)

    def question_dict(self, question):
        _dict = self.properties_dict(question)
        options = []
        for option in question.option_set.all():
            options.append(self.option_dict(option))
        _dict["options"] = options
        return _dict

    def kq_dict(self, kq):
        _dict = self.properties_dict(kq)
        questions = []
        for question in kq.question_set.all():
            questions.append(self.question_dict(question))
            self.save_file(question.last_frame)
        _dict["questions"] = questions
        attachments = []
        for attachment in kq.attachment_set.all():
            attachments.append(self.properties_dict(attachment))
            self.save_file(attachment.attachment)
        _dict["attachments"] = attachments
        return _dict

    def unit_dict(self, unit):
        _dict = self.properties_dict(unit)
        kqs = []
        for kq in unit.knowledgequantum_set.all():
            kqs.append(self.kq_dict(kq))
        _dict["knowledgequantums"] = kqs
        return _dict

    def course_dict(self, course):
        _dict = self.properties_dict(course)
        units = []
        for unit in course.unit_set.all():
            units.append(self.unit_dict(unit))
        _dict["units"] = units
        return _dict

    def handle(self, *args, **options):
        if not options["course"]:
            raise CommandError("--course / -c param is required")
        try:
            course = Course.objects.get(slug=options["course"])
        except Course.DoesNotExist:
            raise CommandError(u"Course %s does not exist" % options["course"])

        filename = options.get("filename", "")
        if not filename:
            today = datetime.today()
            timestamp = today.strftime("%Y%m%d-%H%M%S")
            filename = "%s-%s" % (course.slug, timestamp)
        self.filename = filename

        self.tar = tarfile.open("%s.tgz" % filename, "w:gz")

        course_dict = self.course_dict(course)

        coursefile = StringIO.StringIO()
        coursefile.write(simplejson.dumps(course_dict))
        coursefile.seek(0)

        self.message("Saving course metadata file course.json")
        tarinfo = tarfile.TarInfo(name="course.json")
        tarinfo.size = len(coursefile.buf)
        self.tar.addfile(tarinfo=tarinfo, fileobj=coursefile)

        self.tar.close()

        self.message("\n\nCourse saved in %s.tar.gz" % filename)
OpenMOOC/moocng
moocng/courses/management/commands/export_course.py
Python
apache-2.0
4,436
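The export_course module above is a Django management command. A short sketch of invoking it programmatically; the course slug and output filename are hypothetical, and a configured moocng project is assumed.

from django.core.management import call_command

# Equivalent to: python manage.py export_course -c intro-101 -f intro-101-backup
# 'intro-101' is a hypothetical course slug.
call_command('export_course', course='intro-101', filename='intro-101-backup')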
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase

from geotrek.common.management.commands.migrate import check_srid_has_meter_unit


class StartupCheckTest(TestCase):
    def test_error_is_raised_if_srid_is_not_meters(self):
        if hasattr(check_srid_has_meter_unit, '_checked'):
            delattr(check_srid_has_meter_unit, '_checked')
        with self.settings(SRID=4326):
            self.assertRaises(ImproperlyConfigured, check_srid_has_meter_unit)

    def test_error_is_not_raised_if_srid_is_meters(self):
        if hasattr(check_srid_has_meter_unit, '_checked'):
            delattr(check_srid_has_meter_unit, '_checked')
        with self.settings(SRID=2154):
            check_srid_has_meter_unit()
GeotrekCE/Geotrek-admin
geotrek/common/tests/test_conf.py
Python
bsd-2-clause
755
# -*- coding: utf-8 -*-

"""Processor for remote INDRA JSON files."""

import requests
from typing import List

from ..statements import Statement, print_stmt_summary, stmts_from_json

__all__ = [
    'RemoteProcessor',
]


class RemoteProcessor:
    """A processor for INDRA JSON file to be retrieved by URL.

    Parameters
    ----------
    url :
        The URL of the INDRA JSON file to load
    """

    #: The URL of the data
    url: str

    def __init__(self, url: str):
        self.url = url
        self._statements = None

    @property
    def statements(self) -> List[Statement]:
        """The extracted statements."""
        if self._statements is None:
            self.extract_statements()
        return self._statements

    def extract_statements(self) -> List[Statement]:
        """Extract statements from the remote JSON file."""
        res = requests.get(self.url)
        res.raise_for_status()
        self._statements = stmts_from_json(res.json())
        return self._statements

    def print_summary(self) -> None:
        """Print a summary of the statements."""
        print_stmt_summary(self.statements)
johnbachman/indra
indra/sources/utils.py
Python
bsd-2-clause
1,144
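A short usage sketch for the RemoteProcessor in the record above; the URL is a hypothetical placeholder for any endpoint serving an INDRA statements JSON list.

from indra.sources.utils import RemoteProcessor

# Hypothetical URL; any INDRA Statement JSON dump reachable over HTTP works.
rp = RemoteProcessor(url="https://example.org/indra_statements.json")
stmts = rp.statements   # lazily calls extract_statements() on first access
rp.print_summary()      # prints a summary of the extracted statements
print(len(stmts))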
from dnollkse.viewHelper import render

from faq.models import Faq


def faq(request):
    faq = Faq.objects.all()
    return render(request, "faq/index.dtl", {'faq': faq})
dtekcth/DNollK.se
faq/views.py
Python
bsd-2-clause
172
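A sketch of how the faq view above might be wired into the project's URLconf. The use of django.urls.path and the pattern name are assumptions about the surrounding project, which is not part of this record.

# urls.py sketch (assumption: a django.urls-based URLconf)
from django.urls import path

from faq import views

urlpatterns = [
    path('faq/', views.faq, name='faq'),
]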
""" implementations of internal DB commands for YubiHSM """ # Copyright (c) 2011 Yubico AB # See the file COPYING for licence statement. import struct __all__ = [ # constants # functions # classes 'YHSM_Cmd_DB_YubiKey_Store', 'YHSM_Cmd_DB_Validate_OTP', ] import pyhsm.defines import pyhsm.exception import pyhsm.aead_cmd import pyhsm.validate_cmd from pyhsm.cmd import YHSM_Cmd class YHSM_Cmd_DB_YubiKey_Store(YHSM_Cmd): """ Ask YubiHSM to store data about a YubiKey in the internal database (not buffer). The input is an AEAD, perhaps previously created using generate_aead(). If the nonce for the AEAD is not the same as the public_id, specify it with the nonce keyword argument. This requires a YubiHSM >= 1.0.4. """ status = None def __init__(self, stick, public_id, key_handle, aead, nonce = None): self.key_handle = pyhsm.util.input_validate_key_handle(key_handle) self.public_id = pyhsm.util.input_validate_nonce(public_id, pad = True) aead = pyhsm.util.input_validate_aead(aead, expected_len = pyhsm.defines.YSM_YUBIKEY_AEAD_SIZE) if nonce is None: # typedef struct { # uint8_t publicId[YSM_PUBLIC_ID_SIZE]; // Public id (nonce) # uint32_t keyHandle; // Key handle # uint8_t aead[YSM_YUBIKEY_AEAD_SIZE]; // AEAD block # } YSM_DB_YUBIKEY_AEAD_STORE_REQ; fmt = "< %is I %is" % (pyhsm.defines.YSM_PUBLIC_ID_SIZE, \ pyhsm.defines.YSM_YUBIKEY_AEAD_SIZE) packed = struct.pack(fmt, self.public_id, self.key_handle, aead) YHSM_Cmd.__init__(self, stick, pyhsm.defines.YSM_DB_YUBIKEY_AEAD_STORE, packed) else: nonce = pyhsm.util.input_validate_nonce(nonce) # typedef struct { # uint8_t publicId[YSM_PUBLIC_ID_SIZE]; // Public id # uint32_t keyHandle; // Key handle # uint8_t aead[YSM_YUBIKEY_AEAD_SIZE]; // AEAD block # uint8_t nonce[YSM_AEAD_NONCE_SIZE]; // Nonce # } YSM_DB_YUBIKEY_AEAD_STORE2_REQ; fmt = "< %is I %is %is" % (pyhsm.defines.YSM_PUBLIC_ID_SIZE, \ pyhsm.defines.YSM_YUBIKEY_AEAD_SIZE, \ pyhsm.defines.YSM_AEAD_NONCE_SIZE) packed = struct.pack(fmt, self.public_id, self.key_handle, aead, nonce) YHSM_Cmd.__init__(self, stick, pyhsm.defines.YSM_DB_YUBIKEY_AEAD_STORE2, packed) def parse_result(self, data): """ Return True if the AEAD was stored sucessfully. """ # typedef struct { # uint8_t publicId[YSM_PUBLIC_ID_SIZE]; // Public id (nonce) # uint32_t keyHandle; // Key handle # YSM_STATUS status; // Validation status # } YSM_DB_YUBIKEY_AEAD_STORE_RESP; public_id, \ key_handle, \ self.status = struct.unpack("< %is I B" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE), data) pyhsm.util.validate_cmd_response_str('public_id', public_id, self.public_id) pyhsm.util.validate_cmd_response_hex('key_handle', key_handle, self.key_handle) if self.status == pyhsm.defines.YSM_STATUS_OK: return True else: raise pyhsm.exception.YHSM_CommandFailed(pyhsm.defines.cmd2str(self.command), self.status) class YHSM_Cmd_DB_Validate_OTP(YHSM_Cmd): """ Request the YubiHSM to validate an OTP for a YubiKey stored in the internal database. 
""" response = None status = None def __init__(self, stick, public_id, otp): self.public_id = pyhsm.util.input_validate_nonce(public_id, pad = True) self.otp = pyhsm.util.input_validate_str(otp, 'otp', exact_len = pyhsm.defines.YSM_OTP_SIZE) # typedef struct { # uint8_t publicId[YSM_PUBLIC_ID_SIZE]; // Public id # uint8_t otp[YSM_OTP_SIZE]; // OTP # } YSM_DB_OTP_VALIDATE_REQ; fmt = "%is %is" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE, pyhsm.defines.YSM_OTP_SIZE) packed = struct.pack(fmt, self.public_id, self.otp) YHSM_Cmd.__init__(self, stick, pyhsm.defines.YSM_DB_OTP_VALIDATE, packed) def __repr__(self): if self.executed: return '<%s instance at %s: public_id=%s, status=0x%x>' % ( self.__class__.__name__, hex(id(self)), self.public_id.encode('hex'), self.status ) else: return '<%s instance at %s (not executed)>' % ( self.__class__.__name__, hex(id(self)) ) def parse_result(self, data): # typedef struct { # uint8_t public_id[YSM_PUBLIC_ID_SIZE]; // Public id # uint16_t use_ctr; // Use counter # uint8_t session_ctr; // Session counter # uint8_t tstph; // Timestamp (high part) # uint16_t tstpl; // Timestamp (low part) # YHSM_STATUS status; // Validation status # } YHSM_AEAD_OTP_DECODED_RESP; fmt = "%is H B B H B" % (pyhsm.defines.YSM_PUBLIC_ID_SIZE) public_id, \ use_ctr, \ session_ctr, \ ts_high, \ ts_low, \ self.status = struct.unpack(fmt, data) pyhsm.util.validate_cmd_response_str('public_id', public_id, self.public_id) if self.status == pyhsm.defines.YSM_STATUS_OK: self.response = pyhsm.validate_cmd.YHSM_ValidationResult( \ public_id, use_ctr, session_ctr, ts_high, ts_low) return self.response else: raise pyhsm.exception.YHSM_CommandFailed(pyhsm.defines.cmd2str(self.command), self.status)
Yubico/python-pyhsm
pyhsm/db_cmd.py
Python
bsd-2-clause
5,954
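A hedged sketch of constructing the OTP-validation command from the record above. The stick object, the placeholder public_id/OTP values, and the field sizes are assumptions; in real use the stick would come from an opened YubiHSM session (for example via pyhsm.base.YHSM), and execution/response parsing is handled by the YHSM_Cmd base class rather than shown here. The library is Python 2 era code.

from pyhsm.db_cmd import YHSM_Cmd_DB_Validate_OTP

# Placeholders (assumptions): a real `stick` is the serial connection held by
# an opened YubiHSM session; public_id and otp must be real YubiKey values of
# the sizes validated against pyhsm.defines (assumed 6 and 16 bytes here).
stick = None
public_id = "\x00" * 6
otp = "\x00" * 16

cmd = YHSM_Cmd_DB_Validate_OTP(stick, public_id, otp)
# On execution (via the YHSM_Cmd base class), parse_result() returns a
# YHSM_ValidationResult on success or raises YHSM_CommandFailed.
print(repr(cmd))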
#!/usr/bin/env python

import os
import unittest

from nose.plugins.attrib import attr

from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.adcpt_acfgm.dcl.pd0.adcpt_acfgm_dcl_pd0_telemetered_driver import parse
from mi.dataset.driver.adcpt_acfgm.dcl.pd0.resource import RESOURCE_PATH

__author__ = 'Jeff Roy'

log = get_logger()


@attr('UNIT', group='mi')
class SampleTest(unittest.TestCase):

    def test_one(self):
        source_file_path = os.path.join(RESOURCE_PATH, '20140424.adcpt.log')

        particle_data_handler = ParticleDataHandler()
        particle_data_handler = parse(None, source_file_path, particle_data_handler)

        log.debug("SAMPLES: %s", particle_data_handler._samples)
        log.debug("FAILURE: %s", particle_data_handler._failure)

        self.assertEquals(particle_data_handler._failure, False)


if __name__ == '__main__':
    test = SampleTest('test_one')
    test.test_one()
petercable/mi-dataset
mi/dataset/driver/adcpt_acfgm/dcl/pd0/test/test_adcpt_acfgm_dcl_pd0_telemetered_driver.py
Python
bsd-2-clause
980
import fractions import operator import os import random import sys import struct import time import unittest from test import support from test.test_grammar import (VALID_UNDERSCORE_LITERALS, INVALID_UNDERSCORE_LITERALS) from math import isinf, isnan, copysign, ldexp INF = float("inf") NAN = float("nan") have_getformat = hasattr(float, "__getformat__") requires_getformat = unittest.skipUnless(have_getformat, "requires __getformat__") requires_setformat = unittest.skipUnless(hasattr(float, "__setformat__"), "requires __setformat__") #locate file with float format test values test_dir = os.path.dirname(__file__) or os.curdir format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt') class FloatSubclass(float): pass class OtherFloatSubclass(float): pass class GeneralFloatCases(unittest.TestCase): def test_float(self): self.assertEqual(float(3.14), 3.14) self.assertEqual(float(314), 314.0) self.assertEqual(float(" 3.14 "), 3.14) self.assertRaises(ValueError, float, " 0x3.1 ") self.assertRaises(ValueError, float, " -0x3.p-1 ") self.assertRaises(ValueError, float, " +0x3.p-1 ") self.assertRaises(ValueError, float, "++3.14") self.assertRaises(ValueError, float, "+-3.14") self.assertRaises(ValueError, float, "-+3.14") self.assertRaises(ValueError, float, "--3.14") self.assertRaises(ValueError, float, ".nan") self.assertRaises(ValueError, float, "+.inf") self.assertRaises(ValueError, float, ".") self.assertRaises(ValueError, float, "-.") self.assertRaises(TypeError, float, {}) self.assertRaisesRegex(TypeError, "not 'dict'", float, {}) # Lone surrogate self.assertRaises((UnicodeEncodeError, ValueError), float, '\uD8F0') # check that we don't accept alternate exponent markers self.assertRaises(ValueError, float, "-1.7d29") self.assertRaises(ValueError, float, "3D-14") self.assertEqual(float(" \u0663.\u0661\u0664 "), 3.14) self.assertEqual(float("\N{EM SPACE}3.14\N{EN SPACE}"), 3.14) # extra long strings should not be a problem float(b'.' + b'1'*1000) float('.' + '1'*1000) def test_underscores(self): for lit in VALID_UNDERSCORE_LITERALS: if not any(ch in lit for ch in 'jJxXoObB'): self.assertEqual(float(lit), eval(lit)) self.assertEqual(float(lit), float(lit.replace('_', ''))) for lit in INVALID_UNDERSCORE_LITERALS: if lit in ('0_7', '09_99'): # octals are not recognized here continue if not any(ch in lit for ch in 'jJxXoObB'): self.assertRaises(ValueError, float, lit) # Additional test cases; nan and inf are never valid as literals, # only in the float() constructor, but we don't allow underscores # in or around them. self.assertRaises(ValueError, float, '_NaN') self.assertRaises(ValueError, float, 'Na_N') self.assertRaises(ValueError, float, 'IN_F') self.assertRaises(ValueError, float, '-_INF') self.assertRaises(ValueError, float, '-INF_') # Check that we handle bytes values correctly. self.assertRaises(ValueError, float, b'0_.\xff9') def test_non_numeric_input_types(self): # Test possible non-numeric types for the argument x, including # subclasses of the explicitly documented accepted types. 
class CustomStr(str): pass class CustomBytes(bytes): pass class CustomByteArray(bytearray): pass factories = [ bytes, bytearray, lambda b: CustomStr(b.decode()), CustomBytes, CustomByteArray, memoryview, ] try: from array import array except ImportError: pass else: factories.append(lambda b: array('B', b)) for f in factories: x = f(b" 3.14 ") with self.subTest(type(x)): self.assertEqual(float(x), 3.14) with self.assertRaisesRegex(ValueError, "could not convert"): float(f(b'A' * 0x10)) def test_float_memoryview(self): self.assertEqual(float(memoryview(b'12.3')[1:4]), 2.3) self.assertEqual(float(memoryview(b'12.3\x00')[1:4]), 2.3) self.assertEqual(float(memoryview(b'12.3 ')[1:4]), 2.3) self.assertEqual(float(memoryview(b'12.3A')[1:4]), 2.3) self.assertEqual(float(memoryview(b'12.34')[1:4]), 2.3) def test_error_message(self): testlist = ('\xbd', '123\xbd', ' 123 456 ') for s in testlist: try: float(s) except ValueError as e: self.assertIn(s.strip(), e.args[0]) else: self.fail("Expected int(%r) to raise a ValueError", s) @support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE') def test_float_with_comma(self): # set locale to something that doesn't use '.' for the decimal point # float must not accept the locale specific decimal point but # it still has to accept the normal python syntax import locale if not locale.localeconv()['decimal_point'] == ',': self.skipTest('decimal_point is not ","') self.assertEqual(float(" 3.14 "), 3.14) self.assertEqual(float("+3.14 "), 3.14) self.assertEqual(float("-3.14 "), -3.14) self.assertEqual(float(".14 "), .14) self.assertEqual(float("3. "), 3.0) self.assertEqual(float("3.e3 "), 3000.0) self.assertEqual(float("3.2e3 "), 3200.0) self.assertEqual(float("2.5e-1 "), 0.25) self.assertEqual(float("5e-1"), 0.5) self.assertRaises(ValueError, float, " 3,14 ") self.assertRaises(ValueError, float, " +3,14 ") self.assertRaises(ValueError, float, " -3,14 ") self.assertRaises(ValueError, float, " 0x3.1 ") self.assertRaises(ValueError, float, " -0x3.p-1 ") self.assertRaises(ValueError, float, " +0x3.p-1 ") self.assertEqual(float(" 25.e-1 "), 2.5) self.assertAlmostEqual(float(" .25e-1 "), .025) def test_floatconversion(self): # Make sure that calls to __float__() work properly class Foo1(object): def __float__(self): return 42. class Foo2(float): def __float__(self): return 42. class Foo3(float): def __new__(cls, value=0.): return float.__new__(cls, 2*value) def __float__(self): return self class Foo4(float): def __float__(self): return 42 # Issue 5759: __float__ not called on str subclasses (though it is on # unicode subclasses). class FooStr(str): def __float__(self): return float(str(self)) + 1 self.assertEqual(float(Foo1()), 42.) self.assertEqual(float(Foo2()), 42.) with self.assertWarns(DeprecationWarning): self.assertEqual(float(Foo3(21)), 42.) self.assertRaises(TypeError, float, Foo4(42)) self.assertEqual(float(FooStr('8')), 9.) class Foo5: def __float__(self): return "" self.assertRaises(TypeError, time.sleep, Foo5()) # Issue #24731 class F: def __float__(self): return OtherFloatSubclass(42.) with self.assertWarns(DeprecationWarning): self.assertEqual(float(F()), 42.) with self.assertWarns(DeprecationWarning): self.assertIs(type(float(F())), float) with self.assertWarns(DeprecationWarning): self.assertEqual(FloatSubclass(F()), 42.) 
with self.assertWarns(DeprecationWarning): self.assertIs(type(FloatSubclass(F())), FloatSubclass) def test_is_integer(self): self.assertFalse((1.1).is_integer()) self.assertTrue((1.).is_integer()) self.assertFalse(float("nan").is_integer()) self.assertFalse(float("inf").is_integer()) def test_floatasratio(self): for f, ratio in [ (0.875, (7, 8)), (-0.875, (-7, 8)), (0.0, (0, 1)), (11.5, (23, 2)), ]: self.assertEqual(f.as_integer_ratio(), ratio) for i in range(10000): f = random.random() f *= 10 ** random.randint(-100, 100) n, d = f.as_integer_ratio() self.assertEqual(float(n).__truediv__(d), f) R = fractions.Fraction self.assertEqual(R(0, 1), R(*float(0.0).as_integer_ratio())) self.assertEqual(R(5, 2), R(*float(2.5).as_integer_ratio())) self.assertEqual(R(1, 2), R(*float(0.5).as_integer_ratio())) self.assertEqual(R(4728779608739021, 2251799813685248), R(*float(2.1).as_integer_ratio())) self.assertEqual(R(-4728779608739021, 2251799813685248), R(*float(-2.1).as_integer_ratio())) self.assertEqual(R(-2100, 1), R(*float(-2100.0).as_integer_ratio())) self.assertRaises(OverflowError, float('inf').as_integer_ratio) self.assertRaises(OverflowError, float('-inf').as_integer_ratio) self.assertRaises(ValueError, float('nan').as_integer_ratio) def test_float_containment(self): floats = (INF, -INF, 0.0, 1.0, NAN) for f in floats: if f is NAN and support.check_impl_detail(pypy=False): self.assertIn(f, [f]) self.assertIn(f, (f,)) self.assertIn(f, {f}) self.assertIn(f, {f: None}) self.assertEqual([f].count(f), 1, "[].count('%r') != 1" % f) self.assertIn(f, floats) for f in floats: # nonidentical containers, same type, same contents self.assertTrue([f] == [f], "[%r] != [%r]" % (f, f)) self.assertTrue((f,) == (f,), "(%r,) != (%r,)" % (f, f)) self.assertTrue({f} == {f}, "{%r} != {%r}" % (f, f)) self.assertTrue({f : None} == {f: None}, "{%r : None} != " "{%r : None}" % (f, f)) # identical containers l, t, s, d = [f], (f,), {f}, {f: None} self.assertTrue(l == l, "[%r] not equal to itself" % f) self.assertTrue(t == t, "(%r,) not equal to itself" % f) self.assertTrue(s == s, "{%r} not equal to itself" % f) self.assertTrue(d == d, "{%r : None} not equal to itself" % f) def assertEqualAndEqualSign(self, a, b): # fail unless a == b and a and b have the same sign bit; # the only difference from assertEqual is that this test # distinguishes -0.0 and 0.0. self.assertEqual((a, copysign(1.0, a)), (b, copysign(1.0, b))) @support.requires_IEEE_754 def test_float_mod(self): # Check behaviour of % operator for IEEE 754 special cases. # In particular, check signs of zeros. mod = operator.mod self.assertEqualAndEqualSign(mod(-1.0, 1.0), 0.0) self.assertEqualAndEqualSign(mod(-1e-100, 1.0), 1.0) self.assertEqualAndEqualSign(mod(-0.0, 1.0), 0.0) self.assertEqualAndEqualSign(mod(0.0, 1.0), 0.0) self.assertEqualAndEqualSign(mod(1e-100, 1.0), 1e-100) self.assertEqualAndEqualSign(mod(1.0, 1.0), 0.0) self.assertEqualAndEqualSign(mod(-1.0, -1.0), -0.0) self.assertEqualAndEqualSign(mod(-1e-100, -1.0), -1e-100) self.assertEqualAndEqualSign(mod(-0.0, -1.0), -0.0) self.assertEqualAndEqualSign(mod(0.0, -1.0), -0.0) self.assertEqualAndEqualSign(mod(1e-100, -1.0), -1.0) self.assertEqualAndEqualSign(mod(1.0, -1.0), -0.0) @support.requires_IEEE_754 def test_float_pow(self): # test builtin pow and ** operator for IEEE 754 special cases. 
# Special cases taken from section F.9.4.4 of the C99 specification for pow_op in pow, operator.pow: # x**NAN is NAN for any x except 1 self.assertTrue(isnan(pow_op(-INF, NAN))) self.assertTrue(isnan(pow_op(-2.0, NAN))) self.assertTrue(isnan(pow_op(-1.0, NAN))) self.assertTrue(isnan(pow_op(-0.5, NAN))) self.assertTrue(isnan(pow_op(-0.0, NAN))) self.assertTrue(isnan(pow_op(0.0, NAN))) self.assertTrue(isnan(pow_op(0.5, NAN))) self.assertTrue(isnan(pow_op(2.0, NAN))) self.assertTrue(isnan(pow_op(INF, NAN))) self.assertTrue(isnan(pow_op(NAN, NAN))) # NAN**y is NAN for any y except +-0 self.assertTrue(isnan(pow_op(NAN, -INF))) self.assertTrue(isnan(pow_op(NAN, -2.0))) self.assertTrue(isnan(pow_op(NAN, -1.0))) self.assertTrue(isnan(pow_op(NAN, -0.5))) self.assertTrue(isnan(pow_op(NAN, 0.5))) self.assertTrue(isnan(pow_op(NAN, 1.0))) self.assertTrue(isnan(pow_op(NAN, 2.0))) self.assertTrue(isnan(pow_op(NAN, INF))) # (+-0)**y raises ZeroDivisionError for y a negative odd integer self.assertRaises(ZeroDivisionError, pow_op, -0.0, -1.0) self.assertRaises(ZeroDivisionError, pow_op, 0.0, -1.0) # (+-0)**y raises ZeroDivisionError for y finite and negative # but not an odd integer self.assertRaises(ZeroDivisionError, pow_op, -0.0, -2.0) self.assertRaises(ZeroDivisionError, pow_op, -0.0, -0.5) self.assertRaises(ZeroDivisionError, pow_op, 0.0, -2.0) self.assertRaises(ZeroDivisionError, pow_op, 0.0, -0.5) # (+-0)**y is +-0 for y a positive odd integer self.assertEqualAndEqualSign(pow_op(-0.0, 1.0), -0.0) self.assertEqualAndEqualSign(pow_op(0.0, 1.0), 0.0) # (+-0)**y is 0 for y finite and positive but not an odd integer self.assertEqualAndEqualSign(pow_op(-0.0, 0.5), 0.0) self.assertEqualAndEqualSign(pow_op(-0.0, 2.0), 0.0) self.assertEqualAndEqualSign(pow_op(0.0, 0.5), 0.0) self.assertEqualAndEqualSign(pow_op(0.0, 2.0), 0.0) # (-1)**+-inf is 1 self.assertEqualAndEqualSign(pow_op(-1.0, -INF), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, INF), 1.0) # 1**y is 1 for any y, even if y is an infinity or nan self.assertEqualAndEqualSign(pow_op(1.0, -INF), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -2.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -1.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -0.5), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 0.5), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 1.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 2.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, INF), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, NAN), 1.0) # x**+-0 is 1 for any x, even if x is a zero, infinity, or nan self.assertEqualAndEqualSign(pow_op(-INF, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-0.5, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-0.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(0.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(0.5, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(INF, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(NAN, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-INF, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-0.5, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-0.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(0.0, -0.0), 
1.0) self.assertEqualAndEqualSign(pow_op(0.5, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(INF, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(NAN, -0.0), 1.0) # x**y defers to complex pow for finite negative x and # non-integral y. self.assertEqual(type(pow_op(-2.0, -0.5)), complex) self.assertEqual(type(pow_op(-2.0, 0.5)), complex) self.assertEqual(type(pow_op(-1.0, -0.5)), complex) self.assertEqual(type(pow_op(-1.0, 0.5)), complex) self.assertEqual(type(pow_op(-0.5, -0.5)), complex) self.assertEqual(type(pow_op(-0.5, 0.5)), complex) # x**-INF is INF for abs(x) < 1 self.assertEqualAndEqualSign(pow_op(-0.5, -INF), INF) self.assertEqualAndEqualSign(pow_op(-0.0, -INF), INF) self.assertEqualAndEqualSign(pow_op(0.0, -INF), INF) self.assertEqualAndEqualSign(pow_op(0.5, -INF), INF) # x**-INF is 0 for abs(x) > 1 self.assertEqualAndEqualSign(pow_op(-INF, -INF), 0.0) self.assertEqualAndEqualSign(pow_op(-2.0, -INF), 0.0) self.assertEqualAndEqualSign(pow_op(2.0, -INF), 0.0) self.assertEqualAndEqualSign(pow_op(INF, -INF), 0.0) # x**INF is 0 for abs(x) < 1 self.assertEqualAndEqualSign(pow_op(-0.5, INF), 0.0) self.assertEqualAndEqualSign(pow_op(-0.0, INF), 0.0) self.assertEqualAndEqualSign(pow_op(0.0, INF), 0.0) self.assertEqualAndEqualSign(pow_op(0.5, INF), 0.0) # x**INF is INF for abs(x) > 1 self.assertEqualAndEqualSign(pow_op(-INF, INF), INF) self.assertEqualAndEqualSign(pow_op(-2.0, INF), INF) self.assertEqualAndEqualSign(pow_op(2.0, INF), INF) self.assertEqualAndEqualSign(pow_op(INF, INF), INF) # (-INF)**y is -0.0 for y a negative odd integer self.assertEqualAndEqualSign(pow_op(-INF, -1.0), -0.0) # (-INF)**y is 0.0 for y negative but not an odd integer self.assertEqualAndEqualSign(pow_op(-INF, -0.5), 0.0) self.assertEqualAndEqualSign(pow_op(-INF, -2.0), 0.0) # (-INF)**y is -INF for y a positive odd integer self.assertEqualAndEqualSign(pow_op(-INF, 1.0), -INF) # (-INF)**y is INF for y positive but not an odd integer self.assertEqualAndEqualSign(pow_op(-INF, 0.5), INF) self.assertEqualAndEqualSign(pow_op(-INF, 2.0), INF) # INF**y is INF for y positive self.assertEqualAndEqualSign(pow_op(INF, 0.5), INF) self.assertEqualAndEqualSign(pow_op(INF, 1.0), INF) self.assertEqualAndEqualSign(pow_op(INF, 2.0), INF) # INF**y is 0.0 for y negative self.assertEqualAndEqualSign(pow_op(INF, -2.0), 0.0) self.assertEqualAndEqualSign(pow_op(INF, -1.0), 0.0) self.assertEqualAndEqualSign(pow_op(INF, -0.5), 0.0) # basic checks not covered by the special cases above self.assertEqualAndEqualSign(pow_op(-2.0, -2.0), 0.25) self.assertEqualAndEqualSign(pow_op(-2.0, -1.0), -0.5) self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-2.0, 1.0), -2.0) self.assertEqualAndEqualSign(pow_op(-2.0, 2.0), 4.0) self.assertEqualAndEqualSign(pow_op(-1.0, -2.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, -1.0), -1.0) self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 1.0), -1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 2.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, -2.0), 0.25) self.assertEqualAndEqualSign(pow_op(2.0, -1.0), 0.5) self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, 1.0), 2.0) self.assertEqualAndEqualSign(pow_op(2.0, 2.0), 4.0) # 1 
** large and -1 ** large; some libms apparently # have problems with these self.assertEqualAndEqualSign(pow_op(1.0, -1e100), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 1e100), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, -1e100), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 1e100), 1.0) # check sign for results that underflow to 0 self.assertEqualAndEqualSign(pow_op(-2.0, -2000.0), 0.0) self.assertEqual(type(pow_op(-2.0, -2000.5)), complex) self.assertEqualAndEqualSign(pow_op(-2.0, -2001.0), -0.0) self.assertEqualAndEqualSign(pow_op(2.0, -2000.0), 0.0) self.assertEqualAndEqualSign(pow_op(2.0, -2000.5), 0.0) self.assertEqualAndEqualSign(pow_op(2.0, -2001.0), 0.0) self.assertEqualAndEqualSign(pow_op(-0.5, 2000.0), 0.0) self.assertEqual(type(pow_op(-0.5, 2000.5)), complex) self.assertEqualAndEqualSign(pow_op(-0.5, 2001.0), -0.0) self.assertEqualAndEqualSign(pow_op(0.5, 2000.0), 0.0) self.assertEqualAndEqualSign(pow_op(0.5, 2000.5), 0.0) self.assertEqualAndEqualSign(pow_op(0.5, 2001.0), 0.0) # check we don't raise an exception for subnormal results, # and validate signs. Tests currently disabled, since # they fail on systems where a subnormal result from pow # is flushed to zero (e.g. Debian/ia64.) #self.assertTrue(0.0 < pow_op(0.5, 1048) < 1e-315) #self.assertTrue(0.0 < pow_op(-0.5, 1048) < 1e-315) #self.assertTrue(0.0 < pow_op(0.5, 1047) < 1e-315) #self.assertTrue(0.0 > pow_op(-0.5, 1047) > -1e-315) #self.assertTrue(0.0 < pow_op(2.0, -1048) < 1e-315) #self.assertTrue(0.0 < pow_op(-2.0, -1048) < 1e-315) #self.assertTrue(0.0 < pow_op(2.0, -1047) < 1e-315) #self.assertTrue(0.0 > pow_op(-2.0, -1047) > -1e-315) @requires_setformat class FormatFunctionsTestCase(unittest.TestCase): def setUp(self): self.save_formats = {'double':float.__getformat__('double'), 'float':float.__getformat__('float')} def tearDown(self): float.__setformat__('double', self.save_formats['double']) float.__setformat__('float', self.save_formats['float']) def test_getformat(self): self.assertIn(float.__getformat__('double'), ['unknown', 'IEEE, big-endian', 'IEEE, little-endian']) self.assertIn(float.__getformat__('float'), ['unknown', 'IEEE, big-endian', 'IEEE, little-endian']) self.assertRaises(ValueError, float.__getformat__, 'chicken') self.assertRaises(TypeError, float.__getformat__, 1) def test_setformat(self): for t in 'double', 'float': float.__setformat__(t, 'unknown') if self.save_formats[t] == 'IEEE, big-endian': self.assertRaises(ValueError, float.__setformat__, t, 'IEEE, little-endian') elif self.save_formats[t] == 'IEEE, little-endian': self.assertRaises(ValueError, float.__setformat__, t, 'IEEE, big-endian') else: self.assertRaises(ValueError, float.__setformat__, t, 'IEEE, big-endian') self.assertRaises(ValueError, float.__setformat__, t, 'IEEE, little-endian') self.assertRaises(ValueError, float.__setformat__, t, 'chicken') self.assertRaises(ValueError, float.__setformat__, 'chicken', 'unknown') BE_DOUBLE_INF = b'\x7f\xf0\x00\x00\x00\x00\x00\x00' LE_DOUBLE_INF = bytes(reversed(BE_DOUBLE_INF)) BE_DOUBLE_NAN = b'\x7f\xf8\x00\x00\x00\x00\x00\x00' LE_DOUBLE_NAN = bytes(reversed(BE_DOUBLE_NAN)) BE_FLOAT_INF = b'\x7f\x80\x00\x00' LE_FLOAT_INF = bytes(reversed(BE_FLOAT_INF)) BE_FLOAT_NAN = b'\x7f\xc0\x00\x00' LE_FLOAT_NAN = bytes(reversed(BE_FLOAT_NAN)) # on non-IEEE platforms, attempting to unpack a bit pattern # representing an infinity or a NaN should raise an exception. 
@requires_setformat class UnknownFormatTestCase(unittest.TestCase): def setUp(self): self.save_formats = {'double':float.__getformat__('double'), 'float':float.__getformat__('float')} float.__setformat__('double', 'unknown') float.__setformat__('float', 'unknown') def tearDown(self): float.__setformat__('double', self.save_formats['double']) float.__setformat__('float', self.save_formats['float']) def test_double_specials_dont_unpack(self): for fmt, data in [('>d', BE_DOUBLE_INF), ('>d', BE_DOUBLE_NAN), ('<d', LE_DOUBLE_INF), ('<d', LE_DOUBLE_NAN)]: self.assertRaises(ValueError, struct.unpack, fmt, data) def test_float_specials_dont_unpack(self): for fmt, data in [('>f', BE_FLOAT_INF), ('>f', BE_FLOAT_NAN), ('<f', LE_FLOAT_INF), ('<f', LE_FLOAT_NAN)]: self.assertRaises(ValueError, struct.unpack, fmt, data) # on an IEEE platform, all we guarantee is that bit patterns # representing infinities or NaNs do not raise an exception; all else # is accident (today). # let's also try to guarantee that -0.0 and 0.0 don't get confused. class IEEEFormatTestCase(unittest.TestCase): @support.requires_IEEE_754 def test_double_specials_do_unpack(self): for fmt, data in [('>d', BE_DOUBLE_INF), ('>d', BE_DOUBLE_NAN), ('<d', LE_DOUBLE_INF), ('<d', LE_DOUBLE_NAN)]: struct.unpack(fmt, data) @support.requires_IEEE_754 def test_float_specials_do_unpack(self): for fmt, data in [('>f', BE_FLOAT_INF), ('>f', BE_FLOAT_NAN), ('<f', LE_FLOAT_INF), ('<f', LE_FLOAT_NAN)]: struct.unpack(fmt, data) class FormatTestCase(unittest.TestCase): def test_format(self): # these should be rewritten to use both format(x, spec) and # x.__format__(spec) self.assertEqual(format(0.0, 'f'), '0.000000') # the default is 'g', except for empty format spec self.assertEqual(format(0.0, ''), '0.0') self.assertEqual(format(0.01, ''), '0.01') self.assertEqual(format(0.01, 'g'), '0.01') # empty presentation type should format in the same way as str # (issue 5920) x = 100/7. 
self.assertEqual(format(x, ''), str(x)) self.assertEqual(format(x, '-'), str(x)) self.assertEqual(format(x, '>'), str(x)) self.assertEqual(format(x, '2'), str(x)) self.assertEqual(format(1.0, 'f'), '1.000000') self.assertEqual(format(-1.0, 'f'), '-1.000000') self.assertEqual(format( 1.0, ' f'), ' 1.000000') self.assertEqual(format(-1.0, ' f'), '-1.000000') self.assertEqual(format( 1.0, '+f'), '+1.000000') self.assertEqual(format(-1.0, '+f'), '-1.000000') # % formatting self.assertEqual(format(-1.0, '%'), '-100.000000%') # conversion to string should fail self.assertRaises(ValueError, format, 3.0, "s") # other format specifiers shouldn't work on floats, # in particular int specifiers for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] + [chr(x) for x in range(ord('A'), ord('Z')+1)]): if not format_spec in 'eEfFgGn%': self.assertRaises(ValueError, format, 0.0, format_spec) self.assertRaises(ValueError, format, 1.0, format_spec) self.assertRaises(ValueError, format, -1.0, format_spec) self.assertRaises(ValueError, format, 1e100, format_spec) self.assertRaises(ValueError, format, -1e100, format_spec) self.assertRaises(ValueError, format, 1e-100, format_spec) self.assertRaises(ValueError, format, -1e-100, format_spec) # issue 3382 self.assertEqual(format(NAN, 'f'), 'nan') self.assertEqual(format(NAN, 'F'), 'NAN') self.assertEqual(format(INF, 'f'), 'inf') self.assertEqual(format(INF, 'F'), 'INF') @support.requires_IEEE_754 def test_format_testfile(self): with open(format_testfile) as testfile: for line in testfile: if line.startswith('--'): continue line = line.strip() if not line: continue lhs, rhs = map(str.strip, line.split('->')) fmt, arg = lhs.split() self.assertEqual(fmt % float(arg), rhs) self.assertEqual(fmt % -float(arg), '-' + rhs) def test_issue5864(self): self.assertEqual(format(123.456, '.4'), '123.5') self.assertEqual(format(1234.56, '.4'), '1.235e+03') self.assertEqual(format(12345.6, '.4'), '1.235e+04') class ReprTestCase(unittest.TestCase): def test_repr(self): floats_file = open(os.path.join(os.path.split(__file__)[0], 'floating_points.txt')) for line in floats_file: line = line.strip() if not line or line.startswith('#'): continue v = eval(line) self.assertEqual(v, eval(repr(v))) floats_file.close() @unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short', "applies only when using short float repr style") def test_short_repr(self): # test short float repr introduced in Python 3.1. One aspect # of this repr is that we get some degree of str -> float -> # str roundtripping. In particular, for any numeric string # containing 15 or fewer significant digits, those exact same # digits (modulo trailing zeros) should appear in the output. # No more repr(0.03) -> "0.029999999999999999"! test_strings = [ # output always includes *either* a decimal point and at # least one digit after that point, or an exponent. '0.0', '1.0', '0.01', '0.02', '0.03', '0.04', '0.05', '1.23456789', '10.0', '100.0', # values >= 1e16 get an exponent... '1000000000000000.0', '9999999999999990.0', '1e+16', '1e+17', # ... 
and so do values < 1e-4 '0.001', '0.001001', '0.00010000000000001', '0.0001', '9.999999999999e-05', '1e-05', # values designed to provoke failure if the FPU rounding # precision isn't set correctly '8.72293771110361e+25', '7.47005307342313e+26', '2.86438000439698e+28', '8.89142905246179e+28', '3.08578087079232e+35', ] for s in test_strings: negs = '-'+s self.assertEqual(s, repr(float(s))) self.assertEqual(negs, repr(float(negs))) # Since Python 3.2, repr and str are identical self.assertEqual(repr(float(s)), str(float(s))) self.assertEqual(repr(float(negs)), str(float(negs))) @support.requires_IEEE_754 class RoundTestCase(unittest.TestCase): def test_inf_nan(self): self.assertRaises(OverflowError, round, INF) self.assertRaises(OverflowError, round, -INF) self.assertRaises(ValueError, round, NAN) self.assertRaises(TypeError, round, INF, 0.0) self.assertRaises(TypeError, round, -INF, 1.0) self.assertRaises(TypeError, round, NAN, "ceci n'est pas un integer") self.assertRaises(TypeError, round, -0.0, 1j) def test_large_n(self): for n in [324, 325, 400, 2**31-1, 2**31, 2**32, 2**100]: self.assertEqual(round(123.456, n), 123.456) self.assertEqual(round(-123.456, n), -123.456) self.assertEqual(round(1e300, n), 1e300) self.assertEqual(round(1e-320, n), 1e-320) self.assertEqual(round(1e150, 300), 1e150) self.assertEqual(round(1e300, 307), 1e300) self.assertEqual(round(-3.1415, 308), -3.1415) self.assertEqual(round(1e150, 309), 1e150) self.assertEqual(round(1.4e-315, 315), 1e-315) def test_small_n(self): for n in [-308, -309, -400, 1-2**31, -2**31, -2**31-1, -2**100]: self.assertEqual(round(123.456, n), 0.0) self.assertEqual(round(-123.456, n), -0.0) self.assertEqual(round(1e300, n), 0.0) self.assertEqual(round(1e-320, n), 0.0) def test_overflow(self): self.assertRaises(OverflowError, round, 1.6e308, -308) self.assertRaises(OverflowError, round, -1.7e308, -308) @unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short', "applies only when using short float repr style") def test_previous_round_bugs(self): # particular cases that have occurred in bug reports self.assertEqual(round(562949953421312.5, 1), 562949953421312.5) self.assertEqual(round(56294995342131.5, 3), 56294995342131.5) # round-half-even self.assertEqual(round(25.0, -1), 20.0) self.assertEqual(round(35.0, -1), 40.0) self.assertEqual(round(45.0, -1), 40.0) self.assertEqual(round(55.0, -1), 60.0) self.assertEqual(round(65.0, -1), 60.0) self.assertEqual(round(75.0, -1), 80.0) self.assertEqual(round(85.0, -1), 80.0) self.assertEqual(round(95.0, -1), 100.0) @unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short', "applies only when using short float repr style") def test_matches_float_format(self): # round should give the same results as float formatting for i in range(500): x = i/1000. self.assertEqual(float(format(x, '.0f')), round(x, 0)) self.assertEqual(float(format(x, '.1f')), round(x, 1)) self.assertEqual(float(format(x, '.2f')), round(x, 2)) self.assertEqual(float(format(x, '.3f')), round(x, 3)) for i in range(5, 5000, 10): x = i/1000. 
self.assertEqual(float(format(x, '.0f')), round(x, 0)) self.assertEqual(float(format(x, '.1f')), round(x, 1)) self.assertEqual(float(format(x, '.2f')), round(x, 2)) self.assertEqual(float(format(x, '.3f')), round(x, 3)) for i in range(500): x = random.random() self.assertEqual(float(format(x, '.0f')), round(x, 0)) self.assertEqual(float(format(x, '.1f')), round(x, 1)) self.assertEqual(float(format(x, '.2f')), round(x, 2)) self.assertEqual(float(format(x, '.3f')), round(x, 3)) def test_format_specials(self): # Test formatting of nans and infs. def test(fmt, value, expected): # Test with both % and format(). self.assertEqual(fmt % value, expected, fmt) fmt = fmt[1:] # strip off the % self.assertEqual(format(value, fmt), expected, fmt) for fmt in ['%e', '%f', '%g', '%.0e', '%.6f', '%.20g', '%#e', '%#f', '%#g', '%#.20e', '%#.15f', '%#.3g']: pfmt = '%+' + fmt[1:] sfmt = '% ' + fmt[1:] test(fmt, INF, 'inf') test(fmt, -INF, '-inf') test(fmt, NAN, 'nan') test(fmt, -NAN, 'nan') # When asking for a sign, it's always provided. nans are # always positive. test(pfmt, INF, '+inf') test(pfmt, -INF, '-inf') test(pfmt, NAN, '+nan') test(pfmt, -NAN, '+nan') # When using ' ' for a sign code, only infs can be negative. # Others have a space. test(sfmt, INF, ' inf') test(sfmt, -INF, '-inf') test(sfmt, NAN, ' nan') test(sfmt, -NAN, ' nan') def test_None_ndigits(self): for x in round(1.23), round(1.23, None), round(1.23, ndigits=None): self.assertEqual(x, 1) self.assertIsInstance(x, int) for x in round(1.78), round(1.78, None), round(1.78, ndigits=None): self.assertEqual(x, 2) self.assertIsInstance(x, int) # Beginning with Python 2.6 float has cross platform compatible # ways to create and represent inf and nan class InfNanTest(unittest.TestCase): def test_inf_from_str(self): self.assertTrue(isinf(float("inf"))) self.assertTrue(isinf(float("+inf"))) self.assertTrue(isinf(float("-inf"))) self.assertTrue(isinf(float("infinity"))) self.assertTrue(isinf(float("+infinity"))) self.assertTrue(isinf(float("-infinity"))) self.assertEqual(repr(float("inf")), "inf") self.assertEqual(repr(float("+inf")), "inf") self.assertEqual(repr(float("-inf")), "-inf") self.assertEqual(repr(float("infinity")), "inf") self.assertEqual(repr(float("+infinity")), "inf") self.assertEqual(repr(float("-infinity")), "-inf") self.assertEqual(repr(float("INF")), "inf") self.assertEqual(repr(float("+Inf")), "inf") self.assertEqual(repr(float("-iNF")), "-inf") self.assertEqual(repr(float("Infinity")), "inf") self.assertEqual(repr(float("+iNfInItY")), "inf") self.assertEqual(repr(float("-INFINITY")), "-inf") self.assertEqual(str(float("inf")), "inf") self.assertEqual(str(float("+inf")), "inf") self.assertEqual(str(float("-inf")), "-inf") self.assertEqual(str(float("infinity")), "inf") self.assertEqual(str(float("+infinity")), "inf") self.assertEqual(str(float("-infinity")), "-inf") self.assertRaises(ValueError, float, "info") self.assertRaises(ValueError, float, "+info") self.assertRaises(ValueError, float, "-info") self.assertRaises(ValueError, float, "in") self.assertRaises(ValueError, float, "+in") self.assertRaises(ValueError, float, "-in") self.assertRaises(ValueError, float, "infinit") self.assertRaises(ValueError, float, "+Infin") self.assertRaises(ValueError, float, "-INFI") self.assertRaises(ValueError, float, "infinitys") self.assertRaises(ValueError, float, "++Inf") self.assertRaises(ValueError, float, "-+inf") self.assertRaises(ValueError, float, "+-infinity") self.assertRaises(ValueError, float, "--Infinity") def test_inf_as_str(self): 
self.assertEqual(repr(1e300 * 1e300), "inf") self.assertEqual(repr(-1e300 * 1e300), "-inf") self.assertEqual(str(1e300 * 1e300), "inf") self.assertEqual(str(-1e300 * 1e300), "-inf") def test_nan_from_str(self): self.assertTrue(isnan(float("nan"))) self.assertTrue(isnan(float("+nan"))) self.assertTrue(isnan(float("-nan"))) self.assertEqual(repr(float("nan")), "nan") self.assertEqual(repr(float("+nan")), "nan") self.assertEqual(repr(float("-nan")), "nan") self.assertEqual(repr(float("NAN")), "nan") self.assertEqual(repr(float("+NAn")), "nan") self.assertEqual(repr(float("-NaN")), "nan") self.assertEqual(str(float("nan")), "nan") self.assertEqual(str(float("+nan")), "nan") self.assertEqual(str(float("-nan")), "nan") self.assertRaises(ValueError, float, "nana") self.assertRaises(ValueError, float, "+nana") self.assertRaises(ValueError, float, "-nana") self.assertRaises(ValueError, float, "na") self.assertRaises(ValueError, float, "+na") self.assertRaises(ValueError, float, "-na") self.assertRaises(ValueError, float, "++nan") self.assertRaises(ValueError, float, "-+NAN") self.assertRaises(ValueError, float, "+-NaN") self.assertRaises(ValueError, float, "--nAn") def test_nan_as_str(self): self.assertEqual(repr(1e300 * 1e300 * 0), "nan") self.assertEqual(repr(-1e300 * 1e300 * 0), "nan") self.assertEqual(str(1e300 * 1e300 * 0), "nan") self.assertEqual(str(-1e300 * 1e300 * 0), "nan") def test_inf_signs(self): self.assertEqual(copysign(1.0, float('inf')), 1.0) self.assertEqual(copysign(1.0, float('-inf')), -1.0) @unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short', "applies only when using short float repr style") def test_nan_signs(self): # When using the dtoa.c code, the sign of float('nan') should # be predictable. self.assertEqual(copysign(1.0, float('nan')), 1.0) self.assertEqual(copysign(1.0, float('-nan')), -1.0) fromHex = float.fromhex toHex = float.hex class HexFloatTestCase(unittest.TestCase): MAX = fromHex('0x.fffffffffffff8p+1024') # max normal MIN = fromHex('0x1p-1022') # min normal TINY = fromHex('0x0.0000000000001p-1022') # min subnormal EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up def identical(self, x, y): # check that floats x and y are identical, or that both # are NaNs if isnan(x) or isnan(y): if isnan(x) == isnan(y): return elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)): return self.fail('%r not identical to %r' % (x, y)) def test_ends(self): self.identical(self.MIN, ldexp(1.0, -1022)) self.identical(self.TINY, ldexp(1.0, -1074)) self.identical(self.EPS, ldexp(1.0, -52)) self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970))) def test_invalid_inputs(self): invalid_inputs = [ 'infi', # misspelt infinities and nans '-Infinit', '++inf', '-+Inf', '--nan', '+-NaN', 'snan', 'NaNs', 'nna', 'an', 'nf', 'nfinity', 'inity', 'iinity', '0xnan', '', ' ', 'x1.0p0', '0xX1.0p0', '+ 0x1.0p0', # internal whitespace '- 0x1.0p0', '0 x1.0p0', '0x 1.0p0', '0x1 2.0p0', '+0x1 .0p0', '0x1. 
0p0', '-0x1.0 1p0', '-0x1.0 p0', '+0x1.0p +0', '0x1.0p -0', '0x1.0p 0', '+0x1.0p+ 0', '-0x1.0p- 0', '++0x1.0p-0', # double signs '--0x1.0p0', '+-0x1.0p+0', '-+0x1.0p0', '0x1.0p++0', '+0x1.0p+-0', '-0x1.0p-+0', '0x1.0p--0', '0x1.0.p0', '0x.p0', # no hex digits before or after point '0x1,p0', # wrong decimal point character '0x1pa', '0x1p\uff10', # fullwidth Unicode digits '\uff10x1p0', '0x\uff11p0', '0x1.\uff10p0', '0x1p0 \n 0x2p0', '0x1p0\0 0x1p0', # embedded null byte is not end of string ] for x in invalid_inputs: try: result = fromHex(x) except ValueError: pass else: self.fail('Expected float.fromhex(%r) to raise ValueError; ' 'got %r instead' % (x, result)) def test_whitespace(self): value_pairs = [ ('inf', INF), ('-Infinity', -INF), ('nan', NAN), ('1.0', 1.0), ('-0x.2', -0.125), ('-0.0', -0.0) ] whitespace = [ '', ' ', '\t', '\n', '\n \t', '\f', '\v', '\r' ] for inp, expected in value_pairs: for lead in whitespace: for trail in whitespace: got = fromHex(lead + inp + trail) self.identical(got, expected) def test_from_hex(self): MIN = self.MIN; MAX = self.MAX; TINY = self.TINY; EPS = self.EPS; # two spellings of infinity, with optional signs; case-insensitive self.identical(fromHex('inf'), INF) self.identical(fromHex('+Inf'), INF) self.identical(fromHex('-INF'), -INF) self.identical(fromHex('iNf'), INF) self.identical(fromHex('Infinity'), INF) self.identical(fromHex('+INFINITY'), INF) self.identical(fromHex('-infinity'), -INF) self.identical(fromHex('-iNFiNitY'), -INF) # nans with optional sign; case insensitive self.identical(fromHex('nan'), NAN) self.identical(fromHex('+NaN'), NAN) self.identical(fromHex('-NaN'), NAN) self.identical(fromHex('-nAN'), NAN) # variations in input format self.identical(fromHex('1'), 1.0) self.identical(fromHex('+1'), 1.0) self.identical(fromHex('1.'), 1.0) self.identical(fromHex('1.0'), 1.0) self.identical(fromHex('1.0p0'), 1.0) self.identical(fromHex('01'), 1.0) self.identical(fromHex('01.'), 1.0) self.identical(fromHex('0x1'), 1.0) self.identical(fromHex('0x1.'), 1.0) self.identical(fromHex('0x1.0'), 1.0) self.identical(fromHex('+0x1.0'), 1.0) self.identical(fromHex('0x1p0'), 1.0) self.identical(fromHex('0X1p0'), 1.0) self.identical(fromHex('0X1P0'), 1.0) self.identical(fromHex('0x1P0'), 1.0) self.identical(fromHex('0x1.p0'), 1.0) self.identical(fromHex('0x1.0p0'), 1.0) self.identical(fromHex('0x.1p4'), 1.0) self.identical(fromHex('0x.1p04'), 1.0) self.identical(fromHex('0x.1p004'), 1.0) self.identical(fromHex('0x1p+0'), 1.0) self.identical(fromHex('0x1P-0'), 1.0) self.identical(fromHex('+0x1p0'), 1.0) self.identical(fromHex('0x01p0'), 1.0) self.identical(fromHex('0x1p00'), 1.0) self.identical(fromHex(' 0x1p0 '), 1.0) self.identical(fromHex('\n 0x1p0'), 1.0) self.identical(fromHex('0x1p0 \t'), 1.0) self.identical(fromHex('0xap0'), 10.0) self.identical(fromHex('0xAp0'), 10.0) self.identical(fromHex('0xaP0'), 10.0) self.identical(fromHex('0xAP0'), 10.0) self.identical(fromHex('0xbep0'), 190.0) self.identical(fromHex('0xBep0'), 190.0) self.identical(fromHex('0xbEp0'), 190.0) self.identical(fromHex('0XBE0P-4'), 190.0) self.identical(fromHex('0xBEp0'), 190.0) self.identical(fromHex('0xB.Ep4'), 190.0) self.identical(fromHex('0x.BEp8'), 190.0) self.identical(fromHex('0x.0BEp12'), 190.0) # moving the point around pi = fromHex('0x1.921fb54442d18p1') self.identical(fromHex('0x.006487ed5110b46p11'), pi) self.identical(fromHex('0x.00c90fdaa22168cp10'), pi) self.identical(fromHex('0x.01921fb54442d18p9'), pi) self.identical(fromHex('0x.03243f6a8885a3p8'), pi) 
self.identical(fromHex('0x.06487ed5110b46p7'), pi) self.identical(fromHex('0x.0c90fdaa22168cp6'), pi) self.identical(fromHex('0x.1921fb54442d18p5'), pi) self.identical(fromHex('0x.3243f6a8885a3p4'), pi) self.identical(fromHex('0x.6487ed5110b46p3'), pi) self.identical(fromHex('0x.c90fdaa22168cp2'), pi) self.identical(fromHex('0x1.921fb54442d18p1'), pi) self.identical(fromHex('0x3.243f6a8885a3p0'), pi) self.identical(fromHex('0x6.487ed5110b46p-1'), pi) self.identical(fromHex('0xc.90fdaa22168cp-2'), pi) self.identical(fromHex('0x19.21fb54442d18p-3'), pi) self.identical(fromHex('0x32.43f6a8885a3p-4'), pi) self.identical(fromHex('0x64.87ed5110b46p-5'), pi) self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi) self.identical(fromHex('0x192.1fb54442d18p-7'), pi) self.identical(fromHex('0x324.3f6a8885a3p-8'), pi) self.identical(fromHex('0x648.7ed5110b46p-9'), pi) self.identical(fromHex('0xc90.fdaa22168cp-10'), pi) self.identical(fromHex('0x1921.fb54442d18p-11'), pi) # ... self.identical(fromHex('0x1921fb54442d1.8p-47'), pi) self.identical(fromHex('0x3243f6a8885a3p-48'), pi) self.identical(fromHex('0x6487ed5110b46p-49'), pi) self.identical(fromHex('0xc90fdaa22168cp-50'), pi) self.identical(fromHex('0x1921fb54442d18p-51'), pi) self.identical(fromHex('0x3243f6a8885a30p-52'), pi) self.identical(fromHex('0x6487ed5110b460p-53'), pi) self.identical(fromHex('0xc90fdaa22168c0p-54'), pi) self.identical(fromHex('0x1921fb54442d180p-55'), pi) # results that should overflow... self.assertRaises(OverflowError, fromHex, '-0x1p1024') self.assertRaises(OverflowError, fromHex, '0x1p+1025') self.assertRaises(OverflowError, fromHex, '+0X1p1030') self.assertRaises(OverflowError, fromHex, '-0x1p+1100') self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789') self.assertRaises(OverflowError, fromHex, '+0X.8p+1025') self.assertRaises(OverflowError, fromHex, '+0x0.8p1025') self.assertRaises(OverflowError, fromHex, '-0x0.4p1026') self.assertRaises(OverflowError, fromHex, '0X2p+1023') self.assertRaises(OverflowError, fromHex, '0x2.p1023') self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023') self.assertRaises(OverflowError, fromHex, '+0X4p+1022') self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023') self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023') self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023') self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022') self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970') self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960') self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960') # ...and those that round to +-max float self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX) self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX) self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX) # zeros self.identical(fromHex('0x0p0'), 0.0) self.identical(fromHex('0x0p1000'), 0.0) self.identical(fromHex('-0x0p1023'), -0.0) self.identical(fromHex('0X0p1024'), 0.0) self.identical(fromHex('-0x0p1025'), -0.0) self.identical(fromHex('0X0p2000'), 0.0) self.identical(fromHex('0x0p123456789123456789'), 0.0) self.identical(fromHex('-0X0p-0'), -0.0) self.identical(fromHex('-0X0p-1000'), -0.0) self.identical(fromHex('0x0p-1023'), 0.0) self.identical(fromHex('-0X0p-1024'), -0.0) self.identical(fromHex('-0x0p-1025'), -0.0) self.identical(fromHex('-0x0p-1072'), -0.0) self.identical(fromHex('0X0p-1073'), 0.0) self.identical(fromHex('-0x0p-1074'), -0.0) 
self.identical(fromHex('0x0p-1075'), 0.0) self.identical(fromHex('0X0p-1076'), 0.0) self.identical(fromHex('-0X0p-2000'), -0.0) self.identical(fromHex('-0x0p-123456789123456789'), -0.0) # values that should underflow to 0 self.identical(fromHex('0X1p-1075'), 0.0) self.identical(fromHex('-0X1p-1075'), -0.0) self.identical(fromHex('-0x1p-123456789123456789'), -0.0) self.identical(fromHex('0x1.00000000000000001p-1075'), TINY) self.identical(fromHex('-0x1.1p-1075'), -TINY) self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY) # check round-half-even is working correctly near 0 ... self.identical(fromHex('0x1p-1076'), 0.0) self.identical(fromHex('0X2p-1076'), 0.0) self.identical(fromHex('0X3p-1076'), TINY) self.identical(fromHex('0x4p-1076'), TINY) self.identical(fromHex('0X5p-1076'), TINY) self.identical(fromHex('0X6p-1076'), 2*TINY) self.identical(fromHex('0x7p-1076'), 2*TINY) self.identical(fromHex('0X8p-1076'), 2*TINY) self.identical(fromHex('0X9p-1076'), 2*TINY) self.identical(fromHex('0xap-1076'), 2*TINY) self.identical(fromHex('0Xbp-1076'), 3*TINY) self.identical(fromHex('0xcp-1076'), 3*TINY) self.identical(fromHex('0Xdp-1076'), 3*TINY) self.identical(fromHex('0Xep-1076'), 4*TINY) self.identical(fromHex('0xfp-1076'), 4*TINY) self.identical(fromHex('0x10p-1076'), 4*TINY) self.identical(fromHex('-0x1p-1076'), -0.0) self.identical(fromHex('-0X2p-1076'), -0.0) self.identical(fromHex('-0x3p-1076'), -TINY) self.identical(fromHex('-0X4p-1076'), -TINY) self.identical(fromHex('-0x5p-1076'), -TINY) self.identical(fromHex('-0x6p-1076'), -2*TINY) self.identical(fromHex('-0X7p-1076'), -2*TINY) self.identical(fromHex('-0X8p-1076'), -2*TINY) self.identical(fromHex('-0X9p-1076'), -2*TINY) self.identical(fromHex('-0Xap-1076'), -2*TINY) self.identical(fromHex('-0xbp-1076'), -3*TINY) self.identical(fromHex('-0xcp-1076'), -3*TINY) self.identical(fromHex('-0Xdp-1076'), -3*TINY) self.identical(fromHex('-0xep-1076'), -4*TINY) self.identical(fromHex('-0Xfp-1076'), -4*TINY) self.identical(fromHex('-0X10p-1076'), -4*TINY) # ... and near MIN ... 
self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY) self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY) self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY) self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY) self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY) self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY) self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY) self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY) self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY) self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY) self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY) self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY) self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY) self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY) self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY) self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY) self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY) self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN) self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN) self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN) self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN) self.identical(fromHex('0x1.00000000000000p-1022'), MIN) self.identical(fromHex('0x1.00000000000002p-1022'), MIN) self.identical(fromHex('0x1.00000000000004p-1022'), MIN) self.identical(fromHex('0x1.00000000000006p-1022'), MIN) self.identical(fromHex('0x1.00000000000008p-1022'), MIN) self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY) self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY) self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY) self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY) self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY) self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY) self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY) self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY) # ... and near 1.0. 
self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS) self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS) self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS) self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS) self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS) self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2) self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2) self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2) self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2) self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2) self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2) self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2) self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0) self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0) self.identical(fromHex('0X0.fffffffffffffep0'), 1.0) self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0) self.identical(fromHex('0X1.00000000000000p0'), 1.0) self.identical(fromHex('0X1.00000000000001p0'), 1.0) self.identical(fromHex('0x1.00000000000002p0'), 1.0) self.identical(fromHex('0X1.00000000000003p0'), 1.0) self.identical(fromHex('0x1.00000000000004p0'), 1.0) self.identical(fromHex('0X1.00000000000005p0'), 1.0) self.identical(fromHex('0X1.00000000000006p0'), 1.0) self.identical(fromHex('0X1.00000000000007p0'), 1.0) self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'), 1.0) self.identical(fromHex('0x1.00000000000008p0'), 1.0) self.identical(fromHex('0x1.00000000000008000000000000000001p0'), 1+EPS) self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS) self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS) self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS) self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS) self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS) self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS) self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS) self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS) self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS) self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS) self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS) self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS) self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS) self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS) self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS) self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'), 1.0+EPS) self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS) self.identical(fromHex('0X1.00000000000018000000000000000001p0'), 1.0+2*EPS) self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS) self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS) self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS) self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS) self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS) self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS) self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS) self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS) def test_roundtrip(self): def roundtrip(x): return fromHex(toHex(x)) for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY, self.TINY, 0.0]: self.identical(x, roundtrip(x)) self.identical(-x, roundtrip(-x)) # fromHex(toHex(x)) should exactly recover x, for any non-NaN float x. 
import random for i in range(10000): e = random.randrange(-1200, 1200) m = random.random() s = random.choice([1.0, -1.0]) try: x = s*ldexp(m, e) except OverflowError: pass else: self.identical(x, fromHex(toHex(x))) def test_subclass(self): class F(float): def __new__(cls, value): return float.__new__(cls, value + 1) f = F.fromhex((1.5).hex()) self.assertIs(type(f), F) self.assertEqual(f, 2.5) class F2(float): def __init__(self, value): self.foo = 'bar' f = F2.fromhex((1.5).hex()) self.assertIs(type(f), F2) self.assertEqual(f, 1.5) self.assertEqual(getattr(f, 'foo', 'none'), 'bar') if __name__ == '__main__': unittest.main()
yotchang4s/cafebabepy
src/main/python/test/test_float.py
Python
bsd-3-clause
62,623
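A note on the HexFloatTestCase block above: the round-trip property it verifies (float.fromhex(float.hex(x)) recovers x exactly, sign of zero included) can be reproduced with a short standalone check. This is an illustrative sketch, not part of the test file; the helper name identical() merely mirrors the comparison the test class defines.

import math
import random

def identical(x, y):
    # Same comparison the test uses: both NaN, or equal value with equal sign.
    if math.isnan(x) or math.isnan(y):
        return math.isnan(x) and math.isnan(y)
    return x == y and math.copysign(1.0, x) == math.copysign(1.0, y)

for _ in range(1000):
    # Random finite floats across the full exponent range, including subnormals.
    x = math.ldexp(random.random(), random.randrange(-1074, 1024)) * random.choice([1.0, -1.0])
    assert identical(x, float.fromhex(x.hex()))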
from sklearn.ensemble import RandomForestClassifier


def get_estimator():
    clf = RandomForestClassifier(n_estimators=10, max_leaf_nodes=10,
                                 random_state=61)
    return clf
paris-saclay-cds/ramp-workflow
rampwf/tests/kits/iris_data_label/submissions/random_forest_10_10/estimator.py
Python
bsd-3-clause
209
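To sanity-check the submission above outside the RAMP evaluation harness (which is not shown here), the returned classifier can be exercised directly on scikit-learn's built-in iris data. This is only an illustrative sketch and assumes get_estimator() from the file above is importable or pasted into the same module.

from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score

X, y = load_iris(return_X_y=True)
clf = get_estimator()  # the submission's factory function defined above
print('mean CV accuracy: %.3f' % cross_val_score(clf, X, y, cv=5).mean())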
from django.template import Template, Context, TemplateSyntaxError try: from PIL import Image except ImportError: import Image from django.core.files import storage from easy_thumbnails import test, alias from easy_thumbnails.conf import settings from easy_thumbnails.files import get_thumbnailer class Base(test.BaseTest): def setUp(self): super(Base, self).setUp() self.storage = test.TemporaryStorage() # Save a test image. self.filename = self.create_image(self.storage, 'test.jpg') # Required so that IOError's get wrapped as TemplateSyntaxError settings.TEMPLATE_DEBUG = True def tearDown(self): self.storage.delete_temporary_storage() super(Base, self).tearDown() def render_template(self, source): source_image = get_thumbnailer(self.storage, self.filename) source_image.thumbnail_storage = self.storage context = Context({ 'source': source_image, 'storage': self.storage, 'filename': self.filename, 'invalid_filename': 'not%s' % self.filename, 'size': (90, 100), 'invalid_size': (90, 'fish'), 'strsize': '80x90', 'invalid_strsize': ('1notasize2'), 'invalid_q': 'notanumber'}) source = '{% load thumbnail %}' + source return Template(source).render(context) def verify_thumbnail(self, expected_size, options, source_filename=None, transparent=False): if source_filename is None: source_filename = self.filename self.assertTrue(isinstance(options, dict)) # Verify that the thumbnail file exists thumbnailer = get_thumbnailer(self.storage, source_filename) expected_filename = thumbnailer.get_thumbnail_name( options, transparent=transparent) self.assertTrue( self.storage.exists(expected_filename), 'Thumbnail file %r not found' % expected_filename) # Verify the thumbnail has the expected dimensions image = Image.open(self.storage.open(expected_filename)) self.assertEqual(image.size, expected_size) return expected_filename class ThumbnailTagTest(Base): restore_settings = ['THUMBNAIL_DEBUG', 'TEMPLATE_DEBUG'] def testTagInvalid(self): # No args, or wrong number of args src = '{% thumbnail %}' self.assertRaises(TemplateSyntaxError, self.render_template, src) src = '{% thumbnail source %}' self.assertRaises(TemplateSyntaxError, self.render_template, src) src = '{% thumbnail source 80x80 as variable crop %}' self.assertRaises(TemplateSyntaxError, self.render_template, src) # Invalid option src = '{% thumbnail source 240x200 invalid %}' self.assertRaises(TemplateSyntaxError, self.render_template, src) # Old comma separated options format can only have an = for quality src = '{% thumbnail source 80x80 crop=1,quality=1 %}' self.assertRaises(TemplateSyntaxError, self.render_template, src) # Invalid quality src_invalid = '{% thumbnail source 240x200 quality=invalid_q %}' src_missing = '{% thumbnail source 240x200 quality=missing_q %}' # ...with THUMBNAIL_DEBUG = False settings.THUMBNAIL_DEBUG = False self.assertEqual(self.render_template(src_invalid), '') self.assertEqual(self.render_template(src_missing), '') # ...and with THUMBNAIL_DEBUG = True settings.THUMBNAIL_DEBUG = True self.assertRaises(TemplateSyntaxError, self.render_template, src_invalid) self.assertRaises(TemplateSyntaxError, self.render_template, src_missing) # Invalid source src = '{% thumbnail invalid_source 80x80 %}' src_on_context = '{% thumbnail invalid_source 80x80 as thumb %}' # ...with THUMBNAIL_DEBUG = False settings.THUMBNAIL_DEBUG = False self.assertEqual(self.render_template(src), '') # ...and with THUMBNAIL_DEBUG = True settings.THUMBNAIL_DEBUG = True self.assertRaises(TemplateSyntaxError, self.render_template, src) 
self.assertRaises(TemplateSyntaxError, self.render_template, src_on_context) # Non-existant source src = '{% thumbnail non_existant_source 80x80 %}' src_on_context = '{% thumbnail non_existant_source 80x80 as thumb %}' # ...with THUMBNAIL_DEBUG = False settings.THUMBNAIL_DEBUG = False self.assertEqual(self.render_template(src), '') # ...and with THUMBNAIL_DEBUG = True settings.THUMBNAIL_DEBUG = True self.assertRaises(TemplateSyntaxError, self.render_template, src) # Invalid size as a tuple: src = '{% thumbnail source invalid_size %}' # ...with THUMBNAIL_DEBUG = False settings.THUMBNAIL_DEBUG = False self.assertEqual(self.render_template(src), '') # ...and THUMBNAIL_DEBUG = True settings.THUMBNAIL_DEBUG = True self.assertRaises(ValueError, self.render_template, src) # Invalid size as a string: src = '{% thumbnail source invalid_strsize %}' # ...with THUMBNAIL_DEBUG = False settings.THUMBNAIL_DEBUG = False self.assertEqual(self.render_template(src), '') # ...and THUMBNAIL_DEBUG = True settings.THUMBNAIL_DEBUG = True self.assertRaises(TemplateSyntaxError, self.render_template, src) # Non-existant size src = '{% thumbnail source non_existant_size %}' # ...with THUMBNAIL_DEBUG = False settings.THUMBNAIL_DEBUG = False self.assertEqual(self.render_template(src), '') # ...and THUMBNAIL_DEBUG = True settings.THUMBNAIL_DEBUG = True self.assertRaises(TemplateSyntaxError, self.render_template, src) def testTag(self): # Set THUMBNAIL_DEBUG = True to make it easier to trace any failures settings.THUMBNAIL_DEBUG = True # Basic output = self.render_template( 'src="{% thumbnail source 240x240 %}"') expected = self.verify_thumbnail((240, 180), {'size': (240, 240)}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="%s"' % expected_url) # Size from context variable # as a tuple: output = self.render_template( 'src="{% thumbnail source size %}"') expected = self.verify_thumbnail((90, 68), {'size': (90, 100)}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="%s"' % expected_url) # as a string: output = self.render_template( 'src="{% thumbnail source strsize %}"') expected = self.verify_thumbnail((80, 60), {'size': (80, 90)}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="%s"' % expected_url) # On context output = self.render_template( 'height:{% thumbnail source 240x240 as thumb %}{{ thumb.height }}') self.assertEqual(output, 'height:180') # With options and quality output = self.render_template( 'src="{% thumbnail source 240x240 sharpen crop quality=95 %}"') # Note that the opts are sorted to ensure a consistent filename. 
expected = self.verify_thumbnail( (240, 240), {'size': (240, 240), 'crop': True, 'sharpen': True, 'quality': 95}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="%s"' % expected_url) # With option and quality on context (also using its unicode method to # display the url) output = self.render_template( '{% thumbnail source 240x240 sharpen crop quality=95 as thumb %}' 'width:{{ thumb.width }}, url:{{ thumb.url }}') self.assertEqual(output, 'width:240, url:%s' % expected_url) # One dimensional resize output = self.render_template('src="{% thumbnail source 100x0 %}"') expected = self.verify_thumbnail((100, 75), {'size': (100, 0)}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="%s"' % expected_url) class ThumbnailerBase(Base): restore_settings = ['THUMBNAIL_ALIASES', 'THUMBNAIL_MEDIA_ROOT'] def setUp(self): super(ThumbnailerBase, self).setUp() settings.THUMBNAIL_MEDIA_ROOT = self.storage.path('') settings.THUMBNAIL_ALIASES = { '': { 'small': {'size': (20, 20), 'crop': True}, }, } alias.aliases.populate_from_settings() # Make the temporary storage location the default storage for now. self._old_default_storage = storage.default_storage._wrapped storage.default_storage._wrapped = self.storage def tearDown(self): # Put the default storage back how we found it. storage.default_storage._wrapped = self._old_default_storage super(ThumbnailerBase, self).tearDown() # Repopulate the aliases (setting reverted by super) alias.aliases.populate_from_settings() class ThumbnailerFilterTest(ThumbnailerBase): def test_get(self): src = ( '{% with t=filename|thumbnailer %}' '{{ t.small.url }}{% endwith %}' ) output = self.render_template(src) expected = self.verify_thumbnail( (20, 20), settings.THUMBNAIL_ALIASES['']['small']) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, expected_url) def test_relative_name(self): src = ( '{% with t=storage|thumbnailer:filename %}' '{{ t.small.url }}{% endwith %}' ) output = self.render_template(src) expected = self.verify_thumbnail( (20, 20), settings.THUMBNAIL_ALIASES['']['small']) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, expected_url) def test_invalid(self): src = ( '{% with t=invalid_filename|thumbnailer %}' '{{ t.small.url }}{% endwith %}' ) output = self.render_template(src) self.assertEqual(output, '') class ThumbnailerPassiveFilterTest(ThumbnailerBase): def test_check_generate(self): src = ( '{% with t=filename|thumbnailer_passive %}' '{{ t.generate }}{% endwith %}' ) output = self.render_template(src) self.assertEqual(output, 'False') def test_get_existing(self): options = settings.THUMBNAIL_ALIASES['']['small'] # Pregenerate the thumbnail. 
get_thumbnailer(self.storage, self.filename).get_thumbnail(options) src = ( '{% with t=filename|thumbnailer_passive %}' '{{ t.small.url }}{% endwith %}' ) output = self.render_template(src) expected = self.verify_thumbnail((20, 20), options) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, expected_url) def test_get_missing(self): src = ( '{% with t=filename|thumbnailer_passive %}' '{{ t.small.url }}{% endwith %}' ) output = self.render_template(src) self.assertEqual(output, '') def test_invalid(self): src = ( '{% with t=invalid_filename|thumbnailer_passive %}' '{{ t.small.url }}{% endwith %}' ) output = self.render_template(src) self.assertEqual(output, '') class ThumbnailTagAliasTest(ThumbnailerBase): def assertCorrectOutput(self, src, alias_name, **overrides): options = settings.THUMBNAIL_ALIASES[''][alias_name] options.update(overrides) output = self.render_template(src) expected = self.verify_thumbnail(options['size'], options) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, expected_url) def test_invalid_alias_name(self): self.assertEqual( self.render_template('{% thumbnail filename "notanalias" %}'), '' ) def test_correct_alias(self): self.assertCorrectOutput('{% thumbnail filename "small" %}', 'small') def test_alias_overrides(self): self.assertCorrectOutput( '{% thumbnail filename "small" upscale %}', 'small', upscale=True, ) self.assertCorrectOutput( '{% thumbnail filename "small" upscale bw %}', 'small', bw=True, upscale=True, )
jrief/easy-thumbnails
easy_thumbnails/tests/templatetags.py
Python
bsd-3-clause
12,831
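The template-tag tests above go through the same thumbnailer machinery that easy-thumbnails exposes directly in Python. A rough equivalent of {% thumbnail source 240x240 crop quality=95 %} looks like the sketch below; it assumes a configured Django project with easy_thumbnails installed and an existing stored image at the (hypothetical) relative path shown.

from easy_thumbnails.files import get_thumbnailer

thumbnailer = get_thumbnailer('photos/example.jpg')  # hypothetical stored file
thumb = thumbnailer.get_thumbnail({'size': (240, 240), 'crop': True, 'quality': 95})
print(thumb.url, thumb.width, thumb.height)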
from django.contrib.auth.models import User

from socialgraph.models import UserLink


def get_people_user_follows(user):
    """
    Returns a ``QuerySet`` representing the users that the given user follows.
    """
    ul = UserLink.objects.filter(from_user=user).values_list('to_user', flat=True)
    return User.objects.filter(id__in=ul)


def get_people_following_user(user):
    """
    Returns a ``QuerySet`` representing the users that follow the given user.
    """
    ul = UserLink.objects.filter(to_user=user).values_list('from_user', flat=True)
    return User.objects.filter(id__in=ul)


def get_mutual_followers(user):
    """
    Returns a ``QuerySet`` representing the users that the given user follows,
    who also follow the given user back.
    """
    follows = UserLink.objects.filter(from_user=user).values_list('to_user', flat=True)
    following = UserLink.objects.filter(to_user=user).values_list('from_user', flat=True)
    return User.objects.filter(id__in=set(follows).intersection(set(following)))
mvayngrib/startthedark
socialgraph/util.py
Python
bsd-3-clause
1,058
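A minimal usage sketch for the helpers above (illustrative only; it assumes a Django project with the socialgraph app installed, two existing users, and that UserLink can be created from just the from_user/to_user fields the queries rely on).

from django.contrib.auth.models import User
from socialgraph.models import UserLink
from socialgraph.util import (
    get_mutual_followers, get_people_following_user, get_people_user_follows,
)

alice = User.objects.get(username='alice')   # hypothetical existing users
bob = User.objects.get(username='bob')

UserLink.objects.get_or_create(from_user=alice, to_user=bob)
UserLink.objects.get_or_create(from_user=bob, to_user=alice)

print(get_people_user_follows(alice))     # QuerySet containing bob
print(get_people_following_user(alice))   # QuerySet containing bob
print(get_mutual_followers(alice))        # QuerySet containing bob (links exist both ways)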
import importlib import inspect import os import re import sys import tempfile from io import StringIO from django.conf.urls import url from django.core import mail from django.core.files.uploadedfile import SimpleUploadedFile from django.db import DatabaseError, connection from django.template import TemplateDoesNotExist from django.test import RequestFactory, SimpleTestCase, override_settings from django.test.utils import LoggingCaptureMixin, patch_logger from django.urls import reverse from django.utils.encoding import force_bytes from django.utils.functional import SimpleLazyObject from django.views.debug import ( CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter, cleanse_setting, technical_500_response, ) from .. import BrokenException, except_args from ..views import ( custom_exception_reporter_filter_view, index_page, multivalue_dict_key_error, non_sensitive_view, paranoid_view, sensitive_args_function_caller, sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view, ) PY36 = sys.version_info >= (3, 6) class User: def __str__(self): return 'jacob' class WithoutEmptyPathUrls: urlpatterns = [url(r'url/$', index_page, name='url')] class CallableSettingWrapperTests(SimpleTestCase): """ Unittests for CallableSettingWrapper """ def test_repr(self): class WrappedCallable: def __repr__(self): return "repr from the wrapped callable" def __call__(self): pass actual = repr(CallableSettingWrapper(WrappedCallable())) self.assertEqual(actual, "repr from the wrapped callable") @override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls') class DebugViewTests(LoggingCaptureMixin, SimpleTestCase): def test_files(self): response = self.client.get('/raises/') self.assertEqual(response.status_code, 500) data = { 'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'), } response = self.client.post('/raises/', data) self.assertContains(response, 'file_data.txt', status_code=500) self.assertNotContains(response, 'haha', status_code=500) def test_400(self): # When DEBUG=True, technical_500_template() is called. response = self.client.get('/raises400/') self.assertContains(response, '<div class="context" id="', status_code=400) # Ensure no 403.html template exists to test the default case. @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', }]) def test_403(self): response = self.client.get('/raises403/') self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403) # Set up a test 403.html template. 
@override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'OPTIONS': { 'loaders': [ ('django.template.loaders.locmem.Loader', { '403.html': 'This is a test template for a 403 error ({{ exception }}).', }), ], }, }]) def test_403_template(self): response = self.client.get('/raises403/') self.assertContains(response, 'test template', status_code=403) self.assertContains(response, '(Insufficient Permissions).', status_code=403) def test_404(self): response = self.client.get('/raises404/') self.assertEqual(response.status_code, 404) def test_raised_404(self): response = self.client.get('/views/raises404/') self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404) def test_404_not_in_urls(self): response = self.client.get('/not-in-urls') self.assertNotContains(response, "Raised by:", status_code=404) self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404) @override_settings(ROOT_URLCONF=WithoutEmptyPathUrls) def test_404_empty_path_not_in_urls(self): response = self.client.get('/') self.assertContains(response, "The empty path didn't match any of these.", status_code=404) def test_technical_404(self): response = self.client.get('/views/technical404/') self.assertContains(response, "Raised by:", status_code=404) self.assertContains(response, "view_tests.views.technical404", status_code=404) def test_classbased_technical_404(self): response = self.client.get('/views/classbased404/') self.assertContains(response, "Raised by:", status_code=404) self.assertContains(response, "view_tests.views.Http404View", status_code=404) def test_view_exceptions(self): for n in range(len(except_args)): with self.assertRaises(BrokenException): self.client.get(reverse('view_exception', args=(n,))) def test_non_l10ned_numeric_ids(self): """ Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized. """ with self.settings(DEBUG=True, USE_L10N=True): response = self.client.get('/raises500/') # We look for a HTML fragment of the form # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"' self.assertContains(response, '<div class="context" id="', status_code=500) match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content) self.assertIsNotNone(match) id_repr = match.group('id') self.assertFalse( re.search(b'[^c0-9]', id_repr), "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr ) def test_template_exceptions(self): for n in range(len(except_args)): try: self.client.get(reverse('template_exception', args=(n,))) except Exception: raising_loc = inspect.trace()[-1][-2][0].strip() self.assertNotEqual( raising_loc.find('raise BrokenException'), -1, "Failed to find 'raise BrokenException' in last frame of " "traceback, instead found: %s" % raising_loc ) def test_template_loader_postmortem(self): """Tests for not existing file""" template_name = "notfound.html" with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile: tempdir = os.path.dirname(tmpfile.name) template_path = os.path.join(tempdir, template_name) with override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [tempdir], }]): response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name})) self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2) # Assert as HTML. 
self.assertContains( response, '<li><code>django.template.loaders.filesystem.Loader</code>: ' '%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'), status_code=500, html=True, ) def test_no_template_source_loaders(self): """ Make sure if you don't specify a template, the debug view doesn't blow up. """ with self.assertRaises(TemplateDoesNotExist): self.client.get('/render_no_template/') @override_settings(ROOT_URLCONF='view_tests.default_urls') def test_default_urlconf_template(self): """ Make sure that the default URLconf template is shown shown instead of the technical 404 page, if the user has not altered their URLconf yet. """ response = self.client.get('/') self.assertContains( response, "<h2>Congratulations on your first Django-powered page.</h2>" ) @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls') def test_regression_21530(self): """ Regression test for bug #21530. If the admin app include is replaced with exactly one url pattern, then the technical 404 template should be displayed. The bug here was that an AttributeError caused a 500 response. """ response = self.client.get('/') self.assertContains( response, "Page not found <span>(404)</span>", status_code=404 ) class DebugViewQueriesAllowedTests(SimpleTestCase): # May need a query to initialize MySQL connection allow_database_queries = True def test_handle_db_exception(self): """ Ensure the debug view works when a database exception is raised by performing an invalid query and passing the exception to the debug view. """ with connection.cursor() as cursor: try: cursor.execute('INVALID SQL') except DatabaseError: exc_info = sys.exc_info() rf = RequestFactory() response = technical_500_response(rf.get('/'), *exc_info) self.assertContains(response, 'OperationalError at /', status_code=500) @override_settings( DEBUG=True, ROOT_URLCONF='view_tests.urls', # No template directories are configured, so no templates will be found. TEMPLATES=[{ 'BACKEND': 'django.template.backends.dummy.TemplateStrings', }], ) class NonDjangoTemplatesDebugViewTests(SimpleTestCase): def test_400(self): # When DEBUG=True, technical_500_template() is called. with patch_logger('django.security.SuspiciousOperation', 'error'): response = self.client.get('/raises400/') self.assertContains(response, '<div class="context" id="', status_code=400) def test_403(self): response = self.client.get('/raises403/') self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403) def test_404(self): response = self.client.get('/raises404/') self.assertEqual(response.status_code, 404) def test_template_not_found_error(self): # Raises a TemplateDoesNotExist exception and shows the debug view. 
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"}) response = self.client.get(url) self.assertContains(response, '<div class="context" id="', status_code=500) class ExceptionReporterTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get('/test_view/') request.user = User() raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<h1>ValueError at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertIn('<h3 id="user-info">USER</h3>', html) self.assertIn('<p>jacob</p>', html) self.assertIn('<th>Exception Type:</th>', html) self.assertIn('<th>Exception Value:</th>', html) self.assertIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<h1>ValueError</h1>', html) self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html) self.assertNotIn('<th>Request Method:</th>', html) self.assertNotIn('<th>Request URL:</th>', html) self.assertNotIn('<h3 id="user-info">USER</h3>', html) self.assertIn('<th>Exception Type:</th>', html) self.assertIn('<th>Exception Value:</th>', html) self.assertIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertIn('<p>Request data not supplied</p>', html) def test_eol_support(self): """The ExceptionReporter supports Unix, Windows and Macintosh EOL markers""" LINES = list('print %d' % i for i in range(1, 6)) reporter = ExceptionReporter(None, None, None, None) for newline in ['\n', '\r\n', '\r']: fd, filename = tempfile.mkstemp(text=False) os.write(fd, force_bytes(newline.join(LINES) + newline)) os.close(fd) try: self.assertEqual( reporter._get_lines_from_file(filename, 3, 2), (1, LINES[1:3], LINES[3], LINES[4:]) ) finally: os.unlink(filename) def test_no_exception(self): "An exception report can be generated for just a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertIn('<h1>Report at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_reporting_of_nested_exceptions(self): request = self.rf.get('/test_view/') try: try: raise AttributeError('Top level') except AttributeError as explicit: try: raise ValueError('Second exception') from explicit except ValueError: raise IndexError('Final exception') except Exception: # Custom exception handler, just pass it into ExceptionReporter exc_type, exc_value, 
tb = sys.exc_info() explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:' implicit_exc = 'During handling of the above exception ({0}), another exception occurred:' reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() # Both messages are twice on page -- one rendered as html, # one as plain text (for pastebin) self.assertEqual(2, html.count(explicit_exc.format("Top level"))) self.assertEqual(2, html.count(implicit_exc.format("Second exception"))) text = reporter.get_traceback_text() self.assertIn(explicit_exc.format("Top level"), text) self.assertIn(implicit_exc.format("Second exception"), text) def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertIn('<h1>Report at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertIn('<h1>Report</h1>', html) self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html) self.assertNotIn('<th>Request Method:</th>', html) self.assertNotIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertIn('<p>Request data not supplied</p>', html) def test_non_utf8_values_handling(self): "Non-UTF-8 exceptions/values should not make the output generation choke." try: class NonUtf8Output(Exception): def __repr__(self): return b'EXC\xe9EXC' somevar = b'VAL\xe9VAL' # NOQA raise NonUtf8Output() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('VAL\\xe9VAL', html) self.assertIn('EXC\\xe9EXC', html) def test_unprintable_values_handling(self): "Unprintable values should not make the output generation choke." try: class OomOutput: def __repr__(self): raise MemoryError('OOM') oomvalue = OomOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<td class="code"><pre>Error in formatting', html) def test_too_large_values_handling(self): "Large values should not create a large HTML." 
large = 256 * 1024 repr_of_str_adds = len(repr('')) try: class LargeOutput: def __repr__(self): return repr('A' * large) largevalue = LargeOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb self.assertIn('&lt;trimmed %d bytes string&gt;' % (large + repr_of_str_adds,), html) def test_unfrozen_importlib(self): """ importlib is not a frozen app, but its loader thinks it's frozen which results in an ImportError. Refs #21443. """ try: request = self.rf.get('/test_view/') importlib.import_module('abc.def.invalid.name') except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<h1>%sError at /test_view/</h1>' % 'ModuleNotFound' if PY36 else 'Import', html) def test_ignore_traceback_evaluation_exceptions(self): """ Don't trip over exceptions generated by crafted objects when evaluating them while cleansing (#24455). """ class BrokenEvaluation(Exception): pass def broken_setup(): raise BrokenEvaluation request = self.rf.get('/test_view/') broken_lazy = SimpleLazyObject(broken_setup) try: bool(broken_lazy) except BrokenEvaluation: exc_type, exc_value, tb = sys.exc_info() self.assertIn( "BrokenEvaluation", ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(), "Evaluation exception reason not mentioned in traceback" ) @override_settings(ALLOWED_HOSTS='example.com') def test_disallowed_host(self): "An exception report can be generated even for a disallowed host." request = self.rf.get('/', HTTP_HOST='evil.com') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertIn("http://evil.com/", html) def test_request_with_items_key(self): """ An exception report can be generated for requests with 'items' in request GET, POST, FILES, or COOKIES QueryDicts. """ value = '<td>items</td><td class="code"><pre>&#39;Oops&#39;</pre></td>' # GET request = self.rf.get('/test_view/?items=Oops') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML(value, html) # POST request = self.rf.post('/test_view/', data={'items': 'Oops'}) reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML(value, html) # FILES fp = StringIO('filecontent') request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp}) reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML( '<td>items</td><td class="code"><pre>&lt;InMemoryUploadedFile: ' 'items (application/octet-stream)&gt;</pre></td>', html ) # COOKES rf = RequestFactory() rf.cookies['items'] = 'Oops' request = rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML('<td>items</td><td class="code"><pre>&#39;Oops&#39;</pre></td>', html) def test_exception_fetching_user(self): """ The error page can be rendered if the current user can't be retrieved (such as when the database is unavailable). 
""" class ExceptionUser: def __str__(self): raise Exception() request = self.rf.get('/test_view/') request.user = ExceptionUser() try: raise ValueError('Oops') except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<h1>ValueError at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">Oops</pre>', html) self.assertIn('<h3 id="user-info">USER</h3>', html) self.assertIn('<p>[unable to retrieve the current user]</p>', html) text = reporter.get_traceback_text() self.assertIn('USER: [unable to retrieve the current user]', text) class PlainTextReportTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get('/test_view/') request.user = User() raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn('ValueError at /test_view/', text) self.assertIn("Can't find my keys", text) self.assertIn('Request Method:', text) self.assertIn('Request URL:', text) self.assertIn('USER: jacob', text) self.assertIn('Exception Type:', text) self.assertIn('Exception Value:', text) self.assertIn('Traceback:', text) self.assertIn('Request information:', text) self.assertNotIn('Request data not supplied', text) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn('ValueError', text) self.assertIn("Can't find my keys", text) self.assertNotIn('Request Method:', text) self.assertNotIn('Request URL:', text) self.assertNotIn('USER:', text) self.assertIn('Exception Type:', text) self.assertIn('Exception Value:', text) self.assertIn('Traceback:', text) self.assertIn('Request data not supplied', text) def test_no_exception(self): "An exception report can be generated for just a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) reporter.get_traceback_text() def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, "I'm a little teapot", None) reporter.get_traceback_text() def test_request_with_items_key(self): """ An exception report can be generated for requests with 'items' in request GET, POST, FILES, or COOKIES QueryDicts. 
""" # GET request = self.rf.get('/test_view/?items=Oops') reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) # POST request = self.rf.post('/test_view/', data={'items': 'Oops'}) reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) # FILES fp = StringIO('filecontent') request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp}) reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn('items = <InMemoryUploadedFile:', text) # COOKES rf = RequestFactory() rf.cookies['items'] = 'Oops' request = rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) reporter.get_traceback_text() @override_settings(ALLOWED_HOSTS='example.com') def test_disallowed_host(self): "An exception report can be generated even for a disallowed host." request = self.rf.get('/', HTTP_HOST='evil.com') reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("http://evil.com/", text) class ExceptionReportTestMixin: # Mixin used in the ExceptionReporterFilterTests and # AjaxResponseExceptionReporterFilter tests below breakfast_data = {'sausage-key': 'sausage-value', 'baked-beans-key': 'baked-beans-value', 'hash-brown-key': 'hash-brown-value', 'bacon-key': 'bacon-value'} def verify_unsafe_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that potentially sensitive info are displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # All variables are shown. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertContains(response, 'scrambled', status_code=500) self.assertContains(response, 'sauce', status_code=500) self.assertContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters are shown. self.assertContains(response, k, status_code=500) self.assertContains(response, v, status_code=500) def verify_safe_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that certain sensitive info are not displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # Non-sensitive variable's name and value are shown. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertContains(response, 'scrambled', status_code=500) # Sensitive variable's name is shown but not its value. self.assertContains(response, 'sauce', status_code=500) self.assertNotContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertContains(response, k, status_code=500) # Non-sensitive POST parameters' values are shown. self.assertContains(response, 'baked-beans-value', status_code=500) self.assertContains(response, 'hash-brown-value', status_code=500) # Sensitive POST parameters' values are not shown. 
self.assertNotContains(response, 'sausage-value', status_code=500) self.assertNotContains(response, 'bacon-value', status_code=500) def verify_paranoid_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that no variables or POST parameters are displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # Show variable names but not their values. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertNotContains(response, 'scrambled', status_code=500) self.assertContains(response, 'sauce', status_code=500) self.assertNotContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertContains(response, k, status_code=500) # No POST parameters' values are shown. self.assertNotContains(response, v, status_code=500) def verify_unsafe_email(self, view, check_for_POST_params=True): """ Asserts that potentially sensitive info are displayed in the email report. """ with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]): mail.outbox = [] # Empty outbox request = self.rf.post('/some_url/', self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body_plain = str(email.body) self.assertNotIn('cooked_eggs', body_plain) self.assertNotIn('scrambled', body_plain) self.assertNotIn('sauce', body_plain) self.assertNotIn('worcestershire', body_plain) # Frames vars are shown in html email reports. body_html = str(email.alternatives[0][0]) self.assertIn('cooked_eggs', body_html) self.assertIn('scrambled', body_html) self.assertIn('sauce', body_html) self.assertIn('worcestershire', body_html) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters are shown. self.assertIn(k, body_plain) self.assertIn(v, body_plain) self.assertIn(k, body_html) self.assertIn(v, body_html) def verify_safe_email(self, view, check_for_POST_params=True): """ Asserts that certain sensitive info are not displayed in the email report. """ with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]): mail.outbox = [] # Empty outbox request = self.rf.post('/some_url/', self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body_plain = str(email.body) self.assertNotIn('cooked_eggs', body_plain) self.assertNotIn('scrambled', body_plain) self.assertNotIn('sauce', body_plain) self.assertNotIn('worcestershire', body_plain) # Frames vars are shown in html email reports. body_html = str(email.alternatives[0][0]) self.assertIn('cooked_eggs', body_html) self.assertIn('scrambled', body_html) self.assertIn('sauce', body_html) self.assertNotIn('worcestershire', body_html) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertIn(k, body_plain) # Non-sensitive POST parameters' values are shown. self.assertIn('baked-beans-value', body_plain) self.assertIn('hash-brown-value', body_plain) self.assertIn('baked-beans-value', body_html) self.assertIn('hash-brown-value', body_html) # Sensitive POST parameters' values are not shown. 
self.assertNotIn('sausage-value', body_plain) self.assertNotIn('bacon-value', body_plain) self.assertNotIn('sausage-value', body_html) self.assertNotIn('bacon-value', body_html) def verify_paranoid_email(self, view): """ Asserts that no variables or POST parameters are displayed in the email report. """ with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]): mail.outbox = [] # Empty outbox request = self.rf.post('/some_url/', self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body = str(email.body) self.assertNotIn('cooked_eggs', body) self.assertNotIn('scrambled', body) self.assertNotIn('sauce', body) self.assertNotIn('worcestershire', body) for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertIn(k, body) # No POST parameters' values are shown. self.assertNotIn(v, body) @override_settings(ROOT_URLCONF='view_tests.urls') class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase): """ Sensitive information can be filtered out of error reports (#14614). """ rf = RequestFactory() def test_non_sensitive_request(self): """ Everything (request info and frame variables) can bee seen in the default error reports for non-sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) def test_sensitive_request(self): """ Sensitive POST parameters and frame variables cannot be seen in the default error reports for sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view) self.verify_unsafe_email(sensitive_view) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view) self.verify_safe_email(sensitive_view) def test_paranoid_request(self): """ No POST parameters and frame variables can be seen in the default error reports for "paranoid" requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view) self.verify_unsafe_email(paranoid_view) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view) self.verify_paranoid_email(paranoid_view) def test_multivalue_dict_key_error(self): """ #21098 -- Sensitive POST parameters cannot be seen in the error reports for if request.POST['nonexistent_key'] throws an error. """ with self.settings(DEBUG=True): self.verify_unsafe_response(multivalue_dict_key_error) self.verify_unsafe_email(multivalue_dict_key_error) with self.settings(DEBUG=False): self.verify_safe_response(multivalue_dict_key_error) self.verify_safe_email(multivalue_dict_key_error) def test_custom_exception_reporter_filter(self): """ It's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. """ with self.settings(DEBUG=True): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) with self.settings(DEBUG=False): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) def test_sensitive_method(self): """ The sensitive_variables decorator works with object methods. 
""" with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False) self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_method_view, check_for_POST_params=False) self.verify_safe_email(sensitive_method_view, check_for_POST_params=False) def test_sensitive_function_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as arguments to the decorated function. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_args_function_caller) self.verify_unsafe_email(sensitive_args_function_caller) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False) self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False) def test_sensitive_function_keyword_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as keyword arguments to the decorated function. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_kwargs_function_caller) self.verify_unsafe_email(sensitive_kwargs_function_caller) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False) self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False) def test_callable_settings(self): """ Callable settings should not be evaluated in the debug page (#21345). """ def callable_setting(): return "This should not be displayed" with self.settings(DEBUG=True, FOOBAR=callable_setting): response = self.client.get('/raises500/') self.assertNotContains(response, "This should not be displayed", status_code=500) def test_callable_settings_forbidding_to_set_attributes(self): """ Callable settings which forbid to set attributes should not break the debug page (#23070). """ class CallableSettingWithSlots: __slots__ = [] def __call__(self): return "This should not be displayed" with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()): response = self.client.get('/raises500/') self.assertNotContains(response, "This should not be displayed", status_code=500) def test_dict_setting_with_non_str_key(self): """ A dict setting containing a non-string key should not break the debug page (#12744). """ with self.settings(DEBUG=True, FOOBAR={42: None}): response = self.client.get('/raises500/') self.assertContains(response, 'FOOBAR', status_code=500) def test_sensitive_settings(self): """ The debug page should not show some sensitive settings (password, secret key, ...). """ sensitive_settings = [ 'SECRET_KEY', 'PASSWORD', 'API_KEY', 'AUTH_TOKEN', ] for setting in sensitive_settings: with self.settings(DEBUG=True, **{setting: "should not be displayed"}): response = self.client.get('/raises500/') self.assertNotContains(response, 'should not be displayed', status_code=500) def test_settings_with_sensitive_keys(self): """ The debug page should filter out some sensitive information found in dict settings. 
""" sensitive_settings = [ 'SECRET_KEY', 'PASSWORD', 'API_KEY', 'AUTH_TOKEN', ] for setting in sensitive_settings: FOOBAR = { setting: "should not be displayed", 'recursive': {setting: "should not be displayed"}, } with self.settings(DEBUG=True, FOOBAR=FOOBAR): response = self.client.get('/raises500/') self.assertNotContains(response, 'should not be displayed', status_code=500) class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase): """ Sensitive information can be filtered out of error reports. Here we specifically test the plain text 500 debug-only error page served when it has been detected the request was sent by JS code. We don't check for (non)existence of frames vars in the traceback information section of the response content because we don't include them in these error pages. Refs #14614. """ rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest') def test_non_sensitive_request(self): """ Request info can bee seen in the default error reports for non-sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) def test_sensitive_request(self): """ Sensitive POST parameters cannot be seen in the default error reports for sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view, check_for_vars=False) def test_paranoid_request(self): """ No POST parameters can be seen in the default error reports for "paranoid" requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view, check_for_vars=False) def test_custom_exception_reporter_filter(self): """ It's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. """ with self.settings(DEBUG=True): self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False) class HelperFunctionTests(SimpleTestCase): def test_cleanse_setting_basic(self): self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST') self.assertEqual(cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE) def test_cleanse_setting_ignore_case(self): self.assertEqual(cleanse_setting('password', 'super_secret'), CLEANSED_SUBSTITUTE) def test_cleanse_setting_recurses_in_dictionary(self): initial = {'login': 'cooper', 'password': 'secret'} expected = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE} self.assertEqual(cleanse_setting('SETTING_NAME', initial), expected)
mattseymour/django
tests/view_tests/tests/test_debug.py
Python
bsd-3-clause
46,158
# Copyright (c) 2012, 2017 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Andreas Sandberg from m5 import fatal import m5.objects import inspect import sys from textwrap import TextWrapper # Dictionary of mapping names of real CPU models to classes. _cpu_classes = {} def is_cpu_class(cls): """Determine if a class is a CPU that can be instantiated""" # We can't use the normal inspect.isclass because the ParamFactory # and ProxyFactory classes have a tendency to confuse it. try: return issubclass(cls, m5.objects.BaseCPU) and \ not cls.abstract and \ not issubclass(cls, m5.objects.CheckerCPU) except (TypeError, AttributeError): return False def get(name): """Get a CPU class from a user provided class name or alias.""" try: cpu_class = _cpu_classes[name] return cpu_class except KeyError: print "%s is not a valid CPU model." % (name,) sys.exit(1) def print_cpu_list(): """Print a list of available CPU classes including their aliases.""" print "Available CPU classes:" doc_wrapper = TextWrapper(initial_indent="\t\t", subsequent_indent="\t\t") for name, cls in _cpu_classes.items(): print "\t%s" % name # Try to extract the class documentation from the class help # string. doc = inspect.getdoc(cls) if doc: for line in doc_wrapper.wrap(doc): print line def cpu_names(): """Return a list of valid CPU names.""" return _cpu_classes.keys() def config_etrace(cpu_cls, cpu_list, options): if issubclass(cpu_cls, m5.objects.DerivO3CPU): # Assign the same file name to all cpus for now. 
This must be # revisited when creating elastic traces for multi processor systems. for cpu in cpu_list: # Attach the elastic trace probe listener. Set the protobuf trace # file names. Set the dependency window size equal to the cpu it # is attached to. cpu.traceListener = m5.objects.ElasticTrace( instFetchTraceFile = options.inst_trace_file, dataDepTraceFile = options.data_trace_file, depWindowSize = 3 * cpu.numROBEntries) # Make the number of entries in the ROB, LQ and SQ very # large so that there are no stalls due to resource # limitation as such stalls will get captured in the trace # as compute delay. For replay, ROB, LQ and SQ sizes are # modelled in the Trace CPU. cpu.numROBEntries = 512; cpu.LQEntries = 128; cpu.SQEntries = 128; else: fatal("%s does not support data dependency tracing. Use a CPU model of" " type or inherited from DerivO3CPU.", cpu_cls) # Add all CPUs in the object hierarchy. for name, cls in inspect.getmembers(m5.objects, is_cpu_class): _cpu_classes[name] = cls from m5.defines import buildEnv from importlib import import_module for package in [ "generic", buildEnv['TARGET_ISA']]: try: package = import_module(".cores." + package, package=__package__) except ImportError: # No timing models for this ISA continue for mod_name, module in inspect.getmembers(package, inspect.ismodule): for name, cls in inspect.getmembers(module, is_cpu_class): _cpu_classes[name] = cls
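# --- Illustrative sketch (not part of the original file) ---
# A gem5 configuration script would typically resolve a CPU model through the
# helpers above. The `options` object is assumed to come from gem5's standard
# option parser, and this (like the module itself) only runs inside gem5.
def example_configure_cpus(system, options):
    if getattr(options, 'list_cpu_types', False):
        print_cpu_list()
        sys.exit(0)
    cpu_class = get(options.cpu_type)  # exits with an error message for unknown names
    system.cpu = [cpu_class(cpu_id=i) for i in range(options.num_cpus)]
    return cpu_class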
rallylee/gem5
configs/common/CpuConfig.py
Python
bsd-3-clause
5,477
from __future__ import absolute_import from django.conf import settings from rest_framework import serializers, status from rest_framework.response import Response from sentry.api.bases.user import UserEndpoint from sentry.api.serializers import serialize from sentry.models import User class BaseUserSerializer(serializers.ModelSerializer): def validate_username(self, attrs, source): value = attrs[source] if User.objects.filter(username__iexact=value).exclude(id=self.object.id).exists(): raise serializers.ValidationError('That username is already in use.') return attrs def validate(self, attrs): attrs = super(BaseUserSerializer, self).validate(attrs) if self.object.email == self.object.username: if attrs.get('username', self.object.email) != self.object.email: attrs.setdefault('email', attrs['username']) return attrs def restore_object(self, attrs, instance=None): instance = super(BaseUserSerializer, self).restore_object(attrs, instance) instance.is_active = attrs.get('isActive', instance.is_active) return instance class UserSerializer(BaseUserSerializer): class Meta: model = User fields = ('name', 'username', 'email') def validate_username(self, attrs, source): value = attrs[source] if User.objects.filter(username__iexact=value).exclude(id=self.object.id).exists(): raise serializers.ValidationError('That username is already in use.') return attrs def validate(self, attrs): for field in settings.SENTRY_MANAGED_USER_FIELDS: attrs.pop(field, None) attrs = super(UserSerializer, self).validate(attrs) return attrs class AdminUserSerializer(BaseUserSerializer): isActive = serializers.BooleanField(source='is_active') class Meta: model = User # no idea wtf is up with django rest framework, but we need is_active # and isActive fields = ('name', 'username', 'isActive', 'email') # write_only_fields = ('password',) class UserDetailsEndpoint(UserEndpoint): def get(self, request, user): data = serialize(user, request.user) return Response(data) def put(self, request, user): if request.is_superuser(): serializer_cls = AdminUserSerializer else: serializer_cls = UserSerializer serializer = serializer_cls(user, data=request.DATA, partial=True) if serializer.is_valid(): user = serializer.save() return Response(serialize(user, request.user)) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
daevaorn/sentry
src/sentry/api/endpoints/user_details.py
Python
bsd-3-clause
2,738
# this data should be from nws.
import pyeto

latitude_deg = 38.01
latitude = pyeto.deg2rad(latitude_deg)
day_of_year = 206
tmin = 37
tmax = 53
coastal = True
altitude = 147
rh_min = 13
rh_max = 88
ws = 1.3

tmean = pyeto.daily_mean_t(tmin, tmax)
atmos_pres = pyeto.atm_pressure(altitude)
psy = pyeto.psy_const(atmos_pres)

# Humidity
svp_tmin = pyeto.svp_from_t(tmin)
svp_tmax = pyeto.svp_from_t(tmax)
delta_svp = pyeto.delta_svp(tmean)
svp = pyeto.mean_svp(tmin, tmax)
avp = pyeto.avp_from_rhmin_rhmax(svp_tmin, svp_tmax, rh_min, rh_max)

# Radiation
sol_dec = pyeto.sol_dec(day_of_year)
sha = pyeto.sunset_hour_angle(latitude, sol_dec)
ird = pyeto.inv_rel_dist_earth_sun(day_of_year)
et_rad = pyeto.et_rad(latitude, sol_dec, sha, ird)
cs_rad = pyeto.cs_rad(altitude, et_rad)
sol_rad = pyeto.sol_rad_from_t(et_rad, cs_rad, tmin, tmax, coastal)
ni_sw_rad = pyeto.net_in_sol_rad(sol_rad)
no_lw_rad = pyeto.net_out_lw_rad(pyeto.celsius2kelvin(tmin), pyeto.celsius2kelvin(tmax), sol_rad, cs_rad, avp)
net_rad = pyeto.net_rad(ni_sw_rad, no_lw_rad)

eto = pyeto.fao56_penman_monteith(net_rad, pyeto.celsius2kelvin(tmean), ws, svp, avp, delta_svp, psy)

print eto
SB-Technology-Holdings-International/WateringWebClient
server/old-water.py
Python
bsd-3-clause
1,158
# Copyright (c) 2015, Activision Publishing, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from assertpy import assert_that,fail class TestDescription(object): def test_constructor(self): try: assert_that(1, 'extra msg').is_equal_to(2) fail('should have raised error') except AssertionError as ex: assert_that(str(ex)).is_equal_to('[extra msg] Expected <1> to be equal to <2>, but was not.') def test_described_as(self): try: assert_that(1).described_as('extra msg').is_equal_to(2) fail('should have raised error') except AssertionError as ex: assert_that(str(ex)).is_equal_to('[extra msg] Expected <1> to be equal to <2>, but was not.') def test_described_as_double(self): try: assert_that(1).described_as('extra msg').described_as('other msg').is_equal_to(2) fail('should have raised error') except AssertionError as ex: assert_that(str(ex)).is_equal_to('[other msg] Expected <1> to be equal to <2>, but was not.') def test_described_as_chained(self): try: assert_that(1).described_as('extra msg').is_equal_to(1).described_as('other msg').is_equal_to(1).described_as('last msg').is_equal_to(2) fail('should have raised error') except AssertionError as ex: assert_that(str(ex)).is_equal_to('[last msg] Expected <1> to be equal to <2>, but was not.')
wuan/assertpy
tests/test_description.py
Python
bsd-3-clause
2,938
""" Module for special-purpose plots. """ from __future__ import absolute_import from .hinton import hinton from .errorfill import errorfill __all__ = ['hinton', 'errorfill']
tonysyu/mpltools
mpltools/special/__init__.py
Python
bsd-3-clause
179
from custom.opm.constants import InvalidRow
from custom.opm.tests import OPMCaseReportTestBase, OPMCase, MockCaseRow


class TestInvalidDates(OPMCaseReportTestBase):

    def testBadEdd(self):
        case = OPMCase(
            forms=[],
            edd='this is not a date',
        )
        self.assertRaises(InvalidRow, MockCaseRow, case, self.report)

    def testBadDod(self):
        case = OPMCase(
            forms=[],
            dod='this is not a date',
        )
        self.assertRaises(InvalidRow, MockCaseRow, case, self.report)
puttarajubr/commcare-hq
custom/opm/tests/test_regressions.py
Python
bsd-3-clause
548
import os import typing as t from warnings import warn from .app import Flask from .blueprints import Blueprint from .globals import _request_ctx_stack class UnexpectedUnicodeError(AssertionError, UnicodeError): """Raised in places where we want some better error reporting for unexpected unicode or binary data. """ class DebugFilesKeyError(KeyError, AssertionError): """Raised from request.files during debugging. The idea is that it can provide a better error message than just a generic KeyError/BadRequest. """ def __init__(self, request, key): form_matches = request.form.getlist(key) buf = [ f"You tried to access the file {key!r} in the request.files" " dictionary but it does not exist. The mimetype for the" f" request is {request.mimetype!r} instead of" " 'multipart/form-data' which means that no file contents" " were transmitted. To fix this error you should provide" ' enctype="multipart/form-data" in your form.' ] if form_matches: names = ", ".join(repr(x) for x in form_matches) buf.append( "\n\nThe browser instead transmitted some file names. " f"This was submitted: {names}" ) self.msg = "".join(buf) def __str__(self): return self.msg class FormDataRoutingRedirect(AssertionError): """This exception is raised by Flask in debug mode if it detects a redirect caused by the routing system when the request method is not GET, HEAD or OPTIONS. Reasoning: form data will be dropped. """ def __init__(self, request): exc = request.routing_exception buf = [ f"A request was sent to this URL ({request.url}) but a" " redirect was issued automatically by the routing system" f" to {exc.new_url!r}." ] # In case just a slash was appended we can be extra helpful if f"{request.base_url}/" == exc.new_url.split("?")[0]: buf.append( " The URL was defined with a trailing slash so Flask" " will automatically redirect to the URL with the" " trailing slash if it was accessed without one." ) buf.append( " Make sure to directly send your" f" {request.method}-request to this URL since we can't make" " browsers or HTTP clients redirect with form data reliably" " or without user interaction." ) buf.append("\n\nNote: this exception is only raised in debug mode") AssertionError.__init__(self, "".join(buf).encode("utf-8")) def attach_enctype_error_multidict(request): """Since Flask 0.8 we're monkeypatching the files object in case a request is detected that does not use multipart form data but the files object is accessed. 
""" oldcls = request.files.__class__ class newcls(oldcls): def __getitem__(self, key): try: return oldcls.__getitem__(self, key) except KeyError: if key not in request.form: raise raise DebugFilesKeyError(request, key) newcls.__name__ = oldcls.__name__ newcls.__module__ = oldcls.__module__ request.files.__class__ = newcls def _dump_loader_info(loader) -> t.Generator: yield f"class: {type(loader).__module__}.{type(loader).__name__}" for key, value in sorted(loader.__dict__.items()): if key.startswith("_"): continue if isinstance(value, (tuple, list)): if not all(isinstance(x, str) for x in value): continue yield f"{key}:" for item in value: yield f" - {item}" continue elif not isinstance(value, (str, int, float, bool)): continue yield f"{key}: {value!r}" def explain_template_loading_attempts(app: Flask, template, attempts) -> None: """This should help developers understand what failed""" info = [f"Locating template {template!r}:"] total_found = 0 blueprint = None reqctx = _request_ctx_stack.top if reqctx is not None and reqctx.request.blueprint is not None: blueprint = reqctx.request.blueprint for idx, (loader, srcobj, triple) in enumerate(attempts): if isinstance(srcobj, Flask): src_info = f"application {srcobj.import_name!r}" elif isinstance(srcobj, Blueprint): src_info = f"blueprint {srcobj.name!r} ({srcobj.import_name})" else: src_info = repr(srcobj) info.append(f"{idx + 1:5}: trying loader of {src_info}") for line in _dump_loader_info(loader): info.append(f" {line}") if triple is None: detail = "no match" else: detail = f"found ({triple[1] or '<string>'!r})" total_found += 1 info.append(f" -> {detail}") seems_fishy = False if total_found == 0: info.append("Error: the template could not be found.") seems_fishy = True elif total_found > 1: info.append("Warning: multiple loaders returned a match for the template.") seems_fishy = True if blueprint is not None and seems_fishy: info.append( " The template was looked up from an endpoint that belongs" f" to the blueprint {blueprint!r}." ) info.append(" Maybe you did not place a template in the right folder?") info.append(" See https://flask.palletsprojects.com/blueprints/#templates") app.logger.info("\n".join(info)) def explain_ignored_app_run() -> None: if os.environ.get("WERKZEUG_RUN_MAIN") != "true": warn( Warning( "Silently ignoring app.run() because the application is" " run from the flask command line executable. Consider" ' putting app.run() behind an if __name__ == "__main__"' " guard to silence this warning." ), stacklevel=3, )
mitsuhiko/flask
src/flask/debughelpers.py
Python
bsd-3-clause
6,171
# -*- coding: utf-8 -*- from __future__ import absolute_import import pytest from sentry import eventstore from sentry.event_manager import EventManager @pytest.fixture def make_sdk_snapshot(insta_snapshot): def inner(data): mgr = EventManager(data={"sdk": data}) mgr.normalize() evt = eventstore.create_event(data=mgr.get_data()) insta_snapshot( {"errors": evt.data.get("errors"), "to_json": evt.interfaces.get("sdk").to_json()} ) return inner def test_serialize_behavior(make_sdk_snapshot): make_sdk_snapshot( { "name": "sentry-java", "version": "1.0", "integrations": ["log4j"], "packages": [{"name": "maven:io.sentry.sentry", "version": "1.7.10"}], } ) def test_missing_name(make_sdk_snapshot): make_sdk_snapshot({"version": "1.0"}) def test_missing_version(make_sdk_snapshot): make_sdk_snapshot({"name": "sentry-unity"})
beeftornado/sentry
tests/sentry/event_manager/interfaces/test_sdk.py
Python
bsd-3-clause
978
import socket import threading from django.core.handlers.wsgi import WSGIHandler from django.core.management import call_command from django.core.servers import basehttp from django.db import connections from django.test.testcases import TransactionTestCase, TestCase import socket class StoppableWSGIServer(basehttp.WSGIServer): """WSGIServer with short timeout, so that server thread can stop this server.""" def server_bind(self): """Sets timeout to 1 second.""" basehttp.WSGIServer.server_bind(self) self.socket.settimeout(1) def get_request(self): """Checks for timeout when getting request.""" try: sock, address = self.socket.accept() sock.settimeout(None) return (sock, address) except socket.timeout: raise class TestServerThread(threading.Thread): """Thread for running a http server while tests are running.""" def __init__(self, address, port): self.address = address self.port = port self._stopevent = threading.Event() self.started = threading.Event() self.error = None super(TestServerThread, self).__init__() def run(self): """Sets up test server and database and loops over handling http requests.""" try: handler = WSGIHandler() server_address = (self.address, self.port) httpd = StoppableWSGIServer(server_address, basehttp.WSGIRequestHandler) httpd.set_app(handler) self.started.set() except socket.error as e: self.error = e self.started.set() return # Must do database stuff in this new thread if database in memory. from django.conf import settings db = settings.DATABASES['default'] ENGINE = db['ENGINE'] TEST_NAME = db.get('TEST_NAME') if ('sqlite3' in ENGINE or 'spatialite' in ENGINE) \ and (not TEST_NAME or TEST_NAME == ':memory:'): if 'spatialite' in ENGINE: cursor = connections['default'].cursor() cursor.execute('SELECT InitSpatialMetaData()') row = cursor.fetchone() call_command('syncdb', interactive=False, verbosity=0) # Import the fixture data into the test database. if hasattr(self, 'fixtures'): # We have to use this slightly awkward syntax due to the fact # that we're using *args and **kwargs together. call_command('loaddata', *self.fixtures, **{'verbosity': 0}) # Loop until we get a stop event. while not self._stopevent.isSet(): httpd.handle_request() def join(self, timeout=None): """Stop the thread and wait for it to finish.""" self._stopevent.set() threading.Thread.join(self, timeout) class TestServerTestCase(TransactionTestCase): fixtures = ['test_data.json'] def start_test_server(self, address='localhost', port=8000): """Creates a live test server object (instance of WSGIServer).""" self.server_thread = TestServerThread(address, port) self.server_thread.fixtures = self.fixtures self.server_thread.start() self.server_thread.started.wait() if self.server_thread.error: raise self.server_thread.error def stop_test_server(self): if self.server_thread: self.server_thread.join() class TestCaseWithFixture(TestCase): fixtures = ['test_data.json']
strets123/django-tastypie-tweaks
tests/testcases.py
Python
bsd-3-clause
3,622
from unittest import main from qiita_pet.test.tornado_test_base import TestHandlerBase class TestAuthCreateHandler(TestHandlerBase): database = True def test_get(self): response = self.get('/auth/create/') self.assertEqual(response.code, 200) def test_post(self): post_args = { 'email': 'newuser@foo.bar', 'newpass': 'password' } response = self.post('/auth/create/', post_args) # Make sure page response loaded sucessfully self.assertEqual(response.code, 200) class TestAuthVerifyHandler(TestHandlerBase): def test_get(self): response = self.get('/auth/verify/SOMETHINGHERE?email=test%40foo.bar') self.assertEqual(response.code, 200) class TestAuthLoginHandler(TestHandlerBase): def test_get(self): response = self.get('/auth/login/') self.assertEqual(response.code, 200) # make sure redirect happened properly port = self.get_http_port() self.assertEqual(response.effective_url, 'http://localhost:%d/' % port) def test_post_correct_pass(self): post_args = { 'username': 'test@foo.bar', 'passwd': 'password', 'next': '/' } response = self.post('/auth/login/', post_args) self.assertEqual(response.code, 200) def test_post_wrong_pass(self): post_args = { 'username': 'test@foo.bar', 'passwd': 'wrongpass', 'next': '/' } response = self.post('/auth/login/', post_args) self.assertEqual(response.code, 200) def test_set_current_user(self): # TODO: add proper test for this once figure out how. Issue 567 pass class TestAuthLogoutHandler(TestHandlerBase): def test_get(self): response = self.get('/auth/login/') self.assertEqual(response.code, 200) # make sure redirect happened properly port = self.get_http_port() self.assertEqual(response.effective_url, 'http://localhost:%d/' % port) if __name__ == "__main__": main()
RNAer/qiita
qiita_pet/test/test_auth_handlers.py
Python
bsd-3-clause
2,102
from .observation import Observation
from . import galaxymakers
from . import blendmakers
from . import medsmakers
from . import psfmakers
from . import utils
kstory8/egret
egret/__init__.py
Python
bsd-3-clause
159
""" websocket - WebSocket client library for Python Copyright (C) 2010 Hiroki Ohtani(liris) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA """ from __future__ import print_function import socket import struct import threading import time import six # websocket modules from ._abnf import * from ._exceptions import * from ._handshake import * from ._http import * from ._logging import * from ._socket import * from ._ssl_compat import * from ._utils import * __all__ = ['WebSocket', 'create_connection'] """ websocket python client. ========================= This version support only hybi-13. Please see http://tools.ietf.org/html/rfc6455 for protocol. """ class WebSocket(object): """ Low level WebSocket interface. This class is based on The WebSocket protocol draft-hixie-thewebsocketprotocol-76 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76 We can connect to the websocket server and send/receive data. The following example is an echo client. >>> import websocket >>> ws = websocket.WebSocket() >>> ws.connect("ws://echo.websocket.org") >>> ws.send("Hello, Server") >>> ws.recv() 'Hello, Server' >>> ws.close() get_mask_key: a callable to produce new mask keys, see the set_mask_key function's docstring for more details sockopt: values for socket.setsockopt. sockopt must be tuple and each element is argument of sock.setsockopt. sslopt: dict object for ssl socket option. fire_cont_frame: fire recv event for each cont frame. default is False enable_multithread: if set to True, lock send method. skip_utf8_validation: skip utf8 validation. """ def __init__(self, get_mask_key=None, sockopt=None, sslopt=None, fire_cont_frame=False, enable_multithread=False, skip_utf8_validation=False, **_): """ Initialize WebSocket object. """ self.sock_opt = sock_opt(sockopt, sslopt) self.handshake_response = None self.sock = None self.connected = False self.get_mask_key = get_mask_key # These buffer over the build-up of a single frame. self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation) self.cont_frame = continuous_frame( fire_cont_frame, skip_utf8_validation) if enable_multithread: self.lock = threading.Lock() self.readlock = threading.Lock() else: self.lock = NoLock() self.readlock = NoLock() def __iter__(self): """ Allow iteration over websocket, implying sequential `recv` executions. """ while True: yield self.recv() def __next__(self): return self.recv() def next(self): return self.__next__() def fileno(self): return self.sock.fileno() def set_mask_key(self, func): """ set function to create musk key. You can customize mask key generator. Mainly, this is for testing purpose. func: callable object. the func takes 1 argument as integer. The argument means length of mask key. This func must return string(byte array), which length is argument specified. """ self.get_mask_key = func def gettimeout(self): """ Get the websocket timeout(second). 
""" return self.sock_opt.timeout def settimeout(self, timeout): """ Set the timeout to the websocket. timeout: timeout time(second). """ self.sock_opt.timeout = timeout if self.sock: self.sock.settimeout(timeout) timeout = property(gettimeout, settimeout) def getsubprotocol(self): """ get subprotocol """ if self.handshake_response: return self.handshake_response.subprotocol else: return None subprotocol = property(getsubprotocol) def getstatus(self): """ get handshake status """ if self.handshake_response: return self.handshake_response.status else: return None status = property(getstatus) def getheaders(self): """ get handshake response header """ if self.handshake_response: return self.handshake_response.headers else: return None def is_ssl(self): return isinstance(self.sock, ssl.SSLSocket) headers = property(getheaders) def connect(self, url, **options): """ Connect to url. url is websocket url scheme. ie. ws://host:port/resource You can customize using 'options'. If you set "header" list object, you can set your own custom header. >>> ws = WebSocket() >>> ws.connect("ws://echo.websocket.org/", ... header=["User-Agent: MyProgram", ... "x-custom: header"]) timeout: socket timeout time. This value is integer. if you set None for this value, it means "use default_timeout value" options: "header" -> custom http header list or dict. "cookie" -> cookie value. "origin" -> custom origin url. "suppress_origin" -> suppress outputting origin header. "host" -> custom host header string. "http_proxy_host" - http proxy host name. "http_proxy_port" - http proxy port. If not set, set to 80. "http_no_proxy" - host names, which doesn't use proxy. "http_proxy_auth" - http proxy auth information. tuple of username and password. default is None "redirect_limit" -> number of redirects to follow. "subprotocols" - array of available sub protocols. default is None. "socket" - pre-initialized stream socket. """ # FIXME: "subprotocols" are getting lost, not passed down # FIXME: "header", "cookie", "origin" and "host" too self.sock_opt.timeout = options.get('timeout', self.sock_opt.timeout) self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options), options.pop('socket', None)) try: self.handshake_response = handshake(self.sock, *addrs, **options) for attempt in range(options.pop('redirect_limit', 3)): if self.handshake_response.status in SUPPORTED_REDIRECT_STATUSES: url = self.handshake_response.headers['location'] self.sock.close() self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options), options.pop('socket', None)) self.handshake_response = handshake(self.sock, *addrs, **options) self.connected = True except: if self.sock: self.sock.close() self.sock = None raise def send(self, payload, opcode=ABNF.OPCODE_TEXT): """ Send the data as string. payload: Payload must be utf-8 string or unicode, if the opcode is OPCODE_TEXT. Otherwise, it must be string(byte array) opcode: operation code to send. Please see OPCODE_XXX. """ frame = ABNF.create_frame(payload, opcode) return self.send_frame(frame) def send_frame(self, frame): """ Send the data frame. 
frame: frame data created by ABNF.create_frame >>> ws = create_connection("ws://echo.websocket.org/") >>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT) >>> ws.send_frame(frame) >>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0) >>> ws.send_frame(frame) >>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1) >>> ws.send_frame(frame) """ if self.get_mask_key: frame.get_mask_key = self.get_mask_key data = frame.format() length = len(data) trace("send: " + repr(data)) with self.lock: while data: l = self._send(data) data = data[l:] return length def send_binary(self, payload): return self.send(payload, ABNF.OPCODE_BINARY) def ping(self, payload=""): """ send ping data. payload: data payload to send server. """ if isinstance(payload, six.text_type): payload = payload.encode("utf-8") self.send(payload, ABNF.OPCODE_PING) def pong(self, payload): """ send pong data. payload: data payload to send server. """ if isinstance(payload, six.text_type): payload = payload.encode("utf-8") self.send(payload, ABNF.OPCODE_PONG) def recv(self): """ Receive string data(byte array) from the server. return value: string(byte array) value. """ with self.readlock: opcode, data = self.recv_data() if six.PY3 and opcode == ABNF.OPCODE_TEXT: return data.decode("utf-8") elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY: return data else: return '' def recv_data(self, control_frame=False): """ Receive data with operation code. control_frame: a boolean flag indicating whether to return control frame data, defaults to False return value: tuple of operation code and string(byte array) value. """ opcode, frame = self.recv_data_frame(control_frame) return opcode, frame.data def recv_data_frame(self, control_frame=False): """ Receive data with operation code. control_frame: a boolean flag indicating whether to return control frame data, defaults to False return value: tuple of operation code and string(byte array) value. """ while True: frame = self.recv_frame() if not frame: # handle error: # 'NoneType' object has no attribute 'opcode' raise WebSocketProtocolException( "Not a valid frame %s" % frame) elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT): self.cont_frame.validate(frame) self.cont_frame.add(frame) if self.cont_frame.is_fire(frame): return self.cont_frame.extract(frame) elif frame.opcode == ABNF.OPCODE_CLOSE: self.send_close() return frame.opcode, frame elif frame.opcode == ABNF.OPCODE_PING: if len(frame.data) < 126: self.pong(frame.data) else: raise WebSocketProtocolException( "Ping message is too long") if control_frame: return frame.opcode, frame elif frame.opcode == ABNF.OPCODE_PONG: if control_frame: return frame.opcode, frame def recv_frame(self): """ receive data as frame from server. return value: ABNF frame object. """ return self.frame_buffer.recv_frame() def send_close(self, status=STATUS_NORMAL, reason=six.b("")): """ send close data to the server. status: status code to send. see STATUS_XXX. reason: the reason to close. This must be string or bytes. """ if status < 0 or status >= ABNF.LENGTH_16: raise ValueError("code is invalid range") self.connected = False self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE) def close(self, status=STATUS_NORMAL, reason=six.b(""), timeout=3): """ Close Websocket object status: status code to send. see STATUS_XXX. reason: the reason to close. This must be string. timeout: timeout until receive a close frame. If None, it will wait forever until receive a close frame. 
""" if self.connected: if status < 0 or status >= ABNF.LENGTH_16: raise ValueError("code is invalid range") try: self.connected = False self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE) sock_timeout = self.sock.gettimeout() self.sock.settimeout(timeout) start_time = time.time() while timeout is None or time.time() - start_time < timeout: try: frame = self.recv_frame() if frame.opcode != ABNF.OPCODE_CLOSE: continue if isEnabledForError(): recv_status = struct.unpack("!H", frame.data[0:2])[0] if recv_status != STATUS_NORMAL: error("close status: " + repr(recv_status)) break except: break self.sock.settimeout(sock_timeout) self.sock.shutdown(socket.SHUT_RDWR) except: pass self.shutdown() def abort(self): """ Low-level asynchronous abort, wakes up other threads that are waiting in recv_* """ if self.connected: self.sock.shutdown(socket.SHUT_RDWR) def shutdown(self): """close socket, immediately.""" if self.sock: self.sock.close() self.sock = None self.connected = False def _send(self, data): return send(self.sock, data) def _recv(self, bufsize): try: return recv(self.sock, bufsize) except WebSocketConnectionClosedException: if self.sock: self.sock.close() self.sock = None self.connected = False raise def create_connection(url, timeout=None, class_=WebSocket, **options): """ connect to url and return websocket object. Connect to url and return the WebSocket object. Passing optional timeout parameter will set the timeout on the socket. If no timeout is supplied, the global default timeout setting returned by getdefauttimeout() is used. You can customize using 'options'. If you set "header" list object, you can set your own custom header. >>> conn = create_connection("ws://echo.websocket.org/", ... header=["User-Agent: MyProgram", ... "x-custom: header"]) timeout: socket timeout time. This value is integer. if you set None for this value, it means "use default_timeout value" class_: class to instantiate when creating the connection. It has to implement settimeout and connect. It's __init__ should be compatible with WebSocket.__init__, i.e. accept all of it's kwargs. options: "header" -> custom http header list or dict. "cookie" -> cookie value. "origin" -> custom origin url. "suppress_origin" -> suppress outputting origin header. "host" -> custom host header string. "http_proxy_host" - http proxy host name. "http_proxy_port" - http proxy port. If not set, set to 80. "http_no_proxy" - host names, which doesn't use proxy. "http_proxy_auth" - http proxy auth information. tuple of username and password. default is None "enable_multithread" -> enable lock for multithread. "redirect_limit" -> number of redirects to follow. "sockopt" -> socket options "sslopt" -> ssl option "subprotocols" - array of available sub protocols. default is None. "skip_utf8_validation" - skip utf8 validation. "socket" - pre-initialized stream socket. """ sockopt = options.pop("sockopt", []) sslopt = options.pop("sslopt", {}) fire_cont_frame = options.pop("fire_cont_frame", False) enable_multithread = options.pop("enable_multithread", False) skip_utf8_validation = options.pop("skip_utf8_validation", False) websock = class_(sockopt=sockopt, sslopt=sslopt, fire_cont_frame=fire_cont_frame, enable_multithread=enable_multithread, skip_utf8_validation=skip_utf8_validation, **options) websock.settimeout(timeout if timeout is not None else getdefaulttimeout()) websock.connect(url, **options) return websock
youtube/cobalt
third_party/websocket-client/websocket/_core.py
Python
bsd-3-clause
17,905
from gpiozero import Servo
from time import sleep

servo = Servo(17)

while True:
    servo.min()
    sleep(2)
    servo.mid()
    sleep(2)
    servo.max()
    sleep(2)
RPi-Distro/python-gpiozero
docs/examples/servo_1.py
Python
bsd-3-clause
169
# -*- coding: utf-8 -*- """ .. _ex-ssp-proj: ================================== Sensitivity map of SSP projections ================================== This example shows the sources that have a forward field similar to the first SSP vector correcting for ECG. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # # License: BSD-3-Clause # %% import matplotlib.pyplot as plt from mne import read_forward_solution, read_proj, sensitivity_map from mne.datasets import sample print(__doc__) data_path = sample.data_path() subjects_dir = data_path / 'subjects' meg_path = data_path / 'MEG' / 'sample' fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' ecg_fname = meg_path / 'sample_audvis_ecg-proj.fif' fwd = read_forward_solution(fname) projs = read_proj(ecg_fname) # take only one projection per channel type projs = projs[::2] # Compute sensitivity map ssp_ecg_map = sensitivity_map(fwd, ch_type='grad', projs=projs, mode='angle') # %% # Show sensitivity map plt.hist(ssp_ecg_map.data.ravel()) plt.show() args = dict(clim=dict(kind='value', lims=(0.2, 0.6, 1.)), smoothing_steps=7, hemi='rh', subjects_dir=subjects_dir) ssp_ecg_map.plot(subject='sample', time_label='ECG SSP sensitivity', **args)
mne-tools/mne-python
examples/visualization/ssp_projs_sensitivity_map.py
Python
bsd-3-clause
1,240
# -*- coding: utf-8 -*- # # gandalf documentation build configuration file, created by # sphinx-quickstart on Mon Jan 14 20:44:50 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'gandalf' copyright = u'2013, globo.com' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'gandalfdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'gandalf.tex', u'gandalf Documentation', u'globo.com', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'gandalf', u'gandalf Documentation', [u'globo.com'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'gandalf', u'gandalf Documentation', u'globo.com', 'gandalf', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
marcelometal/gandalf
docs/source/conf.py
Python
bsd-3-clause
7,702
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.

# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.

# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.

# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant

# Please feed your node list here:
tagLst = ['library_visual_scenes', 'visual_scene']
attrName = 'id'
attrVal = ''
dataToCheck = ''


class SimpleJudgingObject:

    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        # No step should crash.
        self.__assistant.CheckCrashes(context)

        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])

        if (self.__assistant.GetResults() == False):
            self.status_baseline = False
            return False

        # Check for preservation of element
        self.__assistant.AttributePreserved(context, self.tagList, self.attrName)
        self.status_baseline = self.__assistant.DeferJudgement(context)
        return self.status_baseline

    # To pass intermediate you need to pass basic, this object could also include additional
    # tests that were specific to the intermediate badge.
    def JudgeSuperior(self, context):
        self.status_superior = self.status_baseline
        return self.status_superior

    # To pass advanced you need to pass intermediate, this object could also include additional
    # tests that were specific to the advanced badge
    def JudgeExemplary(self, context):
        self.status_exemplary = self.status_superior
        return self.status_exemplary


# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
KhronosGroup/COLLADA-CTS
StandardDataSets/1_5/collada/library_visual_scenes/visual_scene/id/id_underscore/id_underscore.py
Python
mit
3,780
from .xcode import XcodePlatform import platform class iOSPlatform(XcodePlatform): def __init__(self, parameters={}): XcodePlatform.__init__(self, parameters) @staticmethod def identifier(): return 'ios' @staticmethod def sdk(): return 'iphoneos' @staticmethod def os_name(): return 'ios' @staticmethod def minimum_version(): return '7.0' @staticmethod def add_arguments(parser): parser.add_argument('--minimum-ios-version', default=iOSPlatform.minimum_version(), help='the minimum iOS version to build for') @staticmethod def detection_macro(architecture): if architecture == 'arm64': return 'TARGET_OS_IOS && !TARGET_OS_SIMULATOR && __LP64__' elif architecture == 'armv7': return 'TARGET_OS_IOS && !TARGET_OS_SIMULATOR && !__LP64__' return None def default_architecture(self): return 'arm64' class iOSSimulatorPlatform(iOSPlatform): @staticmethod def identifier(): return 'iossimulator' @staticmethod def sdk(): return 'iphonesimulator' @staticmethod def add_arguments(parser): pass @staticmethod def detection_macro(architecture): if architecture == 'x86_64': return 'TARGET_OS_IOS && TARGET_OS_SIMULATOR && __LP64__' elif architecture == 'i386': return 'TARGET_OS_IOS && TARGET_OS_SIMULATOR && !__LP64__' return None def default_architecture(self): return platform.machine().lower()
vmrob/needy
needy/platforms/ios.py
Python
mit
1,607
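A short sketch of how the static helpers compose, assuming the repo layout above (needy/platforms/ios.py) is importable. detection_macro() returns a C preprocessor expression (or None for architectures the platform does not claim), and add_arguments() wires the minimum-version flag into an argparse parser.

# Sketch, assuming the needy package above is on the import path.
import argparse
from needy.platforms.ios import iOSPlatform, iOSSimulatorPlatform

parser = argparse.ArgumentParser()
iOSPlatform.add_arguments(parser)  # adds --minimum-ios-version, defaulting to 7.0
args = parser.parse_args(['--minimum-ios-version', '9.0'])
print(args.minimum_ios_version)

# Each platform only claims the architectures it can detect; unknown ones yield None.
for arch in ('arm64', 'armv7', 'x86_64', 'i386'):
    print(arch, iOSPlatform.detection_macro(arch), iOSSimulatorPlatform.detection_macro(arch))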
''' Created on 2013-4-21 @author: Xsank ''' import os import re import tokenize from exception import TemplateError from util import tou,abort,html_escape from config import TEMPLATES,TEMPLATE_PATH,DEBUG class BaseTemplate(object): extentions = ['tpl','html'] settings = {} defaults = {} def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings): self.name = name self.source = source.read() if hasattr(source, 'read') else source self.filename = source.filename if hasattr(source, 'filename') else None self.lookup = map(os.path.abspath, lookup) self.encoding = encoding self.settings = self.settings.copy() self.settings.update(settings) if not self.source and self.name: self.filename = self.search(self.name, self.lookup) if not self.filename: raise TemplateError('Template %s not found.' % repr(name)) if not self.source and not self.filename: raise TemplateError('No template specified.') self.prepare(**self.settings) @classmethod def search(cls, name, lookup=[]): if os.path.isfile(name): return name for spath in lookup: fname = os.path.join(spath, name) if os.path.isfile(fname): return fname for ext in cls.extentions: if os.path.isfile('%s.%s' % (fname, ext)): return '%s.%s' % (fname, ext) @classmethod def global_config(cls, key, *args): if args: cls.settings[key] = args[0] else: return cls.settings[key] def prepare(self, **options): raise NotImplementedError def render(self, **args): raise NotImplementedError class SimpleTemplate(BaseTemplate): blocks = ('if','elif','else','try','except','finally','for','while','with','def','class') dedent_blocks = ('elif', 'else', 'except', 'finally') def prepare(self, escape_func=html_escape, noescape=False): self.cache = {} if self.source: self.code = self.translate(self.source) self.co = compile(self.code, '<string>', 'exec') else: self.code = self.translate(open(self.filename).read()) self.co = compile(self.code, self.filename, 'exec') enc = self.encoding self._str = lambda x: tou(x, enc) #use html escape ,but something infect did wrong #self._escape = lambda x: escape_func(tou(x, enc)) self._escape=lambda x:tou(x,enc) if noescape: self._str, self._escape = self._escape, self._str def translate(self, template): stack = [] lineno = 0 ptrbuffer = [] codebuffer = [] oneline=multiline = dedent = False def yield_tokens(line): for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)): if i % 2: if part.startswith('!'): yield 'RAW', part[1:] else: yield 'CMD', part else: yield 'TXT', part def split_comment(codeline): line = codeline.splitlines()[0] try: tokens = list(tokenize.generate_tokens(iter(line).next)) except tokenize.TokenError: return line.rsplit('#',1) if '#' in line else (line, '') for token in tokens: if token[0] == tokenize.COMMENT: start, end = token[2][1], token[3][1] return codeline[:start] + codeline[end:], codeline[start:end] return line, '' def flush(): if not ptrbuffer: return cline = '' for line in ptrbuffer: for token, value in line: if token == 'TXT': cline += repr(value) elif token == 'RAW': cline += '_str(%s)' % value elif token == 'CMD': cline += '_escape(%s)' % value cline += ', ' cline = cline[:-2] + '\\\n' cline = cline[:-2] if cline[:-1].endswith('\\\\\\\\\\n'): cline = cline[:-7] + cline[-1] cline = '_printlist([' + cline + '])' del ptrbuffer[:] code(cline) def code(stmt): for line in stmt.splitlines(): codebuffer.append(' ' * len(stack) + line.strip()) for line in template.splitlines(True): lineno += 1 line = line if isinstance(line, unicode)\ else unicode(line, encoding=self.encoding) if lineno <= 2: m = 
re.search(r"%.*coding[:=]\s*([-\w\.]+)", line) if m: self.encoding = m.group(1) if m: line = line.replace('coding','coding (removed)') if line.strip()[:2].count('%') == 1: line = line.split('%',1)[1].lstrip() cline = split_comment(line)[0].strip() cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0] flush() if cmd in self.blocks or multiline: cmd = multiline or cmd dedent = cmd in self.dedent_blocks if dedent and not oneline and not multiline: cmd = stack.pop() code(line) oneline = not cline.endswith(':') multiline = cmd if cline.endswith('\\') else False if not oneline and not multiline: stack.append(cmd) elif cmd == 'end' and stack: code('#end(%s) %s' % (stack.pop(), line.strip()[3:])) elif cmd == 'include': p = cline.split(None, 2)[1:] if len(p) == 2: code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1])) elif p: code("_=_include(%s, _stdout)" % repr(p[0])) else: code("_printlist(_base)") elif cmd == 'rebase': p = cline.split(None, 2)[1:] if len(p) == 2: code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1])) elif p: code("globals()['_rebase']=(%s, {})" % repr(p[0])) else: code(line) else: if line.strip().startswith('%%'): line = line.replace('%%', '%', 1) ptrbuffer.append(yield_tokens(line)) flush() return '\n'.join(codebuffer) + '\n' def subtemplate(self, _name, _stdout, **args): if _name not in self.cache: self.cache[_name] = self.__class__(name=_name, lookup=self.lookup) return self.cache[_name].execute(_stdout, **args) def execute(self, _stdout, **args): #_stdout change the html code env = self.defaults.copy() env.update({'_stdout': _stdout, '_printlist': _stdout.extend, '_include': self.subtemplate, '_str': self._str, '_escape': self._escape}) env.update(args) eval(self.co, env) if '_rebase' in env: subtpl, rargs = env['_rebase'] subtpl = self.__class__(name=subtpl, lookup=self.lookup) rargs['_base'] = _stdout[:] del _stdout[:] return subtpl.execute(_stdout, **rargs) return env def render(self, **args): stdout = [] try: #execute error self.execute(stdout, **args) except Exception,e: print e finally: return ''.join(stdout) def template(tpl, template_adapter=SimpleTemplate, **kwargs): if tpl not in TEMPLATES or DEBUG: settings = kwargs.get('template_settings',{}) lookup = kwargs.get('template_lookup', TEMPLATE_PATH) if isinstance(tpl, template_adapter): TEMPLATES[tpl] = tpl if settings: TEMPLATES[tpl].prepare(**settings) elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl: TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings) else: TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings) if not TEMPLATES[tpl]: abort(500, 'Template (%s) not found' % tpl) return TEMPLATES[tpl].render(**kwargs)
jude90/bottle
brick/template.py
Python
mit
8,543
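A minimal rendering sketch for the SimpleTemplate class above, in Python 2 to match the module. It assumes the surrounding package (its exception, util and config modules) is importable, and the brick.template import path is taken from the file path above.

# -*- coding: utf-8 -*-
# Minimal sketch (Python 2): render an inline template with SimpleTemplate.
from brick.template import SimpleTemplate  # import path assumed from brick/template.py

source = (
    "%if items:\n"
    "<ul>\n"
    "%for item in items:\n"
    "  <li>{{item}}</li>\n"
    "%end\n"
    "</ul>\n"
    "%end\n"
)
tpl = SimpleTemplate(source=source)
print tpl.render(items=['spam', 'eggs'])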
#!/usr/bin/env python import os os.system('clear') banner() runlocally() print colorblue.format('Open multiple tabs in Firefox with:') print print "1. List containing IPs and/or URLs." print "2. Directories from a domain\'s robot.txt." print "3. Previous menu" print choice = raw_input('Choice: ') if choice == "1": print location = raw_input('Enter the location of your list: ') if os.path.isfile(location): f = open(location,'r') # Setup a read connection directory filedata = f.read() # Read the file f.close() # Close the connection filedata = filedata.split('\n') # Turn into a list filedata = [x for x in filedata if not x == ""] # Ignore blank lines port = raw_input('Port: ') if port.isdigit(): if int(port) in range(1,65535): if port == "21": for i in filedata: webbrowser.open('ftp://'+i) time.sleep(1) elif port == "80": for i in filedata: webbrowser.open('http://'+i) time.sleep(1) elif port == "443": for i in filedata: webbrowser.open('https://'+i) time.sleep(1) else: for i in filedata: webbrowser.open('http://'+i+':'+port) time.sleep(1) else: error() else: error() else: error() if choice == "2": print print line print print 'Usage: target.com or target-IP' print domain = raw_input('Domain: ') # Check for no answer if domain == "": error() response = urllib2.urlopen('http://'+domain+'/robots.txt') robots = response.read() robots = robots.split('\n') for i in robots: if 'Disallow' in i: j = i.split(' ') f = open(os.path.expanduser('~')+'/'+domain+'-robots.txt','a') f.write(j[1]+'\n') f.close() webbrowser.open('http://'+domain+j[1]) time.sleep(1) print print line print print '***Scan complete.***' print print 'The new report is located at /'+user+'/'+domain+'-robots.txt' print print sys.exit(0) if choice == "3": main() else: error()
leebaird/discover
notes/python/discover/web/multitabs.py
Python
mit
2,600
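The robots.txt branch above reduces to: fetch /robots.txt, take every Disallow path, and open it on the target host. A side-effect-free sketch of just that core follows (Python 2, urllib2); the domain is a placeholder and URLs are printed rather than opened in a browser.

# Standalone sketch of the robots.txt parsing above (Python 2).
import urllib2

domain = 'example.com'  # placeholder target
robots = urllib2.urlopen('http://' + domain + '/robots.txt').read()
for entry in robots.split('\n'):
    if entry.startswith('Disallow'):
        parts = entry.split(None, 1)
        if len(parts) == 2:
            print 'would open: http://' + domain + parts[1]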
import frappe from frappe import _ import frappe.www.list no_cache = 1 def get_context(context): if frappe.session.user == 'Guest': frappe.throw(_("You need to be logged in to access this page"), frappe.PermissionError) active_tokens = frappe.get_all("OAuth Bearer Token", filters=[["user", "=", frappe.session.user]], fields=["client"], distinct=True, order_by="creation") client_apps = [] for token in active_tokens: creation = get_first_login(token.client) app = { "name": token.get("client"), "app_name": frappe.db.get_value("OAuth Client", token.get("client"), "app_name"), "creation": creation } client_apps.append(app) app = None if "app" in frappe.form_dict: app = frappe.get_doc("OAuth Client", frappe.form_dict.app) app = app.__dict__ app["client_secret"] = None if app: context.app = app context.apps = client_apps context.show_sidebar = True def get_first_login(client): login_date = frappe.get_all("OAuth Bearer Token", filters=[["user", "=", frappe.session.user], ["client", "=", client]], fields=["creation"], order_by="creation", limit=1) login_date = login_date[0].get("creation") if login_date and len(login_date) > 0 else None return login_date @frappe.whitelist() def delete_client(client_id): active_client_id_tokens = frappe.get_all("OAuth Bearer Token", filters=[["user", "=", frappe.session.user], ["client","=", client_id]]) for token in active_client_id_tokens: frappe.delete_doc("OAuth Bearer Token", token.get("name"), ignore_permissions=True)
frappe/frappe
frappe/www/third_party_apps.py
Python
mit
1,546
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ml jobs showlogs command.""" from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.logs import stream from googlecloudsdk.command_lib.ml import flags from googlecloudsdk.command_lib.ml import log_utils class StreamLogs(base.Command): """Show logs from a running Cloud ML job.""" @staticmethod def Args(parser): """Register flags for this command.""" flags.JOB_NAME.AddToParser(parser) flags.POLLING_INTERVAL.AddToParser(parser) flags.ALLOW_MULTILINE_LOGS.AddToParser(parser) flags.TASK_NAME.AddToParser(parser) def Run(self, args): """Run the stream-logs command.""" log_fetcher = stream.LogFetcher( filters=log_utils.LogFilters(args.job, args.task_name), polling_interval=args.polling_interval, continue_func=log_utils.MakeContinueFunction(args.job)) return log_utils.SplitMultiline( log_fetcher.YieldLogs(), allow_multiline=args.allow_multiline_logs) def Format(self, args): """Returns the default formatting for the command. This overrides the base.Command method of the same name. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Some value that we want to have printed later. """ return log_utils.LOG_FORMAT
Sorsly/subtle
google-cloud-sdk/lib/surface/ml/jobs/stream_logs.py
Python
mit
1,916
#!/usr/bin/env python # -*- coding: utf-8 -*- try: from setuptools import setup except ImportError: from distutils.core import setup from setuptools.command.test import test as TestCommand class PyTest(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # import here, cause outside the eggs aren't loaded import pytest pytest.main(self.test_args) with open('README.md') as readme_file: readme = readme_file.read() test_requirements = [ 'pytest' ] setup( name='rlp', version='0.3.8', description="A package for encoding and decoding data in and from Recursive Length Prefix notation", long_description=readme, author="jnnk", author_email='jnnknnj@gmail.com', url='https://github.com/ethereum/pyrlp', packages=[ 'rlp', 'rlp.sedes' ], include_package_data=True, install_requires=[], license="MIT", zip_safe=False, keywords='rlp ethereum', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', "Programming Language :: Python :: 2", 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], cmdclass={'test': PyTest}, tests_require=test_requirements )
holiman/pyrlp
setup.py
Python
mit
1,588
if __name__ == '__main__': import basic_client raise SystemExit(basic_client.main()) from sys import stdout from twisted.python.log import startLogging, err from twisted.protocols.amp import AMP from twisted.internet import reactor from twisted.internet.protocol import Factory from twisted.internet.endpoints import TCP4ClientEndpoint def connect(): endpoint = TCP4ClientEndpoint(reactor, "127.0.0.1", 8750) return endpoint.connect(Factory.forProtocol(AMP)) def main(): startLogging(stdout) d = connect() d.addErrback(err, "Connection failed") def done(ignored): reactor.stop() d.addCallback(done) reactor.run()
EricMuller/mynotes-backend
requirements/twisted/Twisted-17.1.0/docs/core/howto/listings/amp/basic_client.py
Python
mit
669
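The listing only connects and then stops the reactor. Once the deferred fires with a connected AMP protocol, remote commands are issued with callRemote; the sketch below assumes it lives in the same module as connect() above, and the Sum command is hypothetical — the server on port 8750 would need a matching responder for the call to succeed.

# Hypothetical sketch: issuing a remote call once connect() succeeds.
from twisted.protocols import amp

class Sum(amp.Command):
    arguments = [('a', amp.Integer()),
                 ('b', amp.Integer())]
    response = [('total', amp.Integer())]

def request_sum():
    d = connect()
    d.addCallback(lambda proto: proto.callRemote(Sum, a=13, b=81))
    def show(result):
        print(result['total'])
    d.addCallback(show)
    return d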
#!/usr/bin/python # Copyright (c) 2011 Jason Hancock <jsnbyh@gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. try: from setuptools import setup except ImportError: from distutils.core import setup setup( name = 'CloudStack', version = '0.1', description = "CloudStack API v2.2 Client", long_description = "Python interface CloudStack v2.2 API", author = "Jason Hancock", author_email = "jsnbyh@gmail.com", url = "https://github.com/jasonhancock/cloudstack-python-client", packages = [ 'CloudStack' ], license = 'MIT', platforms = 'Posix; MacOS X; Windows', )
jasonhancock/cloudstack-python-client
setup.py
Python
mit
1,693
# coding: utf-8 from __future__ import unicode_literals from datetime import date from boundaries.models import BoundarySet from boundaries.tests import ViewTestCase, ViewsTests, PrettyTests class BoundarySetDetailTestCase(ViewTestCase, ViewsTests, PrettyTests): maxDiff = None url = '/boundary-sets/inc/' json = { 'domain': '', 'licence_url': '', 'end_date': None, 'name_singular': '', 'extra': {}, 'notes': '', 'authority': '', 'source_url': '', 'name_plural': '', 'extent': None, 'last_updated': '2000-01-01', 'start_date': None, 'related': { 'boundaries_url': '/boundaries/inc/' }, } def setUp(self): BoundarySet.objects.create(slug='inc', last_updated=date(2000, 1, 1)) def test_404(self): response = self.client.get('/boundary-sets/nonexistent/') self.assertNotFound(response)
datamade/represent-boundaries
boundaries/tests/test_boundary_set_detail.py
Python
mit
962
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """Wrapper for netCDF readers.""" from __future__ import unicode_literals, division, print_function import os.path import warnings import numpy as np from collections import OrderedDict from monty.dev import requires from monty.collections import AttrDict from monty.functools import lazy_property from monty.string import marquee from pymatgen.core.units import ArrayWithUnit from pymatgen.core.xcfunc import XcFunc from pymatgen.core.structure import Structure import logging logger = logging.getLogger(__name__) __author__ = "Matteo Giantomassi" __copyright__ = "Copyright 2013, The Materials Project" __version__ = "0.1" __maintainer__ = "Matteo Giantomassi" __email__ = "gmatteo at gmail.com" __status__ = "Development" __date__ = "$Feb 21, 2013M$" __all__ = [ "as_ncreader", "as_etsfreader", "NetcdfReader", "ETSF_Reader", "NO_DEFAULT", "structure_from_ncdata", ] try: import netCDF4 except ImportError as exc: netCDF4 = None warnings.warn("""\ `import netCDF4` failed with the following error: %s Please install netcdf4 with `conda install netcdf4` If the conda version does not work, uninstall it with `conda uninstall hdf4 hdf5 netcdf4` and use `pip install netcdf4`""" % str(exc)) def _asreader(file, cls): closeit = False if not isinstance(file, cls): file, closeit = cls(file), True return file, closeit def as_ncreader(file): """ Convert file into a NetcdfReader instance. Returns reader, closeit where closeit is set to True if we have to close the file before leaving the procedure. """ return _asreader(file, NetcdfReader) def as_etsfreader(file): return _asreader(file, ETSF_Reader) class NetcdfReaderError(Exception): """Base error class for NetcdfReader""" class NO_DEFAULT(object): """Signal that read_value should raise an Error""" class NetcdfReader(object): """ Wraps and extends netCDF4.Dataset. Read only mode. Supports with statements. Additional documentation available at: http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html """ Error = NetcdfReaderError @requires(netCDF4 is not None, "netCDF4 must be installed to use this class") def __init__(self, path): """Open the Netcdf file specified by path (read mode).""" self.path = os.path.abspath(path) try: self.rootgrp = netCDF4.Dataset(self.path, mode="r") except Exception as exc: raise self.Error("In file %s: %s" % (self.path, str(exc))) self.ngroups = len(list(self.walk_tree())) #self.path2group = OrderedDict() #for children in self.walk_tree(): # for child in children: # #print(child.group, child.path) # self.path2group[child.path] = child.group def __enter__(self): """Activated when used in the with statement.""" return self def __exit__(self, type, value, traceback): """Activated at the end of the with statement. It automatically closes the file.""" self.rootgrp.close() def close(self): try: self.rootgrp.close() except Exception as exc: logger.warning("Exception %s while trying to close %s" % (exc, self.path)) def walk_tree(self, top=None): """ Navigate all the groups in the file starting from top. If top is None, the root group is used. """ if top is None: top = self.rootgrp values = top.groups.values() yield values for value in top.groups.values(): for children in self.walk_tree(value): yield children def print_tree(self): for children in self.walk_tree(): for child in children: print(child) def read_dimvalue(self, dimname, path="/", default=NO_DEFAULT): """ Returns the value of a dimension. 
Args: dimname: Name of the variable path: path to the group. default: return `default` if `dimname` is not present and `default` is not `NO_DEFAULT` else raise self.Error. """ try: dim = self._read_dimensions(dimname, path=path)[0] return len(dim) except self.Error: if default is NO_DEFAULT: raise return default def read_varnames(self, path="/"): """List of variable names stored in the group specified by path.""" if path == "/": return self.rootgrp.variables.keys() else: group = self.path2group[path] return group.variables.keys() def read_value(self, varname, path="/", cmode=None, default=NO_DEFAULT): """ Returns the values of variable with name varname in the group specified by path. Args: varname: Name of the variable path: path to the group. cmode: if cmode=="c", a complex ndarrays is constructed and returned (netcdf does not provide native support from complex datatype). default: returns default if varname is not present. self.Error is raised if default is default is NO_DEFAULT Returns: numpy array if varname represents an array, scalar otherwise. """ try: var = self.read_variable(varname, path=path) except self.Error: if default is NO_DEFAULT: raise return default if cmode is None: # scalar or array # getValue is not portable! try: return var.getValue()[0] if not var.shape else var[:] except IndexError: return var.getValue() if not var.shape else var[:] else: assert var.shape[-1] == 2 if cmode == "c": return var[...,0] + 1j*var[...,1] else: raise ValueError("Wrong value for cmode %s" % cmode) def read_variable(self, varname, path="/"): """Returns the variable with name varname in the group specified by path.""" return self._read_variables(varname, path=path)[0] def _read_dimensions(self, *dimnames, **kwargs): path = kwargs.get("path", "/") try: if path == "/": return [self.rootgrp.dimensions[dname] for dname in dimnames] else: group = self.path2group[path] return [group.dimensions[dname] for dname in dimnames] except KeyError: raise self.Error("In file %s:\nError while reading dimensions: `%s` with kwargs: `%s`" % (self.path, dimnames, kwargs)) def _read_variables(self, *varnames, **kwargs): path = kwargs.get("path", "/") try: if path == "/": return [self.rootgrp.variables[vname] for vname in varnames] else: group = self.path2group[path] return [group.variables[vname] for vname in varnames] except KeyError: raise self.Error("In file %s:\nError while reading variables: `%s` with kwargs `%s`." % (self.path, varnames, kwargs)) def read_keys(self, keys, dict_cls=AttrDict, path="/"): """ Read a list of variables/dimensions from file. If a key is not present the corresponding entry in the output dictionary is set to None. """ od = dict_cls() for k in keys: try: # Try to read a variable. od[k] = self.read_value(k, path=path) except self.Error: try: # Try to read a dimension. od[k] = self.read_dimvalue(k, path=path) except self.Error: od[k] = None return od class ETSF_Reader(NetcdfReader): """ This object reads data from a file written according to the ETSF-IO specifications. We assume that the netcdf file contains at least the crystallographic section. """ @lazy_property def chemical_symbols(self): """Chemical symbols char [number of atom species][symbol length].""" charr = self.read_value("chemical_symbols") symbols = [] for v in charr: s = "".join(c.decode("utf-8") for c in v) symbols.append(s.strip()) return symbols def typeidx_from_symbol(self, symbol): """Returns the type index from the chemical symbol. 
Note python convention.""" return self.chemical_symbols.index(symbol) def read_structure(self, cls=Structure): """Returns the crystalline structure.""" if self.ngroups != 1: raise NotImplementedError("In file %s: ngroups != 1" % self.path) return structure_from_ncdata(self, cls=cls) def read_abinit_xcfunc(self): """ Read ixc from an Abinit file. Return :class:`XcFunc` object. """ ixc = int(self.read_value("ixc")) return XcFunc.from_abinit_ixc(ixc) def read_abinit_hdr(self): """ Read the variables associated to the Abinit header. Return :class:`AbinitHeader` """ d = {} for hvar in _HDR_VARIABLES.values(): ncname = hvar.etsf_name if hvar.etsf_name is not None else hvar.name if ncname in self.rootgrp.variables: d[hvar.name] = self.read_value(ncname) elif ncname in self.rootgrp.dimensions: d[hvar.name] = self.read_dimvalue(ncname) else: raise ValueError("Cannot find `%s` in `%s`" % (ncname, self.path)) # Convert scalars to (well) scalars. if hasattr(d[hvar.name], "shape") and not d[hvar.name].shape: d[hvar.name] = np.asscalar(d[hvar.name]) if hvar.name in ("title", "md5_pseudos", "codvsn"): # Convert array of numpy bytes to list of strings if hvar.name == "codvsn": d[hvar.name] = "".join(bs.decode("utf-8").strip() for bs in d[hvar.name]) else: d[hvar.name] = ["".join(bs.decode("utf-8") for bs in astr).strip() for astr in d[hvar.name]] return AbinitHeader(d) def structure_from_ncdata(ncdata, site_properties=None, cls=Structure): """ Reads and returns a pymatgen structure from a NetCDF file containing crystallographic data in the ETSF-IO format. Args: ncdata: filename or NetcdfReader instance. site_properties: Dictionary with site properties. cls: The Structure class to instantiate. """ ncdata, closeit = as_ncreader(ncdata) # TODO check whether atomic units are used lattice = ArrayWithUnit(ncdata.read_value("primitive_vectors"), "bohr").to("ang") red_coords = ncdata.read_value("reduced_atom_positions") natom = len(red_coords) znucl_type = ncdata.read_value("atomic_numbers") # type_atom[0:natom] --> index Between 1 and number of atom species type_atom = ncdata.read_value("atom_species") # Fortran to C index and float --> int conversion. species = natom * [None] for atom in range(natom): type_idx = type_atom[atom] - 1 species[atom] = int(znucl_type[type_idx]) d = {} if site_properties is not None: for prop in site_properties: d[prop] = ncdata.read_value(prop) structure = cls(lattice, species, red_coords, site_properties=d) # Quick and dirty hack. # I need an abipy structure since I need to_abivars and other methods.
try: from abipy.core.structure import Structure as AbipyStructure structure.__class__ = AbipyStructure except ImportError: pass if closeit: ncdata.close() return structure class _H(object): __slots__ = ["name", "doc", "etsf_name"] def __init__(self, name, doc, etsf_name=None): self.name, self.doc, self.etsf_name = name, doc, etsf_name _HDR_VARIABLES = ( # Scalars _H("bantot", "total number of bands (sum of nband on all kpts and spins)"), _H("date", "starting date"), _H("headform", "format of the header"), _H("intxc", "input variable"), _H("ixc", "input variable"), _H("mband", "maxval(hdr%nband)", etsf_name="max_number_of_states"), _H("natom", "input variable", etsf_name="number_of_atoms"), _H("nkpt", "input variable", etsf_name="number_of_kpoints"), _H("npsp", "input variable"), _H("nspden", "input variable", etsf_name="number_of_components"), _H("nspinor", "input variable", etsf_name="number_of_spinor_components"), _H("nsppol", "input variable", etsf_name="number_of_spins"), _H("nsym", "input variable", etsf_name="number_of_symmetry_operations"), _H("ntypat", "input variable", etsf_name="number_of_atom_species"), _H("occopt", "input variable"), _H("pertcase", "the index of the perturbation, 0 if GS calculation"), _H("usepaw", "input variable (0=norm-conserving psps, 1=paw)"), _H("usewvl", "input variable (0=plane-waves, 1=wavelets)"), _H("kptopt", "input variable (defines symmetries used for k-point sampling)"), _H("pawcpxocc", "input variable"), _H("nshiftk_orig", "original number of shifts given in input (changed in inkpts, the actual value is nshiftk)"), _H("nshiftk", "number of shifts after inkpts."), _H("icoulomb", "input variable."), _H("ecut", "input variable", etsf_name="kinetic_energy_cutoff"), _H("ecutdg", "input variable (ecut for NC psps, pawecutdg for paw)"), _H("ecutsm", "input variable"), _H("ecut_eff", "ecut*dilatmx**2 (dilatmx is an input variable)"), _H("etot", "EVOLVING variable"), _H("fermie", "EVOLVING variable", etsf_name="fermi_energy"), _H("residm", "EVOLVING variable"), _H("stmbias", "input variable"), _H("tphysel", "input variable"), _H("tsmear", "input variable"), _H("nelect", "number of electrons (computed from pseudos and charge)"), _H("charge", "input variable"), # Arrays _H("qptn", "qptn(3) the wavevector, in case of a perturbation"), #_H("rprimd", "rprimd(3,3) EVOLVING variables", etsf_name="primitive_vectors"), #_H(ngfft, "ngfft(3) input variable", number_of_grid_points_vector1" #_H("nwvlarr", "nwvlarr(2) the number of wavelets for each resolution.", etsf_name="number_of_wavelets"), _H("kptrlatt_orig", "kptrlatt_orig(3,3) Original kptrlatt"), _H("kptrlatt", "kptrlatt(3,3) kptrlatt after inkpts."), _H("istwfk", "input variable istwfk(nkpt)"), _H("lmn_size", "lmn_size(npsp) from psps"), _H("nband", "input variable nband(nkpt*nsppol)", etsf_name="number_of_states"), _H("npwarr", "npwarr(nkpt) array holding npw for each k point", etsf_name="number_of_coefficients"), _H("pspcod", "pscod(npsp) from psps"), _H("pspdat", "psdat(npsp) from psps"), _H("pspso", "pspso(npsp) from psps"), _H("pspxc", "pspxc(npsp) from psps"), _H("so_psp", "input variable so_psp(npsp)"), _H("symafm", "input variable symafm(nsym)"), #_H(symrel="input variable symrel(3,3,nsym)", etsf_name="reduced_symmetry_matrices"), _H("typat", "input variable typat(natom)", etsf_name="atom_species"), _H("kptns", "input variable kptns(nkpt, 3)", etsf_name="reduced_coordinates_of_kpoints"), _H("occ", "EVOLVING variable occ(mband, nkpt, nsppol)", etsf_name="occupations"), _H("tnons", "input variable 
tnons(nsym, 3)", etsf_name="reduced_symmetry_translations"), _H("wtk", "weight of kpoints wtk(nkpt)", etsf_name="kpoint_weights"), _H("shiftk_orig", "original shifts given in input (changed in inkpts)."), _H("shiftk", "shiftk(3,nshiftk), shiftks after inkpts"), _H("amu", "amu(ntypat) ! EVOLVING variable"), #_H("xred", "EVOLVING variable xred(3,natom)", etsf_name="reduced_atom_positions"), _H("zionpsp", "zionpsp(npsp) from psps"), _H("znuclpsp", "znuclpsp(npsp) from psps. Note the difference between (znucl|znucltypat) and znuclpsp"), _H("znucltypat", "znucltypat(ntypat) from alchemy", etsf_name="atomic_numbers"), _H("codvsn", "version of the code"), _H("title", "title(npsp) from psps"), _H("md5_pseudos", "md5pseudos(npsp), md5 checksums associated to pseudos (read from file)"), #_H(type(pawrhoij_type), allocatable :: pawrhoij(:) ! EVOLVING variable, only for paw ) _HDR_VARIABLES = OrderedDict([(h.name, h) for h in _HDR_VARIABLES]) class AbinitHeader(AttrDict): """Stores the values reported in the Abinit header.""" #def __init__(self, *args, **kwargs): # super(AbinitHeader, self).__init__(*args, **kwargs) # for k, v in self.items(): # v.__doc__ = _HDR_VARIABLES[k].doc def __str__(self): return self.to_string() def to_string(self, verbose=0, title=None, **kwargs): """ String representation. kwargs are passed to `pprint.pformat`. Args: verbose: Verbosity level title: Title string. """ from pprint import pformat s = pformat(self, **kwargs) if title is not None: return "\n".join([marquee(title, mark="="), s]) return s
gpetretto/pymatgen
pymatgen/io/abinit/netcdf.py
Python
mit
17,297
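A minimal read sketch for the reader classes above. The file name and variable names are placeholders for whatever an Abinit/ETSF netCDF file actually contains; the calls follow the read_dimvalue, read_value and read_structure signatures shown.

# Minimal sketch: "out_GSR.nc" and the variable names below are placeholders.
from pymatgen.io.abinit.netcdf import ETSF_Reader

with ETSF_Reader("out_GSR.nc") as reader:
    natom = reader.read_dimvalue("number_of_atoms")
    etotal = reader.read_value("etotal", default=None)  # None instead of raising if absent
    structure = reader.read_structure()
    print(natom, etotal, structure.formula)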
#!/usr/bin/python # -*- coding: utf-8 -*- import sys, os, multiprocessing, subprocess root_path = os.path.abspath(os.path.join(os.path.dirname(__file__))) resources = os.path.abspath(os.path.join(os.path.dirname(__file__), 'res')) gen_resources = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src/sakia')) def convert_ui(args, **kwargs): subprocess.call(args, **kwargs) def build_resources(): try: to_process = [] for root, dirs, files in os.walk(root_path): for f in files: if f.endswith('.ui'): source = os.path.join(root, f) dest = os.path.join(root, os.path.splitext(os.path.basename(source))[0]+'_uic.py') exe = 'pyuic5' elif f.endswith('.qrc'): source = os.path.join(root, f) filename = os.path.splitext(os.path.basename(source))[0] # we remove "sakia." from the rc filename # its only named like this so that imports are corrects in uic files dest = os.path.join(gen_resources, filename.replace('sakia.', '')+'_rc.py') exe = 'pyrcc5' else: continue print(source + " >> " + dest) to_process.append([exe, '-o', dest, source]) if sys.platform.startswith('win'): # doing this in parallel on windows will crash your computer [convert_ui(args, shell=True) for args in to_process] else: pool = multiprocessing.Pool() pool.map(convert_ui, to_process) except EnvironmentError: print("""\ Warning: PyQt5 development utilities (pyuic5 and pyrcc5) not found Unable to install praxes' graphical user interface """) build_resources()
ucoin-io/cutecoin
gen_resources.py
Python
mit
1,836
# encoding: UTF-8 from __future__ import print_function from __future__ import absolute_import from time import sleep from .vnfxcm import FxcmApi url = 'https://api-demo.fxcm.com:443' port = 443 token = '48055b5d9afac0a300143ac067ce04cd2430a434' proxy = 'https://localhost:1080' api = FxcmApi() print('api created') api.connect(url, port, token, proxy) print(api.bearer) sleep(20) #api.getInstruments() #api.subscribe('EUR/USD') #api.subscribe('USD/JPY') #api.subscribe('GBP/USD') #api.getModel('Summary') #api.subscribeModel('Summary') #api.getModel(api.MODEL_SUMMARY) #api.getModel(api.MODEL_OFFER) api.subscribeModel(api.MODEL_OFFER) api.updateSubscriptions('EUR/USD') api.subscribe('EUR/USD') #api.subscribeModel(api.MODEL_ACCOUNT) input()
harveywwu/vnpy
vnpy/api/fxcm/test.py
Python
mit
757
from celery import task from django.template import loader from django.contrib.auth.forms import PasswordResetForm from django.core.mail import EmailMultiAlternatives, send_mail @task() def add(): return 2 + 2 @task() def send_mail_old_user(email): send_mail('invitation for project', 'time to code', 'dineshmcmf@gmail.com', [email]) @task() def celery_send_mail(subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name=None): """ Sends a django.core.mail.EmailMultiAlternatives to `to_email`. """ subject = loader.render_to_string(subject_template_name, context) subject = ''.join(subject.splitlines()) body = loader.render_to_string(email_template_name, context) email_message = EmailMultiAlternatives(subject, body, from_email, [to_email]) if html_email_template_name is not None: html_email = loader.render_to_string(html_email_template_name, context) email_message.attach_alternative(html_email, 'text/html') email_message.send()
druuu/pietrack
project/tasks.py
Python
mit
1,095
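These tasks are normally queued from application code with .delay(). The sketch below assumes the import path implied by project/tasks.py above, uses placeholder template names and addresses, and requires a configured broker plus a running Celery worker.

# Sketch: queueing the tasks above. Import path, template names and addresses
# are assumptions/placeholders; a broker and a running worker are assumed.
from project.tasks import add, celery_send_mail

add.delay()  # executed by the worker; returns an AsyncResult immediately

celery_send_mail.delay(
    subject_template_name='registration/password_reset_subject.txt',
    email_template_name='registration/password_reset_email.html',
    context={'user': 'alice'},
    from_email='noreply@example.com',
    to_email='alice@example.com',
)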
#################################### # Driftwood 2D Game Dev. Suite # # lightmanager.py # # Copyright 2014-2017 # # Michael D. Reiley & Paul Merrill # #################################### # ********** # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # ********** from typing import ItemsView, List, Optional from sdl2 import * import entity import filetype import light # Keep a reference to the light module, which is overridden by the LightManager.light function later in the file. # It is only overridden while inside type annotations. _light = light class LightManager: """The Light Manager This class manages lights in the current area. Attributes: driftwood: Base class instance. lights: The dictionary of Light class instances for each light. Stored by lid. """ def __init__(self, driftwood): """LightManager class initializer. Args: driftwood: Base class instance. """ self.driftwood = driftwood self.lights = {} self.__last_lid = -1 self.__area_light_layer = [None, None] def __contains__(self, lid: int) -> bool: if self.light(lid): return True return False def __getitem__(self, lid) -> Optional[light.Light]: return self.light(lid) def __delitem__(self, lid) -> bool: return self.kill(lid) def __iter__(self) -> ItemsView: return self.lights.items() def insert(self, filename: str, layer: int, x: int, y: int, w: int, h: int, color: str = "FFFFFFFF", blend: bool = False, entity: entity.Entity = None, layermod=0) -> Optional[int]: """Create and insert a new light into the area. Args: filename: Filename of the lightmap. layer: Layer of insertion. x: x-coordinate of insertion. y: y-coordinate of insertion. w: Width of the light. h: Height of the light. color: Hexadeximal color and alpha value of the light. "RRGGBBAA" blend: Whether to blend light instead of adding it. Useful for dark lights. entity: If set, eid of entity to track the light to. Disabled if None. layermod: Integer to add to the layer the light is drawn on when tracking an entity. Returns: Light ID of new light if succeeded, None if failed. """ # Input Check try: CHECK(filename, str) CHECK(layer, int, _min=0) CHECK(x, int) CHECK(y, int) CHECK(color, str, _equals=8) CHECK(blend, bool) if entity is not None: CHECK(entity, int, _min=0) CHECK(layermod, int) except CheckFailure as e: self.driftwood.log.msg("ERROR", "Light", "insert", "bad argument", e) return None # Try to request the lightmap image from the resource manager. 
lightmap = self.driftwood.resource.request_image(filename) if not lightmap: self.driftwood.log.msg("ERROR", "Light", "insert", "could not load lightmap", lightmap) return None # Is the color a real color? try: int(color, 16) except ValueError: self.driftwood.log.msg("ERROR", "Light", "insert", "invalid color", color) return None # Does the entity exist if we passed one to track? if entity is not None: if not self.driftwood.entity.entity(entity): self.driftwood.log.msg("ERROR", "Light", "insert", "cannot bind to nonexistent entity", entity) return None # Add our light to the dictionary and increment the light id count. self.__last_lid += 1 lid = self.__last_lid # Give the light transparency. alpha = int(color[6:8], 16) # Are we blending the light? blendmode = SDL_BLENDMODE_ADD if blend: blendmode = SDL_BLENDMODE_BLEND # Give the light color. colormod = ( int(color[0:2], 16), int(color[2:4], 16), int(color[4:6], 16) ) # Assign the light to its light id in the dictionary. self.lights[lid] = light.Light(self, lid, lightmap, layer, x, y, w, h, alpha, blendmode, colormod, entity, layermod) # We are done. self.driftwood.log.info("Light", "inserted", "{0} on layer {1} at position {2}px, {3}px".format(filename, layer, x, y)) # The area has changed because we added a light. self.driftwood.area.changed = True return lid def light(self, lid: int) -> Optional[light.Light]: """Retrieve a light by lid. Args: lid: The Light ID of the light to retrieve. Returns: Light class instance if succeeded, None if failed. """ # Input Check try: CHECK(lid, int, _min=0) except CheckFailure as e: self.driftwood.log.msg("ERROR", "Light", "light", "bad argument", e) return None if lid in self.lights: return self.lights[lid] return None def light_at(self, x: int, y: int) -> Optional[_light.Light]: """Retrieve a light by pixel coordinate. Args: x: The x coordinate of the light. y: The y coordinate of the light. Returns: Light class instance if succeeded, None if failed. """ # Input Check try: CHECK(x, int) CHECK(y, int) except CheckFailure as e: self.driftwood.log.msg("ERROR", "Light", "light_at", "bad argument", e) return None for lid in self.lights: if self.lights[lid].x == x and self.lights[lid].y == y: return self.lights[lid] return None def layer(self, l: int) -> Optional[List[_light.Light]]: """Retrieve a list of lights on the specified layer. Args: l: The layer from which to list lights. Returns: Tuple of Light class instances if succeeded, None if failed. """ # Input Check try: CHECK(l, int, _min=0) except CheckFailure as e: self.driftwood.log.msg("ERROR", "Light", "layer", "bad argument", e) return None lights = [] for lid in self.lights: if self.lights[lid].layer == l: lights.append(self.lights[lid]) return lights def entity(self, eid: int) -> Optional[_light.Light]: """Retrieve a light by the entity it is bound to. Args: eid: The Entity ID of the entity to check for bound light. Returns: Light class instance if succeeded, None if failed. """ # Input Check try: CHECK(eid, int, _min=0) except CheckFailure as e: self.driftwood.log.msg("ERROR", "Light", "entity", "bad argument", e) return None for lid in self.lights: if self.lights[lid].entity == eid: return self.lights[lid] return None def set_color(self, lid: int, color: str, blend: bool = None) -> bool: """Update the color, alpha, and blending of an existing light. Args: lid: The Light ID of the light to update. color: Hexadecimal color and alpha value of the light. "RRGGBBAA" blend: If set, whether to blend light instead of adding it. Otherwise keep old value. 
Returns: True if succeeded, False if failed. """ # Input Check try: CHECK(lid, int, _min=0) CHECK(color, str, _equals=8) if blend is not None: CHECK(blend, bool) except CheckFailure as e: self.driftwood.log.msg("ERROR", "Light", "set_color", "bad argument", e) return False # Is it a real color? try: int(color, 16) except ValueError: self.driftwood.log.msg("ERROR", "Light", "set_color", "invalid color", color) return False # If the light exists, work on it. if lid in self.lights: success = True # Set the color. r = SDL_SetTextureColorMod(self.lights[lid].lightmap.texture, int(color[0:2], 16), int(color[2:4], 16), int(color[4:6], 16)) if r < 0: self.driftwood.log.msg("ERROR", "Light", "set_color", "SDL", SDL_GetError()) success = False # Set the alpha. r = SDL_SetTextureAlphaMod(self.lights[lid].lightmap.texture, int(color[6:8], 16)) if r < 0: self.driftwood.log.msg("ERROR", "Light", "set_color", "SDL", SDL_GetError()) success = False # Are we going to blend the light? if blend is not None: if blend: r = SDL_SetTextureBlendMode(self.lights[lid].lightmap.texture, SDL_BLENDMODE_BLEND) else: r = SDL_SetTextureBlendMode(self.lights[lid].lightmap.texture, SDL_BLENDMODE_ADD) if r < 0: self.driftwood.log.msg("ERROR", "Light", "set_color", "SDL", SDL_GetError()) success = False # The area has changed because we changed the light. self.driftwood.area.changed = True if success: return True return False def area(self, filename, color="FFFFFFFF", blend=False): """Convenience function to apply a lightmap over an entire area. Be aware that this will create a new layer the first time it's run on an area. It will do so again if the area has been unfocused and refocused and its number of layers has changed back. Args: filename: Filename of the lightmap. color: Hexadeximal color and alpha value of the light. "RRGGBBAA" blend: Whether to blend light instead of adding it. Useful for dark lights. Returns: New light if succeeded, None if failed. """ layer = len(self.driftwood.area.tilemap.layers) - 1 if self.__area_light_layer[0] != self.driftwood.area.filename or self.__area_light_layer[1] != layer: self.driftwood.area.tilemap.new_layer() layer = len(self.driftwood.area.tilemap.layers) - 1 self.__area_light_layer = [self.driftwood.area.filename, layer] areasize = [self.driftwood.area.tilemap.width * self.driftwood.area.tilemap.tilewidth, self.driftwood.area.tilemap.height * self.driftwood.area.tilemap.tileheight] insertpos = [areasize[0] // 2, areasize[1] // 2] return self.driftwood.light.insert(filename, layer, insertpos[0], insertpos[1], areasize[0], areasize[1], color=color, blend=blend) def kill(self, lid: int) -> bool: """Kill a light by lid. Args: lid: The Light ID of the light to kill. Returns: True if succeeded, False if failed. """ # Input Check try: CHECK(lid, int, _min=0) except CheckFailure as e: self.driftwood.log.msg("ERROR", "Light", "kill", "bad argument", e) return False if lid in self.lights: del self.lights[lid] self.driftwood.area.changed = True return True self.driftwood.log.msg("WARNING", "Light", "kill", "attempt to kill nonexistent light", lid) return False def killall(self, file: str) -> bool: """Kill all lights by filename or lightmap. Args: file: Filename of the JSON light descriptor whose insertions should be killed, or ImageFile of the lightmap. Returns: True if succeeded, False if failed. 
""" # Input Check try: CHECK(file, str) except CheckFailure as e: self.driftwood.log.msg("ERROR", "Light", "killall", "bad argument", e) return False to_kill = [] # Collect a list of lights to kill, searching by lightmap ImageFile. if isinstance(file, filetype.ImageFile): for lid in self.lights: if self.lights[lid].lightmap == file: to_kill += lid # Collect a list of lights to kill, searching by lightmap filename. else: for lid in self.lights: if self.lights[lid].filename == file: to_kill += lid # Kill the lights in the list. for lid in to_kill: del self.lights[lid] self.driftwood.area.changed = True # Did we succeed? if to_kill: return True self.driftwood.log.msg("WARNING", "Light", "killall", "attempt to kill nonexistent lights", file) return False def reset(self) -> bool: """Reset the lights. Returns: True """ self.lights = {} self.driftwood.area.changed = True self.driftwood.log.info("Light", "reset") return True
Driftwood2D/Driftwood
src/lightmanager.py
Python
mit
14,606
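A sketch of driving the manager from an event script. "Driftwood" stands for the engine's base instance however the script receives it, and the lightmap filename and coordinates are placeholders; the calls themselves follow the insert() and set_color() signatures above.

# Sketch: "Driftwood" is the engine base instance handed to scripts (assumed),
# "glow.png" is a placeholder lightmap resource.
def place_light(Driftwood):
    lid = Driftwood.light.insert("glow.png", layer=2, x=160, y=120, w=64, h=64,
                                 color="FFCC88FF", blend=False)
    if lid is not None:
        # Later, dim the same light by lowering its alpha and switching to blending.
        Driftwood.light.set_color(lid, "FFCC8866", blend=True)
    return lid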
import abc import copy import json import string class JSONObject(metaclass=abc.ABCMeta): @abc.abstractmethod def __to_json__(self): pass @staticmethod @abc.abstractclassmethod def from_json(action, selector): pass def to_instance(self, target): return copy.copy(self) def __from_json__(self, **kwargs): self.__init__(**kwargs) return self def eq(self, other): return str(self) == str(other) def __str__(self): return json.dumps(self.__to_json__(), default=lambda o: o.__to_json__(), sort_keys=True) class Tag(JSONObject): def __deepcopy__(self, memo): cls = self.__class__ new = cls.__new__(cls) memo[id(self)] = new for attribute, value in self.__dict__.items(): if attribute != "owner": setattr(new, attribute, copy.deepcopy(value, memo)) else: setattr(new, attribute, None) return new class Aura(Tag): def __init__(self, status, selector, condition=None, expires=False): self.owner = None self.status = status self.selector = selector self.condition = condition self.expires = expires def set_owner(self, owner): self.owner = owner def apply(self): if not self.condition or self.condition.evaluate(self.owner, self.owner): targets = self.selector.get_targets(self.owner) for target in targets: self.status.act(self.owner, target) def unapply(self): targets = self.selector.get_targets(self.owner) for target in targets: self.status.unact(self.owner, target) def match(self, obj): return (not self.condition or self.condition.evaluate(self.owner, self.owner)) and \ self.selector.match(self.owner, obj) def __to_json__(self): if self.condition: return { 'status': self.status, 'selector': self.selector, 'condition': self.condition, 'expires': self.expires, } return { 'status': self.status, 'selector': self.selector, 'expires': self.expires, } @staticmethod def from_json(status, selector, condition=None, expires=False): status = Status.from_json(**status) selector = Selector.from_json(**selector) if condition: condition = Condition.from_json(**condition) return Aura(status, selector, condition, expires) class Buff(Tag): def __init__(self, status, condition=None): self.status = status self.condition = condition self.owner = None def set_owner(self, owner): self.owner = owner def apply(self): if not self.condition or self.condition.evaluate(self.owner, self.owner): self.status.act(self.owner, self.owner) def unapply(self): self.status.unact(self.owner, self.owner) def is_minion(self): return False def to_instance(self, target): new_instance = copy.copy(self) new_instance.status = self.status.to_instance(target) return new_instance def __to_json__(self): if self.condition: return { 'status': self.status, 'condition': self.condition } return { 'status': self.status, } @staticmethod def from_json(status, condition=None): status = Status.from_json(**status) if condition: condition = Condition.from_json(**condition) return Buff(status, condition) class BuffUntil(Buff): def __init__(self, status, until): super().__init__(status) self.until = until def apply(self): super().apply() self.until.bind(self.owner, self.__until__) def unapply(self): self.until.unbind(self.owner, self.__until__) super().unapply() def __until__(self, *args): self.owner.remove_buff(self) def __to_json__(self): return { 'status': self.status, 'until': self.until } @staticmethod def from_json(status, until): status = Status.from_json(**status) until = Event.from_json(**until) return BuffUntil(status, until) class AuraUntil(Aura): def __init__(self, status, selector, until, expires=True): super().__init__(status, selector, None, expires) self.until = 
until def apply(self): super().apply() self.until.bind(self.owner, self.__until__) def unapply(self): self.until.unbind(self.owner, self.__until__) super().unapply() def __until__(self, *args): self.owner.player.remove_aura(self) def __to_json__(self): return { 'status': self.status, 'selector': self.selector, 'until': self.until, 'expires': self.expires } @staticmethod def from_json(status, selector, until, expires=True): status = Status.from_json(**status) selector = Selector.from_json(**selector) until = Event.from_json(**until) return AuraUntil(status, selector, until, expires) class Player(metaclass=abc.ABCMeta): @abc.abstractmethod def get_players(self, target): pass @abc.abstractmethod def match(self, source, obj): pass @staticmethod def from_json(name): from hearthbreaker.tags.selector import FriendlyPlayer, EnemyPlayer, BothPlayer, PlayerOne, \ PlayerTwo, CurrentPlayer, OtherPlayer if name == "friendly": return FriendlyPlayer() elif name == "enemy": return EnemyPlayer() elif name == "both": return BothPlayer() elif name == "player_one": return PlayerOne() elif name == "player_two": return PlayerTwo() elif name == "current_player": return CurrentPlayer() elif name == "other_player": return OtherPlayer() class Picker(JSONObject, metaclass=abc.ABCMeta): @abc.abstractmethod def pick(self, source, targets): pass @staticmethod def from_json(name, count=0): from hearthbreaker.tags.selector import UserPicker, AllPicker, RandomPicker if name == "user": return UserPicker() elif name == "all": return AllPicker() elif name == "random": return RandomPicker(count) else: raise TypeError("What are you even doing?") class Selector(JSONObject, metaclass=abc.ABCMeta): @abc.abstractmethod def get_targets(self, source, target=None): pass def choose_targets(self, source, target=None): return self.get_targets(source, target) @abc.abstractmethod def match(self, source, obj): pass @staticmethod def from_json(name, **kwargs): import hearthbreaker.tags.selector as selector_mod cls_name = string.capwords(name, '_').replace("_", "") + "Selector" cls = getattr(selector_mod, cls_name) obj = cls.__new__(cls) return obj.__from_json__(**kwargs) class Action(JSONObject, metaclass=abc.ABCMeta): @abc.abstractmethod def act(self, actor, target, other=None): pass @staticmethod def from_json(name, **kwargs): import hearthbreaker.tags.action as action_mod cls_name = string.capwords(name, '_').replace("_", "") cls = getattr(action_mod, cls_name) obj = cls.__new__(cls) return obj.__from_json__(**kwargs) class Status(JSONObject, metaclass=abc.ABCMeta): @abc.abstractmethod def act(self, actor, target): pass @abc.abstractmethod def unact(self, actor, target): pass @staticmethod def from_json(name, **kwargs): import hearthbreaker.tags.status as status_mod cls_name = string.capwords(name, '_').replace("_", "") cls = getattr(status_mod, cls_name) obj = cls.__new__(cls) return obj.__from_json__(**kwargs) class Amount(abc.ABCMeta): def __init__(cls, name, bases, dct): super(Amount, cls).__init__(name, bases, dct) base_init = cls.__init__ base_to_json = cls.__to_json__ base_from_json = cls.__from_json__ base_to_instance = cls.to_instance def init_with_amount(self, amount=1, multiplier=1, **kwargs): self.amount = amount self.multipler = multiplier return base_init(self, **kwargs) def to_json_with_amount(self): js = base_to_json(self) if self.amount: js['amount'] = self.amount if self.multipler != 1: js['multiplier'] = self.multipler return js def from_json_with_amount(self, amount=1, multiplier=1, **kwargs): if amount: if 
isinstance(amount, dict): self.amount = Function.from_json(**amount) else: self.amount = amount self.multipler = multiplier if base_from_json is JSONObject.__from_json__: base_init(self, **kwargs) return self return base_from_json(self, **kwargs) def get_amount(self, source, target, *args): if isinstance(self.amount, Function): return self.amount.do(source, *args) * self.multipler else: return self.amount def to_instance(self, target): new_instance = base_to_instance(self, target) new_instance.amount = new_instance.get_amount(target, target) return new_instance cls.__init__ = init_with_amount cls.__to_json__ = to_json_with_amount cls.__from_json__ = from_json_with_amount cls.get_amount = get_amount cls.to_instance = to_instance class Event(JSONObject, metaclass=abc.ABCMeta): def __init__(self, event_name, condition=None): self.event_name = event_name self.condition = condition self.__func__ = None self.__target__ = None @abc.abstractmethod def bind(self, target, func): pass @abc.abstractmethod def unbind(self, target, func): pass def __action__(self, *args): if self.condition.evaluate(self.__target__, *args): self.__func__(*args) @staticmethod def from_json(event_name, **kwargs): import hearthbreaker.tags.event as event_mod cls_name = string.capwords(event_name, '_').replace("_", "") cls = getattr(event_mod, cls_name) obj = cls.__new__(cls) return obj.__from_json__(**kwargs) def __deepcopy__(self, memo): cls = type(self) new = cls.__new__(cls) memo[id(self)] = new if self.condition: new.condition = copy.deepcopy(self.condition, memo) else: new.condition = None new.event_name = self.event_name return new def __from_json__(self, condition=None): if condition: condition = Condition.from_json(**condition) self.__init__(condition) else: self.__init__() return self def __to_json__(self): if self.condition: return { 'event_name': self.event_name, 'condition': self.condition } return { 'event_name': self.event_name } class MinionEvent(Event): def bind(self, target, func): if self.condition: self.__target__ = target self.__func__ = func target.bind(self.event_name, self.__action__) else: target.bind(self.event_name, func) def unbind(self, target, func): if self.condition: target.unbind(self.event_name, self.__action__) else: target.unbind(self.event_name, func) class PlayerEvent(Event): def __init__(self, event_name, condition, player): super().__init__(event_name, condition) self.player = player def bind(self, target, func): for player in self.player.get_players(target.player): if self.condition: self.__target__ = target self.__func__ = func player.bind(self.event_name, self.__action__) else: player.bind(self.event_name, func) def unbind(self, target, func): for player in self.player.get_players(target.player): if self.condition: player.unbind(self.event_name, self.__action__) else: player.unbind(self.event_name, func) def __deepcopy__(self, memo): new = super().__deepcopy__(memo) new.player = copy.deepcopy(self.player, memo) return new def __to_json__(self): super_json = super().__to_json__() super_json.update({ 'player': self.player }) return super_json def __from_json__(self, player, condition=None): if condition: condition = Condition.from_json(**condition) player = Player.from_json(player) self.__init__(condition, player) return self class Effect(Tag): def __init__(self, event, tags): self.event = event if isinstance(tags, list): self.tags = tags else: self.tags = [tags] self.owner = None self.other = None def apply(self): self.event.bind(self.owner, self._find_target) def unapply(self): 
self.event.unbind(self.owner, self._find_target) def set_owner(self, owner): self.owner = owner def _find_target(self, focus=None, other=None, *args): for tag in self.tags: if not tag.do(self.owner, focus, other): break def __to_json__(self): return { 'event': self.event, 'tags': self.tags, } @staticmethod def from_json(event, tags): tags = [ActionTag.from_json(**tag) for tag in tags] event = Event.from_json(**event) return Effect(event, tags) class Condition(JSONObject, metaclass=abc.ABCMeta): @abc.abstractmethod def evaluate(self, target, *args): pass @staticmethod def from_json(name, **kwargs): import hearthbreaker.tags.condition as action_mod cls_name = string.capwords(name, '_').replace("_", "") cls = getattr(action_mod, cls_name) obj = cls.__new__(cls) return obj.__from_json__(**kwargs) @abc.abstractmethod def __to_json__(self): pass class ActionTag(Tag): def __init__(self, actions, selector, condition=None): if isinstance(actions, list): self.actions = actions else: self.actions = [actions] self.selector = selector self.condition = condition def do(self, owner, target=None, other=None): if self.condition: if not self.condition.evaluate(owner, target): return targets = self.selector.choose_targets(owner, target) found_target = False for t in targets: found_target = True if t is owner or t.is_valid(): for action in self.actions: action.act(owner, t, other) return found_target def __to_json__(self): if self.condition: return { 'actions': self.actions, 'selector': self.selector, 'condition': self.condition, } return { 'actions': self.actions, 'selector': self.selector } @classmethod def from_json(cls, actions, selector, condition=None): action = [Action.from_json(**a) for a in actions] selector = Selector.from_json(**selector) if condition: condition = Condition.from_json(**condition) return cls(action, selector, condition) class Deathrattle(ActionTag): def do(self, target): super().do(target) target.player.game.check_delayed() class Spell(ActionTag): def __init__(self, actions, selector, condition=None): super().__init__(actions, selector, condition) class CardQuery(JSONObject, metaclass=abc.ABCMeta): def __init__(self): pass @abc.abstractmethod def get_card(self, target, player, owner): pass @abc.abstractmethod def __to_json__(self): pass @staticmethod def from_json(query): from hearthbreaker.tags.card_source import SpecificCard, CardList, HandSource, \ DeckSource, CollectionSource, ObjectSource, LastCard, Same if isinstance(query, str): return SpecificCard.__from_json__(query) elif isinstance(query, list): return CardList.__from_json__(query) elif query['name'] == 'object': return ObjectSource.__from_json__(**query) elif query['name'] == 'hand': return HandSource.__from_json__(**query) elif query['name'] == 'deck': return DeckSource.__from_json__(**query) elif query['name'] == 'collection': return CollectionSource.__from_json__(**query) elif query['name'] == 'last_card': return LastCard() elif query['name'] == 'same': return Same() else: raise Exception(query['name']) class Battlecry(ActionTag): def __init__(self, actions, selector, condition=None): super().__init__(actions, selector, condition) class Choice(ActionTag): def __init__(self, card, actions, selector, condition=None): self.card = card super().__init__(actions, selector, condition) def __to_json__(self): super_json = super().__to_json__() super_json['card'] = self.card.ref_name return super_json @staticmethod def from_json(card, actions, selector, condition=None): from hearthbreaker.engine import card_lookup actions = 
[Action.from_json(**action) for action in actions] selector = Selector.from_json(**selector) if condition: condition = Condition.from_json(**condition) card = card_lookup(card) return Choice(card, actions, selector, condition) class Function(JSONObject, metaclass=abc.ABCMeta): def do(self, target, *args): pass @staticmethod def from_json(name, **kwargs): import hearthbreaker.tags.selector as selector_mod cls_name = string.capwords(name, '_').replace("_", "") cls = getattr(selector_mod, cls_name) obj = cls.__new__(cls) return obj.__from_json__(**kwargs) def __from_json__(self, **kwargs): self.__init__(**kwargs) return self class Context(metaclass=abc.ABCMeta): """ Handles computations for actions and statuses that change based on if they are part of a battlecry, hero power or spell. """ def __init__(self, player): """ Creates a context associated with a particular player :param player: The player that this context is associated with :type player: :class:`hearthbreaker.engine.Player` """ self.player = player @abc.abstractmethod def filter_targets(self, targets): """ Filter the targets that the user can choose. Spells cannot target minions who are not spell_targetable, for example. :param targets: All the possible target characters :type targets: [:class:`hearthbreaker.game_objects.Character`] :return: A list of targets which has been filtered according to the context :rtype: [:class:`hearthbreaker.game_objects.Character`] """ pass @abc.abstractmethod def damage(self, amount, target): """ Damage the target according to the context. Spells must include spell_damage, while battlecries and physical attacks do not :param amount: The amount to damage the target :type amount: :class:`hearthbreaker.game_objects.Character` :param target: The character getting damaged """ pass @abc.abstractmethod def heal(self, amount, target): """ Heals the target according to the context. Spells and hero power include heal_multiplier, while battlecries do not :param amount: The amount to damage the target :type amount: :class:`hearthbreaker.game_objects.Character` :param target: The character getting damaged """ pass
jomyhuang/sdwle
hearthbreaker/tags/base.py
Python
mit
20,904
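The tag classes in the record above all recover a concrete class from serialized JSON by converting a snake_case name to CamelCase and looking the result up on a module (see `Event.from_json`, `Condition.from_json`, `Function.from_json`). A minimal standalone sketch of that dispatch, using a stand-in registry class instead of hearthbreaker's real tag modules (the `events` namespace and `MinionDied` class here are illustrative, not part of the library):

import string


class MinionDied:  # stand-in for a concrete Event subclass
    def __init__(self, condition=None):
        self.condition = condition


class events:  # stand-in "module": any object exposing classes by attribute works
    MinionDied = MinionDied


def from_json(event_name, **kwargs):
    # 'minion_died' -> 'MinionDied', then look the class up by name
    cls_name = string.capwords(event_name, '_').replace("_", "")
    cls = getattr(events, cls_name)
    return cls(**kwargs)


print(type(from_json('minion_died')).__name__)  # MinionDied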
"""Test script for ftplib module.""" # Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS # environment import ftplib import asyncore import asynchat import socket import StringIO import errno import os try: import ssl except ImportError: ssl = None from unittest import TestCase from test import test_support from test.test_support import HOST threading = test_support.import_module('threading') # the dummy data returned by server over the data channel when # RETR, LIST and NLST commands are issued RETR_DATA = 'abcde12345\r\n' * 1000 LIST_DATA = 'foo\r\nbar\r\n' NLST_DATA = 'foo\r\nbar\r\n' class DummyDTPHandler(asynchat.async_chat): dtp_conn_closed = False def __init__(self, conn, baseclass): asynchat.async_chat.__init__(self, conn) self.baseclass = baseclass self.baseclass.last_received_data = '' def handle_read(self): self.baseclass.last_received_data += self.recv(1024) def handle_close(self): # XXX: this method can be called many times in a row for a single # connection, including in clear-text (non-TLS) mode. # (behaviour witnessed with test_data_connection) if not self.dtp_conn_closed: self.baseclass.push('226 transfer complete') self.close() self.dtp_conn_closed = True def handle_error(self): raise class DummyFTPHandler(asynchat.async_chat): dtp_handler = DummyDTPHandler def __init__(self, conn): asynchat.async_chat.__init__(self, conn) self.set_terminator("\r\n") self.in_buffer = [] self.dtp = None self.last_received_cmd = None self.last_received_data = '' self.next_response = '' self.rest = None self.push('220 welcome') def collect_incoming_data(self, data): self.in_buffer.append(data) def found_terminator(self): line = ''.join(self.in_buffer) self.in_buffer = [] if self.next_response: self.push(self.next_response) self.next_response = '' cmd = line.split(' ')[0].lower() self.last_received_cmd = cmd space = line.find(' ') if space != -1: arg = line[space + 1:] else: arg = "" if hasattr(self, 'cmd_' + cmd): method = getattr(self, 'cmd_' + cmd) method(arg) else: self.push('550 command "%s" not understood.' 
%cmd) def handle_error(self): raise def push(self, data): asynchat.async_chat.push(self, data + '\r\n') def cmd_port(self, arg): addr = map(int, arg.split(',')) ip = '%d.%d.%d.%d' %tuple(addr[:4]) port = (addr[4] * 256) + addr[5] s = socket.create_connection((ip, port), timeout=2) self.dtp = self.dtp_handler(s, baseclass=self) self.push('200 active data connection established') def cmd_pasv(self, arg): sock = socket.socket() sock.bind((self.socket.getsockname()[0], 0)) sock.listen(5) sock.settimeout(2) ip, port = sock.getsockname()[:2] ip = ip.replace('.', ',') p1, p2 = divmod(port, 256) self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2)) conn, addr = sock.accept() self.dtp = self.dtp_handler(conn, baseclass=self) def cmd_eprt(self, arg): af, ip, port = arg.split(arg[0])[1:-1] port = int(port) s = socket.create_connection((ip, port), timeout=2) self.dtp = self.dtp_handler(s, baseclass=self) self.push('200 active data connection established') def cmd_epsv(self, arg): sock = socket.socket(socket.AF_INET6) sock.bind((self.socket.getsockname()[0], 0)) sock.listen(5) sock.settimeout(2) port = sock.getsockname()[1] self.push('229 entering extended passive mode (|||%d|)' %port) conn, addr = sock.accept() self.dtp = self.dtp_handler(conn, baseclass=self) def cmd_echo(self, arg): # sends back the received string (used by the test suite) self.push(arg) def cmd_user(self, arg): self.push('331 username ok') def cmd_pass(self, arg): self.push('230 password ok') def cmd_acct(self, arg): self.push('230 acct ok') def cmd_rnfr(self, arg): self.push('350 rnfr ok') def cmd_rnto(self, arg): self.push('250 rnto ok') def cmd_dele(self, arg): self.push('250 dele ok') def cmd_cwd(self, arg): self.push('250 cwd ok') def cmd_size(self, arg): self.push('250 1000') def cmd_mkd(self, arg): self.push('257 "%s"' %arg) def cmd_rmd(self, arg): self.push('250 rmd ok') def cmd_pwd(self, arg): self.push('257 "pwd ok"') def cmd_type(self, arg): self.push('200 type ok') def cmd_quit(self, arg): self.push('221 quit ok') self.close() def cmd_stor(self, arg): self.push('125 stor ok') def cmd_rest(self, arg): self.rest = arg self.push('350 rest ok') def cmd_retr(self, arg): self.push('125 retr ok') if self.rest is not None: offset = int(self.rest) else: offset = 0 self.dtp.push(RETR_DATA[offset:]) self.dtp.close_when_done() self.rest = None def cmd_list(self, arg): self.push('125 list ok') self.dtp.push(LIST_DATA) self.dtp.close_when_done() def cmd_nlst(self, arg): self.push('125 nlst ok') self.dtp.push(NLST_DATA) self.dtp.close_when_done() class DummyFTPServer(asyncore.dispatcher, threading.Thread): handler = DummyFTPHandler def __init__(self, address, af=socket.AF_INET): threading.Thread.__init__(self) asyncore.dispatcher.__init__(self) self.create_socket(af, socket.SOCK_STREAM) self.bind(address) self.listen(5) self.active = False self.active_lock = threading.Lock() self.host, self.port = self.socket.getsockname()[:2] def start(self): assert not self.active self.__flag = threading.Event() threading.Thread.start(self) self.__flag.wait() def run(self): self.active = True self.__flag.set() while self.active and asyncore.socket_map: self.active_lock.acquire() asyncore.loop(timeout=0.1, count=1) self.active_lock.release() asyncore.close_all(ignore_all=True) def stop(self): assert self.active self.active = False self.join() def handle_accept(self): conn, addr = self.accept() self.handler = self.handler(conn) self.close() def handle_connect(self): self.close() handle_read = handle_connect def writable(self): return 0 def 
handle_error(self): raise if ssl is not None: CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem") class SSLConnection(object, asyncore.dispatcher): """An asyncore.dispatcher subclass supporting TLS/SSL.""" _ssl_accepting = False _ssl_closing = False def secure_connection(self): self.socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False, certfile=CERTFILE, server_side=True, do_handshake_on_connect=False, ssl_version=ssl.PROTOCOL_SSLv23) self._ssl_accepting = True def _do_ssl_handshake(self): try: self.socket.do_handshake() except ssl.SSLError, err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return elif err.args[0] == ssl.SSL_ERROR_EOF: return self.handle_close() raise except socket.error, err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self._ssl_accepting = False def _do_ssl_shutdown(self): self._ssl_closing = True try: self.socket = self.socket.unwrap() except ssl.SSLError, err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return except socket.error, err: # Any "socket error" corresponds to a SSL_ERROR_SYSCALL return # from OpenSSL's SSL_shutdown(), corresponding to a # closed socket condition. See also: # http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html pass self._ssl_closing = False super(SSLConnection, self).close() def handle_read_event(self): if self._ssl_accepting: self._do_ssl_handshake() elif self._ssl_closing: self._do_ssl_shutdown() else: super(SSLConnection, self).handle_read_event() def handle_write_event(self): if self._ssl_accepting: self._do_ssl_handshake() elif self._ssl_closing: self._do_ssl_shutdown() else: super(SSLConnection, self).handle_write_event() def send(self, data): try: return super(SSLConnection, self).send(data) except ssl.SSLError, err: if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN, ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return 0 raise def recv(self, buffer_size): try: return super(SSLConnection, self).recv(buffer_size) except ssl.SSLError, err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return '' if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN): self.handle_close() return '' raise def handle_error(self): raise def close(self): if (isinstance(self.socket, ssl.SSLSocket) and self.socket._sslobj is not None): self._do_ssl_shutdown() class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler): """A DummyDTPHandler subclass supporting TLS/SSL.""" def __init__(self, conn, baseclass): DummyDTPHandler.__init__(self, conn, baseclass) if self.baseclass.secure_data_channel: self.secure_connection() class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler): """A DummyFTPHandler subclass supporting TLS/SSL.""" dtp_handler = DummyTLS_DTPHandler def __init__(self, conn): DummyFTPHandler.__init__(self, conn) self.secure_data_channel = False def cmd_auth(self, line): """Set up secure control channel.""" self.push('234 AUTH TLS successful') self.secure_connection() def cmd_pbsz(self, line): """Negotiate size of buffer for secure data transfer. For TLS/SSL the only valid value for the parameter is '0'. Any other value is accepted but ignored. 
""" self.push('200 PBSZ=0 successful.') def cmd_prot(self, line): """Setup un/secure data channel.""" arg = line.upper() if arg == 'C': self.push('200 Protection set to Clear') self.secure_data_channel = False elif arg == 'P': self.push('200 Protection set to Private') self.secure_data_channel = True else: self.push("502 Unrecognized PROT type (use C or P).") class DummyTLS_FTPServer(DummyFTPServer): handler = DummyTLS_FTPHandler class TestFTPClass(TestCase): def setUp(self): self.server = DummyFTPServer((HOST, 0)) self.server.start() self.client = ftplib.FTP(timeout=2) self.client.connect(self.server.host, self.server.port) def tearDown(self): self.client.close() self.server.stop() def test_getwelcome(self): self.assertEqual(self.client.getwelcome(), '220 welcome') def test_sanitize(self): self.assertEqual(self.client.sanitize('foo'), repr('foo')) self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****')) self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****')) def test_exceptions(self): self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400') self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499') self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500') self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599') self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999') def test_all_errors(self): exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm, ftplib.error_proto, ftplib.Error, IOError, EOFError) for x in exceptions: try: raise x('exception not included in all_errors set') except ftplib.all_errors: pass def test_set_pasv(self): # passive mode is supposed to be enabled by default self.assertTrue(self.client.passiveserver) self.client.set_pasv(True) self.assertTrue(self.client.passiveserver) self.client.set_pasv(False) self.assertFalse(self.client.passiveserver) def test_voidcmd(self): self.client.voidcmd('echo 200') self.client.voidcmd('echo 299') self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199') self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300') def test_login(self): self.client.login() def test_acct(self): self.client.acct('passwd') def test_rename(self): self.client.rename('a', 'b') self.server.handler.next_response = '200' self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b') def test_delete(self): self.client.delete('foo') self.server.handler.next_response = '199' self.assertRaises(ftplib.error_reply, self.client.delete, 'foo') def test_size(self): self.client.size('foo') def test_mkd(self): dir = self.client.mkd('/foo') self.assertEqual(dir, '/foo') def test_rmd(self): self.client.rmd('foo') def test_pwd(self): dir = self.client.pwd() self.assertEqual(dir, 'pwd ok') def test_quit(self): self.assertEqual(self.client.quit(), '221 quit ok') # Ensure the connection gets closed; sock attribute should be None self.assertEqual(self.client.sock, None) def test_retrbinary(self): received = [] self.client.retrbinary('retr', received.append) self.assertEqual(''.join(received), RETR_DATA) def test_retrbinary_rest(self): for rest in (0, 10, 20): received = [] self.client.retrbinary('retr', received.append, rest=rest) self.assertEqual(''.join(received), RETR_DATA[rest:], msg='rest test case %d %d %d' % (rest, len(''.join(received)), len(RETR_DATA[rest:]))) def test_retrlines(self): received = [] self.client.retrlines('retr', received.append) self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', '')) def 
test_storbinary(self): f = StringIO.StringIO(RETR_DATA) self.client.storbinary('stor', f) self.assertEqual(self.server.handler.last_received_data, RETR_DATA) # test new callback arg flag = [] f.seek(0) self.client.storbinary('stor', f, callback=lambda x: flag.append(None)) self.assertTrue(flag) def test_storbinary_rest(self): f = StringIO.StringIO(RETR_DATA) for r in (30, '30'): f.seek(0) self.client.storbinary('stor', f, rest=r) self.assertEqual(self.server.handler.rest, str(r)) def test_storlines(self): f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n')) self.client.storlines('stor', f) self.assertEqual(self.server.handler.last_received_data, RETR_DATA) # test new callback arg flag = [] f.seek(0) self.client.storlines('stor foo', f, callback=lambda x: flag.append(None)) self.assertTrue(flag) def test_nlst(self): self.client.nlst() self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1]) def test_dir(self): l = [] self.client.dir(lambda x: l.append(x)) self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', '')) def test_makeport(self): self.client.makeport() # IPv4 is in use, just make sure send_eprt has not been used self.assertEqual(self.server.handler.last_received_cmd, 'port') def test_makepasv(self): host, port = self.client.makepasv() conn = socket.create_connection((host, port), 2) conn.close() # IPv4 is in use, just make sure send_epsv has not been used self.assertEqual(self.server.handler.last_received_cmd, 'pasv') class TestIPv6Environment(TestCase): def setUp(self): self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6) self.server.start() self.client = ftplib.FTP() self.client.connect(self.server.host, self.server.port) def tearDown(self): self.client.close() self.server.stop() def test_af(self): self.assertEqual(self.client.af, socket.AF_INET6) def test_makeport(self): self.client.makeport() self.assertEqual(self.server.handler.last_received_cmd, 'eprt') def test_makepasv(self): host, port = self.client.makepasv() conn = socket.create_connection((host, port), 2) conn.close() self.assertEqual(self.server.handler.last_received_cmd, 'epsv') def test_transfer(self): def retr(): received = [] self.client.retrbinary('retr', received.append) self.assertEqual(''.join(received), RETR_DATA) self.client.set_pasv(True) retr() self.client.set_pasv(False) retr() class TestTLS_FTPClassMixin(TestFTPClass): """Repeat TestFTPClass tests starting the TLS layer for both control and data connections first. 
""" def setUp(self): self.server = DummyTLS_FTPServer((HOST, 0)) self.server.start() self.client = ftplib.FTP_TLS(timeout=2) self.client.connect(self.server.host, self.server.port) # enable TLS self.client.auth() self.client.prot_p() class TestTLS_FTPClass(TestCase): """Specific TLS_FTP class tests.""" def setUp(self): self.server = DummyTLS_FTPServer((HOST, 0)) self.server.start() self.client = ftplib.FTP_TLS(timeout=2) self.client.connect(self.server.host, self.server.port) def tearDown(self): self.client.close() self.server.stop() def test_control_connection(self): self.assertNotIsInstance(self.client.sock, ssl.SSLSocket) self.client.auth() self.assertIsInstance(self.client.sock, ssl.SSLSocket) def test_data_connection(self): # clear text sock = self.client.transfercmd('list') self.assertNotIsInstance(sock, ssl.SSLSocket) sock.close() self.assertEqual(self.client.voidresp(), "226 transfer complete") # secured, after PROT P self.client.prot_p() sock = self.client.transfercmd('list') self.assertIsInstance(sock, ssl.SSLSocket) sock.close() self.assertEqual(self.client.voidresp(), "226 transfer complete") # PROT C is issued, the connection must be in cleartext again self.client.prot_c() sock = self.client.transfercmd('list') self.assertNotIsInstance(sock, ssl.SSLSocket) sock.close() self.assertEqual(self.client.voidresp(), "226 transfer complete") def test_login(self): # login() is supposed to implicitly secure the control connection self.assertNotIsInstance(self.client.sock, ssl.SSLSocket) self.client.login() self.assertIsInstance(self.client.sock, ssl.SSLSocket) # make sure that AUTH TLS doesn't get issued again self.client.login() def test_auth_issued_twice(self): self.client.auth() self.assertRaises(ValueError, self.client.auth) def test_auth_ssl(self): try: self.client.ssl_version = ssl.PROTOCOL_SSLv3 self.client.auth() self.assertRaises(ValueError, self.client.auth) finally: self.client.ssl_version = ssl.PROTOCOL_TLSv1 class TestTimeouts(TestCase): def setUp(self): self.evt = threading.Event() self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(3) self.port = test_support.bind_port(self.sock) threading.Thread(target=self.server, args=(self.evt,self.sock)).start() # Wait for the server to be ready. self.evt.wait() self.evt.clear() ftplib.FTP.port = self.port def tearDown(self): self.evt.wait() def server(self, evt, serv): # This method sets the evt 3 times: # 1) when the connection is ready to be accepted. # 2) when it is safe for the caller to close the connection # 3) when we have closed the socket serv.listen(5) # (1) Signal the caller that we are ready to accept the connection. evt.set() try: conn, addr = serv.accept() except socket.timeout: pass else: conn.send("1 Hola mundo\n") # (2) Signal the caller that it is safe to close the socket. evt.set() conn.close() finally: serv.close() # (3) Signal the caller that we are done. 
evt.set() def testTimeoutDefault(self): # default -- use global socket timeout self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(30) try: ftp = ftplib.FTP("localhost") finally: socket.setdefaulttimeout(None) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutNone(self): # no timeout -- do not use global socket timeout self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(30) try: ftp = ftplib.FTP("localhost", timeout=None) finally: socket.setdefaulttimeout(None) self.assertTrue(ftp.sock.gettimeout() is None) self.evt.wait() ftp.close() def testTimeoutValue(self): # a value ftp = ftplib.FTP(HOST, timeout=30) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutConnect(self): ftp = ftplib.FTP() ftp.connect(HOST, timeout=30) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutDifferentOrder(self): ftp = ftplib.FTP(timeout=30) ftp.connect(HOST) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutDirectAccess(self): ftp = ftplib.FTP() ftp.timeout = 30 ftp.connect(HOST) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def test_main(): tests = [TestFTPClass, TestTimeouts] if socket.has_ipv6: try: DummyFTPServer((HOST, 0), af=socket.AF_INET6) except socket.error: pass else: tests.append(TestIPv6Environment) if ssl is not None: tests.extend([TestTLS_FTPClassMixin, TestTLS_FTPClass]) thread_info = test_support.threading_setup() try: test_support.run_unittest(*tests) finally: test_support.threading_cleanup(*thread_info) if __name__ == '__main__': test_main()
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-2.7/Lib/test/test_ftplib.py
Python
mit
25,025
from mongoengine.common import _import_class from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db __all__ = ('switch_db', 'switch_collection', 'no_dereference', 'no_sub_classes', 'query_counter') class switch_db(object): """switch_db alias context manager. Example :: # Register connections register_connection('default', 'mongoenginetest') register_connection('testdb-1', 'mongoenginetest2') class Group(Document): name = StringField() Group(name='test').save() # Saves in the default db with switch_db(Group, 'testdb-1') as Group: Group(name='hello testdb!').save() # Saves in testdb-1 """ def __init__(self, cls, db_alias): """Construct the switch_db context manager :param cls: the class to change the registered db :param db_alias: the name of the specific database to use """ self.cls = cls self.collection = cls._get_collection() self.db_alias = db_alias self.ori_db_alias = cls._meta.get('db_alias', DEFAULT_CONNECTION_NAME) def __enter__(self): """Change the db_alias and clear the cached collection.""" self.cls._meta['db_alias'] = self.db_alias self.cls._collection = None return self.cls def __exit__(self, t, value, traceback): """Reset the db_alias and collection.""" self.cls._meta['db_alias'] = self.ori_db_alias self.cls._collection = self.collection class switch_collection(object): """switch_collection alias context manager. Example :: class Group(Document): name = StringField() Group(name='test').save() # Saves in the default db with switch_collection(Group, 'group1') as Group: Group(name='hello testdb!').save() # Saves in group1 collection """ def __init__(self, cls, collection_name): """Construct the switch_collection context manager. :param cls: the class to change the registered db :param collection_name: the name of the collection to use """ self.cls = cls self.ori_collection = cls._get_collection() self.ori_get_collection_name = cls._get_collection_name self.collection_name = collection_name def __enter__(self): """Change the _get_collection_name and clear the cached collection.""" @classmethod def _get_collection_name(cls): return self.collection_name self.cls._get_collection_name = _get_collection_name self.cls._collection = None return self.cls def __exit__(self, t, value, traceback): """Reset the collection.""" self.cls._collection = self.ori_collection self.cls._get_collection_name = self.ori_get_collection_name class no_dereference(object): """no_dereference context manager. Turns off all dereferencing in Documents for the duration of the context manager:: with no_dereference(Group) as Group: Group.objects.find() """ def __init__(self, cls): """Construct the no_dereference context manager. :param cls: the class to turn dereferencing off on """ self.cls = cls ReferenceField = _import_class('ReferenceField') GenericReferenceField = _import_class('GenericReferenceField') ComplexBaseField = _import_class('ComplexBaseField') self.deref_fields = [k for k, v in self.cls._fields.iteritems() if isinstance(v, (ReferenceField, GenericReferenceField, ComplexBaseField))] def __enter__(self): """Change the objects default and _auto_dereference values.""" for field in self.deref_fields: self.cls._fields[field]._auto_dereference = False return self.cls def __exit__(self, t, value, traceback): """Reset the default and _auto_dereference values.""" for field in self.deref_fields: self.cls._fields[field]._auto_dereference = True return self.cls class no_sub_classes(object): """no_sub_classes context manager. 
Only returns instances of this class and no sub (inherited) classes:: with no_sub_classes(Group) as Group: Group.objects.find() """ def __init__(self, cls): """Construct the no_sub_classes context manager. :param cls: the class to turn querying sub classes on """ self.cls = cls def __enter__(self): """Change the objects default and _auto_dereference values.""" self.cls._all_subclasses = self.cls._subclasses self.cls._subclasses = (self.cls,) return self.cls def __exit__(self, t, value, traceback): """Reset the default and _auto_dereference values.""" self.cls._subclasses = self.cls._all_subclasses delattr(self.cls, '_all_subclasses') return self.cls class query_counter(object): """Query_counter context manager to get the number of queries.""" def __init__(self): """Construct the query_counter.""" self.counter = 0 self.db = get_db() def __enter__(self): """On every with block we need to drop the profile collection.""" self.db.set_profiling_level(0) self.db.system.profile.drop() self.db.set_profiling_level(2) return self def __exit__(self, t, value, traceback): """Reset the profiling level.""" self.db.set_profiling_level(0) def __eq__(self, value): """== Compare querycounter.""" counter = self._get_count() return value == counter def __ne__(self, value): """!= Compare querycounter.""" return not self.__eq__(value) def __lt__(self, value): """< Compare querycounter.""" return self._get_count() < value def __le__(self, value): """<= Compare querycounter.""" return self._get_count() <= value def __gt__(self, value): """> Compare querycounter.""" return self._get_count() > value def __ge__(self, value): """>= Compare querycounter.""" return self._get_count() >= value def __int__(self): """int representation.""" return self._get_count() def __repr__(self): """repr query_counter as the number of queries.""" return u"%s" % self._get_count() def _get_count(self): """Get the number of queries.""" ignore_query = {'ns': {'$ne': '%s.system.indexes' % self.db.name}} count = self.db.system.profile.find(ignore_query).count() - self.counter self.counter += 1 return count
MakerReduxCorp/mongoengine
mongoengine/context_managers.py
Python
mit
6,830
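A minimal usage sketch for the `query_counter` context manager defined above. It assumes a running MongoDB instance, a registered default connection, and a simple `Group` document; all three are assumptions for illustration and not part of this module:

from mongoengine import Document, StringField, connect
from mongoengine.context_managers import query_counter

connect('mongoenginetest')  # assumed test database


class Group(Document):
    name = StringField()


with query_counter() as q:
    Group(name='a').save()    # one insert
    Group.objects.first()     # one find
    print(int(q))             # number of profiled operations seen so far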
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Convenience wrapper for starting an appengine tool.""" import os import sys if not hasattr(sys, 'version_info'): sys.stderr.write('Very old versions of Python are not supported. Please ' 'use version 2.7.\n') sys.exit(1) version_tuple = tuple(sys.version_info[:2]) if version_tuple < (2, 7): sys.stderr.write('Error: Python %d.%d is not supported. Please use ' 'version 2.7.\n' % version_tuple) sys.exit(1) def _get_dir_path(sibling): """Get a path to the directory of this script. By default, the canonical path (symlinks resolved) will be returned. In some environments the canonical directory is not sufficient because different parts of the SDK are referenced by symlinks, including this very module's file. In this case, the non-canonical path to this file's directory will be returned (i.e., the directory where the symlink lives, not the directory where it points). Args: sibling: Relative path to a sibiling of this module file. Choose a sibling that is potentially symlinked into the parent directory. Returns: A directory name. Raises: ValueError: If no proper path could be determined. """ if 'GAE_SDK_ROOT' in os.environ: gae_sdk_root = os.path.abspath(os.environ['GAE_SDK_ROOT']) os.environ['GAE_SDK_ROOT'] = gae_sdk_root for dir_path in [gae_sdk_root, os.path.join(gae_sdk_root, 'google_appengine')]: if os.path.exists(os.path.join(dir_path, sibling)): return dir_path raise ValueError('GAE_SDK_ROOT %r does not refer to a valid SDK ' 'directory' % gae_sdk_root) else: py_file = __file__.replace('.pyc', '.py') dir_paths = [os.path.abspath(os.path.dirname(os.path.realpath(py_file))), os.path.abspath(os.path.dirname(py_file))] for dir_path in dir_paths: sibling_path = os.path.join(dir_path, sibling) if os.path.exists(sibling_path): return dir_path raise ValueError('Could not determine SDK root; please set GAE_SDK_ROOT ' 'environment variable.') _DIR_PATH = _get_dir_path(os.path.join('lib', 'ipaddr')) _SCRIPT_DIR = os.path.join(_DIR_PATH, 'google', 'appengine', 'tools') _DEVAPPSERVER2_DIR = os.path.join( _DIR_PATH, 'google', 'appengine', 'tools', 'devappserver2') _PHP_RUNTIME_DIR = os.path.join(_DEVAPPSERVER2_DIR, 'php') _PYTHON_RUNTIME_DIR = os.path.join(_DEVAPPSERVER2_DIR, 'python') _STUB_DEPENDENCIES = [ os.path.join(_DIR_PATH, 'lib', 'antlr3'), os.path.join(_DIR_PATH, 'lib', 'fancy_urllib'), os.path.join(_DIR_PATH, 'lib', 'ipaddr'), os.path.join(_DIR_PATH, 'lib', 'yaml-3.10'), os.path.join(_DIR_PATH, 'lib', 'rsa'), os.path.join(_DIR_PATH, 'lib', 'pyasn1'), os.path.join(_DIR_PATH, 'lib', 'pyasn1_modules'), ] EXTRA_PATHS = _STUB_DEPENDENCIES + [ _DIR_PATH, os.path.join(_DIR_PATH, 'lib', 'simplejson'), os.path.join(_DIR_PATH, 'lib', 'django-1.4'), os.path.join(_DIR_PATH, 'lib', 'endpoints-1.0'), os.path.join(_DIR_PATH, 'lib', 'jinja2-2.6'), os.path.join(_DIR_PATH, 'lib', 'protorpc-1.0'), os.path.join(_DIR_PATH, 'lib', 'PyAMF-0.6.1'), os.path.join(_DIR_PATH, 'lib', 'markupsafe-0.15'), os.path.join(_DIR_PATH, 'lib', 
'webob-1.2.3'), os.path.join(_DIR_PATH, 'lib', 'webapp2-2.5.2'), ] _DEVAPPSERVER2_PATHS = _STUB_DEPENDENCIES + [ _DIR_PATH, os.path.join(_DIR_PATH, 'lib', 'concurrent'), os.path.join(_DIR_PATH, 'lib', 'cherrypy'), os.path.join(_DIR_PATH, 'lib', 'jinja2-2.6'), os.path.join(_DIR_PATH, 'lib', 'webob-1.2.3'), os.path.join(_DIR_PATH, 'lib', 'webapp2-2.5.1'), ] _PHP_RUNTIME_PATHS = [ _DIR_PATH, os.path.join(_DIR_PATH, 'lib', 'concurrent'), os.path.join(_DIR_PATH, 'lib', 'cherrypy'), os.path.join(_DIR_PATH, 'lib', 'yaml-3.10'), ] _PYTHON_RUNTIME_PATHS = [ _DIR_PATH, os.path.join(_DIR_PATH, 'lib', 'concurrent'), os.path.join(_DIR_PATH, 'lib', 'cherrypy'), os.path.join(_DIR_PATH, 'lib', 'fancy_urllib'), os.path.join(_DIR_PATH, 'lib', 'protorpc-1.0'), os.path.join(_DIR_PATH, 'lib', 'yaml-3.10'), ] _BOOTSTAP_NAME_TO_REAL_NAME = { 'dev_appserver.py': 'devappserver2.py', '_php_runtime.py': 'runtime.py', '_python_runtime.py': 'runtime.py', } _SCRIPT_TO_DIR = { 'dev_appserver.py': _DEVAPPSERVER2_DIR, '_php_runtime.py': _PHP_RUNTIME_DIR, '_python_runtime.py': _PYTHON_RUNTIME_DIR, } _SYS_PATH_ADDITIONS = { 'dev_appserver.py': _DEVAPPSERVER2_PATHS, '_php_runtime.py': _PHP_RUNTIME_PATHS, '_python_runtime.py': _PYTHON_RUNTIME_PATHS, } def fix_sys_path(extra_extra_paths=()): """Fix the sys.path to include our extra paths. fix_sys_path should be called before running testbed-based unit tests so that third-party modules are correctly added to sys.path. """ sys.path[1:1] = EXTRA_PATHS def _run_file(file_path, globals_, script_dir=_SCRIPT_DIR): """Execute the file at the specified path with the passed-in globals.""" script_name = os.path.basename(file_path) sys.path = _SYS_PATH_ADDITIONS[script_name] + sys.path if 'google' in sys.modules: del sys.modules['google'] script_dir = _SCRIPT_TO_DIR.get(script_name, script_dir) script_name = _BOOTSTAP_NAME_TO_REAL_NAME.get(script_name, script_name) script_path = os.path.join(script_dir, script_name) execfile(script_path, globals_) if __name__ == '__main__': _run_file(__file__, globals())
pigeonflight/strider-plone
docker/appengine/dev_appserver.py
Python
mit
6,126
import logging import networkx from django.db import transaction from django.utils import timezone from regparser.web.index.models import Entry as DBEntry from regparser.web.index.models import Dependency, DependencyNode logger = logging.getLogger(__name__) class Missing(Exception): def __init__(self, key, dependency): super(Missing, self).__init__( "Missing dependency. {0} is needed for {1}".format( dependency, key)) self.dependency = dependency self.key = key class Graph(object): """Track dependencies between input and output files, storing them in `dependencies.gml` for later retrieval. This lets us know that an output with dependencies needs to be updated if those dependencies have been updated""" def __init__(self): self.deserialize() self.rebuild() @transaction.atomic def serialize(self): """Convert the in-memory self._graph into db records""" Dependency.objects.all().delete() existing = {node.label for node in DependencyNode.objects.all()} new_vertices = set(self._graph.nodes()) - existing DependencyNode.objects.bulk_create( DependencyNode(label=label) for label in new_vertices) Dependency.objects.bulk_create( Dependency(depender_id=depender, target_id=target) for (depender, target) in self._graph.edges()) @transaction.atomic def deserialize(self): """Convert db records into the in-memory self._graph""" self._graph = networkx.DiGraph() self._graph.add_nodes_from( n.label for n in DependencyNode.objects.all()) self._graph.add_edges_from( (e.depender_id, e.target_id) for e in Dependency.objects.all()) def add(self, output_entry, input_entry): """Add a dependency where output tuple relies on input_tuple""" self._graph.add_edge(str(input_entry), str(output_entry)) self.rebuild() self.serialize() # @todo: make this incremental def __contains__(self, key): """Does the graph contain a particular node?""" return str(key) in self._graph def node(self, filename): """Get node attributes for a specific filename. If the node isn't present, create it""" filename = str(filename) if filename not in self._graph: self._graph.add_node(filename) return self._graph.node[filename] def dependencies(self, filename): """What does other nodes does this filename *directly* depend on?""" filename = str(filename) if filename in self._graph: return self._graph.predecessors(filename) else: return [] def rebuild(self): """Scan the modification times of all the nodes in the graph to determine what's been updated. We mark nodes "stale" if one of their dependencies has been updated since the depending node was built. Use topological sort to make sure we process dependencies first.""" for node in networkx.topological_sort(self._graph): entry = DBEntry.objects.filter(label_id=node).first() if entry: modtime = entry.modified stale = '' else: modtime = timezone.now() stale = node # Check immediate dependencies (which were updated in a previous # step) for dependency in self.dependencies(node): if self.node(dependency)['modtime'] > modtime: stale = dependency else: stale = self.node(dependency)['stale'] or stale self.node(node).update(modtime=modtime, stale=stale) def validate_for(self, entry): """Raise an exception if a particular output has stale dependencies""" key = str(entry) logger.debug("Validating dependencies for %r", key) for dependency in self.dependencies(key): if self.node(dependency).get('stale'): raise Missing(key, self.node(dependency)['stale']) def is_stale(self, entry): """Determine if a file needs to be rebuilt""" return bool(self.node(str(entry)).get('stale'))
tadhg-ohiggins/regulations-parser
regparser/index/dependency.py
Python
cc0-1.0
4,316
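The `rebuild` method above walks the dependency graph in topological order and propagates a "stale" marker from any dependency that is newer than its dependents. A reduced illustration of the same idea with plain networkx and integer timestamps (no Django models involved; the node names and times are invented):

import networkx

g = networkx.DiGraph()
# edge direction: dependency -> depender, as in Graph.add() above
g.add_edge('tree/1001', 'layer/1001')   # layer is built from the tree
g.add_edge('layer/1001', 'diff/1001')   # diff is built from the layer

modtime = {'tree/1001': 30, 'layer/1001': 10, 'diff/1001': 20}  # fake clock

stale = {}
for node in networkx.topological_sort(g):
    stale[node] = ''
    for dep in g.predecessors(node):
        if modtime[dep] > modtime[node] or stale[dep]:
            stale[node] = stale[dep] or dep

print(stale)  # layer and diff are stale because the tree is newer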
import unittest

from PyFoam.Applications.PlotWatcher import PlotWatcher

theSuite=unittest.TestSuite()
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam
unittests/Applications/test_PlotWatcher.py
Python
gpl-2.0
104
import os

cluster_path = '/scratch/users/ctsai89/CHE444/2016/cluster_fixed_benchmark'

systems = open('systems.txt').readlines()

os.system('ssh-keygen -R sherlock.stanford.edu')

for system in systems:
    try:
        elements = system.strip()
        os.system('scp ctsai89@sherlock.stanford.edu:'+cluster_path+'/Fixed_opt_lattice/'+elements+'.traj .')
    except:
        print system+' not optimized'
brohr/brohr.github.io
Fixed_Lattice_Clusters/download_structures.py
Python
gpl-2.0
405
"""Manual test for figure.show() in the inline matplotlib backend. This script should be loaded for interactive use (via %load) into a qtconsole or notebook initialized with the inline backend. Expected behavior: only *one* copy of the figure is shown. For further details: https://github.com/ipython/ipython/issues/1612 https://github.com/matplotlib/matplotlib/issues/835 """ import numpy as np import matplotlib.pyplot as plt plt.ioff() x = np.random.uniform(-5, 5, size=(100)) y = np.random.uniform(-5, 5, size=(100)) f = plt.figure() plt.scatter(x, y) plt.plot(y) f.show()
pacoqueen/ginn
extra/install/ipython2/ipython-5.10.0/tools/tests/inline_figshow.py
Python
gpl-2.0
583
# Embedded file name: /usr/lib/enigma2/python/Components/Converter/ODRefString.py
from Components.Converter.Converter import Converter
from Components.Element import cached
from Screens.InfoBar import InfoBar

class ODRefString(Converter, object):
    CURRENT = 0
    EVENT = 1

    def __init__(self, type):
        Converter.__init__(self, type)
        self.CHANSEL = None
        self.type = {'CurrentRef': self.CURRENT,
         'ServicelistRef': self.EVENT}[type]
        return

    @cached
    def getText(self):
        if self.type == self.EVENT:
            antw = str(self.source.service.toString())
            if antw[:6] == '1:7:0:':
                teilantw = antw.split('ORDER BY name:')
                if len(teilantw) > 1:
                    teil2antw = teilantw[1].split()
                    if len(teil2antw) > 0:
                        return teil2antw[0]
            elif antw[:6] == '1:7:1:':
                teilantw = antw.split('.')
                if len(teilantw) > 1:
                    return teilantw[1]
            return antw
        elif self.type == self.CURRENT:
            if self.CHANSEL == None:
                self.CHANSEL = InfoBar.instance.servicelist
            if len(InfoBar.instance.session.dialog_stack) > 1:
                for zz in InfoBar.instance.session.dialog_stack:
                    if str(zz[0]) == "<class 'Screens.MovieSelection.MovieSelection'>" or str(InfoBar.instance.session.dialog_stack[1][0]) == "<class 'Screens.InfoBar.MoviePlayer'>":
                        return self.source.text
            vSrv = self.CHANSEL.servicelist.getCurrent()
            return str(vSrv.toString())
        else:
            return 'na'
        return

    text = property(getText)
trunca/enigma2
lib/python/Components/Converter/ODRefString.py
Python
gpl-2.0
1,792
# This file is part of Rubber and thus covered by the GPL
# (c) Emmanuel Beffara, 2003--2006
"""
Dependency analysis and environment parsing for package 'listings' in Rubber.
"""

def setup (document, context):
    global doc
    doc = document
    doc.hook_macro('lstinputlisting', 'oa', hook_input)
    doc.hook_macro('lstnewenvironment', 'a', hook_newenvironment)
    doc.hook_begin('lstlisting',
        lambda loc: doc.h_begin_verbatim(loc, env='lstlisting'))

def hook_input (loc, opt, file):
    if file.find('\\') < 0 and file.find('#') < 0:
        doc.add_source(file)

def hook_newenvironment (loc, name):
    doc.hook_begin(name,
        lambda loc: doc.h_begin_verbatim(loc, env=name))
sre/rubber
src/latex_modules/listings.py
Python
gpl-2.0
660
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2013, 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ Test unit for the miscutil/datastructures module. """ from operator import delitem, setitem from werkzeug.datastructures import MultiDict from invenio_testing import InvenioTestCase from invenio_utils.datastructures import DotableDict, LaziestDict, LazyDict, \ SmartDict, flatten_multidict class CallCounter(object): """Counts number of calls.""" def __init__(self, populate): self.counter = 0 self.populate = populate def __call__(self, *args, **kwargs): self.counter = self.counter + 1 return self.populate(*args, **kwargs) class TestLazyDictionaries(InvenioTestCase): """ Lazy dictionaries TestSuite. """ def test___setitem(self): lazy_dict = LazyDict() lazy_dict.__setitem__('foo', 'bar') lazy_dict.__setitem__('foo2', 'bar2') lazy_dict.__setitem__('foo3', 'bar3') self.assertEqual(lazy_dict['foo'], 'bar') self.assertEqual(lazy_dict['foo3'], 'bar3') self.assertEqual(lazy_dict['foo2'], 'bar2') def testa___delitem(self): populate = CallCounter(lambda: {'foo': 'bar', 1: 11, 'empty': None}) lazy_dict = LazyDict(populate) self.assertEqual(lazy_dict['foo'], 'bar') del lazy_dict['foo'] self.assertRaises(KeyError, lambda: lazy_dict['foo']) def test___delitem_on_empty_dict(self): lazy_dict = LazyDict() self.assertRaises(KeyError, delitem, lazy_dict, "foo") def test___getattr_on_empty_dict(self): lazy_dict = LazyDict() self.assertRaises(AttributeError, lambda: lazy_dict.fooattr) def test___iter(self): populate = CallCounter(lambda: {'foo': 'bar', 1: 11, 'empty': None}) lazy_dict = LazyDict(populate) iterator = iter(lazy_dict) self.assertEqual(iterator.next(), 1) self.assertEqual(iterator.next(), 'foo') self.assertEqual(iterator.next(), 'empty') self.assertRaises(StopIteration, iterator.next) def test_iteritems(self): populate = CallCounter(lambda: {'foo': 'bar', 1: 11, 'empty': None}) lazy_dict = LazyDict(populate) iterator = lazy_dict.iteritems() k, v = iterator.next() self.assertEqual(k, 1) self.assertEqual(v, 11) k, v = iterator.next() self.assertEqual(k, 'foo') self.assertEqual(v, 'bar') k, v = iterator.next() self.assertEqual(k, 'empty') self.assertEqual(v, None) self.assertRaises(StopIteration, iterator.next) def test_iterkeys(self): populate = CallCounter(lambda: {'foo': 'bar', 1: 11, 'empty': None}) lazy_dict = LazyDict(populate) iterator = lazy_dict.iterkeys() self.assertEqual(iterator.next(), 1) self.assertEqual(iterator.next(), 'foo') self.assertEqual(iterator.next(), 'empty') self.assertRaises(StopIteration, iterator.next) def test_itervalues(self): populate = CallCounter(lambda: {'foo': 'bar', 1: 11, 'empty': None}) lazy_dict = LazyDict(populate) iterator = lazy_dict.itervalues() self.assertEqual(iterator.next(), 11) self.assertEqual(iterator.next(), 'bar') self.assertEqual(iterator.next(), None) 
self.assertRaises(StopIteration, iterator.next) def test_lazy_dictionary(self): """Checks content of lazy dictionary and number of evaluations.""" populate = CallCounter(lambda: {'foo': 'bar', 1: 11, 'empty': None}) lazy_dict = LazyDict(populate) self.assertEqual(populate.counter, 0) self.assertEqual(lazy_dict['foo'], 'bar') self.assertEqual(populate.counter, 1) self.assertEqual(lazy_dict[1], 11) self.assertEqual(populate.counter, 1) self.assertEqual(lazy_dict['empty'], None) self.assertEqual(populate.counter, 1) # clear the cache lazy_dict.expunge() self.assertEqual(lazy_dict['foo'], 'bar') self.assertEqual(populate.counter, 2) del lazy_dict['foo'] self.assertEqual(populate.counter, 2) assert 'foo' not in lazy_dict def test_laziest_dictionary(self): populate = CallCounter( lambda k: {'foo': 'bar', 1: 11, 'empty': None}[k] ) laziest_dict = LaziestDict(populate) self.assertEqual(populate.counter, 0) self.assertEqual(laziest_dict['foo'], 'bar') self.assertEqual(laziest_dict.keys(), ['foo']) self.assertEqual(populate.counter, 1) self.assertEqual(laziest_dict[1], 11) self.assertEqual(laziest_dict.keys(), [1, 'foo']) self.assertEqual(populate.counter, 2) self.assertEqual(laziest_dict['empty'], None) self.assertEqual(laziest_dict.keys(), [1, 'foo', 'empty']) self.assertEqual(populate.counter, 3) # cached result will not cause new call self.assertEqual(laziest_dict['foo'], 'bar') self.assertEqual(populate.counter, 3) # not existing key cause new call (even multiple times) self.assertEqual(laziest_dict.get('does not exists', -1), -1) self.assertEqual(populate.counter, 4) self.assertEqual(laziest_dict.get('does not exists'), None) self.assertEqual(populate.counter, 5) def test_laziest__contains(self): populate = CallCounter( lambda k: {'foo': 'bar', 1: 11, 'empty': None}[k] ) laziest_dict = LaziestDict(populate) self.assertTrue('foo' in laziest_dict) self.assertFalse('foo2' in laziest_dict) laziest_dict2 = LaziestDict() self.assertFalse('foo2' in laziest_dict2) class TestSmartDict(InvenioTestCase): """ Smart Dictionary TestSuite """ def test_smart_dict(self): d = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] self.assertEqual(d.keys(), ['a', 'foo']) self.assertTrue('foo.a' in d) del d['foo'] self.assertEqual(d.keys(), ['a']) self.assertEqual(d['a'], [{'b': 1}, {'b': 2}, {'b': 3}]) self.assertEqual(d['a[0]'], {'b': 1}) self.assertEqual(d['a.b'], [1, 2, 3]) self.assertEqual(d['a[1:]'], [{'b': 2}, {'b': 3}]) d.set('a', {'b': 4}, extend=True) self.assertEqual(d['a'], [{'b': 1}, {'b': 2}, {'b': 3}, {'b': 4}]) d.set('a', [{'b': 1}, {'b': 2}, {'b': 3}], extend=False) self.assertEqual(d['a'], [{'b': 1}, {'b': 2}, {'b': 3}]) self.assertEqual(d.get('does not exists'), None) d = SmartDict() d.set('a.b.c[n]', 'foo', True) self.assertEqual(d['a.b.c'], ['foo']) d.set('a.b.c[n]', 'bar', True) self.assertEqual(d['a.b.c'], ['foo', 'bar']) d.set('a.b.c', ['foo'], False) self.assertEqual(d['a.b.c'], ['foo']) def test_smart__contains(self): d = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] self.assertFalse('.' 
in d) self.assertFalse('[' in d) def test_smart_insert_special_chars(self): d = SmartDict({'a': 'world', 'b': 'hello'}) self.assertRaises(KeyError, setitem, d, ".", "dot") self.assertRaises(KeyError, setitem, d, "[", "open bracket") self.assertRaises(KeyError, setitem, d, "]", "close bracket") def test_smart_iter(self): d = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] iterator = iter(d) self.assertEqual(iterator.next(), 'a') self.assertEqual(iterator.next(), 'foo') self.assertRaises(StopIteration, iterator.next) def test_smart_items(self): d = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] d['c.d'] = [{'e': 4}, {'f': 5}] self.assertTrue(d.items(), [('a', [{'b': 1}, {'b': 2}, {'b': 3}]), ('c', {'d': [{'e': 4}, {'f': 5}]}), ('foo', {'a': 'world', 'b': 'hello'})]) def test_smart_iteritems(self): d = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['c.d'] = [{'e': 4}, {'f': 5}] d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] iterator = d.iteritems() k, v = iterator.next() self.assertEqual(k, 'a') self.assertEqual(v, [{'b': 1}, {'b': 2}, {'b': 3}]) k, v = iterator.next() self.assertEqual(k, 'c') self.assertEqual(v, {'d': [{'e': 4}, {'f': 5}]}) k, v = iterator.next() self.assertEqual(k, 'foo') self.assertEqual(v, {'a': 'world', 'b': 'hello'}) self.assertRaises(StopIteration, iterator.next) def test_smart_iterkeys(self): d = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['c.d'] = [{'e': 4}, {'f': 5}] d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] iterator = d.iterkeys() self.assertEqual(iterator.next(), 'a') self.assertEqual(iterator.next(), 'c') self.assertEqual(iterator.next(), 'foo') self.assertRaises(StopIteration, iterator.next) def test_smart_itervalues(self): d = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['c.d'] = [{'e': 4}, {'f': 5}] d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] iterator = d.itervalues() self.assertEqual(iterator.next(), [{'b': 1}, {'b': 2}, {'b': 3}]) self.assertEqual(iterator.next(), {'d': [{'e': 4}, {'f': 5}]}) self.assertEqual(iterator.next(), {'a': 'world', 'b': 'hello'}) self.assertRaises(StopIteration, iterator.next) def test_smart_has_key(self): d = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['c.d'] = [{'e': 4}, {'f': 5}] d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] self.assertTrue('c' in d) self.assertFalse('v' in d) self.assertTrue('c.d' in d) self.assertTrue('foo.b' in d) def test_smart_repr(self): d = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['c.d'] = [{'e': 4}, {'f': 5}] d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] self.assertEqual(repr(d), "{'a': [{'b': 1}, {'b': 2}, {'b': 3}], " + "'c': {'d': [{'e': 4}, {'f': 5}]}, " + "'foo': {'a': 'world', 'b': 'hello'}}") del d['c'] self.assertEqual(repr(d), "{'a': [{'b': 1}, {'b': 2}, {'b': 3}], " + "'foo': {'a': 'world', 'b': 'hello'}}") def test_smart_update(self): d = SmartDict() d2 = SmartDict() d['foo'] = {'a': 'world', 'b': 'hello'} d['c.d'] = [{'e': 4}, {'f': 5}] d['a'] = [{'b': 1}, {'b': 2}, {'b': 3}] d2['qwerty'] = {'t': 'u', 'i': 'o'} d2['qwe.rty'] = [{'n': 34}, {'x': 3}] d.update(d2) d3 = SmartDict({'a': [{'b': 1}, {'b': 2}, {'b': 3}], 'c': {'d': [{'e': 4}, {'f': 5}]}, 'foo': {'a': 'world', 'b': 'hello'}, 'qwe': {'rty': [{'n': 34}, {'x': 3}]}, 'qwerty': {'i': 'o', 't': 'u'}}) self.assertTrue(d == d3) class TestDotableDict(InvenioTestCase): def test_get_attr(self): dotable = DotableDict({'a': [{'b': 3, 'c': 5}]}) self.assertEqual(dotable.a, [{'b': 3, 'c': 5}]) def test_set_attr(self): dotable = 
DotableDict({'a': [{'b': 3, 'c': 5}]}) dotable.d = 42 self.assertEqual(dotable.d, 42) class TestFlattenMultict(InvenioTestCase): def test_flatten_multidict(self): d = MultiDict({'a': 3, 'b': {'c': 5}}) d2 = flatten_multidict(d) self.assertEqual(d2, {'a': 3, 'b': {'c': 5}})
SamiHiltunen/invenio-utils
tests/test_utils_datastructures.py
Python
gpl-2.0
12,697
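The tests in the record above exercise dictionaries that defer building their contents until first access. A minimal, self-contained sketch of that lazy-population idea; this is an illustration of the pattern, not invenio's actual LazyDict implementation:

class MiniLazyDict(dict):
    """Populate the dict from a callable only on first access."""

    def __init__(self, populate):
        super(MiniLazyDict, self).__init__()
        self._populate = populate
        self._loaded = False

    def _ensure(self):
        # run the populate callable exactly once, on demand
        if not self._loaded:
            self.update(self._populate())
            self._loaded = True

    def __getitem__(self, key):
        self._ensure()
        return super(MiniLazyDict, self).__getitem__(key)


calls = []
d = MiniLazyDict(lambda: calls.append(1) or {'foo': 'bar'})
assert calls == []          # nothing evaluated yet
assert d['foo'] == 'bar'    # first access triggers populate()
assert calls == [1]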
from translate.lang import factory


def test_punctranslate():
    """Tests that we can translate punctuation."""
    language = factory.getlanguage("ar")
    assert language.punctranslate("") == ""
    assert language.punctranslate("abc efg") == "abc efg"
    assert language.punctranslate("abc efg.") == "abc efg."
    assert language.punctranslate("abc, efg; d?") == "abc، efg؛ d؟"
    # See https://github.com/translate/translate/issues/1819
    assert language.punctranslate("It is called “abc”") == "It is called ”abc“"


def test_sentences():
    """Tests basic functionality of sentence segmentation."""
    language = factory.getlanguage("ar")
    sentences = language.sentences("")
    assert sentences == []

    sentences = language.sentences('يوجد بالفعل مجلد بالإسم "%s". أترغب في استبداله؟')
    print(sentences)
    assert sentences == ['يوجد بالفعل مجلد بالإسم "%s".', "أترغب في استبداله؟"]

    # This probably doesn't make sense: it is just the above reversed, to make sure
    # we test the '؟' as an end of sentence marker.
    sentences = language.sentences('أترغب في استبداله؟ يوجد بالفعل مجلد بالإسم "%s".')
    print(sentences)
    assert sentences == ["أترغب في استبداله؟", 'يوجد بالفعل مجلد بالإسم "%s".']
translate/translate
translate/lang/test_ar.py
Python
gpl-2.0
1,388
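The assertions in the record above rely on a per-language punctuation mapping. A rough Python 3 sketch of that kind of mapping with plain `str.translate`; the table below covers only the three characters exercised by the test and is not the toolkit's real implementation:

# map Latin punctuation to the Arabic comma, semicolon and question mark
ARABIC_PUNCT = {ord(','): '،', ord(';'): '؛', ord('?'): '؟'}

def naive_punctranslate(text):
    return text.translate(ARABIC_PUNCT)

print(naive_punctranslate("abc, efg; d?"))  # abc، efg؛ d؟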
# Very, very naive RE-based way for collecting declarations inside # 'cdef extern from *' Cython blocks in in source files, and next # generate compatibility headers for MPI-2 partially implemented or # built, or MPI-1 implementations, perhaps providing a subset of MPI-2 from textwrap import dedent try: import mpiregexes as Re except ImportError: from conf import mpiregexes as Re class Node(object): REGEX = None def match(self, line): m = self.REGEX.search(line) if m: return m.groups() match = classmethod(match) CONFIG = None HEADER = None HEADER_HEAD = """\ #ifdef PyMPI_MISSING_%(name)s #undef %(cname)s """ HEADER_TAIL = """ #endif """ def init(self, name, **kargs): assert name is not None self.name = name self.__dict__.update(kargs) def config(self): return self.CONFIG % vars(self) def header(self): head = dedent(self.HEADER_HEAD) body = dedent(self.HEADER) tail = dedent(self.HEADER_TAIL) return (head+body+tail) % vars(self) class NodeType(Node): CONFIG = ('%(ctype)s v;\n' '%(ctype)s *p = &v; *p=v;') def __init__(self, ctype): self.init(name=ctype, cname=ctype, ctype=ctype,) class NodeStruct(NodeType): REGEX = Re.STRUCT_TYPE HEADER = """\ typedef struct PyMPI_%(ctype)s { %(cfields)s } PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" def __init__(self, ctype, cfields): super(NodeStruct, self).__init__(ctype) self.cfields = '\n'.join([' %s %s;' % field for field in cfields]) class NodeFuncType(NodeType): HEADER = dedent("""\ typedef %(crett)s (PyMPI_%(cname)s)(%(cargs)s); #define %(cname)s PyMPI_%(cname)s""") def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, cname=cname, ctype=cname+'*',) self.crett = crett self.cargs = cargs or 'void' if calias is not None: self.HEADER = '#define %(cname)s %(calias)s' self.calias = calias class NodeValue(Node): CONFIG = ('%(ctype)s v; v = %(cname)s;\n' '%(ctype)s *p = &v; *p = %(cname)s;') HEADER = '#define %(cname)s (%(calias)s)' def __init__(self, ctype, cname, calias): self.init(name=cname, cname=cname, ctype=ctype, calias=calias) def ctypefix(ct): ct = ct.strip() ct = ct.replace('[][3]',' (*)[3]') ct = ct.replace('[]','*') return ct class NodeFuncProto(Node): CONFIG = '%(crett)s v; v = %(cname)s(%(cargscall)s); if(v)v=(%(crett)s)0;' HEADER = ' '. 
join(['#define %(cname)s(%(cargsnamed)s)', 'PyMPI_UNAVAILABLE("%(name)s"%(comma)s%(cargsnamed)s)']) def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, cname=cname) self.crett = crett if cargs == 'void': cargs = '' if cargs: cargs = cargs.split(',') if cargs[-1].strip() == '...': del cargs[-1] else: cargs = [] self.cargstype = cargs nargs = len(cargs) if nargs: self.comma = ',' else: self.comma = '' cargscall = ['(%s)0' % ctypefix(a) for a in cargs] self.cargscall = ','.join(cargscall) cargsnamed = ['a%d' % (a+1) for a in range(nargs)] self.cargsnamed = ','.join(cargsnamed) if calias is not None: self.HEADER = '#define %(cname)s %(calias)s' self.calias = calias class IntegralType(NodeType): REGEX = Re.INTEGRAL_TYPE HEADER = dedent("""\ typedef long PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""") class OpaqueType(NodeType): REGEX = Re.OPAQUE_TYPE HEADER = dedent("""\ typedef void *PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""") class StructType(NodeStruct): def __init__(self, ctype): cnames = ['MPI_SOURCE', 'MPI_TAG', 'MPI_ERROR'] cfields = list(zip(['int']*3, cnames)) super(StructType, self).__init__(ctype, cfields) class FunctionType(NodeFuncType): REGEX = Re.FUNCTION_TYPE class EnumValue(NodeValue): REGEX = Re.ENUM_VALUE def __init__(self, cname, calias): self.init(name=cname, cname=cname, ctype='int', calias=calias) class HandleValue(NodeValue): REGEX = Re.HANDLE_VALUE HEADER = '#define %(cname)s ((%(ctype)s)%(calias)s)' #def __init__(self, *a, **k): # NodeValue.__init__(self, *a, **k) # print self.__dict__ # if self.cname.endswith('_NULL'): # self.HEADER = '#define %(cname)s ((%(ctype)s)%(calias)s)' class BasicValuePtr(NodeValue): REGEX = Re.BASICP_VALUE HEADER = '#define %(cname)s ((%(ctype)s)%(calias)s)' class StructValuePtr(NodeValue): REGEX = Re.STRUCTP_VALUE class FunctionValuePtr(NodeValue): REGEX = Re.FUNCTP_VALUE class FunctionProto(NodeFuncProto): REGEX = Re.FUNCTION_PROTO class FIntType(NodeType): REGEX = Re.FINT_TYPE HEADER = dedent("""\ typedef int PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""") class FIntValuePtr(BasicValuePtr): REGEX = Re.FINTP_VALUE class FunctionC2F(NodeFuncProto): REGEX = Re.FUNCTION_C2F HEADER = ' '. join(['#define %(cname)s(%(cargsnamed)s)', '((%(crett)s)0)']) class FunctionF2C(NodeFuncProto): REGEX = Re.FUNCTION_F2C HEADER = ' '. 
join(['#define %(cname)s(%(cargsnamed)s)', '%(cretv)s']) def __init__(self, *a, **k): NodeFuncProto.__init__(self, *a, **k) self.cretv = self.crett.upper() + '_NULL' class Scanner(object): NODE_TYPES = [ FIntType, FIntValuePtr, FunctionC2F, FunctionF2C, IntegralType, StructType, OpaqueType, HandleValue, EnumValue, BasicValuePtr, StructValuePtr, FunctionType, FunctionValuePtr, FunctionProto, ] def __init__(self): self.nodes = [] self.nodemap = {} def parse_file(self, filename): fileobj = open(filename) try: self.parse_lines(fileobj) finally: fileobj.close() def parse_lines(self, lines): for line in lines: self.parse_line(line) def parse_line(self, line): nodemap = self.nodemap nodelist = self.nodes for nodetype in self.NODE_TYPES: args = nodetype.match(line) if args: node = nodetype(*args) assert node.name not in nodemap, node.name nodemap[node.name] = len(nodelist) nodelist.append(node) break def __iter__(self): return iter(self.nodes) def itertests(self): for node in self: yield (node.name, node.config()) CONFIG_HEAD = """\ #ifndef PyMPI_CONFIG_H #define PyMPI_CONFIG_H """ CONFIG_MACRO = '#define PyMPI_MISSING_%s 1\n' CONFIG_TAIL = """\ #endif /* !PyMPI_CONFIG_H */ """ def dump_config_h(self, fileobj, suite): if isinstance(fileobj, str): fileobj = open(fileobj, 'w') try: self.dump_config_h(fileobj, suite) finally: fileobj.close() return head = dedent(self.CONFIG_HEAD) macro = dedent(self.CONFIG_MACRO) tail = dedent(self.CONFIG_TAIL) fileobj.write(head) if suite is None: for node in self: fileobj.write(macro % node.name) else: for name, result in suite: assert name in self.nodemap if not result: fileobj.write(macro % name) fileobj.write(tail) MISSING_HEAD = """\ #ifndef PyMPI_MISSING_H #define PyMPI_MISSING_H #ifndef PyMPI_UNUSED # if defined(__GNUC__) # if !defined(__cplusplus) || (__GNUC__>3||(__GNUC__==3&&__GNUC_MINOR__>=4)) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif # elif defined(__INTEL_COMPILER) || defined(__ICC) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif #endif static PyMPI_UNUSED int PyMPI_UNAVAILABLE(const char *name,...) { return -1; } """ MISSING_TAIL = """\ #endif /* !PyMPI_MISSING_H */ """ def dump_missing_h(self, fileobj, suite): if isinstance(fileobj, str): fileobj = open(fileobj, 'w') try: self.dump_missing_h(fileobj, suite) finally: fileobj.close() return head = dedent(self.MISSING_HEAD) tail = dedent(self.MISSING_TAIL) # fileobj.write(head) if suite is None: for node in self: fileobj.write(node.header()) else: nodelist = self.nodes nodemap = self.nodemap for name, result in suite: assert name in nodemap, name if not result: node = nodelist[nodemap[name]] fileobj.write(node.header()) fileobj.write(tail) # ----------------------------------------- if __name__ == '__main__': import sys, os sources = [os.path.join('src', 'include', 'mpi4py', 'mpi.pxi')] log = lambda msg: sys.stderr.write(msg + '\n') scanner = Scanner() for filename in sources: #filename = os.path.join('src', 'mpi4py', filename) log('parsing file %s' % filename) scanner.parse_file(filename) log('processed %d definitions' % len(scanner.nodes)) config_h = os.path.join('src', 'config.h') missing_h = os.path.join('src', 'missing.h') log('writing file %s' % config_h) scanner.dump_config_h(config_h, None) log('writing file %s' % missing_h) scanner.dump_missing_h(missing_h, None) # -----------------------------------------
capoe/espressopp.soap
contrib/mpi4py/mpi4py-1.3/conf/mpiscanner.py
Python
gpl-3.0
10,154
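The scanner above is regex-driven: each class in NODE_TYPES matches one kind of declaration in mpi.pxi and, for symbols a given MPI build lacks, dump_config_h() writes a "#define PyMPI_MISSING_<name> 1" macro. A minimal standalone sketch of that pattern, using a hypothetical prototype regex rather than mpi4py's real Re patterns:

import re

# Hypothetical pattern matching C prototypes such as "int MPI_Init(int*, char***);"
PROTO = re.compile(r'^\s*\w[\w\s\*]*\s+(MPI_\w+)\s*\(')

def missing_macros(lines, available):
    """Yield config macros for prototypes whose symbol is not available."""
    for line in lines:
        m = PROTO.match(line)
        if m and m.group(1) not in available:
            yield '#define PyMPI_MISSING_%s 1\n' % m.group(1)

if __name__ == '__main__':
    header = ['int MPI_Init(int *argc, char ***argv);',
              'int MPI_Finalize(void);']
    for macro in missing_macros(header, available={'MPI_Init'}):
        print(macro, end='')   # -> #define PyMPI_MISSING_MPI_Finalize 1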
""" Binds a cmake executable as a rez package. """ from __future__ import absolute_import from rez.package_maker__ import make_package from rez.bind._utils import check_version, find_exe, extract_version, make_dirs from rez.utils.platform_ import platform_ from rez.system import system from rez.utils.lint_helper import env import os.path def setup_parser(parser): parser.add_argument("--exe", type=str, metavar="PATH", help="manually specify the cmake executable to bind.") def commands(): env.PATH.append('{this.root}/bin') def bind(path, version_range=None, opts=None, parser=None): exepath = find_exe("cmake", getattr(opts, "exe", None)) version = extract_version(exepath, "--version") check_version(version, version_range) def make_root(variant, root): binpath = make_dirs(root, "bin") link = os.path.join(binpath, "cmake") platform_.symlink(exepath, link) with make_package("cmake", path, make_root=make_root) as pkg: pkg.version = version pkg.tools = ["cmake"] pkg.commands = commands pkg.variants = [system.variant] return "cmake", version
saddingtonbaynes/rez
src/rez/bind/cmake.py
Python
gpl-3.0
1,170
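For reference, bind() above is normally driven by rez's bind tooling rather than called by hand. A rough sketch of a direct call, assuming rez is installed, cmake is on PATH, and "~/packages" is a writable package repository path; the Namespace simply stands in for parsed CLI options:

from argparse import Namespace
from rez.bind.cmake import bind  # module path assumed from the file location above

# exe=None lets find_exe() locate cmake on PATH; pass an explicit path to override.
name, version = bind(path='~/packages', version_range=None, opts=Namespace(exe=None))
print(name, version)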
import time


class Stopwatch(object):
    def __init__(self):
        self.start_time = time.time()

    def time(self):
        return time.time() - self.start_time
tanglu-org/tgl-misago
misago/stopwatch.py
Python
gpl-3.0
165
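Usage of the class above is a two-liner; a minimal sketch, assuming it is importable as misago.stopwatch:

import time
from misago.stopwatch import Stopwatch

sw = Stopwatch()      # starts timing on construction
time.sleep(0.1)
print(sw.time())      # seconds elapsed since construction, roughly 0.1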
# TNC Python interface # @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $ # Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ TNC: A python interface to the TNC non-linear optimizer TNC is a non-linear optimizer. To use it, you must provide a function to minimize. The function must take one argument: the list of coordinates where to evaluate the function; and it must return either a tuple, whose first element is the value of the function, and whose second argument is the gradient of the function (as a list of values); or None, to abort the minimization. """ from scipy.optimize import moduleTNC from numpy import asarray, inf, array __all__ = ['fmin_tnc'] MSG_NONE = 0 # No messages MSG_ITER = 1 # One line per iteration MSG_INFO = 2 # Informational messages MSG_VERS = 4 # Version info MSG_EXIT = 8 # Exit reasons MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT MSGS = { MSG_NONE : "No messages", MSG_ITER : "One line per iteration", MSG_INFO : "Informational messages", MSG_VERS : "Version info", MSG_EXIT : "Exit reasons", MSG_ALL : "All messages" } INFEASIBLE = -1 # Infeasible (low > up) LOCALMINIMUM = 0 # Local minima reach (|pg| ~= 0) FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0) XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0) MAXFUN = 3 # Max. number of function evaluations reach LSFAIL = 4 # Linear search failed CONSTANT = 5 # All lower bounds are equal to the upper bounds NOPROGRESS = 6 # Unable to progress USERABORT = 7 # User requested end of minimization RCSTRINGS = { INFEASIBLE : "Infeasible (low > up)", LOCALMINIMUM : "Local minima reach (|pg| ~= 0)", FCONVERGED : "Converged (|f_n-f_(n-1)| ~= 0)", XCONVERGED : "Converged (|x_n-x_(n-1)| ~= 0)", MAXFUN : "Max. number of function evaluations reach", LSFAIL : "Linear search failed", CONSTANT : "All lower bounds are equal to the upper bounds", NOPROGRESS : "Unable to progress", USERABORT : "User requested end of minimization" } # Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in # SciPy import optimize approx_fprime = optimize.approx_fprime def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, epsilon=1e-8, scale=None, offset=None, messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1, rescale=-1, disp=None): """ Minimize a function with variables subject to bounds, using gradient information in a truncated Newton algorithm. This method wraps a C implementation of the algorithm. 
Parameters ---------- func : callable ``func(x, *args)`` Function to minimize. Must do one of 1. Return f and g, where f is the value of the function and g its gradient (a list of floats). 2. Return the function value but supply gradient function seperately as fprime 3. Return the function value and set approx_grad=True. If the function returns None, the minimization is aborted. x0 : list of floats Initial estimate of minimum. fprime : callable ``fprime(x, *args)`` Gradient of func. If None, then either func must return the function value and the gradient (``f,g = func(x, *args)``) or approx_grad must be True. args : tuple Arguments to pass to function. approx_grad : bool If true, approximate the gradient numerically. bounds : list (min, max) pairs for each element in x0, defining the bounds on that parameter. Use None or +/-inf for one of min or max when there is no bound in that direction. epsilon: float Used if approx_grad is True. The stepsize in a finite difference approximation for fprime. scale : list of floats Scaling factors to apply to each variable. If None, the factors are up-low for interval bounded variables and 1+|x] fo the others. Defaults to None offset : float Value to substract from each variable. If None, the offsets are (up+low)/2 for interval bounded variables and x for the others. messages : Bit mask used to select messages display during minimization values defined in the MSGS dict. Defaults to MGS_ALL. disp : int Integer interface to messages. 0 = no message, 5 = all messages maxCGit : int Maximum number of hessian*vector evaluations per main iteration. If maxCGit == 0, the direction chosen is -gradient if maxCGit < 0, maxCGit is set to max(1,min(50,n/2)). Defaults to -1. maxfun : int Maximum number of function evaluation. if None, maxfun is set to max(100, 10*len(x0)). Defaults to None. eta : float Severity of the line search. if < 0 or > 1, set to 0.25. Defaults to -1. stepmx : float Maximum step for the line search. May be increased during call. If too small, it will be set to 10.0. Defaults to 0. accuracy : float Relative precision for finite difference calculations. If <= machine_precision, set to sqrt(machine_precision). Defaults to 0. fmin : float Minimum function value estimate. Defaults to 0. ftol : float Precision goal for the value of f in the stoping criterion. If ftol < 0.0, ftol is set to 0.0 defaults to -1. xtol : float Precision goal for the value of x in the stopping criterion (after applying x scaling factors). If xtol < 0.0, xtol is set to sqrt(machine_precision). Defaults to -1. pgtol : float Precision goal for the value of the projected gradient in the stopping criterion (after applying x scaling factors). If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). Setting it to 0.0 is not recommended. Defaults to -1. rescale : float Scaling factor (in log10) used to trigger f value rescaling. If 0, rescale at each iteration. If a large value, never rescale. If < 0, rescale is set to 1.3. Returns ------- x : list of floats The solution. nfeval : int The number of function evaluations. rc : int Return code as defined in the RCSTRINGS dict. Notes ----- The underlying algorithm is truncated Newton, also called Newton Conjugate-Gradient. This method differs from scipy.optimize.fmin_ncg in that 1. It wraps a C implementation of the algorithm 2. It allows each variable to be given an upper and lower bound. 
The algorithm incoporates the bound constraints by determining the descent direction as in an unconstrained truncated Newton, but never taking a step-size large enough to leave the space of feasible x's. The algorithm keeps track of a set of currently active constraints, and ignores them when computing the minimum allowable step size. (The x's associated with the active constraint are kept fixed.) If the maximum allowable step size is zero then a new constraint is added. At the end of each iteration one of the constraints may be deemed no longer active and removed. A constraint is considered no longer active is if it is currently active but the gradient for that variable points inward from the constraint. The specific constraint removed is the one associated with the variable of largest index whose constraint is no longer active. References ---------- Wright S., Nocedal J. (2006), 'Numerical Optimization' Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method", SIAM Journal of Numerical Analysis 21, pp. 770-778 """ x0 = asarray(x0, dtype=float).tolist() n = len(x0) if bounds is None: bounds = [(None,None)] * n if len(bounds) != n: raise ValueError('length of x0 != length of bounds') if disp is not None: messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, 4:MSG_EXIT, 5:MSG_ALL}.get(disp, MSG_ALL) if approx_grad: def func_and_grad(x): x = asarray(x) f = func(x, *args) g = approx_fprime(x, func, epsilon, *args) return f, list(g) elif fprime is None: def func_and_grad(x): x = asarray(x) f, g = func(x, *args) return f, list(g) else: def func_and_grad(x): x = asarray(x) f = func(x, *args) g = fprime(x, *args) return f, list(g) """ low, up : the bounds (lists of floats) if low is None, the lower bounds are removed. if up is None, the upper bounds are removed. low and up defaults to None """ low = [0]*n up = [0]*n for i in range(n): if bounds[i] is None: l, u = -inf, inf else: l,u = bounds[i] if l is None: low[i] = -inf else: low[i] = l if u is None: up[i] = inf else: up[i] = u if scale is None: scale = [] if offset is None: offset = [] if maxfun is None: maxfun = max(100, 10*len(x0)) rc, nf, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale, offset, messages, maxCGit, maxfun, eta, stepmx, accuracy, fmin, ftol, xtol, pgtol, rescale) return array(x), nf, rc if __name__ == '__main__': # Examples for TNC def example(): print "Example" # A function to minimize def function(x): f = pow(x[0],2.0)+pow(abs(x[1]),3.0) g = [0,0] g[0] = 2.0*x[0] g[1] = 3.0*pow(abs(x[1]),2.0) if x[1]<0: g[1] = -g[1] return f, g # Optimizer call x, nf, rc = fmin_tnc(function, [-7, 3], bounds=([-10, 1], [10, 10])) print "After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc] print "x =", x print "exact value = [0, 1]" print example()
ygenc/onlineLDA
onlineldavb_new/build/scipy/scipy/optimize/tnc.py
Python
gpl-3.0
11,437
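A short usage sketch of fmin_tnc as defined above, minimizing a bounded quadratic with an analytic gradient. The import path matches current SciPy and is assumed to apply to this snapshot as well:

from scipy.optimize import fmin_tnc

def f_and_grad(x):
    # f(x, y) = (x - 1)^2 + (y - 2)^2 and its gradient
    fx = (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2
    grad = [2.0 * (x[0] - 1.0), 2.0 * (x[1] - 2.0)]
    return fx, grad

x, nfeval, rc = fmin_tnc(f_and_grad, [0.0, 0.0], bounds=[(-10, 10), (-10, 10)])
print(x, nfeval, rc)   # rc indexes the RCSTRINGS table above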
#!/usr/bin/env python # # Copyright 2010,2012,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # from __future__ import print_function from __future__ import division from __future__ import unicode_literals from gnuradio import gr, digital from gnuradio import filter from gnuradio import blocks import sys import numpy try: from gnuradio import channels except ImportError: print("Error: Program requires gr-channels.") sys.exit(1) try: from matplotlib import pyplot except ImportError: print("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).") sys.exit(1) fftlen = 8192 def main(): N = 10000 fs = 2000.0 Ts = 1.0 / fs t = numpy.arange(0, N*Ts, Ts) # When playing with the number of channels, be careful about the filter # specs and the channel map of the synthesizer set below. nchans = 10 # Build the filter(s) bw = 1000 tb = 400 proto_taps = filter.firdes.low_pass_2(1, nchans*fs, bw, tb, 80, filter.firdes.WIN_BLACKMAN_hARRIS) print("Filter length: ", len(proto_taps)) # Create a modulated signal npwr = 0.01 data = numpy.random.randint(0, 256, N) rrc_taps = filter.firdes.root_raised_cosine(1, 2, 1, 0.35, 41) src = blocks.vector_source_b(data.astype(numpy.uint8).tolist(), False) mod = digital.bpsk_mod(samples_per_symbol=2) chan = channels.channel_model(npwr) rrc = filter.fft_filter_ccc(1, rrc_taps) # Split it up into pieces channelizer = filter.pfb.channelizer_ccf(nchans, proto_taps, 2) # Put the pieces back together again syn_taps = [nchans*t for t in proto_taps] synthesizer = filter.pfb_synthesizer_ccf(nchans, syn_taps, True) src_snk = blocks.vector_sink_c() snk = blocks.vector_sink_c() # Remap the location of the channels # Can be done in synth or channelizer (watch out for rotattions in # the channelizer) synthesizer.set_channel_map([ 0, 1, 2, 3, 4, 15, 16, 17, 18, 19]) tb = gr.top_block() tb.connect(src, mod, chan, rrc, channelizer) tb.connect(rrc, src_snk) vsnk = [] for i in range(nchans): tb.connect((channelizer,i), (synthesizer, i)) vsnk.append(blocks.vector_sink_c()) tb.connect((channelizer,i), vsnk[i]) tb.connect(synthesizer, snk) tb.run() sin = numpy.array(src_snk.data()[1000:]) sout = numpy.array(snk.data()[1000:]) # Plot original signal fs_in = nchans*fs f1 = pyplot.figure(1, figsize=(16,12), facecolor='w') s11 = f1.add_subplot(2,2,1) s11.psd(sin, NFFT=fftlen, Fs=fs_in) s11.set_title("PSD of Original Signal") s11.set_ylim([-200, -20]) s12 = f1.add_subplot(2,2,2) s12.plot(sin.real[1000:1500], "o-b") s12.plot(sin.imag[1000:1500], "o-r") s12.set_title("Original Signal in Time") start = 1 skip = 2 s13 = f1.add_subplot(2,2,3) s13.plot(sin.real[start::skip], sin.imag[start::skip], "o") s13.set_title("Constellation") s13.set_xlim([-2, 2]) s13.set_ylim([-2, 2]) # Plot channels nrows = int(numpy.sqrt(nchans)) ncols = int(numpy.ceil(float(nchans) / float(nrows))) f2 = pyplot.figure(2, figsize=(16,12), facecolor='w') for n in range(nchans): s = f2.add_subplot(nrows, ncols, n+1) s.psd(vsnk[n].data(), NFFT=fftlen, Fs=fs_in) s.set_title("Channel {0}".format(n)) s.set_ylim([-200, -20]) # Plot reconstructed signal fs_out = 2*nchans*fs f3 = pyplot.figure(3, figsize=(16,12), facecolor='w') s31 = f3.add_subplot(2,2,1) s31.psd(sout, NFFT=fftlen, Fs=fs_out) s31.set_title("PSD of Reconstructed Signal") s31.set_ylim([-200, -20]) s32 = f3.add_subplot(2,2,2) s32.plot(sout.real[1000:1500], "o-b") s32.plot(sout.imag[1000:1500], "o-r") s32.set_title("Reconstructed Signal in Time") start = 0 skip = 4 s33 = 
f3.add_subplot(2,2,3) s33.plot(sout.real[start::skip], sout.imag[start::skip], "o") s33.set_title("Constellation") s33.set_xlim([-2, 2]) s33.set_ylim([-2, 2]) pyplot.show() if __name__ == "__main__": try: main() except KeyboardInterrupt: pass
jdemel/gnuradio
gr-filter/examples/reconstruction.py
Python
gpl-3.0
4,364
HOST = "172.31.28.140:27017" PORT = "" USER = "" PASSWORD = "" DATABASE = "google" READ_PREFERENCE = "primary" COLLECTION_INPUT = "average_ratioevent" COLLECTION_OUTPUT = "analysis_ratio" PREFIX_COLUMN = "g_" ATTRIBUTES = ["event type", "total cpu task","total memory task", "total balanced task"] SORT = ["_id.filepath", "_id.numline"] OPERATION_TYPE = "ALL" INPUT_FILE = "mean_ratio_cpu_memory.csv" OUTPUT_FILE = "analysis_ratio_cpu_memory_0.csv"
elainenaomi/sciwonc-dataflow-examples
dissertation2017/Experiment 1A/instances/6_workflow_full_10files_primary_1sh_1rs_with_annot_with_proj_3s/analysisevent_0/ConfigDB_Analysis_AverageEvent_0.py
Python
gpl-3.0
451
#!/usr/bin/env python
#
# Copyright 2012,2013,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#

from gnuradio import gr
from gnuradio import blocks
import sys

try:
    from gnuradio import qtgui
    from PyQt5 import QtWidgets, Qt
    import sip
except ImportError:
    print("Error: Program requires PyQt5 and gr-qtgui.")
    sys.exit(1)


class dialog_box(QtWidgets.QWidget):
    def __init__(self, display):
        QtWidgets.QWidget.__init__(self, None)
        self.setWindowTitle('PyQt Test GUI')

        self.boxlayout = QtWidgets.QBoxLayout(
            QtWidgets.QBoxLayout.LeftToRight, self)
        self.boxlayout.addWidget(display, 1)

        self.resize(800, 500)


class my_top_block(gr.top_block):
    def __init__(self):
        gr.top_block.__init__(self)

        self.qapp = QtWidgets.QApplication(sys.argv)

        data0 = 10 * [0, ] + 40 * [1, 0] + 10 * [0, ]
        data0 += 10 * [0, ] + 40 * [0, 1] + 10 * [0, ]
        data1 = 20 * [0, ] + [0, 0, 0, 1, 1, 1, 0, 0, 0, 0] + 70 * [0, ]

        # Adjust these to change the layout of the plot.
        # Can be set to fractions.
        ncols = 100.25
        nrows = 100

        fs = 200

        src0 = blocks.vector_source_b(data0, True)
        src1 = blocks.vector_source_b(data1, True)
        thr = blocks.throttle(gr.sizeof_char, 50000)
        head = blocks.head(gr.sizeof_char, 10000000)
        self.snk1 = qtgui.time_raster_sink_b(fs, nrows, ncols, [], [],
                                             "Time Raster Example", 2, None)

        self.connect(src0, thr, (self.snk1, 0))
        self.connect(src1, (self.snk1, 1))

        # Get the reference pointer to the SpectrumDisplayForm QWidget
        pyQt = self.snk1.qwidget()

        # Wrap the pointer as a PyQt SIP object
        # This can now be manipulated as a PyQt5.QtWidgets.QWidget
        pyWin = sip.wrapinstance(pyQt, QtWidgets.QWidget)

        self.main_box = dialog_box(pyWin)
        self.main_box.show()


if __name__ == "__main__":
    tb = my_top_block()
    tb.start()
    tb.qapp.exec_()
    tb.stop()
dl1ksv/gnuradio
gr-qtgui/examples/pyqt_time_raster_b.py
Python
gpl-3.0
2,170
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields


def execute():
    company = frappe.get_all('Company', filters={'country': 'India'})
    if not company:
        return

    custom_fields = {
        'Sales Invoice Item': [
            dict(fieldname='taxable_value', label='Taxable Value',
                 fieldtype='Currency', insert_after='base_net_amount',
                 hidden=1, options="Company:company:default_currency",
                 print_hide=1)
        ]
    }

    create_custom_fields(custom_fields, update=True)
mhbu50/erpnext
erpnext/patches/v12_0/create_taxable_value_field.py
Python
gpl-3.0
499
import sys

PY_VERSION = sys.version_info[0]
words-in-passing/words-in-passing.github.io
marklog/compat.py
Python
agpl-3.0
45
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-06-02 15:45
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('base', '0130_learningcontaineryear_campus'),
    ]

    operations = [
        migrations.AlterField(
            model_name='learningcomponentyear',
            name='type',
            field=models.CharField(blank=True, choices=[('LECTURING', 'LECTURING'), ('PRACTICAL_EXERCISES', 'PRACTICAL_EXERCISES')], max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='learningcontaineryear',
            name='container_type',
            field=models.CharField(blank=True, choices=[('COURSE', 'COURSE'), ('INTERNSHIP', 'INTERNSHIP'), ('DISSERTATION', 'DISSERTATION'), ('OTHER_COLLECTIVE', 'OTHER_COLLECTIVE'), ('OTHER_INDIVIDUAL', 'OTHER_INDIVIDUAL'), ('MASTER_THESIS', 'MASTER_THESIS'), ('EXTERNAL', 'EXTERNAL')], max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='learningunityear',
            name='subtype',
            field=models.CharField(blank=True, choices=[('FULL', 'FULL'), ('PARTIM', 'PARTIM'), ('INTERNSHIP', 'INTERNSHIP'), ('TEACHING_INTERNSHIP', 'TEACHING_INTERNSHIP'), ('CLINICAL_INTERNSHIP', 'CLINICAL_INTERNSHIP'), ('PROFESSIONAL_INTERNSHIP', 'PROFESSIONAL_INTERNSHIP'), ('RESEARCH_INTERNSHIP', 'RESEARCH_INTERNSHIP')], max_length=50, null=True),
        ),
    ]
uclouvain/osis_louvain
base/migrations/0131_auto_20170602_1745.py
Python
agpl-3.0
1,485
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('logframe', '0005_auto_20151215_1204'),
        ('contacts', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='last_viewed_logframe',
            field=models.ForeignKey(to='logframe.LogFrame', null=True),
        ),
    ]
daniell/kashana
django/website/contacts/migrations/0002_user_last_viewed_logframe.py
Python
agpl-3.0
472
import requests from sqlalchemy import desc from inbox.basicauth import OAuthError from inbox.search.base import SearchBackendException from inbox.auth.oauth import OAuthRequestsWrapper from inbox.models import Message, Thread, Account from inbox.models.backends.gmail import g_token_manager from nylas.logging import get_logger from inbox.api.kellogs import APIEncoder from inbox.models.session import session_scope log = get_logger() PROVIDER = 'gmail' SEARCH_CLS = 'GmailSearchClient' class GmailSearchClient(object): def __init__(self, account): self.account_id = int(account.id) try: with session_scope(self.account_id) as db_session: self.account = db_session.query(Account).get(self.account_id) self.auth_token = g_token_manager.get_token_for_email(self.account) db_session.expunge_all() except OAuthError: raise SearchBackendException( "This search can't be performed because the account's " "credentials are out of date. Please reauthenticate and try " "again.", 403) def search_messages(self, db_session, search_query, offset=0, limit=40): # We need to get the next limit + offset terms if we want to # offset results from the db. g_msgids = self._search(search_query, limit=limit + offset) if not g_msgids: return [] query = db_session.query(Message). \ filter(Message.namespace_id == self.account.namespace.id, Message.g_msgid.in_(g_msgids)). \ order_by(desc(Message.received_date)) if offset: query = query.offset(offset) if limit: query = query.limit(limit) return query.all() # We're only issuing a single request to the Gmail API so there's # no need to stream it. def stream_messages(self, search_query): def g(): encoder = APIEncoder() with session_scope(self.account_id) as db_session: yield encoder.cereal(self.search_messages(db_session, search_query)) + '\n' return g def search_threads(self, db_session, search_query, offset=0, limit=40): # We need to get the next limit + offset terms if we want to # offset results from the db. g_msgids = self._search(search_query, limit=limit + offset) if not g_msgids: return [] query = db_session.query(Thread). \ join(Message, Message.thread_id == Thread.id). \ filter(Thread.namespace_id == self.account.namespace.id, Thread.deleted_at == None, Message.namespace_id == self.account.namespace.id, Message.g_msgid.in_(g_msgids)). \ order_by(desc(Message.received_date)) if offset: query = query.offset(offset) if limit: query = query.limit(limit) return query.all() def stream_threads(self, search_query): def g(): encoder = APIEncoder() with session_scope(self.account_id) as db_session: yield encoder.cereal(self.search_threads(db_session, search_query)) + '\n' return g def _search(self, search_query, limit): results = [] params = dict(q=search_query, maxResults=limit) # Could have used while True: but I don't like infinite loops. for i in range(1, 10): ret = requests.get( u'https://www.googleapis.com/gmail/v1/users/me/messages', params=params, auth=OAuthRequestsWrapper(self.auth_token)) log.info('Gmail API search request completed', elapsed=ret.elapsed.total_seconds()) if ret.status_code != 200: log.critical('HTTP error making search request', account_id=self.account.id, url=ret.url, response=ret.content) raise SearchBackendException( "Error issuing search request", 503, server_error=ret.content) data = ret.json() if 'messages' not in data: return results # Note that the Gmail API returns g_msgids in hex format. So for # example the IMAP X-GM-MSGID 1438297078380071706 corresponds to # 13f5db9286538b1a in the API response we have here. 
results = results + [int(m['id'], 16) for m in data['messages']] if len(results) >= limit: return results[:limit] if 'nextPageToken' not in data: return results else: # We don't have <limit> results and there's more to fetch --- # get them! params['pageToken'] = data['nextPageToken'] log.info('Getting next page of search results') continue # If we've been through the loop 10 times, it means we got a request # a crazy-high offset --- raise an error. log.error('Too many search results', query=search_query, limit=limit) raise SearchBackendException("Too many results", 400)
closeio/nylas
inbox/search/backends/gmail.py
Python
agpl-3.0
5,314
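The comment in _search() above notes that the Gmail REST API returns message ids as hex strings while the stored IMAP X-GM-MSGID is decimal, so the client converts with int(hex_id, 16). Using the example values given in that comment:

# Gmail API message id (hex) -> IMAP X-GM-MSGID (decimal), values from the source comment.
api_id = '13f5db9286538b1a'
g_msgid = int(api_id, 16)
assert g_msgid == 1438297078380071706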
# coding=utf-8 from __future__ import absolute_import __author__ = "Gina Häußge <osd@foosel.net>" __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' __copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License" import requests import logging from ..exceptions import ConfigurationInvalid RELEASE_URL = "https://api.github.com/repos/{user}/{repo}/releases" logger = logging.getLogger("octoprint.plugins.softwareupdate.version_checks.github_release") def _filter_out_latest(releases, sort_key=None, include_prerelease=False, prerelease_channel=None): """ Filters out the newest of all matching releases. Tests: >>> release_1_2_15 = dict(name="1.2.15", tag_name="1.2.15", html_url="some_url", published_at="2016-07-29T19:53:29Z", prerelease=False, draft=False, target_commitish="prerelease") >>> release_1_2_16rc1 = dict(name="1.2.16rc1", tag_name="1.2.16rc1", html_url="some_url", published_at="2016-08-29T12:00:00Z", prerelease=True, draft=False, target_commitish="rc/maintenance") >>> release_1_2_16rc2 = dict(name="1.2.16rc2", tag_name="1.2.16rc2", html_url="some_url", published_at="2016-08-30T12:00:00Z", prerelease=True, draft=False, target_commitish="rc/maintenance") >>> release_1_2_17rc1 = dict(name="1.2.17rc1", tag_name="1.2.17rc1", html_url="some_url", published_at="2016-08-31T12:00:00Z", prerelease=True, draft=True, target_commitish="rc/maintenance") >>> release_1_3_0rc1 = dict(name="1.3.0rc1", tag_name="1.3.0rc1", html_url="some_url", published_at="2016-12-12T12:00:00Z", prerelease=True, draft=False, target_commitish="rc/devel") >>> release_1_4_0rc1 = dict(name="1.4.0rc1", tag_name="1.4.0rc1", html_url="some_url", published_at="2017-12-12T12:00:00Z", prerelease=True, draft=False, target_commitish="rc/future") >>> releases = [release_1_2_15, release_1_2_16rc1, release_1_2_16rc2, release_1_2_17rc1, release_1_3_0rc1, release_1_4_0rc1] >>> _filter_out_latest(releases, include_prerelease=False, prerelease_channel=None) ('1.2.15', '1.2.15', 'some_url') >>> _filter_out_latest(releases, include_prerelease=True, prerelease_channel="rc/maintenance") ('1.2.16rc2', '1.2.16rc2', 'some_url') >>> _filter_out_latest(releases, include_prerelease=True, prerelease_channel="rc/devel") ('1.3.0rc1', '1.3.0rc1', 'some_url') >>> _filter_out_latest(releases, include_prerelease=True, prerelease_channel=None) ('1.4.0rc1', '1.4.0rc1', 'some_url') >>> _filter_out_latest(releases, include_prerelease=True, prerelease_channel="rc/doesntexist") ('1.2.15', '1.2.15', 'some_url') >>> _filter_out_latest([release_1_2_17rc1]) (None, None, None) >>> _filter_out_latest([release_1_2_16rc1, release_1_2_16rc2]) (None, None, None) """ nothing = None, None, None if sort_key is None: sort_key = lambda release: release.get("published_at", None) # filter out prereleases and drafts filter_function = lambda rel: not rel["prerelease"] and not rel["draft"] if include_prerelease: if prerelease_channel: filter_function = lambda rel: not rel["draft"] and ( not rel["prerelease"] or rel["target_commitish"] == prerelease_channel) else: filter_function = lambda rel: not rel["draft"] releases = filter(filter_function, releases) if not releases: return nothing # sort by sort_key releases = sorted(releases, key=sort_key) # latest release = last in list latest = releases[-1] return latest["name"], latest["tag_name"], latest.get("html_url", None) def _get_latest_release(user, repo, compare_type, include_prerelease=False, prerelease_channel=None, force_base=True): nothing = None, 
None, None r = requests.get(RELEASE_URL.format(user=user, repo=repo)) from . import log_github_ratelimit log_github_ratelimit(logger, r) if not r.status_code == requests.codes.ok: return nothing releases = r.json() # sanitize required_fields = {"name", "tag_name", "html_url", "draft", "prerelease", "published_at", "target_commitish"} releases = filter(lambda rel: set(rel.keys()) & required_fields == required_fields, releases) comparable_factory = _get_comparable_factory(compare_type, force_base=force_base) sort_key = lambda release: comparable_factory(_get_sanitized_version(release["tag_name"])) return _filter_out_latest(releases, sort_key=sort_key, include_prerelease=include_prerelease, prerelease_channel=prerelease_channel) def _get_sanitized_version(version_string): """ Removes "-..." prefix from version strings. Tests: >>> _get_sanitized_version("1.2.15") '1.2.15' >>> _get_sanitized_version("1.2.15-dev12") '1.2.15' """ if "-" in version_string: version_string = version_string[:version_string.find("-")] return version_string def _get_base_from_version_tuple(version_tuple): """ Reduces version tuple to base version. Tests: >>> _get_base_from_version_tuple(("1", "2", "15")) ('1', '2', '15') >>> _get_base_from_version_tuple(("1", "2", "15", "*", "dev12")) ('1', '2', '15') """ base_version = [] for part in version_tuple: if part.startswith("*"): break base_version.append(part) return tuple(base_version) def _get_comparable_version_pkg_resources(version_string, force_base=True): import pkg_resources version = pkg_resources.parse_version(version_string) if force_base: if isinstance(version, tuple): # old setuptools version = _get_base_from_version_tuple(version) else: # new setuptools version = pkg_resources.parse_version(version.base_version) return version def _get_comparable_version_semantic(version_string, force_base=True): import semantic_version version = semantic_version.Version.coerce(version_string, partial=False) if force_base: version_string = "{}.{}.{}".format(version.major, version.minor, version.patch) version = semantic_version.Version.coerce(version_string, partial=False) return version def _get_sanitized_compare_type(compare_type, custom=None): if not compare_type in ("python", "python_unequal", "semantic", "semantic_unequal", "unequal", "custom") or compare_type == "custom" and custom is None: compare_type = "python" return compare_type def _get_comparable_factory(compare_type, force_base=True): if compare_type in ("python", "python_unequal"): return lambda version: _get_comparable_version_pkg_resources(version, force_base=force_base) elif compare_type in ("semantic", "semantic_unequal"): return lambda version: _get_comparable_version_semantic(version, force_base=force_base) else: return lambda version: version def _get_comparator(compare_type, custom=None): if compare_type in ("python", "semantic"): return lambda a, b: a >= b elif compare_type == "custom": return custom else: return lambda a, b: a == b def _is_current(release_information, compare_type, custom=None, force_base=True): """ Checks if the provided release information indicates the version being the most current one. 
Tests: >>> _is_current(dict(remote=dict(value=None)) True >>> _is_current(dict(local=dict(value="1.2.15"), remote=dict(value="1.2.16"))) False >>> _is_current(dict(local=dict(value="1.2.16dev1"), remote=dict(value="1.2.16dev2"))) True >>> _is_current(dict(local=dict(value="1.2.16dev1"), remote=dict(value="1.2.16dev2")), force_base=False) False >>> _is_current(dict(local=dict(value="1.2.16dev3"), remote=dict(value="1.2.16dev2")), force_base=False) True >>> _is_current(dict(local=dict(value="1.2.16dev3"), remote=dict(value="1.2.16dev2")), force_base=False, compare_type="python_unequal") False """ if release_information["remote"]["value"] is None: return True compare_type = _get_sanitized_compare_type(compare_type, custom=custom) comparable_factory = _get_comparable_factory(compare_type, force_base=force_base) comparator = _get_comparator(compare_type, custom=custom) sanitized_local = _get_sanitized_version(release_information["local"]["value"]) sanitized_remote = _get_sanitized_version(release_information["remote"]["value"]) try: return comparator(comparable_factory(sanitized_local), comparable_factory(sanitized_remote)) except: logger.exception("Could not check if version is current due to an error, assuming it is") return True def get_latest(target, check, custom_compare=None): if not "user" in check or not "repo" in check: raise ConfigurationInvalid("github_release update configuration for %s needs user and repo set" % target) current = check.get("current", None) include_prerelease = check.get("prerelease", False) prerelease_channel = check.get("prerelease_channel", None) force_base = check.get("force_base", True) compare_type = _get_sanitized_compare_type(check.get("release_compare", "python"), custom=custom_compare) remote_name, remote_tag, release_notes = _get_latest_release(check["user"], check["repo"], compare_type, include_prerelease=include_prerelease, prerelease_channel=prerelease_channel, force_base=force_base) information =dict( local=dict(name=current, value=current), remote=dict(name=remote_name, value=remote_tag, release_notes=release_notes) ) logger.debug("Target: %s, local: %s, remote: %s" % (target, current, remote_tag)) return information, _is_current(information, compare_type, custom=custom_compare, force_base=force_base)
nickverschoor/OctoPrint
src/octoprint/plugins/softwareupdate/version_checks/github_release.py
Python
agpl-3.0
10,218
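With force_base=True the checker above compares only base versions, so dev and rc suffixes are ignored when deciding whether an install is current. A minimal sketch of that idea using pkg_resources, one of the supported comparison backends; it assumes a setuptools recent enough to expose base_version:

import pkg_resources

def base(version_string):
    # Strip any local "-..." suffix, then reduce to the base release,
    # mirroring _get_sanitized_version() plus the force_base path above.
    if '-' in version_string:
        version_string = version_string[:version_string.find('-')]
    v = pkg_resources.parse_version(version_string)
    return pkg_resources.parse_version(v.base_version)

# 1.2.16dev1 and 1.2.16dev2 share the base 1.2.16, which is why
# _is_current() reports them as current when force_base=True.
assert base('1.2.16dev1') == base('1.2.16dev2')
assert base('1.2.15') < base('1.2.16rc1')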
# -*- coding: utf-8 -*- #*************************************************************************** #* Copyright (c) 2010 Juergen Riegel <juergen.riegel@web.de> * #* * #* This file is part of the FreeCAD CAx development system. * #* * #* This program is free software; you can redistribute it and/or modify * #* it under the terms of the GNU Lesser General Public License (LGPL) * #* as published by the Free Software Foundation; either version 2 of * #* the License, or (at your option) any later version. * #* for detail see the LICENCE text file. * #* * #* FreeCAD is distributed in the hope that it will be useful, * #* but WITHOUT ANY WARRANTY; without even the implied warranty of * #* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * #* GNU Library General Public License for more details. * #* * #* You should have received a copy of the GNU Library General Public * #* License along with FreeCAD; if not, write to the Free Software * #* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * #* USA * #* * #***************************************************************************/ import FreeCAD import unittest import math def tu(str): return FreeCAD.Units.Quantity(str).Value def ts(q): return q.UserString def ts2(q): return FreeCAD.Units.Quantity(q.UserString).UserString #--------------------------------------------------------------------------- # define the functions to test the FreeCAD UnitApi code #--------------------------------------------------------------------------- def compare(x, y): return math.fabs(x - y) < 0.00001 class UnitBasicCases(unittest.TestCase): def setUp(self): par = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units") dec = par.GetInt("Decimals") self.delta = math.pow(10,-dec) def testConversions(self): #tu = FreeCAD.Units.translateUnit self.failUnless(compare(tu('10 m'), 10000.0)) self.failUnless(compare(tu('3/8 in'), 9.525)) self.failUnless(compare(tu('100 km/h'), 27777.77777777)) self.failUnless(compare(tu('m^2*kg*s^-3*A^-2'), 1000000.0)) self.failUnless(compare(tu('(m^2*kg)/(A^2*s^3)'), 1000000.0)) self.failUnless(compare(tu('2*pi rad'), 360.0)) self.failUnless(compare(tu('2*pi rad') / tu('gon'), 400.0)) self.failUnless(compare(tu('999 kg') / tu('1 m^3'), 0.000009999)) def testImperial(self): #tu = FreeCAD.Units.translateUnit self.failUnless(compare(tu('3/8in'), 9.525)) #self.failUnless(compare(tu('1fo(3+7/16)in'),392.112500))thisgivesaparsersyntaxerror!!! 
self.failUnless(compare(tu('1\'(3+7/16)"'), 392.112500)) psi = FreeCAD.Units.parseQuantity("1psi") mpa = psi.getValueAs("MPa").Value self.assertAlmostEqual(0.006894744825, mpa, delta=self.delta) kpa = psi.getValueAs("kPa").Value self.assertAlmostEqual(6.894744825494, kpa, delta=self.delta) ksi = FreeCAD.Units.parseQuantity("1ksi") mpa = ksi.getValueAs("MPa").Value self.assertAlmostEqual(6.894744825494, mpa, delta=self.delta) kpa = ksi.getValueAs("kPa").Value self.assertAlmostEqual(6894.744825494, kpa, delta=self.delta) def testSelfConsistency(self): qu = FreeCAD.Units.Quantity("0.23 W/m/K") self.assertTrue(ts(qu), ts2(qu)) qu = FreeCAD.Units.Quantity("237 mm*kg/(s^3*K)") self.assertTrue(ts(qu), ts2(qu)) qu = FreeCAD.Units.Quantity("237.000 W/mm/K") self.assertTrue(ts(qu), ts2(qu)) def testDivide(self): qu1 = FreeCAD.Units.Quantity("1 m/s") qu2 = FreeCAD.Units.Quantity("m/s") self.assertTrue(qu1/qu2, 1) def testSchemes(self): schemes = FreeCAD.Units.listSchemas() num = len(schemes) psi = FreeCAD.Units.parseQuantity("1psi") for i in range(num): t = FreeCAD.Units.schemaTranslate(psi, i) v = FreeCAD.Units.parseQuantity(t[0]).getValueAs("psi") self.assertAlmostEqual(1, v.Value, msg="Failed with \"{0}\" scheme: {1} != 1 (delta: {2})".format(schemes[i], v.Value, self.delta), delta=self.delta) ksi = FreeCAD.Units.parseQuantity("1ksi") for i in range(num): t = FreeCAD.Units.schemaTranslate(ksi, i) v = FreeCAD.Units.parseQuantity(t[0]).getValueAs("ksi") self.assertAlmostEqual(1, v.Value, msg="Failed with \"{0}\" scheme: {1} != 1 (delta: {2})".format(schemes[i], v.Value, self.delta), delta=self.delta) def testSchemeTranslation(self): quantities = [] for i in dir(FreeCAD.Units): if issubclass(type(getattr(FreeCAD.Units, i)), FreeCAD.Units.Quantity): quantities.append(i) schemes = FreeCAD.Units.listSchemas() for i in quantities: q1 = getattr(FreeCAD.Units, i) q1 = FreeCAD.Units.Quantity(q1) q1.Format = {'Precision': 16} for idx, val in enumerate(schemes): t = FreeCAD.Units.schemaTranslate(q1, idx) try: q2 = FreeCAD.Units.Quantity(t[0]) if math.fabs(q1.Value - q2.Value) > 0.01: print (" {} : {} : {} : {} : {}".format(q1, q2, t, i, val).encode("utf-8").strip()) except Exception as e: s = "{}: {}".format(e, t[0]) print (" ".join(e).encode("utf-8").strip()) def testVoltage(self): q1 = FreeCAD.Units.Quantity("1e20 V") t = FreeCAD.Units.schemaTranslate(q1, 0) # Standard q2 = FreeCAD.Units.Quantity(t[0]) self.assertAlmostEqual(q1.Value, q2.Value, delta=self.delta) def testEnergy(self): q1 = FreeCAD.Units.Quantity("1e20 J") t = FreeCAD.Units.schemaTranslate(q1, 0) # Standard q2 = FreeCAD.Units.Quantity(t[0]) self.assertAlmostEqual(q1.Value, q2.Value, delta=self.delta) def testTrigonometric(self): #tu=FreeCAD.Units.translateUnit self.failUnless(compare(tu('sin(pi)'), math.sin(math.pi))) self.failUnless(compare(tu('cos(pi)'), math.cos(math.pi))) self.failUnless(compare(tu('tan(pi)'), math.tan(math.pi)))
sanguinariojoe/FreeCAD
src/Mod/Test/UnitTests.py
Python
lgpl-2.1
6,978
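The assertions above all route through FreeCAD.Units.Quantity. Run inside FreeCAD's Python environment the conversions look like this; a sketch that only works with the FreeCAD module importable, with expected values taken from the tests above (FreeCAD's internal length unit is mm):

import FreeCAD

print(FreeCAD.Units.Quantity('3/8 in').Value)      # 9.525 (mm)
print(FreeCAD.Units.Quantity('100 km/h').Value)    # 27777.78 (mm/s)

psi = FreeCAD.Units.parseQuantity('1psi')
print(psi.getValueAs('MPa').Value)                 # ~0.006894744825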
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyDlcpar(PythonPackage):
    """DLCpar is a reconciliation method for inferring gene duplications,
    losses, and coalescence (accounting for incomplete lineage sorting)."""

    homepage = "https://www.cs.hmc.edu/~yjw/software/dlcpar/"
    url = "https://www.cs.hmc.edu/~yjw/software/dlcpar/pub/sw/dlcpar-1.0.tar.gz"

    version('1.0', sha256='774319caba0f10d1230b8f85b8a147eda5871f9a316d7b3381b91c1bde97aa0a')

    depends_on('py-numpy', type=('build', 'run'))
iulian787/spack
var/spack/repos/builtin/packages/py-dlcpar/package.py
Python
lgpl-2.1
696
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import ast import hashlib import spack.repo import spack.package import spack.directives import spack.error import spack.spec import spack.util.naming class RemoveDocstrings(ast.NodeTransformer): """Transformer that removes docstrings from a Python AST.""" def remove_docstring(self, node): if node.body: if isinstance(node.body[0], ast.Expr) and \ isinstance(node.body[0].value, ast.Str): node.body.pop(0) self.generic_visit(node) return node def visit_FunctionDef(self, node): # noqa return self.remove_docstring(node) def visit_ClassDef(self, node): # noqa return self.remove_docstring(node) def visit_Module(self, node): # noqa return self.remove_docstring(node) class RemoveDirectives(ast.NodeTransformer): """Remove Spack directives from a package AST.""" def __init__(self, spec): self.spec = spec def is_directive(self, node): """Check to determine if the node is a valid directive Directives are assumed to be represented in the AST as a named function call expression. This means that they will NOT be represented by a named function call within a function call expression (e.g., as callbacks are sometimes represented). Args: node (AST): the AST node being checked Returns: (bool): ``True`` if the node represents a known directive, ``False`` otherwise """ return (isinstance(node, ast.Expr) and node.value and isinstance(node.value, ast.Call) and isinstance(node.value.func, ast.Name) and node.value.func.id in spack.directives.__all__) def is_spack_attr(self, node): return (isinstance(node, ast.Assign) and node.targets and isinstance(node.targets[0], ast.Name) and node.targets[0].id in spack.package.Package.metadata_attrs) def visit_ClassDef(self, node): # noqa if node.name == spack.util.naming.mod_to_class(self.spec.name): node.body = [ c for c in node.body if (not self.is_directive(c) and not self.is_spack_attr(c))] return node class TagMultiMethods(ast.NodeVisitor): """Tag @when-decorated methods in a spec.""" def __init__(self, spec): self.spec = spec self.methods = {} def visit_FunctionDef(self, node): # noqa nodes = self.methods.setdefault(node.name, []) if node.decorator_list: dec = node.decorator_list[0] if isinstance(dec, ast.Call) and dec.func.id == 'when': try: cond = dec.args[0].s nodes.append( (node, self.spec.satisfies(cond, strict=True))) except AttributeError: # In this case the condition for the 'when' decorator is # not a string literal (for example it may be a Python # variable name). Therefore the function is added # unconditionally since we don't know whether the # constraint applies or not. 
nodes.append((node, None)) else: nodes.append((node, None)) class ResolveMultiMethods(ast.NodeTransformer): """Remove methods which do not exist if their @when is not satisfied.""" def __init__(self, methods): self.methods = methods def resolve(self, node): if node.name not in self.methods: raise PackageHashError( "Future traversal visited new node: %s" % node.name) result = None for n, cond in self.methods[node.name]: if cond: return n if cond is None: result = n return result def visit_FunctionDef(self, node): # noqa if self.resolve(node) is node: node.decorator_list = [] return node return None def package_content(spec): return ast.dump(package_ast(spec)) def package_hash(spec, content=None): if content is None: content = package_content(spec) return hashlib.sha256(content.encode('utf-8')).digest().lower() def package_ast(spec): spec = spack.spec.Spec(spec) filename = spack.repo.path.filename_for_package_name(spec.name) with open(filename) as f: text = f.read() root = ast.parse(text) root = RemoveDocstrings().visit(root) RemoveDirectives(spec).visit(root) fmm = TagMultiMethods(spec) fmm.visit(root) root = ResolveMultiMethods(fmm.methods).visit(root) return root class PackageHashError(spack.error.SpackError): """Raised for all errors encountered during package hashing."""
iulian787/spack
lib/spack/spack/util/package_hash.py
Python
lgpl-2.1
5,042
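The essential move above, parse the package file, strip docstrings and Spack directives, then hash the dump of the remaining AST, can be shown standalone with just ast and hashlib. A simplified sketch; the real code additionally removes directives and resolves @when multimethods:

import ast
import hashlib

class StripDocstrings(ast.NodeTransformer):
    """Drop a leading string literal from modules, classes and functions."""
    def _strip(self, node):
        if (node.body and isinstance(node.body[0], ast.Expr)
                and isinstance(node.body[0].value, ast.Constant)
                and isinstance(node.body[0].value.value, str)):
            node.body.pop(0)
        self.generic_visit(node)
        return node
    visit_Module = visit_ClassDef = visit_FunctionDef = _strip

def content_hash(source):
    tree = StripDocstrings().visit(ast.parse(source))
    return hashlib.sha256(ast.dump(tree).encode('utf-8')).hexdigest()

# A docstring-only edit leaves the hash unchanged, just as in package_hash().
a = 'def f(x):\n    """docs v1"""\n    return x + 1\n'
b = 'def f(x):\n    """docs v2, reworded"""\n    return x + 1\n'
assert content_hash(a) == content_hash(b)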
"""Partial dependence plots for tree ensembles. """ # Authors: Peter Prettenhofer # License: BSD 3 clause from itertools import count import numbers import numpy as np from scipy.stats.mstats import mquantiles from ..utils.extmath import cartesian from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import map, range, zip from ..utils import array2d from ..tree._tree import DTYPE from ._gradient_boosting import _partial_dependence_tree from .gradient_boosting import BaseGradientBoosting def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100): """Generate a grid of points based on the ``percentiles of ``X``. The grid is generated by placing ``grid_resolution`` equally spaced points between the ``percentiles`` of each column of ``X``. Parameters ---------- X : ndarray The data percentiles : tuple of floats The percentiles which are used to construct the extreme values of the grid axes. grid_resolution : int The number of equally spaced points that are placed on the grid. Returns ------- grid : ndarray All data points on the grid; ``grid.shape[1] == X.shape[1]`` and ``grid.shape[0] == grid_resolution * X.shape[1]``. axes : seq of ndarray The axes with which the grid has been created. """ if len(percentiles) != 2: raise ValueError('percentile must be tuple of len 2') if not all(0. <= x <= 1. for x in percentiles): raise ValueError('percentile values must be in [0, 1]') axes = [] for col in range(X.shape[1]): uniques = np.unique(X[:, col]) if uniques.shape[0] < grid_resolution: # feature has low resolution use unique vals axis = uniques else: emp_percentiles = mquantiles(X, prob=percentiles, axis=0) # create axis based on percentiles and grid resolution axis = np.linspace(emp_percentiles[0, col], emp_percentiles[1, col], num=grid_resolution, endpoint=True) axes.append(axis) return cartesian(axes), axes def partial_dependence(gbrt, target_variables, grid=None, X=None, percentiles=(0.05, 0.95), grid_resolution=100): """Partial dependence of ``target_variables``. Partial dependence plots show the dependence between the joint values of the ``target_variables`` and the function represented by the ``gbrt``. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. target_variables : array-like, dtype=int The target features for which the partial dependecy should be computed (size should be smaller than 3 for visual renderings). grid : array-like, shape=(n_points, len(target_variables)) The grid of ``target_variables`` values for which the partial dependecy should be evaluated (either ``grid`` or ``X`` must be specified). X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. It is used to generate a ``grid`` for the ``target_variables``. The ``grid`` comprises ``grid_resolution`` equally spaced points between the two ``percentiles``. percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used create the extreme values for the ``grid``. Only if ``X`` is not None. grid_resolution : int, default=100 The number of equally spaced points on the ``grid``. Returns ------- pdp : array, shape=(n_classes, n_points) The partial dependence function evaluated on the ``grid``. For regression and binary classification ``n_classes==1``. axes : seq of ndarray or None The axes with which the grid has been created or None if the grid has been given. 
Examples -------- >>> samples = [[0, 0, 2], [1, 0, 0]] >>> labels = [0, 1] >>> from sklearn.ensemble import GradientBoostingClassifier >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels) >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2) >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP (array([[-4.52..., 4.52...]]), [array([ 0., 1.])]) """ if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') if gbrt.estimators_.shape[0] == 0: raise ValueError('Call %s.fit before partial_dependence' % gbrt.__class__.__name__) if (grid is None and X is None) or (grid is not None and X is not None): raise ValueError('Either grid or X must be specified') target_variables = np.asarray(target_variables, dtype=np.int32, order='C').ravel() if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]): raise ValueError('target_variables must be in [0, %d]' % (gbrt.n_features - 1)) if X is not None: X = array2d(X, dtype=DTYPE, order='C') grid, axes = _grid_from_X(X[:, target_variables], percentiles, grid_resolution) else: assert grid is not None # dont return axes if grid is given axes = None # grid must be 2d if grid.ndim == 1: grid = grid[:, np.newaxis] if grid.ndim != 2: raise ValueError('grid must be 2d but is %dd' % grid.ndim) grid = np.asarray(grid, dtype=DTYPE, order='C') assert grid.shape[1] == target_variables.shape[0] n_trees_per_stage = gbrt.estimators_.shape[1] n_estimators = gbrt.estimators_.shape[0] pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64, order='C') for stage in range(n_estimators): for k in range(n_trees_per_stage): tree = gbrt.estimators_[stage, k].tree_ _partial_dependence_tree(tree, grid, target_variables, gbrt.learning_rate, pdp[k]) return pdp, axes def plot_partial_dependence(gbrt, X, features, feature_names=None, label=None, n_cols=3, grid_resolution=100, percentiles=(0.05, 0.95), n_jobs=1, verbose=0, ax=None, line_kw=None, contour_kw=None, **fig_kw): """Partial dependence plots for ``features``. The ``len(features)`` plots are arranged in a grid with ``n_cols`` columns. Two-way partial dependence plots are plotted as contour plots. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. features : seq of tuples or ints If seq[i] is an int or a tuple with one int value, a one-way PDP is created; if seq[i] is a tuple of two ints, a two-way PDP is created. feature_names : seq of str Name of each feature; feature_names[i] holds the name of the feature with index i. label : object The class label for which the PDPs should be computed. Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``. n_cols : int The number of columns in the grid plot (default: 3). percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used create the extreme values for the PDP axes. grid_resolution : int, default=100 The number of equally spaced points on the axes. n_jobs : int The number of CPUs to use to compute the PDs. -1 means 'all CPUs'. Defaults to 1. verbose : int Verbose output during PD computations. Defaults to 0. ax : Matplotlib axis object, default None An axis object onto which the plots will be drawn. line_kw : dict Dict with keywords passed to the ``pylab.plot`` call. For one-way partial dependence plots. contour_kw : dict Dict with keywords passed to the ``pylab.plot`` call. For two-way partial dependence plots. 
fig_kw : dict Dict with keywords passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns ------- fig : figure The Matplotlib Figure object. axs : seq of Axis objects A seq of Axis objects, one for each subplot. Examples -------- >>> from sklearn.datasets import make_friedman1 >>> from sklearn.ensemble import GradientBoostingRegressor >>> X, y = make_friedman1() >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP ... """ import matplotlib.pyplot as plt from matplotlib import transforms from matplotlib.ticker import MaxNLocator from matplotlib.ticker import ScalarFormatter if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') if gbrt.estimators_.shape[0] == 0: raise ValueError('Call %s.fit before partial_dependence' % gbrt.__class__.__name__) # set label_idx for multi-class GBRT if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2: if label is None: raise ValueError('label is not given for multi-class PDP') label_idx = np.searchsorted(gbrt.classes_, label) if gbrt.classes_[label_idx] != label: raise ValueError('label %s not in ``gbrt.classes_``' % str(label)) else: # regression and binary classification label_idx = 0 X = array2d(X, dtype=DTYPE, order='C') if gbrt.n_features != X.shape[1]: raise ValueError('X.shape[1] does not match gbrt.n_features') if line_kw is None: line_kw = {'color': 'green'} if contour_kw is None: contour_kw = {} # convert feature_names to list if feature_names is None: # if not feature_names use fx indices as name feature_names = [str(i) for i in range(gbrt.n_features)] elif isinstance(feature_names, np.ndarray): feature_names = feature_names.tolist() def convert_feature(fx): if isinstance(fx, six.string_types): try: fx = feature_names.index(fx) except ValueError: raise ValueError('Feature %s not in feature_names' % fx) return fx # convert features into a seq of int tuples tmp_features = [] for fxs in features: if isinstance(fxs, (numbers.Integral,) + six.string_types): fxs = (fxs,) try: fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32) except TypeError: raise ValueError('features must be either int, str, or tuple ' 'of int/str') if not (1 <= np.size(fxs) <= 2): raise ValueError('target features must be either one or two') tmp_features.append(fxs) features = tmp_features names = [] try: for fxs in features: l = [] # explicit loop so "i" is bound for exception below for i in fxs: l.append(feature_names[i]) names.append(l) except IndexError: raise ValueError('features[i] must be in [0, n_features) ' 'but was %d' % i) # compute PD functions pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(partial_dependence)(gbrt, fxs, X=X, grid_resolution=grid_resolution) for fxs in features) # get global min and max values of PD grouped by plot type pdp_lim = {} for pdp, axes in pd_result: min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max() n_fx = len(axes) old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd)) min_pd = min(min_pd, old_min_pd) max_pd = max(max_pd, old_max_pd) pdp_lim[n_fx] = (min_pd, max_pd) # create contour levels for two-way plots if 2 in pdp_lim: Z_level = np.linspace(*pdp_lim[2], num=8) if ax is None: fig = plt.figure(**fig_kw) else: fig = ax.get_figure() fig.clear() n_cols = min(n_cols, len(features)) n_rows = int(np.ceil(len(features) / float(n_cols))) axs = [] for i, fx, name, (pdp, axes) in 
zip(count(), features, names, pd_result): ax = fig.add_subplot(n_rows, n_cols, i + 1) if len(axes) == 1: ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw) else: # make contour plot assert len(axes) == 2 XX, YY = np.meshgrid(axes[0], axes[1]) Z = pdp[label_idx].reshape(list(map(np.size, axes))).T CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors='k') ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1], vmin=Z_level[0], alpha=0.75, **contour_kw) ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True) # plot data deciles + axes labels deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) ylim = ax.get_ylim() ax.vlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_xlabel(name[0]) ax.set_ylim(ylim) # prevent x-axis ticks from overlapping ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower')) tick_formatter = ScalarFormatter() tick_formatter.set_powerlimits((-3, 4)) ax.xaxis.set_major_formatter(tick_formatter) if len(axes) > 1: # two-way PDP - y-axis deciles + labels deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) xlim = ax.get_xlim() ax.hlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_ylabel(name[1]) # hline erases xlim ax.set_xlim(xlim) else: ax.set_ylabel('Partial dependence') if len(axes) == 1: ax.set_ylim(pdp_lim[1]) axs.append(ax) fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4, hspace=0.3) return fig, axs
chaluemwut/fbserver
venv/lib/python2.7/site-packages/sklearn/ensemble/partial_dependence.py
Python
apache-2.0
14,897
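The module's docstrings above already include doctest-style examples; expanded into a small runnable sketch against the scikit-learn snapshot this file belongs to (the import path sklearn.ensemble.partial_dependence is assumed from the file location):

from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import (partial_dependence,
                                                 plot_partial_dependence)

X, y = make_friedman1()
clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)

# Raw partial dependence values for feature 0 on a coarse grid.
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)

# One one-way plot (feature 0) and one two-way contour plot (features 0 and 1).
fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)])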
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes and functions used to construct graphs.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import functools import linecache import os import re import sys import threading import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import function_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.core.framework import op_def_pb2 from tensorflow.core.framework import versions_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python import pywrap_tensorflow as c_api from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import tape from tensorflow.python.framework import c_api_util from tensorflow.python.framework import cpp_shape_inference_pb2 from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import op_def_registry from tensorflow.python.framework import registry from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import versions from tensorflow.python.ops import control_flow_util from tensorflow.python.platform import app from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util import decorator_utils from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export # Temporary global switch determining if we should enable the work-in-progress # calls to the C API. Currently disabled by default but can be manually enabled # in code or via the environment variable. This will be removed once all # functionality is supported and there's no performance penalty with it enabled. _USE_C_API = os.getenv("TF_C_API_GRAPH_CONSTRUCTION", "1") is not "0" _USE_C_SHAPES = os.getenv("TF_C_API_GRAPH_CONSTRUCTION_SHAPES", "0") is not "0" def tensor_id(tensor): """Returns a unique identifier for this Tensor.""" return tensor._id # pylint: disable=protected-access class _NullContextmanager(object): def __enter__(self): pass def __exit__(self, type_arg, value_arg, traceback_arg): return False # False values do not suppress exceptions def _override_helper(clazz_object, operator, func): """Overrides (string) operator on Tensors to call func. Args: clazz_object: the class to override for; either Tensor or SparseTensor. operator: the string name of the operator to override. func: the function that replaces the overridden operator. 
Raises: ValueError: If operator has already been overwritten, or if operator is not allowed to be overwritten. """ existing = getattr(clazz_object, operator, None) if existing is not None: # Check to see if this is a default method-wrapper or slot wrapper which # will be true for the comparison operators. if not isinstance(existing, type(object.__lt__)): raise ValueError("operator %s cannot be overwritten again on class %s." % (operator, clazz_object)) if operator not in Tensor.OVERLOADABLE_OPERATORS: raise ValueError("Overriding %s is disallowed" % operator) setattr(clazz_object, operator, func) def _as_graph_element(obj): """Convert `obj` to a graph element if possible, otherwise return `None`. Args: obj: Object to convert. Returns: The result of `obj._as_graph_element()` if that method is available; otherwise `None`. """ conv_fn = getattr(obj, "_as_graph_element", None) if conv_fn and callable(conv_fn): return conv_fn() return None _TENSOR_LIKE_TYPES = tuple() def is_dense_tensor_like(t): """EXPERIMENTAL: Returns true if `t` implements the tensor interface. See `register_dense_tensor_like_type()` for the current definition of a "tensor-like type". Args: t: An object. Returns: True iff `t` is an instance of one of the registered "tensor-like" types. """ return isinstance(t, _TENSOR_LIKE_TYPES) def register_dense_tensor_like_type(tensor_type): """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface. A "tensor-like type" can represent a single dense tensor, and implements the `name` and `dtype` properties. Args: tensor_type: A type implementing the tensor interface. Raises: TypeError: If `tensor_type` does not implement the tensor interface. """ try: if not isinstance(tensor_type.name, property): raise TypeError("Type %s does not define a `name` property" % tensor_type.__name__) except AttributeError: raise TypeError("Type %s does not define a `name` property" % tensor_type.__name__) try: if not isinstance(tensor_type.dtype, property): raise TypeError("Type %s does not define a `dtype` property" % tensor_type.__name__) except AttributeError: raise TypeError("Type %s does not define a `dtype` property" % tensor_type.__name__) # We expect this list to be small, so choose quadratic complexity # for registration, so that we have a tuple that can be used for # more efficient `isinstance` checks later. global _TENSOR_LIKE_TYPES _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type]) def uid(): """A unique (within this program execution) integer.""" return c_api.TFE_Py_UID() def numpy_text(tensor, is_repr=False): """Human readable representation of a tensor's numpy value.""" if tensor.dtype.is_numpy_compatible: text = repr(tensor.numpy()) if is_repr else str(tensor.numpy()) else: text = "<unprintable>" if "\n" in text: text = "\n" + text return text # NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose. class _TensorLike(object): """Internal cls for grouping Tensor, SparseTensor, ..., for is_instance.""" pass @tf_export("Tensor") class Tensor(_TensorLike): """Represents one of the outputs of an `Operation`. A `Tensor` is a symbolic handle to one of the outputs of an `Operation`. It does not hold the values of that operation's output, but instead provides a means of computing those values in a TensorFlow @{tf.Session}. This class has two primary purposes: 1. A `Tensor` can be passed as an input to another `Operation`. 
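# Illustrative sketch (editor's addition, not part of the original source):
# register_dense_tensor_like_type() above only checks that `name` and `dtype`
# are defined as properties. A minimal, purely hypothetical class that would
# pass that check:
class _IllustrativeDenseTensorLike(object):
  """Hypothetical example type; not used anywhere in TensorFlow."""

  @property
  def name(self):
    return "illustrative:0"

  @property
  def dtype(self):
    return None  # a real implementation would return a tf.DType
# register_dense_tensor_like_type(_IllustrativeDenseTensorLike) would then make
# is_dense_tensor_like() return True for instances of this class.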
This builds a dataflow connection between operations, which enables TensorFlow to execute an entire `Graph` that represents a large, multi-step computation. 2. After the graph has been launched in a session, the value of the `Tensor` can be computed by passing it to @{tf.Session.run}. `t.eval()` is a shortcut for calling `tf.get_default_session().run(t)`. In the following example, `c`, `d`, and `e` are symbolic `Tensor` objects, whereas `result` is a numpy array that stores a concrete value: ```python # Build a dataflow graph. c = tf.constant([[1.0, 2.0], [3.0, 4.0]]) d = tf.constant([[1.0, 1.0], [0.0, 1.0]]) e = tf.matmul(c, d) # Construct a `Session` to execute the graph. sess = tf.Session() # Execute the graph and store the value that `e` represents in `result`. result = sess.run(e) ``` """ # List of Python operators that we allow to override. OVERLOADABLE_OPERATORS = { # Binary. "__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__", "__div__", "__rdiv__", "__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__", "__mod__", "__rmod__", "__lt__", "__le__", "__gt__", "__ge__", "__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__", "__getitem__", "__pow__", "__rpow__", # Unary. "__invert__", "__neg__", "__abs__", "__matmul__", "__rmatmul__" } def __init__(self, op, value_index, dtype): """Creates a new `Tensor`. Args: op: An `Operation`. `Operation` that computes this tensor. value_index: An `int`. Index of the operation's endpoint that produces this tensor. dtype: A `DType`. Type of elements stored in this tensor. Raises: TypeError: If the op is not an `Operation`. """ if not isinstance(op, Operation): raise TypeError("op needs to be an Operation: %s" % op) self._op = op self._value_index = value_index self._dtype = dtypes.as_dtype(dtype) if _USE_C_API: # This will be set by set_shape_and_handle_data_for_outputs. self._shape_val = None else: # The Python code requires all tensors start with a shape to support shape # inference on imported while loops. This isn't necessary with the C API # enabled because the C API provides the shapes for imported nodes. # TODO(skyewm): remove when _USE_C_API is removed. self._shape_val = tensor_shape.unknown_shape() # List of operations that use this Tensor as input. We maintain this list # to easily navigate a computation graph. self._consumers = [] if not _USE_C_SHAPES: # Attributes used for C++ shape inference. Not inspected, only forwarded. # If set, will be a HandleData object from cpp_shape_inference.proto. self._handle_data = None self._id = uid() @property def op(self): """The `Operation` that produces this tensor as an output.""" return self._op @property def dtype(self): """The `DType` of elements in this tensor.""" return self._dtype @property def graph(self): """The `Graph` that contains this tensor.""" return self._op.graph @property def name(self): """The string name of this tensor.""" if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) return "%s:%d" % (self._op.name, self._value_index) @property def device(self): """The name of the device on which this tensor will be produced, or None.""" return self._op.device @property def shape(self): """Returns the `TensorShape` that represents the shape of this tensor. The shape is computed using shape inference functions that are registered in the Op for each `Operation`. See @{tf.TensorShape} for more details of what a shape represents. 
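# Illustrative sketch (editor's addition; standalone script, assumes a TF 1.x
# build in graph mode): because Tensor registers overrides for the operators
# listed in OVERLOADABLE_OPERATORS, plain Python expressions on graph tensors
# build new ops instead of computing values immediately.
import tensorflow as tf

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
c = a + b             # dispatches to the registered __add__ override
print(c)              # symbolic, e.g. Tensor("add:0", shape=(2,), dtype=float32)
with tf.Session() as sess:
  print(sess.run(c))  # concrete value: [4. 6.]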
The inferred shape of a tensor is used to provide shape information without having to launch the graph in a session. This can be used for debugging, and providing early error messages. For example: ```python c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) print(c.shape) ==> TensorShape([Dimension(2), Dimension(3)]) d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) print(d.shape) ==> TensorShape([Dimension(4), Dimension(2)]) # Raises a ValueError, because `c` and `d` do not have compatible # inner dimensions. e = tf.matmul(c, d) f = tf.matmul(c, d, transpose_a=True, transpose_b=True) print(f.shape) ==> TensorShape([Dimension(3), Dimension(4)]) ``` In some cases, the inferred shape may have unknown dimensions. If the caller has additional information about the values of these dimensions, `Tensor.set_shape()` can be used to augment the inferred shape. Returns: A `TensorShape` representing the shape of this tensor. """ if self._shape_val is None: if _USE_C_SHAPES: self._shape_val = self._c_api_shape() else: assert _USE_C_API # Call set_shape_and_handle_data_for_outputs in topological order on all # ops that are needed to compute self.op's shape. We do this instead of # having set_shape_and_handle_data_for_outputs recursively call # Operation.shape on self.op.inputs to overflowing the call stack. need_shapes = self._get_input_ops_without_shapes(self.op) need_shapes.sort(key=lambda op: op._id) for op in need_shapes: set_shape_and_handle_data_for_outputs(op) return self._shape_val def _get_input_ops_without_shapes(self, target_op): """Returns ops needing shape inference to compute target_op's shape.""" result = [] stack = [self._op] visited = set() while stack: op = stack.pop() if op in visited: continue result.append(op) stack.extend(t.op for t in op.inputs if t._shape_val is None) visited.add(op) return result def _c_api_shape(self): """Returns the TensorShape of this tensor according to the C API.""" c_graph = self._op._graph._c_graph # pylint: disable=protected-access shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper( c_graph, self._as_tf_output()) if unknown_shape: return tensor_shape.unknown_shape() else: shape_vector = [None if d == -1 else d for d in shape_vector] return tensor_shape.TensorShape(shape_vector) @property def _shape(self): logging.warning("Tensor._shape is private, use Tensor.shape " "instead. Tensor._shape will eventually be removed.") return self.shape @_shape.setter def _shape(self, value): raise ValueError( "Tensor._shape cannot be assigned, use Tensor.set_shape instead.") def __iter__(self): if not context.executing_eagerly(): raise TypeError( "Tensor objects are not iterable when eager execution is not " "enabled. To iterate over this tensor use tf.map_fn.") shape = self._shape_tuple() if shape is None: raise TypeError("Cannot iterate over a tensor with unknown shape.") if not shape: raise TypeError("Cannot iterate over a scalar tensor.") if shape[0] is None: raise TypeError( "Cannot iterate over a tensor with unknown first dimension.") for i in xrange(shape[0]): yield self[i] def _shape_as_list(self): if self.shape.ndims is not None: return [dim.value for dim in self.shape.dims] else: return None def _shape_tuple(self): shape = self._shape_as_list() if shape is None: return None return tuple(shape) def _rank(self): """Integer rank of this Tensor, if known, else None. 
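# Illustrative sketch (editor's addition; standalone script, assumes a TF 1.x
# build with eager execution available): Tensor.__iter__ above only works when
# eager execution is enabled and the leading dimension is statically known.
import tensorflow as tf

tf.enable_eager_execution()
t = tf.constant([[1, 2], [3, 4]])
for row in t:          # each iteration yields t[i] via __getitem__
  print(row.numpy())   # [1 2], then [3 4]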
Returns: Integer rank or None """ return self.shape.ndims def get_shape(self): """Alias of Tensor.shape.""" return self.shape def set_shape(self, shape): """Updates the shape of this tensor. This method can be called multiple times, and will merge the given `shape` with the current shape of this tensor. It can be used to provide additional information about the shape of this tensor that cannot be inferred from the graph alone. For example, this can be used to provide additional information about the shapes of images: ```python _, image_data = tf.TFRecordReader(...).read(...) image = tf.image.decode_png(image_data, channels=3) # The height and width dimensions of `image` are data dependent, and # cannot be computed without executing the op. print(image.shape) ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)]) # We know that each image in this dataset is 28 x 28 pixels. image.set_shape([28, 28, 3]) print(image.shape) ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)]) ``` Args: shape: A `TensorShape` representing the shape of this tensor, a `TensorShapeProto`, a list, a tuple, or None. Raises: ValueError: If `shape` is not compatible with the current shape of this tensor. """ if _USE_C_SHAPES: # pylint: disable=protected-access # Reset cached shape. self._shape_val = None else: self._shape_val = self.shape.merge_with(shape) if not self._op._graph._c_graph: return # Update C shape even if _USE_C_SHAPES = False, since we still want # set_shape to be reflected in the C API graph for when we run it. if not isinstance(shape, tensor_shape.TensorShape): shape = tensor_shape.TensorShape(shape) dim_list = [] if shape.dims is None: unknown_shape = True else: unknown_shape = False for dim in shape.dims: if dim.value is None: dim_list.append(-1) else: dim_list.append(dim.value) try: c_api.TF_GraphSetTensorShape_wrapper( self._op._graph._c_graph, # pylint: disable=protected-access self._as_tf_output(), dim_list, unknown_shape) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) @property def value_index(self): """The index of this tensor in the outputs of its `Operation`.""" return self._value_index def consumers(self): """Returns a list of `Operation`s that consume this tensor. Returns: A list of `Operation`s. """ if self._op._c_op: # pylint: disable=protected-access consumer_names = c_api.TF_OperationOutputConsumers_wrapper( self._as_tf_output()) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe(name) for name in consumer_names ] # pylint: enable=protected-access else: return self._consumers def _add_consumer(self, consumer): """Add a consumer to this tensor. Args: consumer: an Operation. Raises: TypeError: if the consumer is not an Operation. """ # pylint: disable=protected-access assert not self._op._c_op, "Tensor._add_consumer doesn't work with C API" # pylint: enable=protected-access if not isinstance(consumer, Operation): raise TypeError("Consumer must be an Operation: %s" % consumer) self._consumers.append(consumer) def _as_node_def_input(self): """Return a value to use for the NodeDef "input" attribute. The returned string can be used in a NodeDef "input" attribute to indicate that the NodeDef uses this Tensor as input. Raises: ValueError: if this Tensor's Operation does not have a name. Returns: a string. 
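# Illustrative sketch (editor's addition; standalone script, assumes a TF 1.x
# build in graph mode): consumers() lists the ops that take a tensor as input,
# which is how a computation graph can be walked forward from a tensor.
import tensorflow as tf

x = tf.constant(1.0)
y = tf.square(x)
z = x + y
print([op.type for op in x.consumers()])   # e.g. ['Square', 'Add']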
""" if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) if self._value_index == 0: return self._op.name else: return "%s:%d" % (self._op.name, self._value_index) def _as_tf_output(self): # pylint: disable=protected-access assert self.op._c_op return c_api_util.tf_output(self.op._c_op, self.value_index) # pylint: enable=protected-access def __str__(self): return "Tensor(\"%s\"%s%s%s)" % ( self.name, (", shape=%s" % self.get_shape()) if self.get_shape().ndims is not None else "", (", dtype=%s" % self._dtype.name) if self._dtype else "", (", device=%s" % self.device) if self.device else "") def __repr__(self): return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(), self._dtype.name) def __hash__(self): # Necessary to support Python's collection membership operators return id(self) def __eq__(self, other): # Necessary to support Python's collection membership operators return id(self) == id(other) def __copy__(self): # Make sure _shape_val is computed before we copy. # TODO(b/77597810): get rid of Tensor copies. if self._shape_val is None: set_shape_and_handle_data_for_outputs(self.op) cls = self.__class__ result = cls.__new__(cls) result.__dict__.update(self.__dict__) return result # NOTE(mrry): This enables the Tensor's overloaded "right" binary # operators to run when the left operand is an ndarray, because it # accords the Tensor class higher priority than an ndarray, or a # numpy matrix. # TODO(mrry): Convert this to using numpy's __numpy_ufunc__ # mechanism, which allows more control over how Tensors interact # with ndarrays. __array_priority__ = 100 @staticmethod def _override_operator(operator, func): _override_helper(Tensor, operator, func) def __bool__(self): """Dummy method to prevent a tensor from being used as a Python `bool`. This overload raises a `TypeError` when the user inadvertently treats a `Tensor` as a boolean (e.g. in an `if` statement). For example: ```python if tf.constant(True): # Will raise. # ... if tf.constant(5) < tf.constant(7): # Will raise. # ... ``` This disallows ambiguities between testing the Python value vs testing the dynamic condition of the `Tensor`. Raises: `TypeError`. """ raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. " "Use `if t is not None:` instead of `if t:` to test if a " "tensor is defined, and use TensorFlow ops such as " "tf.cond to execute subgraphs conditioned on the value of " "a tensor.") def __nonzero__(self): """Dummy method to prevent a tensor from being used as a Python `bool`. This is the Python 2.x counterpart to `__bool__()` above. Raises: `TypeError`. """ raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. " "Use `if t is not None:` instead of `if t:` to test if a " "tensor is defined, and use TensorFlow ops such as " "tf.cond to execute subgraphs conditioned on the value of " "a tensor.") def eval(self, feed_dict=None, session=None): """Evaluates this tensor in a `Session`. Calling this method will execute all preceding operations that produce the inputs needed for the operation that produces this tensor. *N.B.* Before invoking `Tensor.eval()`, its graph must have been launched in a session, and either a default session must be available, or `session` must be specified explicitly. Args: feed_dict: A dictionary that maps `Tensor` objects to feed values. See @{tf.Session.run} for a description of the valid feed values. session: (Optional.) The `Session` to be used to evaluate this tensor. If none, the default session will be used. 
Returns: A numpy array corresponding to the value of this tensor. """ return _eval_using_default_session(self, feed_dict, self.graph, session) # TODO(agarwal): consider getting rid of this. class _EagerTensorBase(Tensor): """Base class for EagerTensor.""" @property def dtype(self): # Note: using the intern table directly here as this is # performance-sensitive in some models. return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access def numpy(self): """Returns a numpy array or a scalar with the same contents as the Tensor. TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying buffer but instead always explicitly copy? Note that currently it may or may not copy based on whether the numpy data is properly aligned or not. Returns: A numpy array or a scalar. Numpy array may share memory with the Tensor object. Any changes to one may be reflected in the other. A scalar value is returned when self has rank 0. Raises: ValueError: if the type of this Tensor is not representable in numpy. """ if self.dtype == dtypes.resource: raise ValueError("Resource handles are not convertible to numpy.") return self.cpu()._numpy() # pylint: disable=protected-access # __int__ and __float__ may copy the tensor to CPU and # only work for scalars; values are cast as per numpy. def __int__(self): return int(self.numpy()) def __float__(self): return float(self.numpy()) def __array__(self, dtype=None): return np.array(self.numpy(), dtype=dtype) def __format__(self, format_spec): return self.numpy().__format__(format_spec) def _numpy(self): raise NotImplementedError() def __copy__(self): # Eager Tensors are immutable so it's safe to return themselves as a copy. return self def __deepcopy__(self, memo): # Eager Tensors are immutable so it's safe to return themselves as a copy. del memo return self def _datatype_enum(self): raise NotImplementedError() def _shape_tuple(self): """The shape of this Tensor, as a tuple. This is more performant than tuple(shape().as_list()) as it avoids two list and one object creation. Marked private for now as from an API perspective, it would be better to have a single performant way of getting a shape rather than exposing shape() and shape_tuple() (and heaven forbid, shape_list() etc. as well!). Punting on that for now, but ideally one would work things out and remove the need for this method. Returns: tuple with the shape. """ raise NotImplementedError() def _rank(self): """Integer rank of this Tensor. Unlike regular Tensors, the rank is always known for EagerTensors. This is more performant than len(self._shape_tuple()) Returns: Integer rank """ raise NotImplementedError() def _copy_to_device(self, context, device): # pylint: disable=redefined-outer-name raise NotImplementedError() def __str__(self): return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape, self.dtype.name) def __repr__(self): return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % ( self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True)) @staticmethod def _override_operator(name, func): setattr(_EagerTensorBase, name, func) def _copy(self, ctx=None, device_name=None): """Copies tensor to dest device.""" # pylint: disable=protected-access # Creates a new tensor on the dest device. 
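# Illustrative sketch (editor's addition; standalone script, assumes a TF 1.x
# build with eager execution enabled): .numpy(), __int__ and __array__ above
# are what make eager tensors interoperate with plain Python and NumPy.
import numpy as np
import tensorflow as tf

tf.enable_eager_execution()
t = tf.constant([[1.0, 2.0]])
print(t.numpy())            # ndarray [[1. 2.]]; may share memory with `t`
print(int(tf.constant(3)))  # __int__ goes through .numpy(): 3
print(np.asarray(t))        # __array__ lets NumPy consume eager tensors directly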
if ctx is None: ctx = context.context() if device_name is None: device_name = ctx.device_name # pylint: disable=protected-access try: new_tensor = self._copy_to_device(context=ctx._handle, device=device_name) except core._NotOkStatusException as e: six.raise_from(core._status_to_exception(e.code, e.message), None) # Record the copy on tape and define backprop copy as well. if context.executing_eagerly(): self_device = self.device def grad_fun(dresult): return [dresult._copy(device_name=self_device)] tape.record_operation("_copy", [new_tensor], [self], grad_fun) return new_tensor # pylint: enable=protected-access @property def shape(self): if self._tensor_shape is None: # pylint: disable=access-member-before-definition # `_tensor_shape` is declared and defined in the definition of # `EagerTensor`, in C. self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple()) return self._tensor_shape def get_shape(self): """Alias of Tensor.shape.""" return self.shape def _shape_as_list(self): """The shape of the tensor as a list.""" return list(self._shape_tuple()) @property def ndim(self): """Returns the number of Tensor dimensions.""" return self.shape.ndims def cpu(self): """A copy of this Tensor with contents backed by host memory.""" return self._copy(context.context(), "CPU:0") def gpu(self, gpu_index=0): """A copy of this Tensor with contents backed by memory on the GPU. Arguments: gpu_index: Identifies which GPU to place the contents on the returned Tensor in. Returns: A GPU-memory backed Tensor object initialized with the same contents as this Tensor. """ return self._copy(context.context(), "GPU:" + str(gpu_index)) def __bool__(self): if self._shape_tuple() != (): # pylint: disable=g-explicit-bool-comparison raise ValueError( "Non-scalar tensor %s cannot be converted to boolean." % repr(self)) if self.dtype != dtypes.bool: raise ValueError( "Non-boolean tensor %s cannot be converted to boolean." % repr(self)) return bool(self.cpu().numpy()) def __nonzero__(self): return self.__bool__() def set_shape(self, shape): if not self.shape.is_compatible_with(shape): raise ValueError( "Tensor's shape %s is not compatible with supplied shape %s" % (self.shape, shape)) # Methods not supported / implemented for Eager Tensors. @property def op(self): raise AttributeError( "Tensor.op is meaningless when eager execution is enabled.") @property def graph(self): raise AttributeError( "Tensor.graph is meaningless when eager execution is enabled.") @property def name(self): raise AttributeError( "Tensor.name is meaningless when eager execution is enabled.") @property def value_index(self): raise AttributeError( "Tensor.value_index is meaningless when eager execution is enabled.") def consumers(self): raise NotImplementedError( "Tensor.consumers is meaningless when eager execution is enabled.") def _add_consumer(self, consumer): raise NotImplementedError( "_add_consumer not supported when eager execution is enabled.") def _as_node_def_input(self): raise NotImplementedError( "_as_node_def_input not supported when eager execution is enabled.") def _as_tf_output(self): raise NotImplementedError( "_as_tf_output not supported when eager execution is enabled.") def eval(self, feed_dict=None, session=None): raise NotImplementedError( "eval is not supported when eager execution is enabled, " "is .numpy() what you're looking for?" ) # This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and # registers it with the current module. 
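# Illustrative sketch (editor's addition; standalone script, assumes a TF 1.x
# build with eager execution enabled): cpu()/gpu() above return device copies,
# and only scalar bool tensors are accepted by the eager __bool__.
import tensorflow as tf

tf.enable_eager_execution()
t = tf.constant([1.0, 2.0])
t_host = t.cpu()                   # copy backed by host memory
# t_dev = t.gpu(0)                 # device copy; only valid if a GPU is visible
print(t_host.shape, t_host.dtype)  # (2,) <dtype: 'float32'>
print(bool(tf.constant(True)))     # True; non-scalar or non-bool would raise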
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase) def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False): _ = name, as_ref if dtype and not dtype.is_compatible_with(t.dtype): raise ValueError( "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" % (dtype.name, t.dtype.name, str(t))) return t _tensor_conversion_func_registry = { 0: [(Tensor, _TensorTensorConversionFunction)] } _tensor_conversion_func_cache = {} _tensor_conversion_func_lock = threading.Lock() register_dense_tensor_like_type(Tensor) @tf_export("convert_to_tensor") def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None): """Converts the given `value` to a `Tensor`. This function converts Python objects of various types to `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars. For example: ```python import numpy as np def my_func(arg): arg = tf.convert_to_tensor(arg, dtype=tf.float32) return tf.matmul(arg, arg) + arg # The following calls are equivalent. value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]])) value_2 = my_func([[1.0, 2.0], [3.0, 4.0]]) value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)) ``` This function can be useful when composing a new operation in Python (such as `my_func` in the example above). All standard Python op constructors apply this function to each of their Tensor-valued inputs, which allows those ops to accept numpy arrays, Python lists, and scalars in addition to `Tensor` objects. Note: This function diverges from default Numpy behavior for `float` and `string` types when `None` is present in a Python list or scalar. Rather than silently converting `None` values, an error will be thrown. Args: value: An object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. name: Optional name to use if a new `Tensor` is created. preferred_dtype: Optional element type for the returned tensor, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. Returns: An `Output` based on `value`. Raises: TypeError: If no conversion function is registered for `value`. RuntimeError: If a registered conversion function returns an invalid value. """ return internal_convert_to_tensor( value=value, dtype=dtype, name=name, preferred_dtype=preferred_dtype, as_ref=False) def _error_prefix(name): return "" if name is None else "%s: " % name def internal_convert_to_tensor(value, dtype=None, name=None, as_ref=False, preferred_dtype=None, ctx=None): """Converts the given `value` to an `Tensor`. This function converts Python objects of various types to `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars. For example: This function can be useful when composing a new operation in Python All standard Python op constructors apply this function to each of their Tensor-valued inputs, which allows those ops to accept numpy arrays, Python lists, and scalars in addition to `Tensor` objects. Args: value: An object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. name: Optional name to use if a new `Tensor` is created. 
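# Illustrative sketch (editor's addition; standalone script, assumes a TF 1.x
# build): preferred_dtype, described above, is a soft request: it is honoured
# when the value fits and silently dropped when it does not.
import tensorflow as tf

a = tf.convert_to_tensor(3, preferred_dtype=tf.float32)
b = tf.convert_to_tensor(3.5, preferred_dtype=tf.int32)
print(a.dtype)   # float32: the preference could be honoured
print(b.dtype)   # float32: int32 would lose information, so the hint is ignored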
as_ref: True if we want the mutable view of Variables, if applicable. preferred_dtype: Optional element type for the returned tensor, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. ctx: Optional: The value of context.context(). Returns: A `Tensor` based on `value`. Raises: TypeError: If no conversion function is registered for `value`. RuntimeError: If a registered conversion function returns an invalid value. """ if ctx is None: ctx = context.context() if ctx.executing_eagerly(): # Fast path for EagerTensors that don't need any conversion. if isinstance(value, EagerTensor): # Note that we don't check that value's dtype matches the dtype # argument. We expect that the C runtime will do that checking # when we execute the kernel. return value if dtype is not None: dtype = dtypes.as_dtype(dtype) unwrapped_type = type(value) conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None) if conversion_func_list is None: with _tensor_conversion_func_lock: conversion_func_list = [] for _, funcs_at_priority in sorted( _tensor_conversion_func_registry.items()): for base_type, conversion_func in funcs_at_priority: if isinstance(value, base_type): conversion_func_list.append((base_type, conversion_func)) _tensor_conversion_func_cache[unwrapped_type] = conversion_func_list for base_type, conversion_func in conversion_func_list: # If dtype is None but preferred_dtype is not None, we try to # cast to preferred_dtype first. ret = None if dtype is None and preferred_dtype is not None: try: ret = conversion_func( value, dtype=preferred_dtype, name=name, as_ref=as_ref) except (TypeError, ValueError, errors.UnimplementedError, errors.InvalidArgumentError): # Could not coerce the conversion to use the preferred dtype. ret = None if ret is not None and ret is not NotImplemented: if (ret.dtype.base_dtype != dtypes.as_dtype(preferred_dtype).base_dtype): raise TypeError("convert_to_tensor did not convert to " "the preferred dtype: %s vs %s " % (ret.dtype.base_dtype, dtypes.as_dtype(preferred_dtype).base_dtype)) if ret is None: ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref) if ret is NotImplemented: continue if not isinstance(ret, Tensor): raise RuntimeError( "%sConversion function %r for type %s returned non-Tensor: %r" % (_error_prefix(name), conversion_func, base_type, ret)) if dtype and not dtype.is_compatible_with(ret.dtype): raise RuntimeError( "%sConversion function %r for type %s returned incompatible " "dtype: requested = %s, actual = %s" % (_error_prefix(name), conversion_func, base_type, dtype.name, ret.dtype.name)) return ret raise TypeError("%sCannot convert %r with type %s to Tensor: " "no conversion function registered." % (_error_prefix(name), value, unwrapped_type)) def internal_convert_n_to_tensor(values, dtype=None, name=None, as_ref=False, preferred_dtype=None, ctx=None): """Converts `values` to a list of `Tensor` objects. Args: values: A list of objects that can be consumed by `tf.convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` objects. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. as_ref: True if the caller wants the results as ref tensors. preferred_dtype: Optional element type for the returned tensors, used when dtype is None. 
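# Deliberately simplified standalone model (editor's addition, not
# TensorFlow's actual code) of the lookup performed above: conversion
# functions are grouped by priority, tried in ascending priority order, and
# may opt out by returning NotImplemented.
_sketch_registry = {}   # priority -> list of (base_type, conversion_func)

def _sketch_register(base_type, func, priority=100):
  _sketch_registry.setdefault(priority, []).append((base_type, func))

def _sketch_convert(value):
  for _, funcs in sorted(_sketch_registry.items()):
    for base_type, func in funcs:
      if isinstance(value, base_type):
        result = func(value)
        if result is not NotImplemented:
          return result
  raise TypeError("no conversion function registered for %r" % (value,))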
In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. ctx: The value of context.context(). Returns: A list of `Tensor` and/or `IndexedSlices` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ if not isinstance(values, collections.Sequence): raise TypeError("values must be a list.") ret = [] if ctx is None: ctx = context.context() for i, value in enumerate(values): n = None if name is None else "%s_%d" % (name, i) ret.append( internal_convert_to_tensor( value, dtype=dtype, name=n, as_ref=as_ref, preferred_dtype=preferred_dtype, ctx=ctx)) return ret def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None): """Converts `values` to a list of `Tensor` objects. Args: values: A list of objects that can be consumed by `tf.convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` objects. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. preferred_dtype: Optional element type for the returned tensors, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. Returns: A list of `Tensor` and/or `IndexedSlices` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ return internal_convert_n_to_tensor( values=values, dtype=dtype, name=name, preferred_dtype=preferred_dtype, as_ref=False) @tf_export("convert_to_tensor_or_indexed_slices") def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None): """Converts the given object to a `Tensor` or an `IndexedSlices`. If `value` is an `IndexedSlices` or `SparseTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name to use if a new `Tensor` is created. Returns: An `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`. """ return internal_convert_to_tensor_or_indexed_slices( value=value, dtype=dtype, name=name, as_ref=False) def internal_convert_to_tensor_or_indexed_slices(value, dtype=None, name=None, as_ref=False): """Converts the given object to an `Tensor` or an `IndexedSlices`. If `value` is an `IndexedSlices` or `SparseTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name to use if a new `Tensor` is created. as_ref: True if the caller wants the results as ref tensors. Returns: An `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`. 
Raises: ValueError: If `dtype` does not match the element type of `value`. """ if isinstance(value, _TensorLike): if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype): raise ValueError( "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" % (dtypes.as_dtype(dtype).name, value.dtype.name, str(value))) return value else: return internal_convert_to_tensor( value, dtype=dtype, name=name, as_ref=as_ref) def internal_convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None, as_ref=False): """Converts `values` to a list of `Tensor` or `IndexedSlices` objects. Any `IndexedSlices` or `SparseTensor` objects in `values` are returned unmodified. Args: values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` `IndexedSlices`. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. as_ref: True if the caller wants the results as ref tensors. Returns: A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ if not isinstance(values, collections.Sequence): raise TypeError("values must be a list.") ret = [] for i, value in enumerate(values): if value is None: ret.append(value) else: n = None if name is None else "%s_%d" % (name, i) ret.append( internal_convert_to_tensor_or_indexed_slices( value, dtype=dtype, name=n, as_ref=as_ref)) return ret def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None): """Converts `values` to a list of `Output` or `IndexedSlices` objects. Any `IndexedSlices` or `SparseTensor` objects in `values` are returned unmodified. Args: values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` `IndexedSlices`. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. Returns: A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ return internal_convert_n_to_tensor_or_indexed_slices( values=values, dtype=dtype, name=name, as_ref=False) # TODO(josh11b): Add ctx argument to conversion_func() signature. @tf_export("register_tensor_conversion_function") def register_tensor_conversion_function(base_type, conversion_func, priority=100): """Registers a function for converting objects of `base_type` to `Tensor`. The conversion function must have the following signature: ```python def conversion_func(value, dtype=None, name=None, as_ref=False): # ... ``` It must return a `Tensor` with the given `dtype` if specified. If the conversion function creates a new `Tensor`, it should use the given `name` if specified. All exceptions will be propagated to the caller. The conversion function may return `NotImplemented` for some inputs. In this case, the conversion process will continue to try subsequent conversion functions. If `as_ref` is true, the function must return a `Tensor` reference, such as a `Variable`. 
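# Illustrative sketch (editor's addition; standalone script, assumes a TF 1.x
# build): registering a conversion function with the signature documented
# above for a hypothetical container type named Box.
import tensorflow as tf

class Box(object):                       # hypothetical type, for illustration
  def __init__(self, value):
    self.value = value

def _box_to_tensor(value, dtype=None, name=None, as_ref=False):
  return tf.convert_to_tensor(value.value, dtype=dtype, name=name)

tf.register_tensor_conversion_function(Box, _box_to_tensor)
t = tf.convert_to_tensor(Box([1.0, 2.0]))
print(t)   # Box instances are now accepted wherever a Tensor is expected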
NOTE: The conversion functions will execute in order of priority, followed by order of registration. To ensure that a conversion function `F` runs before another conversion function `G`, ensure that `F` is registered with a smaller priority than `G`. Args: base_type: The base type or tuple of base types for all objects that `conversion_func` accepts. conversion_func: A function that converts instances of `base_type` to `Tensor`. priority: Optional integer that indicates the priority for applying this conversion function. Conversion functions with smaller priority values run earlier than conversion functions with larger priority values. Defaults to 100. Raises: TypeError: If the arguments do not have the appropriate type. """ global _tensor_conversion_func_cache with _tensor_conversion_func_lock: if not (isinstance(base_type, type) or (isinstance(base_type, tuple) and all(isinstance(x, type) for x in base_type))): raise TypeError("base_type must be a type or a tuple of types.") if not callable(conversion_func): raise TypeError("conversion_func must be callable.") try: funcs_at_priority = _tensor_conversion_func_registry[priority] except KeyError: funcs_at_priority = [] _tensor_conversion_func_registry[priority] = funcs_at_priority funcs_at_priority.append((base_type, conversion_func)) _tensor_conversion_func_cache = {} @tf_export("IndexedSlices") class IndexedSlices(_TensorLike): """A sparse representation of a set of tensor slices at given indices. This class is a simple wrapper for a pair of `Tensor` objects: * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`. * `indices`: A 1-D integer `Tensor` with shape `[D0]`. An `IndexedSlices` is typically used to represent a subset of a larger tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`. The values in `indices` are the indices in the first dimension of the slices that have been extracted from the larger tensor. The dense tensor `dense` represented by an `IndexedSlices` `slices` has ```python dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...] ``` The `IndexedSlices` class is used principally in the definition of gradients for operations that have sparse gradients (e.g. @{tf.gather}). Contrast this representation with @{tf.SparseTensor}, which uses multi-dimensional indices and scalar values. 
""" def __init__(self, values, indices, dense_shape=None): """Creates an `IndexedSlices`.""" _get_graph_from_inputs([values, indices, dense_shape]) self._values = values self._indices = indices self._dense_shape = dense_shape @property def values(self): """A `Tensor` containing the values of the slices.""" return self._values @property def indices(self): """A 1-D `Tensor` containing the indices of the slices.""" return self._indices @property def dense_shape(self): """A 1-D `Tensor` containing the shape of the corresponding dense tensor.""" return self._dense_shape @property def name(self): """The name of this `IndexedSlices`.""" return self.values.name @property def device(self): """The name of the device on which `values` will be produced, or `None`.""" return self.values.device @property def op(self): """The `Operation` that produces `values` as an output.""" return self.values.op @property def dtype(self): """The `DType` of elements in this tensor.""" return self.values.dtype @property def graph(self): """The `Graph` that contains the values, indices, and shape tensors.""" return self._values.graph def __str__(self): return "IndexedSlices(indices=%s, values=%s%s)" % ( self._indices, self._values, (", dense_shape=%s" % self._dense_shape) if self._dense_shape is not None else "") def __neg__(self): return IndexedSlices(-self.values, self.indices, self.dense_shape) IndexedSlicesValue = collections.namedtuple( "IndexedSlicesValue", ["values", "indices", "dense_shape"]) def _device_string(dev_spec): if isinstance(dev_spec, pydev.DeviceSpec): return dev_spec.to_string() else: return dev_spec def _NodeDef(op_type, name, device=None, attrs=None): # pylint: disable=redefined-outer-name """Create a NodeDef proto. Args: op_type: Value for the "op" attribute of the NodeDef proto. name: Value for the "name" attribute of the NodeDef proto. device: string, device, or function from NodeDef to string. Value for the "device" attribute of the NodeDef proto. attrs: Optional dictionary where the key is the attribute name (a string) and the value is the respective "attr" attribute of the NodeDef proto (an AttrValue). Returns: A node_def_pb2.NodeDef protocol buffer. """ node_def = node_def_pb2.NodeDef() node_def.op = compat.as_bytes(op_type) node_def.name = compat.as_bytes(name) if attrs is not None: for k, v in six.iteritems(attrs): node_def.attr[k].CopyFrom(v) if device is not None: if callable(device): node_def.device = device(node_def) else: node_def.device = _device_string(device) return node_def # Copied from core/framework/node_def_util.cc # TODO(mrry,josh11b): Consolidate this validation in C++ code. _VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$") _VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$") def _create_c_op(graph, node_def, inputs, control_inputs): """Creates a TF_Operation. Args: graph: a `Graph`. node_def: `node_def_pb2.NodeDef` for the operation to create. inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of `Tensor`s (corresponding to sequence inputs, e.g. "int64 * N", "list(int64)"). The length of the list should be equal to the number of inputs specified by this operation's op def. control_inputs: A list of `Operation`s to set as control dependencies. Returns: A wrapped TF_Operation*. 
""" # pylint: disable=protected-access op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op), compat.as_str(node_def.name)) # Add inputs for op_input in inputs: if isinstance(op_input, (list, tuple)): c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input]) else: c_api.TF_AddInput(op_desc, op_input._as_tf_output()) # Add control inputs for control_input in control_inputs: c_api.TF_AddControlInput(op_desc, control_input._c_op) # pylint: enable=protected-access # Add attrs for name, attr_value in node_def.attr.items(): serialized = attr_value.SerializeToString() # TODO(skyewm): this creates and deletes a new TF_Status for every attr. # It might be worth creating a convenient way to re-use the same status. c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized) try: c_op = c_api.TF_FinishOperation(op_desc) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) return c_op @tf_export("Operation") class Operation(object): """Represents a graph node that performs computation on tensors. An `Operation` is a node in a TensorFlow `Graph` that takes zero or more `Tensor` objects as input, and produces zero or more `Tensor` objects as output. Objects of type `Operation` are created by calling a Python op constructor (such as @{tf.matmul}) or @{tf.Graph.create_op}. For example `c = tf.matmul(a, b)` creates an `Operation` of type "MatMul" that takes tensors `a` and `b` as input, and produces `c` as output. After the graph has been launched in a session, an `Operation` can be executed by passing it to @{tf.Session.run}. `op.run()` is a shortcut for calling `tf.get_default_session().run(op)`. """ def __init__(self, node_def, g, inputs=None, output_types=None, control_inputs=None, input_types=None, original_op=None, op_def=None): r"""Creates an `Operation`. NOTE: This constructor validates the name of the `Operation` (passed as `node_def.name`). Valid `Operation` names match the following regular expression: [A-Za-z0-9.][A-Za-z0-9_.\\-/]* Args: node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and `device`. The `input` attribute is irrelevant here as it will be computed when generating the model. g: `Graph`. The parent graph. inputs: list of `Tensor` objects. The inputs to this `Operation`. output_types: list of `DType` objects. List of the types of the `Tensors` computed by this operation. The length of this list indicates the number of output endpoints of the `Operation`. control_inputs: list of operations or tensors from which to have a control dependency. input_types: List of `DType` objects representing the types of the tensors accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect reference-typed inputs must specify these explicitly. original_op: Optional. Used to associate the new `Operation` with an existing `Operation` (for example, a replica with the op that was replicated). op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type that this `Operation` represents. Raises: TypeError: if control inputs are not Operations or Tensors, or if `node_def` is not a `NodeDef`, or if `g` is not a `Graph`, or if `inputs` are not tensors, or if `inputs` and `input_types` are incompatible. ValueError: if the `node_def` name is not valid. """ # For internal use only: `node_def` can be set to a TF_Operation to create # an Operation for that op. 
This is useful for creating Operations for ops # indirectly created by C API methods, e.g. the ops created by # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields # should be None. if isinstance(node_def, node_def_pb2.NodeDef): if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0: raise ValueError( "Cannot create a tensor proto whose content is larger than 2GB.") if not _VALID_OP_NAME_REGEX.match(node_def.name): raise ValueError("'%s' is not a valid node name" % node_def.name) c_op = None elif type(node_def).__name__ == "SwigPyObject": assert inputs is None assert output_types is None assert control_inputs is None assert input_types is None assert original_op is None assert op_def is None c_op = node_def else: raise TypeError("node_def needs to be a NodeDef: %s" % node_def) if not isinstance(g, Graph): raise TypeError("g needs to be a Graph: %s" % g) self._graph = g if inputs is None: inputs = [] elif not isinstance(inputs, list): raise TypeError("inputs needs to be a list of Tensors: %s" % inputs) for a in inputs: if not isinstance(a, Tensor): raise TypeError("input needs to be a Tensor: %s" % a) if input_types is None: input_types = [i.dtype.base_dtype for i in inputs] else: if not all( x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)): raise TypeError("In op '%s', input types (%s) are not compatible " "with expected types (%s)" % (node_def.name, [i.dtype for i in inputs], input_types)) # Build the list of control inputs. control_input_ops = [] if control_inputs: for c in control_inputs: control_op = None if isinstance(c, Operation): control_op = c elif isinstance(c, (Tensor, IndexedSlices)): control_op = c.op else: raise TypeError("Control input must be an Operation, " "a Tensor, or IndexedSlices: %s" % c) control_input_ops.append(control_op) # Don't set private fields with C API enabled to catch users who need to # switch to public API. # TODO(skyewm): delete these fields once we remove _USE_C_API if not self._graph._c_graph: self._inputs_val = list(inputs) # Defensive copy. self._input_types_val = input_types self._control_inputs_val = control_input_ops self._node_def_val = copy.deepcopy(node_def) self._op_def_val = op_def else: # This will be set by self.inputs. self._inputs_val = None self._id_value = self._graph._next_id() # pylint: disable=protected-access self._original_op = original_op self._traceback = self._graph._extract_stack() # pylint: disable=protected-access self._control_flow_context = self.graph._get_control_flow_context() # pylint: disable=protected-access # Initialize self._c_op. if c_op: # TODO(skyewm): remove this assert when we remove USE_C_API assert self._graph._c_graph # pylint: disable=protected-access self._c_op = c_op elif self._graph._c_graph: # pylint: disable=protected-access if op_def is None: op_def = self._graph._get_op_def(node_def.op) # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs. # Refactor so we don't have to do this here. grouped_inputs = self._reconstruct_sequence_inputs( op_def, inputs, node_def.attr) self._c_op = _create_c_op(self._graph, node_def, grouped_inputs, control_input_ops) else: self._c_op = None # Mark that we consume the inputs. This is unnecessary and unsupported with # the C API enabled, since the C API tracks the tensor consumers instead. if not self._c_op: for input_tensor in self._inputs_val: input_tensor._add_consumer(self) # pylint: disable=protected-access # Initialize self._outputs. 
if self._c_op: num_outputs = c_api.TF_OperationNumOutputs(self._c_op) output_types = [ c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i)) for i in range(num_outputs)] assert output_types is not None elif output_types is None: output_types = [] self._output_types_val = output_types self._outputs = [ Tensor(self, i, output_type) for i, output_type in enumerate(output_types) ] if not c_op: self._control_flow_post_processing() def _control_flow_post_processing(self): """Add this op to its control flow context. This may add new ops and change this op's inputs. self.inputs must be available before calling this method. """ for input_tensor in self.inputs: control_flow_util.CheckInputFromValidContext(self, input_tensor.op) if self._control_flow_context is not None: self._control_flow_context.AddOp(self) self._recompute_node_def() def _reconstruct_sequence_inputs(self, op_def, inputs, attrs): """Regroups a flat list of input tensors into scalar and sequence inputs. Args: op_def: The `op_def_pb2.OpDef` (for knowing the input types) inputs: a list of input `Tensor`s to the op. attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define how long each sequence is) Returns: A list of `Tensor`s (corresponding to scalar inputs) and lists of `Tensor`s (corresponding to sequence inputs). """ grouped_inputs = [] i = 0 for input_arg in op_def.input_arg: if input_arg.number_attr: input_len = attrs[input_arg.number_attr].i is_sequence = True elif input_arg.type_list_attr: input_len = len(attrs[input_arg.type_list_attr].list.type) is_sequence = True else: input_len = 1 is_sequence = False if is_sequence: grouped_inputs.append(inputs[i:i + input_len]) else: grouped_inputs.append(inputs[i]) i += input_len assert i == len(inputs) return grouped_inputs def colocation_groups(self): """Returns the list of colocation groups of the op.""" default_colocation_group = [ compat.as_bytes("loc:@%s" % self.name) ] try: class_attr = self.get_attr("_class") except ValueError: # This op has no explicit colocation group, so it is itself its # own root of a colocation group. return default_colocation_group attr_groups = [ class_name for class_name in class_attr if class_name.startswith(b"loc:@") ] # If there are no colocation groups in the explicit _class field, # return the default colocation group. return attr_groups if attr_groups else default_colocation_group def values(self): """DEPRECATED: Use outputs.""" return tuple(self.outputs) def _get_control_flow_context(self): """Returns the control flow context of this op. Returns: A context object. """ return self._control_flow_context def _set_control_flow_context(self, ctx): """Sets the current control flow context of this op. Args: ctx: a context object. """ self._control_flow_context = ctx @property def name(self): """The full name of this operation.""" if self._c_op: return c_api.TF_OperationName(self._c_op) else: return self._node_def_val.name @property def _id(self): """The unique integer id of this operation.""" return self._id_value @property def device(self): """The name of the device to which this op has been assigned, if any. Returns: The string name of the device to which this op has been assigned, or an empty string if it has not been assigned to a device. """ if self._c_op: return c_api.TF_OperationDevice(self._c_op) else: return self._node_def_val.device @property def _output_types(self): """List this operation's output types. Returns: List of the types of the Tensors computed by this operation. 
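# Illustrative sketch (editor's addition; standalone script, assumes a TF 1.x
# build in graph mode): Operation.name and Operation.device, defined above,
# reflect the scoped op name and the placement requested via tf.device.
import tensorflow as tf

with tf.device("/cpu:0"):
  x = tf.constant(1.0, name="x")
print(x.op.name)     # 'x'
print(x.op.device)   # requested placement, e.g. '/device:CPU:0'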
Each element in the list is an integer whose value is one of the TF_DataType enums defined in c_api.h The length of this list indicates the number of output endpoints of the operation. """ if self._c_op: num_outputs = c_api.TF_OperationNumOutputs(self._c_op) output_types = [ c_api.TF_OperationOutputType(self._tf_output(i)) for i in xrange(num_outputs) ] # TODO(iga): Remove this assert after converting to C API by default. # Just being a bit paranoid here. assert self._output_types_val == output_types # In all the tests we have output_types that are passed into # Operation.__init__ are a list of ints (which is illegal according # to the docstring), but input_types are instances of DType. # This extra assert is to catch if we ever use DType for output_types. if output_types: assert isinstance(output_types[0], int) return output_types else: return self._output_types_val def _tf_output(self, output_idx): """Create and return a new TF_Output for output_idx'th output of this op.""" assert self._c_op tf_output = c_api.TF_Output() tf_output.oper = self._c_op tf_output.index = output_idx return tf_output def _tf_input(self, input_idx): """Create and return a new TF_Input for input_idx'th input of this op.""" assert self._c_op tf_input = c_api.TF_Input() tf_input.oper = self._c_op tf_input.index = input_idx return tf_input def _set_device(self, device): # pylint: disable=redefined-outer-name """Set the device of this operation. Args: device: string or device.. The device to set. """ if self._c_op: c_api.SetRequestedDevice( self._graph._c_graph, # pylint: disable=protected-access self._c_op, # pylint: disable=protected-access compat.as_str(_device_string(device))) else: self._node_def_val.device = _device_string(device) def _add_input(self, tensor, dtype=None): """Add a new input to this operation. Args: tensor: the Tensor to add as an input. dtype: tf.DType: type of the input; defaults to the tensor's dtype. Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph. """ assert not self._c_op, ( "Operation._add_input doesn't work with C API") if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) if dtype is None: dtype = tensor.dtype else: dtype = dtypes.as_dtype(dtype) if not dtype.is_compatible_with(tensor.dtype): raise TypeError( "Cannot convert a tensor of type %s to an input of type %s" % (tensor.dtype.name, dtype.name)) self._inputs_val.append(tensor) self._input_types_val.append(dtype) tensor._add_consumer(self) # pylint: disable=protected-access self._recompute_node_def() # TODO(skyewm): Remove `update_dtype` when we enable the C API. def _update_input(self, index, tensor, update_dtype=True): """Update the input to this operation at the given index. NOTE: This is for TF internal use only. Please don't use it. Args: index: the index of the input to update. tensor: the Tensor to be used as the input at the given index. update_dtype: If `False`, the type for this input is not updated. Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph. """ if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) # Make sure output shapes are already computed for this op in case we create # a cycle (we cannot compute shapes for cycles). Usually shapes are computed # lazily upon request. 
if not _USE_C_SHAPES: set_shape_and_handle_data_for_outputs(self) if self._c_op: # Reset cached inputs. self._inputs_val = None c_api.UpdateEdge( self._graph._c_graph, # pylint: disable=protected-access tensor._as_tf_output(), # pylint: disable=protected-access self._tf_input(index)) else: self._inputs_val[index].consumers().remove(self) self._inputs_val[index] = tensor if update_dtype: self._input_types_val[index] = tensor.dtype tensor._add_consumer(self) # pylint: disable=protected-access self._recompute_node_def() def _add_control_inputs(self, ops): """Add a list of new control inputs to this operation. Args: ops: the list of Operations to add as control input. Raises: TypeError: if ops is not a list of Operations. ValueError: if any op in ops is from a different graph. """ if self._c_op: for op in ops: if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access else: if ops: for op in ops: if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) _assert_same_graph(self, op) self._control_inputs_val.append(op) self._recompute_node_def() def _add_control_input(self, op): """Add a new control input to this operation. Args: op: the Operation to add as control input. Raises: TypeError: if op is not an Operation. ValueError: if op is from a different graph. """ if self._c_op: if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access else: self._add_control_inputs([op]) def _remove_all_control_inputs(self): """Removes any control inputs to this operation.""" if self._c_op: c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access else: del self.control_inputs[:] # Methods below are used when building the NodeDef and Graph proto. 
def _recompute_node_def(self): # TODO(skyewm): remove this function when we switch to C API if self._c_op: return del self._node_def_val.input[:] # pylint: disable=protected-access self._node_def_val.input.extend( [t._as_node_def_input() for t in self._inputs_val]) # pylint: enable=protected-access if self._control_inputs_val: self._node_def_val.input.extend( ["^%s" % op.name for op in self._control_inputs_val]) def __str__(self): return str(self.node_def) def __repr__(self): return "<tf.Operation '%s' type=%s>" % (self.name, self.type) @property def outputs(self): """The list of `Tensor` objects representing the outputs of this op.""" return self._outputs # pylint: disable=protected-access class _InputList(object): """Immutable input list wrapper.""" def __init__(self, inputs): self._inputs = inputs def __iter__(self): return iter(self._inputs) def __len__(self): return len(self._inputs) def __bool__(self): return bool(self._inputs) # Python 3 wants __bool__, Python 2.7 wants __nonzero__ __nonzero__ = __bool__ def __getitem__(self, i): return self._inputs[i] # pylint: enable=protected-access @property def inputs(self): """The list of `Tensor` objects representing the data inputs of this op.""" if self._c_op: if self._inputs_val is None: tf_outputs = c_api.GetOperationInputs(self._c_op) # pylint: disable=protected-access retval = [ self.graph._get_tensor_by_tf_output(tf_output) for tf_output in tf_outputs ] # pylint: enable=protected-access self._inputs_val = Operation._InputList(retval) return self._inputs_val else: return Operation._InputList(self._inputs_val) @property def _inputs(self): logging.warning("Operation._inputs is private, use Operation.inputs " "instead. Operation._inputs will eventually be removed.") return self.inputs @_inputs.setter def _inputs(self, value): raise ValueError("Cannot assign _inputs") @property def _input_types(self): if self._c_op: num_inputs = c_api.TF_OperationNumInputs(self._c_op) input_types = [ dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i))) for i in xrange(num_inputs) ] return input_types else: return self._input_types_val @_input_types.setter def _input_types(self, value): raise ValueError("Cannot assign _input_types") @property def control_inputs(self): """The `Operation` objects on which this op has a control dependency. Before this op is executed, TensorFlow will ensure that the operations in `self.control_inputs` have finished executing. This mechanism can be used to run ops sequentially for performance reasons, or to ensure that the side effects of an op are observed in the correct order. Returns: A list of `Operation` objects. """ if self._c_op: control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe( c_api.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access else: return self._control_inputs_val @property def _control_outputs(self): """The `Operation` objects which have a control dependency on this op. Before any of the ops in self._control_outputs can execute tensorflow will ensure self has finished executing. Returns: A list of `Operation` objects. """ if self._c_op: control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe( c_api.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access else: # TODO(apassos) this should be less inefficient. 
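# A standalone sketch of how control inputs relate to the public
# tf.control_dependencies context manager, assuming the TF 1.x API.
import tensorflow as tf

with tf.Graph().as_default():
  a = tf.constant(1.0, name="a")
  with tf.control_dependencies([a]):
    b = tf.constant(2.0, name="b")
  # a.op must finish before b's op may run, even though no data flows between
  # them; data inputs and control inputs are tracked separately.
  assert b.op.control_inputs == [a.op]
  assert list(b.op.inputs) == []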
return [o for o in self._graph.get_operations() if self in o.control_inputs] @property def _control_inputs(self): logging.warning("Operation._control_inputs is private, use " "Operation.control_inputs instead. " "Operation._control_inputs will eventually be removed.") return self.control_inputs @_control_inputs.setter def _control_inputs(self, value): logging.warning("Operation._control_inputs is private, use " "Operation.control_inputs instead. " "Operation._control_inputs will eventually be removed.") # Copy value because it may be self._control_inputs_val (in particular if # this is called from self._control_inputs += ...), and we don't want to # clear value below. value = copy.copy(value) self._remove_all_control_inputs() self._add_control_inputs(value) @property def type(self): """The type of the op (e.g. `"MatMul"`).""" if self._c_op: op_type = c_api.TF_OperationOpType(self._c_op) return op_type else: return self._node_def_val.op @property def graph(self): """The `Graph` that contains this operation.""" return self._graph @property def node_def(self): # pylint: disable=line-too-long """Returns the `NodeDef` representation of this operation. Returns: A [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto) protocol buffer. """ # pylint: enable=line-too-long if self._c_op: with c_api_util.tf_buffer() as buf: c_api.TF_OperationToNodeDef(self._c_op, buf) data = c_api.TF_GetBuffer(buf) node_def = node_def_pb2.NodeDef() node_def.ParseFromString(compat.as_bytes(data)) return node_def else: return self._node_def_val @property def _node_def(self): logging.warning("Operation._node_def is private, use Operation.node_def " "instead. Operation._node_def will eventually be removed.") return self.node_def @property def op_def(self): # pylint: disable=line-too-long """Returns the `OpDef` proto that represents the type of this op. Returns: An [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto) protocol buffer. """ # pylint: enable=line-too-long if self._c_op: return self._graph._get_op_def(self.type) else: return self._op_def_val @property def _op_def(self): logging.warning("Operation._op_def is private, use Operation.op_def " "instead. Operation._op_def will eventually be removed.") return self.op_def @property def traceback(self): """Returns the call stack from when this operation was constructed.""" return self._graph._convert_stack(self._traceback) # pylint: disable=protected-access @property def traceback_with_start_lines(self): """Same as traceback but includes start line of function definition. Returns: A list of 5-tuples (filename, lineno, name, code, func_start_lineno). """ return self._graph._convert_stack( # pylint: disable=protected-access self._traceback, include_func_start_lineno=True) def _set_attr(self, attr_name, attr_value): """Private method used to set an attribute in the node_def.""" if self._c_op: buf = c_api.TF_NewBufferFromString( compat.as_bytes(attr_value.SerializeToString())) try: # pylint: disable=protected-access c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf) # pylint: enable=protected-access finally: c_api.TF_DeleteBuffer(buf) else: self._node_def_val.attr[attr_name].CopyFrom(attr_value) def get_attr(self, name): """Returns the value of the attr of this op with the given `name`. Args: name: The name of the attr to fetch. Returns: The value of the attr, as a Python object. Raises: ValueError: If this op does not have an attr with the given `name`. 
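# A standalone sketch of the proto-level views of an op and attr access,
# assuming the TF 1.x public API.
import tensorflow as tf

with tf.Graph().as_default():
  c = tf.constant([1.0, 2.0], name="c")
  op = c.op
  assert op.type == "Const"
  assert op.node_def.name == "c"       # Per-instance NodeDef (name, attrs).
  assert op.op_def.name == "Const"     # Per-type OpDef (signature).
  # Attr values come back as plain Python objects (here a tf.DType).
  assert op.get_attr("dtype") == tf.float32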
""" fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"] if self._c_op: try: with c_api_util.tf_buffer() as buf: c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf) data = c_api.TF_GetBuffer(buf) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) x = attr_value_pb2.AttrValue() x.ParseFromString(data) else: if name not in self._node_def_val.attr: raise ValueError( "No attr named '" + name + "' in " + str(self._node_def_val)) x = self._node_def_val.attr[name] # Treat an empty oneof value as an empty list. if not x.WhichOneof("value"): return [] if x.HasField("list"): for f in fields: if getattr(x.list, f): if f == "type": return [dtypes.as_dtype(x) for x in list(getattr(x.list, f))] else: return list(getattr(x.list, f)) return [] else: for f in fields: if x.HasField(f): if f == "type": return dtypes.as_dtype(getattr(x, f)) else: return getattr(x, f) assert False, "Unsupported field type in " + str(x) def run(self, feed_dict=None, session=None): """Runs this operation in a `Session`. Calling this method will execute all preceding operations that produce the inputs needed for this operation. *N.B.* Before invoking `Operation.run()`, its graph must have been launched in a session, and either a default session must be available, or `session` must be specified explicitly. Args: feed_dict: A dictionary that maps `Tensor` objects to feed values. See @{tf.Session.run} for a description of the valid feed values. session: (Optional.) The `Session` to be used to run to this operation. If none, the default session will be used. """ _run_using_default_session(self, feed_dict, self.graph, session) _gradient_registry = registry.Registry("gradient") @tf_export("RegisterGradient") class RegisterGradient(object): """A decorator for registering the gradient function for an op type. This decorator is only used when defining a new op type. For an op with `m` inputs and `n` outputs, the gradient function is a function that takes the original `Operation` and `n` `Tensor` objects (representing the gradients with respect to each output of the op), and returns `m` `Tensor` objects (representing the partial gradients with respect to each input of the op). For example, assuming that operations of type `"Sub"` take two inputs `x` and `y`, and return a single output `x - y`, the following gradient function would be registered: ```python @tf.RegisterGradient("Sub") def _sub_grad(unused_op, grad): return grad, tf.negative(grad) ``` The decorator argument `op_type` is the string type of an operation. This corresponds to the `OpDef.name` field for the proto that defines the operation. """ def __init__(self, op_type): """Creates a new decorator with `op_type` as the Operation type. Args: op_type: The string type of an operation. This corresponds to the `OpDef.name` field for the proto that defines the operation. """ if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string") self._op_type = op_type def __call__(self, f): """Registers the function `f` as gradient function for `op_type`.""" _gradient_registry.register(f, self._op_type) return f @tf_export("NoGradient", "NotDifferentiable") def NotDifferentiable(op_type): """Specifies that ops of type `op_type` is not differentiable. This function should *not* be used for operations that have a well-defined gradient that is not yet implemented. This function is only used when defining a new op type. 
It may be used for ops such as `tf.size()` that are not differentiable. For example: ```python tf.NotDifferentiable("Size") ``` The gradient computed for 'op_type' will then propagate zeros. For ops that have a well-defined gradient but are not yet implemented, no declaration should be made, and an error *must* be thrown if an attempt to request its gradient is made. Args: op_type: The string type of an operation. This corresponds to the `OpDef.name` field for the proto that defines the operation. Raises: TypeError: If `op_type` is not a string. """ if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string") _gradient_registry.register(None, op_type) # Alias for the old name, will be eventually removed. NoGradient = NotDifferentiable def get_gradient_function(op): """Returns the function that computes gradients for "op".""" if not op.inputs: return None try: op_type = op.get_attr("_gradient_op_type") except ValueError: op_type = op.type return _gradient_registry.lookup(op_type) _shape_registry = registry.Registry("shape functions") _default_shape_function_registry = registry.Registry("default shape functions") # These are set to common_shapes.call_cpp_shape_fn by op generated code # (generated by python_op_gen.cc). # It is set outside ops.py to avoid a circular dependency. _call_cpp_shape_fn = None _call_cpp_shape_fn_and_require_op = None def _set_call_cpp_shape_fn(call_cpp_shape_fn): """Sets default shape fns from passed common_shapes.call_cpp_shape_fn.""" global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op if _call_cpp_shape_fn: return # already registered def call_without_requiring(op): return call_cpp_shape_fn(op, require_shape_fn=False) _call_cpp_shape_fn = call_without_requiring def call_with_requiring(op): return call_cpp_shape_fn(op, require_shape_fn=True) _call_cpp_shape_fn_and_require_op = call_with_requiring class RegisterShape(object): """No longer used. Was: A decorator for registering a shape function. Shape functions must now be registered via the SetShapeFn on the original Op specification in C++. """ def __init__(self, op_type): """Saves the `op_type` as the `Operation` type.""" if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string") self._op_type = op_type def __call__(self, f): """Registers "f" as the shape function for "op_type".""" if f is None: assert _call_cpp_shape_fn # None is a special "weak" value that provides a default shape function, # and can be overridden by a non-None registration. try: _default_shape_function_registry.register(_call_cpp_shape_fn, self._op_type) except KeyError: # Ignore duplicate registrations of the weak value. This can # occur if the op library input to wrapper generation # inadvertently links in one or more of the standard op # libraries. pass else: _shape_registry.register(f, self._op_type) return f # TODO(b/74620627): remove when _USE_C_SHAPES is removed def _set_shape_and_handle_data_for_outputs_c_api(op): """Set shapes and resource handle data using info from the C API.""" assert not _USE_C_SHAPES for output in op.outputs: output._shape_val = output._c_api_shape() # Set the resource handle data for compatibility with the Python shape # inference code. 
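# A standalone sketch of how get_gradient_function() resolves a custom gradient
# through the "_gradient_op_type" attr, assuming the TF 1.x API; the registered
# name "ZeroGrad" is made up for the example.
import tensorflow as tf

@tf.RegisterGradient("ZeroGrad")
def _zero_grad(op, unused_grad):
  # Pretend the gradient of the overridden op is zero everywhere.
  return tf.zeros_like(op.inputs[0])

with tf.Graph().as_default() as g:
  x = tf.constant(3.0)
  with g.gradient_override_map({"Square": "ZeroGrad"}):
    y = tf.square(x)           # This op carries _gradient_op_type="ZeroGrad".
  dx, = tf.gradients(y, [x])
  with tf.Session() as sess:
    assert sess.run(dx) == 0.0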
serialized = c_api.ResourceHandleShapeAndType( op._graph._c_graph, output._as_tf_output()) if serialized: output._handle_data = ( cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData .FromString(compat.as_bytes(serialized))) else: output._handle_data = None # TODO(b/74620627): remove when _USE_C_SHAPES is removed def set_shape_and_handle_data_for_outputs(op): """Set the shapes and resource handle data for op's outputs. When _USE_C_API = True, this is lazily called when a tensor's shape is first requested. Usually this should work automatically, but some edge cases may require manaully calling this first to make sure Tensor._shape_val and Tensor._handle_data are set (e.g. manually overriding _handle_data, copying a Tensor). """ if _USE_C_SHAPES: return if op.graph._is_function(op.type): for output in op.outputs: output._shape_val = tensor_shape.unknown_shape() return try: shape_func = _shape_registry.lookup(op.type) except LookupError: try: shape_func = _default_shape_function_registry.lookup(op.type) except LookupError: shape_func = _call_cpp_shape_fn_and_require_op shapes = shape_func(op) if shapes is None: raise RuntimeError( "Shape function for op %s did not return any shapes" % op) elif isinstance(shapes, dict): # Returned by call_cpp_shape_fn shapes_dict = shapes shapes = shapes_dict["shapes"] handle_datas = shapes_dict["handle_data"] for output, handle_data in zip(op.outputs, handle_datas): # Don't override any existing handle data that may have been manually set. # pylint: disable=protected-access if output._handle_data is None: output._handle_data = handle_data # pylint: enable=protected-access if len(op.outputs) != len(shapes): raise RuntimeError( "Shape function for op %s returned %d shapes but expected %d %s %s" % (op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes))) for output, s in zip(op.outputs, shapes): output._shape_val = tensor_shape.unknown_shape() output._shape_val = output._shape_val.merge_with(s) class OpStats(object): """A holder for statistics about an operator. This class holds information about the resource requirements for an op, including the size of its weight parameters on-disk and how many FLOPS it requires to execute forward inference. If you define a new operation, you can create a function that will return a set of information about its usage of the CPU and disk space when serialized. The function itself takes a Graph object that's been set up so you can call methods like get_tensor_by_name to help calculate the results, and a NodeDef argument. """ def __init__(self, statistic_type, value=None): """Sets up the initial placeholders for the statistics.""" self.statistic_type = statistic_type self.value = value @property def statistic_type(self): return self._statistic_type @statistic_type.setter def statistic_type(self, statistic_type): self._statistic_type = statistic_type @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __iadd__(self, other): if other.statistic_type != self.statistic_type: raise ValueError("Can't add an OpStat of type %s to one of %s." % (self.statistic_type, other.statistic_type)) if self.value is None: self.value = other.value elif other.value is not None: self._value += other.value return self _stats_registry = registry.Registry("statistical functions") class RegisterStatistics(object): """A decorator for registering the statistics function for an op type. 
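# A standalone sketch of OpStats accumulation, assuming this module is imported
# as tensorflow.python.framework.ops: stats of the same type merge with "+=",
# and a None value means "nothing recorded yet".
from tensorflow.python.framework import ops

total = ops.OpStats("flops")
total += ops.OpStats("flops", 120)
total += ops.OpStats("flops", 80)
assert total.value == 200
try:
  total += ops.OpStats("weight_parameters", 5)   # Different statistic type.
except ValueError:
  pass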
This decorator can be defined for an op type so that it gives a report on the resources used by an instance of an operator, in the form of an OpStats object. Well-known types of statistics include these so far: - flops: When running a graph, the bulk of the computation happens doing numerical calculations like matrix multiplications. This type allows a node to return how many floating-point operations it takes to complete. The total number of FLOPs for a graph is a good guide to its expected latency. You can add your own statistics just by picking a new type string, registering functions for the ops you care about, and then calling get_stats_for_node_def. If a statistic for an op is registered multiple times, a KeyError will be raised. Since the statistics is counted on a per-op basis. It is not suitable for model parameters (capacity), which is expected to be counted only once, even if it is shared by multiple ops. (e.g. RNN) For example, you can define a new metric called doohickey for a Foo operation by placing this in your code: ```python @ops.RegisterStatistics("Foo", "doohickey") def _calc_foo_bojangles(unused_graph, unused_node_def): return ops.OpStats("doohickey", 20) ``` Then in client code you can retrieve the value by making this call: ```python doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey") ``` If the NodeDef is for an op with a registered doohickey function, you'll get back the calculated amount in doohickey.value, or None if it's not defined. """ def __init__(self, op_type, statistic_type): """Saves the `op_type` as the `Operation` type.""" if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string.") if "," in op_type: raise TypeError("op_type must not contain a comma.") self._op_type = op_type if not isinstance(statistic_type, six.string_types): raise TypeError("statistic_type must be a string.") if "," in statistic_type: raise TypeError("statistic_type must not contain a comma.") self._statistic_type = statistic_type def __call__(self, f): """Registers "f" as the statistics function for "op_type".""" _stats_registry.register(f, self._op_type + "," + self._statistic_type) return f def get_stats_for_node_def(graph, node, statistic_type): """Looks up the node's statistics function in the registry and calls it. This function takes a Graph object and a NodeDef from a GraphDef, and if there's an associated statistics method, calls it and returns a result. If no function has been registered for the particular node type, it returns an empty statistics object. Args: graph: A Graph object that's been set up with the node's graph. node: A NodeDef describing the operator. statistic_type: A string identifying the statistic we're interested in. Returns: An OpStats object containing information about resource usage. """ try: stats_func = _stats_registry.lookup(node.op + "," + statistic_type) result = stats_func(graph, node) except LookupError: result = OpStats(statistic_type) return result def _name_from_scope_name(name): """Returns the name of an op given the name of its scope. Args: name: the name of the scope. Returns: the name of the op (equal to scope name minus any trailing slash). """ return name[:-1] if (name and name[-1] == "/") else name @tf_export("Graph") class Graph(object): """A TensorFlow computation, represented as a dataflow graph. A `Graph` contains a set of @{tf.Operation} objects, which represent units of computation; and @{tf.Tensor} objects, which represent the units of data that flow between operations. 
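# A standalone sketch of registering and querying a per-node statistic,
# assuming this module is imported as tensorflow.python.framework.ops; the
# statistic name "doohickey" and its value are made up, mirroring the docstring
# above.
import tensorflow as tf
from tensorflow.python.framework import ops

@ops.RegisterStatistics("Const", "doohickey")
def _const_doohickey(unused_graph, unused_node_def):
  return ops.OpStats("doohickey", 20)

with tf.Graph().as_default() as g:
  c = tf.constant(1.0)
assert ops.get_stats_for_node_def(g, c.op.node_def, "doohickey").value == 20
# Unregistered statistics come back as an empty OpStats (value is None).
assert ops.get_stats_for_node_def(g, c.op.node_def, "nonexistent_stat").value is None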
A default `Graph` is always registered, and accessible by calling @{tf.get_default_graph}. To add an operation to the default graph, simply call one of the functions that defines a new `Operation`: ```python c = tf.constant(4.0) assert c.graph is tf.get_default_graph() ``` Another typical usage involves the @{tf.Graph.as_default} context manager, which overrides the current default graph for the lifetime of the context: ```python g = tf.Graph() with g.as_default(): # Define operations and tensors in `g`. c = tf.constant(30.0) assert c.graph is g ``` Important note: This class *is not* thread-safe for graph construction. All operations should be created from a single thread, or external synchronization must be provided. Unless otherwise specified, all methods are not thread-safe. A `Graph` instance supports an arbitrary number of "collections" that are identified by name. For convenience when building a large graph, collections can store groups of related objects: for example, the `tf.Variable` uses a collection (named @{tf.GraphKeys.GLOBAL_VARIABLES}) for all variables that are created during the construction of a graph. The caller may define additional collections by specifying a new name. """ def __init__(self): """Creates a new, empty Graph.""" # Protects core state that can be returned via public accessors, as well as # synchronizes Session.run calls with methods that create and mutate ops # (e.g. Graph.create_op()). This synchronization is necessary because it's # illegal to modify an operation after it's been run. Thread-safety is # provided on a best-effort basis to support buggy programs, and is not # guaranteed by the public `tf.Graph` API. # # The lock must be reentrant because create_op can be called recursively due # to control flow. Without a reentrant lock, many methods would also need a # "locked" version or parameter (including generated code). # # NOTE(mrry): This does not protect the various stacks. A warning will # be reported if these are used from multiple threads self._lock = threading.RLock() self._nodes_by_id = dict() # GUARDED_BY(self._lock) self._next_id_counter = 0 # GUARDED_BY(self._lock) self._nodes_by_name = dict() # GUARDED_BY(self._lock) self._version = 0 # GUARDED_BY(self._lock) # Maps a name used in the graph to the next id to use for that name. self._names_in_use = {} self._stack_state_is_thread_local = False self._thread_local = threading.local() # Functions that will be applied to choose a device if none is specified. # After switch_to_thread_local(), self._thread_local._device_function_stack # is used instead. self._graph_device_function_stack = [] # Default original_op applied to new ops. self._default_original_op = None # Current control flow context. It could be either CondContext or # WhileContext defined in ops/control_flow_ops.py self._control_flow_context = None # A new node will depend of the union of all of the nodes in the stack. # After switch_to_thread_local(), # self._thread_local._control_dependencies_stack is used instead. self._graph_control_dependencies_stack = [] # Arbitrary collections of objects. self._collections = {} # The graph-level random seed self._seed = None # A dictionary of attributes that should be applied to all ops. self._attr_scope_map = {} # A map from op type to the kernel label that should be used. self._op_to_kernel_label_map = {} # A map from op type to an alternative op type that should be used when # computing gradients. self._gradient_override_map = {} # True if the graph is considered "finalized". 
In that case no # new operations can be added. self._finalized = False # Functions defined in the graph self._functions = collections.OrderedDict() # Default GraphDef versions self._graph_def_versions = versions_pb2.VersionDef( producer=versions.GRAPH_DEF_VERSION, min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER) self._building_function = False # Stack of colocate_with ops. After switch_to_thread_local(), # self._thread_local._colocation_stack is used instead. self._graph_colocation_stack = [] # Set of tensors that are dangerous to feed! self._unfeedable_tensors = set() # Set of operations that are dangerous to fetch! self._unfetchable_ops = set() # A map of tensor handle placeholder to tensor dtype. self._handle_feeders = {} # A map from tensor handle to its read op. self._handle_readers = {} # A map from tensor handle to its move op. self._handle_movers = {} # A map from tensor handle to its delete op. self._handle_deleters = {} # Allow optimizers and other objects to pseudo-uniquely key graphs (this key # will be shared when defining function graphs, for example, so optimizers # being called inside function definitions behave as if they were seeing the # actual outside graph). self._graph_key = "grap-key-%d/" % (uid(),) # A string with the last reduction method passed to # losses.compute_weighted_loss(), or None. self._last_loss_reduction = None self._container = "" self._registered_ops = op_def_registry.get_registered_ops() # TODO(skyewm): fold as much of the above as possible into the C # implementation if self._use_c_api_hack(): self._scoped_c_graph = c_api_util.ScopedTFGraph() # The C API requires all ops to have shape functions. Disable this # requirement (many custom ops do not have shape functions, and we don't # want to break these existing cases). c_api.SetRequireShapeInferenceFns(self._c_graph, False) else: self._scoped_c_graph = None # TODO(apassos) remove once the C API is used by default. def _use_c_api_hack(self): """Temporary hack; can be overridden to force C API usage.""" return _USE_C_API def _convert_stack(self, stack, include_func_start_lineno=False): """Converts a stack extracted using _extract_stack() to a traceback stack. Args: stack: A list of n 5-tuples, (filename, lineno, name, frame_globals, func_start_lineno). include_func_start_lineno: True if function start line number should be included as the 5th entry in return tuples. Returns: A list of n 4-tuples or 5-tuples (filename, lineno, name, code, [optional: func_start_lineno]), where the code tuple element is calculated from the corresponding elements of the input tuple. """ ret = [] for (filename, lineno, name, frame_globals, func_start_lineno, unused_frame_info) in stack: linecache.checkcache(filename) line = linecache.getline(filename, lineno, frame_globals) if line: line = line.strip() else: line = None if include_func_start_lineno: ret.append((filename, lineno, name, line, func_start_lineno)) else: ret.append((filename, lineno, name, line)) return ret # Note: this method is private because the API of tf.Graph() is public and # frozen, and this functionality is still not ready for public visibility. @tf_contextlib.contextmanager def _variable_creator_scope(self, creator): # This step makes a copy of the existing stack, and it also initializes # self._thread_local._variable_creator_stack if it doesn't exist yet. 
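# A standalone sketch of the converted traceback format described above,
# assuming the TF 1.x API: each converted frame is a
# (filename, lineno, name, code) tuple captured when the op was constructed.
import tensorflow as tf

with tf.Graph().as_default():
  c = tf.constant(1.0, name="c")
for filename, lineno, func_name, code in c.op.traceback:
  # `code` is the source line text, or None if it could not be read.
  assert isinstance(filename, str) and isinstance(lineno, int)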
old = list(self._variable_creator_stack) self._thread_local._variable_creator_stack.append(creator) try: yield finally: self._thread_local._variable_creator_stack = old # Note: this method is private because the API of tf.Graph() is public and # frozen, and this functionality is still not ready for public visibility. @property def _variable_creator_stack(self): if not hasattr(self._thread_local, "_variable_creator_stack"): self._thread_local._variable_creator_stack = [] return list(self._thread_local._variable_creator_stack) @_variable_creator_stack.setter def _variable_creator_stack(self, variable_creator_stack): self._thread_local._variable_creator_stack = variable_creator_stack def _extract_stack(self): """A lightweight, extensible re-implementation of traceback.extract_stack. NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for each stack frame using linecache, which results in an abundance of stat() calls. This implementation does not retrieve the code, and any consumer should apply _convert_stack to the result to obtain a traceback that can be formatted etc. using traceback methods. Derived classes can implement _extract_frame_info() to add extra information to the traceback. Returns: A list of 6-tuples (filename, lineno, name, frame_globals, func_start_lineno, custom_info) corresponding to the call stack of the current thread. """ try: raise ZeroDivisionError except ZeroDivisionError: f = sys.exc_info()[2].tb_frame.f_back ret = [] while f is not None: lineno = f.f_lineno co = f.f_code filename = co.co_filename name = co.co_name frame_globals = f.f_globals func_start_lineno = co.co_firstlineno frame_info = self._extract_frame_info(f) ret.append((filename, lineno, name, frame_globals, func_start_lineno, frame_info)) f = f.f_back ret.reverse() return ret def _extract_frame_info(self, frame): # pylint: disable=unused-argument """Extracts custom information from a frame in an op traceback.""" return None def _check_not_finalized(self): """Check if the graph is finalized. Raises: RuntimeError: If the graph finalized. """ if self._finalized: raise RuntimeError("Graph is finalized and cannot be modified.") def _add_op(self, op): """Adds 'op' to the graph. Args: op: the Operator or Tensor to add. Raises: TypeError: if op is not an Operation or Tensor. ValueError: if the op.name or op._id are already used. """ self._check_not_finalized() if not isinstance(op, (Tensor, Operation)): raise TypeError("op must be a Tensor or Operation: %s" % op) with self._lock: # pylint: disable=protected-access if op._id in self._nodes_by_id: raise ValueError("cannot add an op with id %d as it already " "exists in the graph" % op._id) if op.name in self._nodes_by_name: raise ValueError("cannot add op with name %s as that name " "is already used" % op.name) self._nodes_by_id[op._id] = op self._nodes_by_name[op.name] = op self._version = max(self._version, op._id) # pylint: enable=protected-access @property def _c_graph(self): if self._scoped_c_graph: return self._scoped_c_graph.graph return None @property def version(self): """Returns a version number that increases as ops are added to the graph. Note that this is unrelated to the @{tf.Graph.graph_def_versions}. Returns: An integer version that increases as ops are added to the graph. """ if self._finalized: return self._version with self._lock: return self._version @property def graph_def_versions(self): # pylint: disable=line-too-long """The GraphDef version information of this graph. 
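# A standalone sketch of Graph.version, assuming the TF 1.x API: the value
# grows as ops are added and is unrelated to graph_def_versions.
import tensorflow as tf

g = tf.Graph()
assert g.version == 0
with g.as_default():
  tf.constant(1.0)
  first = g.version
  tf.constant(2.0)
assert g.version > first > 0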
For details on the meaning of each version, see [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto). Returns: A `VersionDef`. """ # pylint: enable=line-too-long if self._c_graph: with c_api_util.tf_buffer() as buf: c_api.TF_GraphVersions(self._c_graph, buf) data = c_api.TF_GetBuffer(buf) version_def = versions_pb2.VersionDef() version_def.ParseFromString(compat.as_bytes(data)) return version_def else: return self._graph_def_versions @property def seed(self): """The graph-level random seed of this graph.""" return self._seed @seed.setter def seed(self, seed): self._seed = seed @property def finalized(self): """True if this graph has been finalized.""" return self._finalized def finalize(self): """Finalizes this graph, making it read-only. After calling `g.finalize()`, no new operations can be added to `g`. This method is used to ensure that no operations are added to a graph when it is shared between multiple threads, for example when using a @{tf.train.QueueRunner}. """ self._finalized = True def _unsafe_unfinalize(self): """Opposite of `finalize`. Internal interface. NOTE: Unfinalizing a graph could have negative impact on performance, especially in a multi-threaded environment. Unfinalizing a graph when it is in use by a Session may lead to undefined behavior. Ensure that all sessions using a graph are closed before calling this method. """ self._finalized = False def _get_control_flow_context(self): """Returns the current control flow context. Returns: A context object. """ return self._control_flow_context def _set_control_flow_context(self, ctx): """Sets the current control flow context. Args: ctx: a context object. """ self._control_flow_context = ctx def _copy_functions_to_graph_def(self, graph_def, starting_bytesize): """If this graph contains functions, copy them to `graph_def`.""" bytesize = starting_bytesize for f in self._functions.values(): bytesize += f.definition.ByteSize() if bytesize >= (1 << 31) or bytesize < 0: raise ValueError("GraphDef cannot be larger than 2GB.") graph_def.library.function.extend([f.definition]) if f.grad_func_name: grad_def = function_pb2.GradientDef() grad_def.function_name = f.name grad_def.gradient_func = f.grad_func_name graph_def.library.gradient.extend([grad_def]) def _as_graph_def(self, from_version=None, add_shapes=False): # pylint: disable=line-too-long """Returns a serialized `GraphDef` representation of this graph. The serialized `GraphDef` can be imported into another `Graph` (using @{tf.import_graph_def}) or used with the [C++ Session API](../../../../api_docs/cc/index.md). This method is thread-safe. Args: from_version: Optional. If this is set, returns a `GraphDef` containing only the nodes that were added to this graph since its `version` property had the given value. add_shapes: If true, adds an "_output_shapes" list attr to each node with the inferred shapes of each of its outputs. Returns: A tuple containing a [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto) protocol buffer, and the version of the graph to which that `GraphDef` corresponds. Raises: ValueError: If the `graph_def` would be too large. """ # pylint: enable=line-too-long if self._c_graph: with self._lock: with c_api_util.tf_buffer() as buf: c_api.TF_GraphToGraphDef(self._c_graph, buf) data = c_api.TF_GetBuffer(buf) graph = graph_pb2.GraphDef() graph.ParseFromString(compat.as_bytes(data)) # Strip the experimental library field iff it's empty. 
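# A standalone sketch of Graph.finalize(), assuming the TF 1.x API: a finalized
# graph rejects new ops, which is what makes it safe to share across threads.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  tf.constant(1.0)
g.finalize()
assert g.finalized
try:
  with g.as_default():
    tf.constant(2.0)
except RuntimeError:
  pass   # "Graph is finalized and cannot be modified."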
if not graph.library.function: graph.ClearField("library") if add_shapes: for node in graph.node: op = self._nodes_by_name[node.name] if op.outputs: node.attr["_output_shapes"].list.shape.extend( [output.get_shape().as_proto() for output in op.outputs]) else: with self._lock: graph = graph_pb2.GraphDef() graph.versions.CopyFrom(self._graph_def_versions) bytesize = 0 for op_id in sorted(self._nodes_by_id): op = self._nodes_by_id[op_id] if from_version is None or op_id > from_version: graph.node.extend([op.node_def]) if op.outputs and add_shapes: assert "_output_shapes" not in graph.node[-1].attr graph.node[-1].attr["_output_shapes"].list.shape.extend( [output.get_shape().as_proto() for output in op.outputs]) bytesize += op.node_def.ByteSize() if bytesize >= (1 << 31) or bytesize < 0: raise ValueError("GraphDef cannot be larger than 2GB.") self._copy_functions_to_graph_def(graph, bytesize) return graph, self._version def as_graph_def(self, from_version=None, add_shapes=False): # pylint: disable=line-too-long """Returns a serialized `GraphDef` representation of this graph. The serialized `GraphDef` can be imported into another `Graph` (using @{tf.import_graph_def}) or used with the [C++ Session API](../../api_docs/cc/index.md). This method is thread-safe. Args: from_version: Optional. If this is set, returns a `GraphDef` containing only the nodes that were added to this graph since its `version` property had the given value. add_shapes: If true, adds an "_output_shapes" list attr to each node with the inferred shapes of each of its outputs. Returns: A [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto) protocol buffer. Raises: ValueError: If the `graph_def` would be too large. """ # pylint: enable=line-too-long result, _ = self._as_graph_def(from_version, add_shapes) return result def _is_function(self, name): """Tests whether 'name' is registered in this graph's function library. Args: name: string op name. Returns: bool indicating whether or not 'name' is registered in function library. """ return name in self._functions def _get_function(self, name): """Returns the function definition for 'name'. Args: name: string function name. Returns: The function def proto. """ return self._functions.get(name, None) def _add_function(self, function): """Adds a function to the graph. After the function has been added, you can call to the function by passing the function name in place of an op name to `Graph.create_op()`. Args: function: A `_DefinedFunction` object. Raises: ValueError: if another function is defined with the same name. """ name = function.name # Sanity checks on gradient definition. if (function.grad_func_name is not None) and (function.python_grad_func is not None): raise ValueError("Gradient defined twice for function %s" % name) # Add function to graph # pylint: disable=protected-access if self._c_graph: # Handle functions created without using the C API. TODO(apassos,skyewm) # remove this when all functions are generated using the C API by default # as this will be unnecessary. if not function._c_func: serialized = function.definition.SerializeToString() c_func = c_api.TF_FunctionImportFunctionDef(serialized) function._c_func = c_api_util.ScopedTFFunction(c_func) gradient = (function._grad_func._c_func.func if function._grad_func else None) c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient) else: # If there is already a function with the same name, raise an error # if bodies are different. Else, do nothing. 
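# A standalone sketch of Graph.as_graph_def(), assuming the TF 1.x API: the
# result is a GraphDef proto with one NodeDef per op, and add_shapes=True
# attaches an "_output_shapes" attr to each node.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  tf.constant([[1.0, 2.0]], name="c")
graph_def = g.as_graph_def(add_shapes=True)
node = next(n for n in graph_def.node if n.name == "c")
assert node.op == "Const"
shape = node.attr["_output_shapes"].list.shape[0]
assert [d.size for d in shape.dim] == [1, 2]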
The C API version above # has the same behavior. previous = self._functions.get(name, None) if previous: # This check is not ideal as we can have a hash collision with only # 32 bits in the hash, but the non C API mode is being deprecated. # Don't bother changing it now. if previous._hash_str == function._hash_str: return else: raise ValueError("Cannot add function (%s, hash %s) to graph (%s). " "Another function (%s, hash %s) is already defined " "with that name (%s)" % ( function, function._hash_str, self, previous, previous._hash_str, name)) # pylint: enable=protected-access self._functions[name] = function # Need a new-enough consumer to support the functions we add to the graph. if self._graph_def_versions.min_consumer < 12: self._graph_def_versions.min_consumer = 12 @property def building_function(self): """Returns True iff this graph represents a function.""" return self._building_function # Helper functions to create operations. def create_op( self, op_type, inputs, dtypes, # pylint: disable=redefined-outer-name input_types=None, name=None, attrs=None, op_def=None, compute_shapes=True, compute_device=True): """Creates an `Operation` in this graph. This is a low-level interface for creating an `Operation`. Most programs will not call this method directly, and instead use the Python op constructors, such as `tf.constant()`, which add ops to the default graph. Args: op_type: The `Operation` type to create. This corresponds to the `OpDef.name` field for the proto that defines the operation. inputs: A list of `Tensor` objects that will be inputs to the `Operation`. dtypes: A list of `DType` objects that will be the types of the tensors that the operation produces. input_types: (Optional.) A list of `DType`s that will be the types of the tensors that the operation consumes. By default, uses the base `DType` of each input in `inputs`. Operations that expect reference-typed inputs must specify `input_types` explicitly. name: (Optional.) A string name for the operation. If not specified, a name is generated based on `op_type`. attrs: (Optional.) A dictionary where the key is the attribute name (a string) and the value is the respective `attr` attribute of the `NodeDef` proto that will represent the operation (an `AttrValue` proto). op_def: (Optional.) The `OpDef` proto that describes the `op_type` that the operation will have. compute_shapes: (Optional.) If True, shape inference will be performed to compute the shapes of the outputs. compute_device: (Optional.) If True, device functions will be executed to compute the device property of the Operation. Raises: TypeError: if any of the inputs is not a `Tensor`. ValueError: if colocation conflicts with existing device assignment. Returns: An `Operation` object. """ self._check_not_finalized() for idx, a in enumerate(inputs): if not isinstance(a, Tensor): raise TypeError("Input #%d is not a tensor: %s" % (idx, a)) if name is None: name = op_type # If a names ends with a '/' it is a "name scope" and we use it as-is, # after removing the trailing '/'. if name and name[-1] == "/": name = _name_from_scope_name(name) else: name = self.unique_name(name) node_def = _NodeDef(op_type, name, device=None, attrs=attrs) input_ops = set([t.op for t in inputs]) control_inputs = self._control_dependencies_for_inputs(input_ops) # _create_op_helper mutates the new Operation. _lock ensures a Session.run # call cannot occur between creating and mutating the op. 
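# A standalone sketch of the low-level Graph.create_op() path described above,
# assuming the TF 1.x API; most code should use op constructors such as
# tf.constant() instead. The attr protos are built by hand here just to show
# what create_op() expects.
import tensorflow as tf
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import tensor_util

g = tf.Graph()
with g.as_default():
  value_attr = attr_value_pb2.AttrValue(
      tensor=tensor_util.make_tensor_proto(42, dtype=tf.int32))
  dtype_attr = attr_value_pb2.AttrValue(type=types_pb2.DT_INT32)
  op = g.create_op("Const", inputs=[], dtypes=[tf.int32], name="answer",
                   attrs={"value": value_attr, "dtype": dtype_attr})
assert op.type == "Const"
assert op.outputs[0].dtype == tf.int32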
with self._lock: ret = Operation( node_def, self, inputs=inputs, output_types=dtypes, control_inputs=control_inputs, input_types=input_types, original_op=self._default_original_op, op_def=op_def) # Note: shapes are lazily computed with the C API enabled. # # TODO(skyewm): unlike in the original Python implementation, the C API # always computes shape information (even for function calls, which the # original Python shape inference code doesn't handle). Deprecate the # compute_shapes argument. if not _USE_C_API and compute_shapes: set_shape_and_handle_data_for_outputs(ret) self._create_op_helper(ret, compute_shapes=compute_shapes, compute_device=compute_device) return ret def _create_op_from_tf_operation(self, c_op, compute_device=True): """Creates an `Operation` in this graph from the supplied TF_Operation. This method is like create_op() except the new Operation is constructed using `c_op`. The returned Operation will have `c_op` as its _c_op field. This is used to create Operation objects around TF_Operations created indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile). This function does not call Operation._control_flow_post_processing or Graph._control_dependencies_for_inputs (since the inputs may not be available yet). The caller is responsible for calling these methods. Args: c_op: a wrapped TF_Operation compute_device: (Optional.) If True, device functions will be executed to compute the device property of the Operation. Returns: An `Operation` object. """ self._check_not_finalized() ret = Operation(c_op, self) # If a name_scope was created with ret.name but no nodes were created in it, # the name will still appear in _names_in_use even though the name hasn't # been used. This is ok, just leave _names_in_use as-is in this case. # TODO(skyewm): make the C API guarantee no name conflicts. if ret.name not in self._names_in_use: self._names_in_use[ret.name] = 1 self._create_op_helper(ret, compute_device=compute_device) return ret def _create_op_helper(self, op, compute_shapes=True, compute_device=True): """Common logic for creating an op in this graph.""" # TODO(b/XXXX): move to Operation.__init__ once _USE_C_API flag is removed. self._add_op(op) # Apply any additional attributes requested. Do not overwrite any existing # attributes. for key, value in self._attr_scope_map.items(): try: op.get_attr(key) except ValueError: if callable(value): value = value(op.node_def) if not isinstance(value, (type(None), attr_value_pb2.AttrValue)): raise TypeError( "Callable for scope map key '%s' must return either None or " "an AttrValue protocol buffer; but it returned: %s" % (key, value)) if value: op._set_attr(key, value) # pylint: disable=protected-access # Apply a kernel label if one has been specified for this op type. try: kernel_label = self._op_to_kernel_label_map[op.type] op._set_attr("_kernel", # pylint: disable=protected-access attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label))) except KeyError: pass # Apply the overriding op type for gradients if one has been specified for # this op type. 
try: mapped_op_type = self._gradient_override_map[op.type] op._set_attr("_gradient_op_type", # pylint: disable=protected-access attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type))) except KeyError: pass self._record_op_seen_by_control_dependencies(op) if compute_device: self._apply_device_functions(op) if self._colocation_stack: all_colocation_groups = [] for colocation_op in self._colocation_stack: all_colocation_groups.extend(colocation_op.colocation_groups()) if colocation_op.device: # Make this device match the device of the colocated op, to provide # consistency between the device and the colocation property. if (op.device and pydev.canonical_name(op.device) != pydev.canonical_name(colocation_op.device)): logging.warning("Tried to colocate %s with an op %s that had " "a different device: %s vs %s. Postponing " "error-checking until all devices are assigned.", op.name, colocation_op.name, op.device, colocation_op.device) else: op._set_device(colocation_op.device) # pylint: disable=protected-access all_colocation_groups = sorted(set(all_colocation_groups)) # pylint: disable=protected-access op._set_attr("_class", attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups))) # pylint: enable=protected-access # Sets "container" attribute if # (1) self._container is not None # (2) "is_stateful" is set in OpDef # (3) "container" attribute is in OpDef # (4) "container" attribute is None # TODO(skyewm): remove op.op_def check when _USE_C_API is removed. if self._container and op.op_def and op.op_def.is_stateful: try: container_attr = op.get_attr("container") except ValueError: # "container" attribute is not in OpDef pass else: if not container_attr: op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access s=compat.as_bytes(self._container))) def _add_new_tf_operations(self, compute_devices=True): """Creates `Operations` in this graph for any new TF_Operations. This is useful for when TF_Operations are indirectly created by the C API outside of the Operation constructor (e.g. by TF_ImportGraphDef, TF_FinishWhile). This ensures there are corresponding Operations for all TF_Operations in the underlying TF_Graph. Args: compute_devices: (Optional.) If True, device functions will be executed to compute the device properties of each new Operation. Returns: A list of the new `Operation` objects. """ # Create all Operation objects before accessing their inputs since an op may # be created before its inputs. new_ops = [ self._create_op_from_tf_operation(c_op, compute_device=compute_devices) for c_op in c_api_util.new_tf_operations(self) ] # pylint: disable=protected-access for op in new_ops: # Operations created by the C API always retrieve shapes from the C API so # we preserve the shapes of ops created in import_graph_def (from the # "_output_shapes" attr of the imported NodeDef). if not _USE_C_SHAPES: _set_shape_and_handle_data_for_outputs_c_api(op) new_control_inputs = self._control_dependencies_for_inputs(op.inputs) op._add_control_inputs(new_control_inputs) op._control_flow_post_processing() # pylint: enable=protected-access return new_ops def as_graph_element(self, obj, allow_tensor=True, allow_operation=True): """Returns the object referred to by `obj`, as an `Operation` or `Tensor`. This function validates that `obj` represents an element of this graph, and gives an informative error message if it is not. 
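# A standalone sketch of the "container" attribute handling described above,
# assuming the TF 1.x API: stateful ops created inside Graph.container() pick
# up the container name unless they set one explicitly.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  with g.container("experiment0"):
    v = tf.Variable(1.0, name="v")
assert v.op.get_attr("container") == b"experiment0"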
This function is the canonical way to get/validate an object of one of the allowed types from an external argument reference in the Session API. This method may be called concurrently from multiple threads. Args: obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can also be any object with an `_as_graph_element()` method that returns a value of one of these types. allow_tensor: If true, `obj` may refer to a `Tensor`. allow_operation: If true, `obj` may refer to an `Operation`. Returns: The `Tensor` or `Operation` in the Graph corresponding to `obj`. Raises: TypeError: If `obj` is not a type we support attempting to convert to types. ValueError: If `obj` is of an appropriate type but invalid. For example, an invalid string. KeyError: If `obj` is not an object in the graph. """ if self._finalized: return self._as_graph_element_locked(obj, allow_tensor, allow_operation) with self._lock: return self._as_graph_element_locked(obj, allow_tensor, allow_operation) def _as_graph_element_locked(self, obj, allow_tensor, allow_operation): """See `Graph.as_graph_element()` for details.""" # The vast majority of this function is figuring # out what an API user might be doing wrong, so # that we can give helpful error messages. # # Ideally, it would be nice to split it up, but we # need context to generate nice error messages. if allow_tensor and allow_operation: types_str = "Tensor or Operation" elif allow_tensor: types_str = "Tensor" elif allow_operation: types_str = "Operation" else: raise ValueError("allow_tensor and allow_operation can't both be False.") temp_obj = _as_graph_element(obj) if temp_obj is not None: obj = temp_obj # If obj appears to be a name... if isinstance(obj, compat.bytes_or_text_types): name = compat.as_str(obj) if ":" in name and allow_tensor: # Looks like a Tensor name and can be a Tensor. try: op_name, out_n = name.split(":") out_n = int(out_n) except: raise ValueError("The name %s looks a like a Tensor name, but is " "not a valid one. Tensor names must be of the " "form \"<op_name>:<output_index>\"." % repr(name)) if op_name in self._nodes_by_name: op = self._nodes_by_name[op_name] else: raise KeyError("The name %s refers to a Tensor which does not " "exist. The operation, %s, does not exist in the " "graph." % (repr(name), repr(op_name))) try: return op.outputs[out_n] except: raise KeyError("The name %s refers to a Tensor which does not " "exist. The operation, %s, exists but only has " "%s outputs." % (repr(name), repr(op_name), len(op.outputs))) elif ":" in name and not allow_tensor: # Looks like a Tensor name but can't be a Tensor. raise ValueError("Name %s appears to refer to a Tensor, not a %s." % (repr(name), types_str)) elif ":" not in name and allow_operation: # Looks like an Operation name and can be an Operation. if name not in self._nodes_by_name: raise KeyError("The name %s refers to an Operation not in the " "graph." % repr(name)) return self._nodes_by_name[name] elif ":" not in name and not allow_operation: # Looks like an Operation name but can't be an Operation. if name in self._nodes_by_name: # Yep, it's an Operation name err_msg = ("The name %s refers to an Operation, not a %s." % (repr(name), types_str)) else: err_msg = ("The name %s looks like an (invalid) Operation name, " "not a %s." % (repr(name), types_str)) err_msg += (" Tensor names must be of the form " "\"<op_name>:<output_index>\".") raise ValueError(err_msg) elif isinstance(obj, Tensor) and allow_tensor: # Actually obj is just the object it's referring to. 
if obj.graph is not self: raise ValueError("Tensor %s is not an element of this graph." % obj) return obj elif isinstance(obj, Operation) and allow_operation: # Actually obj is just the object it's referring to. if obj.graph is not self: raise ValueError("Operation %s is not an element of this graph." % obj) return obj else: # We give up! raise TypeError("Can not convert a %s into a %s." % (type(obj).__name__, types_str)) def get_operations(self): """Return the list of operations in the graph. You can modify the operations in place, but modifications to the list such as inserts/delete have no effect on the list of operations known to the graph. This method may be called concurrently from multiple threads. Returns: A list of Operations. """ if self._finalized: return list(self._nodes_by_id.values()) with self._lock: return list(self._nodes_by_id.values()) def get_operation_by_name(self, name): """Returns the `Operation` with the given `name`. This method may be called concurrently from multiple threads. Args: name: The name of the `Operation` to return. Returns: The `Operation` with the given `name`. Raises: TypeError: If `name` is not a string. KeyError: If `name` does not correspond to an operation in this graph. """ if not isinstance(name, six.string_types): raise TypeError("Operation names are strings (or similar), not %s." % type(name).__name__) return self.as_graph_element(name, allow_tensor=False, allow_operation=True) def _get_operation_by_name_unsafe(self, name): """Returns the `Operation` with the given `name`. This is a internal unsafe version of get_operation_by_name. It skips many checks and does not have user friedly error messages but runs considerably faster. This method may be called concurrently from multiple threads. Args: name: The name of the `Operation` to return. Returns: The `Operation` with the given `name`. Raises: KeyError: If `name` does not correspond to an operation in this graph. """ if self._finalized: return self._nodes_by_name[name] with self._lock: return self._nodes_by_name[name] def _get_operation_by_tf_operation(self, tf_oper): op_name = c_api.TF_OperationName(tf_oper) return self._get_operation_by_name_unsafe(op_name) def get_tensor_by_name(self, name): """Returns the `Tensor` with the given `name`. This method may be called concurrently from multiple threads. Args: name: The name of the `Tensor` to return. Returns: The `Tensor` with the given `name`. Raises: TypeError: If `name` is not a string. KeyError: If `name` does not correspond to a tensor in this graph. """ # Names should be strings. if not isinstance(name, six.string_types): raise TypeError("Tensor names are strings (or similar), not %s." % type(name).__name__) return self.as_graph_element(name, allow_tensor=True, allow_operation=False) def _get_tensor_by_tf_output(self, tf_output): """Returns the `Tensor` representing `tf_output`. Note that there is only one such `Tensor`, i.e. multiple calls to this function with the same TF_Output value will always return the same `Tensor` object. Args: tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`). Returns: The `Tensor` that represents `tf_output`. """ op = self._get_operation_by_tf_operation(tf_output.oper) return op.outputs[tf_output.index] def _next_id(self): """Id for next Operation instance. 
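# A standalone sketch of graph element lookup by name, assuming the TF 1.x API:
# "<op_name>" names an Operation, "<op_name>:<output_index>" names a Tensor.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0, name="c")
assert g.get_operation_by_name("c") is c.op
assert g.get_tensor_by_name("c:0") is c
assert g.as_graph_element("c:0") is c
try:
  g.get_tensor_by_name("c")      # Operation name where a Tensor is required.
except ValueError:
  pass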
Also increments the internal id.""" self._check_not_finalized() with self._lock: self._next_id_counter += 1 return self._next_id_counter @property def _last_id(self): return self._next_id_counter def _get_op_def(self, type): # pylint: disable=redefined-builtin """Returns the `OpDef` proto for `type`. `type` is a string.""" if self._c_graph: with c_api_util.tf_buffer() as buf: # pylint: disable=protected-access c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf) # pylint: enable=protected-access data = c_api.TF_GetBuffer(buf) op_def = op_def_pb2.OpDef() op_def.ParseFromString(compat.as_bytes(data)) return op_def else: return self._registered_ops[type] def as_default(self): """Returns a context manager that makes this `Graph` the default graph. This method should be used if you want to create multiple graphs in the same process. For convenience, a global default graph is provided, and all ops will be added to this graph if you do not create a new graph explicitly. Use this method with the `with` keyword to specify that ops created within the scope of a block should be added to this graph. The default graph is a property of the current thread. If you create a new thread, and wish to use the default graph in that thread, you must explicitly add a `with g.as_default():` in that thread's function. The following code examples are equivalent: ```python # 1. Using Graph.as_default(): g = tf.Graph() with g.as_default(): c = tf.constant(5.0) assert c.graph is g # 2. Constructing and making default: with tf.Graph().as_default() as g: c = tf.constant(5.0) assert c.graph is g ``` Returns: A context manager for using this graph as the default graph. """ return _default_graph_stack.get_controller(self) @property def collections(self): """Returns the names of the collections known to this graph.""" return list(self._collections) def add_to_collection(self, name, value): """Stores `value` in the collection with the given `name`. Note that collections are not sets, so it is possible to add a value to a collection several times. Args: name: The key for the collection. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collection. """ # pylint: disable=g-doc-exception _assert_collection_is_ok(name) self._check_not_finalized() with self._lock: if name not in self._collections: self._collections[name] = [value] else: self._collections[name].append(value) def add_to_collections(self, names, value): """Stores `value` in the collections given by `names`. Note that collections are not sets, so it is possible to add a value to a collection several times. This function makes sure that duplicates in `names` are ignored, but it will not check for pre-existing membership of `value` in any of the collections in `names`. `names` can be any iterable, but if `names` is a string, it is treated as a single collection name. Args: names: The keys for the collections to add to. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collections. """ # Make sure names are unique, but treat strings as a single collection name names = (names,) if isinstance(names, six.string_types) else set(names) for name in names: self.add_to_collection(name, value) def get_collection_ref(self, name): """Returns a list of values in the collection with the given `name`. If the collection exists, this returns the list itself, which can be modified in place to change the collection. 
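    As a quick illustration of the collection helpers above
    (`add_to_collection`, `add_to_collections`, `get_collection_ref`), consider
    this sketch with hypothetical collection names:

    ```python
    g = tf.Graph()
    g.add_to_collection("queues", "q0")
    g.add_to_collections(["queues", "metrics"], "q1")  # duplicate names ignored
    assert g.get_collection("queues") == ["q0", "q1"]  # returns a copy
    g.get_collection_ref("queues").append("q2")        # the ref is the live list
    assert g.get_collection("queues") == ["q0", "q1", "q2"]
    ```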
If the collection does not exist, it is created as an empty list and the list is returned. This is different from `get_collection()` which always returns a copy of the collection list if it exists and never creates an empty collection. Args: name: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. """ # pylint: disable=g-doc-exception _assert_collection_is_ok(name) with self._lock: coll_list = self._collections.get(name, None) if coll_list is None: coll_list = [] self._collections[name] = coll_list return coll_list def get_collection(self, name, scope=None): """Returns a list of values in the collection with the given `name`. This is different from `get_collection_ref()` which always returns the actual collection list if it exists in that it returns a new list each time it is called. Args: name: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. """ # pylint: disable=g-doc-exception _assert_collection_is_ok(name) with self._lock: collection = self._collections.get(name, None) if collection is None: return [] if scope is None: return list(collection) else: c = [] regex = re.compile(scope) for item in collection: if hasattr(item, "name") and regex.match(item.name): c.append(item) return c def get_all_collection_keys(self): """Returns a list of collections used in this graph.""" with self._lock: return [x for x in self._collections if isinstance(x, six.string_types)] def clear_collection(self, name): """Clears all values in a collection. Args: name: The key for the collection. The `GraphKeys` class contains many standard names for collections. """ self._check_not_finalized() with self._lock: if name in self._collections: del self._collections[name] @tf_contextlib.contextmanager def _original_op(self, op): """Python 'with' handler to help annotate ops with their originator. An op may have an 'original_op' property that indicates the op on which it was based. For example a replica op is based on the op that was replicated and a gradient op is based on the op that was differentiated. All ops created in the scope of this 'with' handler will have the given 'op' as their original op. Args: op: The Operation that all ops created in this scope will have as their original op. Yields: Nothing. """ old_original_op = self._default_original_op try: self._default_original_op = op yield finally: self._default_original_op = old_original_op @property def _name_stack(self): # This may be called from a thread where name_stack doesn't yet exist. 
if not hasattr(self._thread_local, "_name_stack"): self._thread_local._name_stack = "" return self._thread_local._name_stack @_name_stack.setter def _name_stack(self, name_stack): self._thread_local._name_stack = name_stack # pylint: disable=g-doc-return-or-yield,line-too-long @tf_contextlib.contextmanager def name_scope(self, name): r"""Returns a context manager that creates hierarchical names for operations. A graph maintains a stack of name scopes. A `with name_scope(...):` statement pushes a new name onto the stack for the lifetime of the context. The `name` argument will be interpreted as follows: * A string (not ending with '/') will create a new name scope, in which `name` is appended to the prefix of all operations created in the context. If `name` has been used before, it will be made unique by calling `self.unique_name(name)`. * A scope previously captured from a `with g.name_scope(...) as scope:` statement will be treated as an "absolute" name scope, which makes it possible to re-enter existing scopes. * A value of `None` or the empty string will reset the current name scope to the top-level (empty) name scope. For example: ```python with tf.Graph().as_default() as g: c = tf.constant(5.0, name="c") assert c.op.name == "c" c_1 = tf.constant(6.0, name="c") assert c_1.op.name == "c_1" # Creates a scope called "nested" with g.name_scope("nested") as scope: nested_c = tf.constant(10.0, name="c") assert nested_c.op.name == "nested/c" # Creates a nested scope called "inner". with g.name_scope("inner"): nested_inner_c = tf.constant(20.0, name="c") assert nested_inner_c.op.name == "nested/inner/c" # Create a nested scope called "inner_1". with g.name_scope("inner"): nested_inner_1_c = tf.constant(30.0, name="c") assert nested_inner_1_c.op.name == "nested/inner_1/c" # Treats `scope` as an absolute name scope, and # switches to the "nested/" scope. with g.name_scope(scope): nested_d = tf.constant(40.0, name="d") assert nested_d.op.name == "nested/d" with g.name_scope(""): e = tf.constant(50.0, name="e") assert e.op.name == "e" ``` The name of the scope itself can be captured by `with g.name_scope(...) as scope:`, which stores the name of the scope in the variable `scope`. This value can be used to name an operation that represents the overall result of executing the ops in a scope. For example: ```python inputs = tf.constant(...) with g.name_scope('my_layer') as scope: weights = tf.Variable(..., name="weights") biases = tf.Variable(..., name="biases") affine = tf.matmul(inputs, weights) + biases output = tf.nn.relu(affine, name=scope) ``` NOTE: This constructor validates the given `name`. Valid scope names match one of the following regular expressions: [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root) [A-Za-z0-9_.\\-/]* (for other scopes) Args: name: A name for the scope. Returns: A context manager that installs `name` as a new name scope. Raises: ValueError: If `name` is not a valid scope name, according to the rules above. """ if name: if isinstance(name, compat.bytes_or_text_types): name = compat.as_str(name) if self._name_stack: # Scopes created in a nested scope may have initial characters # that are illegal as the initial character of an op name # (viz. '-', '\', '/', and '_'). if not _VALID_SCOPE_NAME_REGEX.match(name): raise ValueError("'%s' is not a valid scope name" % name) else: # Scopes created in the root must match the more restrictive # op name regex, which constrains the initial character. 
if not _VALID_OP_NAME_REGEX.match(name): raise ValueError("'%s' is not a valid scope name" % name) try: old_stack = self._name_stack if not name: # Both for name=None and name="" we re-set to empty scope. new_stack = None elif name[-1] == "/": new_stack = _name_from_scope_name(name) else: new_stack = self.unique_name(name) self._name_stack = new_stack yield "" if new_stack is None else new_stack + "/" finally: self._name_stack = old_stack # pylint: enable=g-doc-return-or-yield,line-too-long def unique_name(self, name, mark_as_used=True): """Return a unique operation name for `name`. Note: You rarely need to call `unique_name()` directly. Most of the time you just need to create `with g.name_scope()` blocks to generate structured names. `unique_name` is used to generate structured names, separated by `"/"`, to help identify operations when debugging a graph. Operation names are displayed in error messages reported by the TensorFlow runtime, and in various visualization tools such as TensorBoard. If `mark_as_used` is set to `True`, which is the default, a new unique name is created and marked as in use. If it's set to `False`, the unique name is returned without actually being marked as used. This is useful when the caller simply wants to know what the name to be created will be. Args: name: The name for an operation. mark_as_used: Whether to mark this name as being used. Returns: A string to be passed to `create_op()` that will be used to name the operation being created. """ if self._name_stack: name = self._name_stack + "/" + name i = self._names_in_use.get(name, 0) # Increment the number for "name". if mark_as_used: self._names_in_use[name] = i + 1 if i > 0: base_name = name # Make sure the composed name is not already used. while name in self._names_in_use: name = "%s_%d" % (base_name, i) i += 1 # Mark the composed name as used in case someone wants # to call unique_name("name_1"). if mark_as_used: self._names_in_use[name] = 1 return name def get_name_scope(self): """Returns the current name scope. For example: ```python with tf.name_scope('scope1'): with tf.name_scope('scope2'): print(tf.get_default_graph().get_name_scope()) ``` would print the string `scope1/scope2`. Returns: A string representing the current name scope. """ return self._name_stack @tf_contextlib.contextmanager def _colocate_with_for_gradient(self, op, gradient_uid, ignore_existing=False): with self.colocate_with(op, ignore_existing): if gradient_uid is not None and self._control_flow_context is not None: try: self._control_flow_context.EnterGradientColocation(op, gradient_uid) yield finally: self._control_flow_context.ExitGradientColocation(op, gradient_uid) else: yield @tf_contextlib.contextmanager def colocate_with(self, op, ignore_existing=False): """Returns a context manager that specifies an op to colocate with. Note: this function is not for public use, only for internal libraries. For example: ```python a = tf.Variable([1.0]) with g.colocate_with(a): b = tf.constant(1.0) c = tf.add(a, b) ``` `b` and `c` will always be colocated with `a`, no matter where `a` is eventually placed. **NOTE** Using a colocation scope resets any existing device constraints. If `op` is `None` then `ignore_existing` must be `True` and the new scope resets all colocation and device constraints. Args: op: The op to colocate all created ops with, or `None`. ignore_existing: If true, only applies colocation of this op within the context, rather than applying all colocation properties on the stack. 
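    For context, the `unique_name()` helper defined above derives the suffixed
    names used for scopes and ops; a small sketch:

    ```python
    g = tf.Graph()
    assert g.unique_name("dense") == "dense"
    assert g.unique_name("dense") == "dense_1"
    # With mark_as_used=False the next name is previewed but not reserved.
    assert g.unique_name("dense", mark_as_used=False) == "dense_2"
    assert g.unique_name("dense") == "dense_2"
    ```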
If `op` is `None`, this value must be `True`. Raises: ValueError: if op is None but ignore_existing is False. Yields: A context manager that specifies the op with which to colocate newly created ops. """ if op is None and not ignore_existing: raise ValueError("Trying to reset colocation (op is None) but " "ignore_existing is not True") if op is not None and not isinstance(op, Operation): # We always want to colocate with the reference op. op = internal_convert_to_tensor_or_indexed_slices(op, as_ref=True).op # By default, colocate_with resets the device function stack, # since colocate_with is typically used in specific internal # library functions where colocation is intended to be "stronger" # than device functions. # # In the future, a caller may specify that device_functions win # over colocation, in which case we can add support. device_fn_tmp = self._device_function_stack self._device_function_stack = [] if ignore_existing: current_stack = self._colocation_stack self._colocation_stack = [] if op is not None: self._colocation_stack.append(op) try: yield finally: # Restore device function stack self._device_function_stack = device_fn_tmp if op is not None: self._colocation_stack.pop() # Reset the colocation stack if requested. if ignore_existing: self._colocation_stack = current_stack @tf_contextlib.contextmanager def device(self, device_name_or_function): # pylint: disable=line-too-long """Returns a context manager that specifies the default device to use. The `device_name_or_function` argument may either be a device name string, a device function, or None: * If it is a device name string, all operations constructed in this context will be assigned to the device with that name, unless overridden by a nested `device()` context. * If it is a function, it will be treated as a function from Operation objects to device name strings, and invoked each time a new Operation is created. The Operation will be assigned to the device with the returned name. * If it is None, all `device()` invocations from the enclosing context will be ignored. For information about the valid syntax of device name strings, see the documentation in [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h). For example: ```python with g.device('/device:GPU:0'): # All operations constructed in this context will be placed # on GPU 0. with g.device(None): # All operations constructed in this context will have no # assigned device. # Defines a function from `Operation` to device string. def matmul_on_gpu(n): if n.type == "MatMul": return "/device:GPU:0" else: return "/cpu:0" with g.device(matmul_on_gpu): # All operations of type "MatMul" constructed in this context # will be placed on GPU 0; all other operations will be placed # on CPU 0. ``` **N.B.** The device scope may be overridden by op wrappers or other library code. For example, a variable assignment op `v.assign()` must be colocated with the `tf.Variable` `v`, and incompatible device scopes will be ignored. Args: device_name_or_function: The device name or function to use in the context. Yields: A context manager that specifies the default device to use for newly created ops. 
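    As an additional sketch, nested device scopes merge rather than overwrite:
    the innermost scope takes precedence, and outer scopes only fill in fields
    that the inner spec leaves unspecified:

    ```python
    with tf.Graph().as_default() as g:
      with g.device("/job:worker"):
        with g.device("/device:GPU:0"):
          op = tf.constant(1.0, name="c").op
          # `op.device` combines both specs, e.g. "/job:worker/device:GPU:0".
    ```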
""" # pylint: enable=line-too-long if (device_name_or_function is not None and not callable(device_name_or_function)): device_function = pydev.merge_device(device_name_or_function) else: device_function = device_name_or_function try: self._device_function_stack.append(device_function) yield finally: self._device_function_stack.pop() def _apply_device_functions(self, op): """Applies the current device function stack to the given operation.""" # Apply any device functions in reverse order, so that the most recently # pushed function has the first chance to apply a device to the op. # We apply here because the result can depend on the Operation's # signature, which is computed in the Operation constructor. for device_function in reversed(self._device_function_stack): if device_function is None: break op._set_device(device_function(op)) # pylint: disable=protected-access # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def container(self, container_name): """Returns a context manager that specifies the resource container to use. Stateful operations, such as variables and queues, can maintain their states on devices so that they can be shared by multiple processes. A resource container is a string name under which these stateful operations are tracked. These resources can be released or cleared with `tf.Session.reset()`. For example: ```python with g.container('experiment0'): # All stateful Operations constructed in this context will be placed # in resource container "experiment0". v1 = tf.Variable([1.0]) v2 = tf.Variable([2.0]) with g.container("experiment1"): # All stateful Operations constructed in this context will be # placed in resource container "experiment1". v3 = tf.Variable([3.0]) q1 = tf.FIFOQueue(10, tf.float32) # All stateful Operations constructed in this context will be # be created in the "experiment0". v4 = tf.Variable([4.0]) q1 = tf.FIFOQueue(20, tf.float32) with g.container(""): # All stateful Operations constructed in this context will be # be placed in the default resource container. v5 = tf.Variable([5.0]) q3 = tf.FIFOQueue(30, tf.float32) # Resets container "experiment0", after which the state of v1, v2, v4, q1 # will become undefined (such as uninitialized). tf.Session.reset(target, ["experiment0"]) ``` Args: container_name: container name string. Returns: A context manager for defining resource containers for stateful ops, yields the container name. """ original_container = self._container try: self._container = container_name yield self._container finally: self._container = original_container # pylint: enable=g-doc-return-or-yield class _ControlDependenciesController(object): """Context manager for `control_dependencies()`.""" def __init__(self, graph, control_inputs): """Create a new `_ControlDependenciesController`. A `_ControlDependenciesController` is the context manager for `with tf.control_dependencies()` blocks. These normally nest, as described in the documentation for `control_dependencies()`. The `control_inputs` argument list control dependencies that must be added to the current set of control dependencies. Because of uniquification the set can be empty even if the caller passed a list of ops. The special value `None` indicates that we want to start a new empty set of control dependencies instead of extending the current set. In that case we also clear the current control flow context, which is an additional mechanism to add control dependencies. Args: graph: The graph that this controller is managing. 
control_inputs: List of ops to use as control inputs in addition to the current control dependencies. None to indicate that the dependencies should be cleared. """ self._graph = graph if control_inputs is None: self._control_inputs_val = [] self._new_stack = True else: self._control_inputs_val = control_inputs self._new_stack = False self._seen_nodes = set() self._old_stack = None self._old_control_flow_context = None # pylint: disable=protected-access def __enter__(self): if self._new_stack: # Clear the control_dependencies graph. self._old_stack = self._graph._control_dependencies_stack self._graph._control_dependencies_stack = [] # Clear the control_flow_context too. self._old_control_flow_context = self._graph._get_control_flow_context() self._graph._set_control_flow_context(None) self._graph._push_control_dependencies_controller(self) def __exit__(self, unused_type, unused_value, unused_traceback): self._graph._pop_control_dependencies_controller(self) if self._new_stack: self._graph._control_dependencies_stack = self._old_stack self._graph._set_control_flow_context(self._old_control_flow_context) # pylint: enable=protected-access @property def control_inputs(self): return self._control_inputs_val def add_op(self, op): self._seen_nodes.add(op) def op_in_group(self, op): return op in self._seen_nodes def _push_control_dependencies_controller(self, controller): self._control_dependencies_stack.append(controller) def _pop_control_dependencies_controller(self, controller): assert self._control_dependencies_stack[-1] is controller self._control_dependencies_stack.pop() def _current_control_dependencies(self): ret = set() for controller in self._control_dependencies_stack: for op in controller.control_inputs: ret.add(op) return ret def _control_dependencies_for_inputs(self, input_ops): """For an op that takes `input_ops` as inputs, compute control inputs. The returned control dependencies should yield an execution that is equivalent to adding all control inputs in self._control_dependencies_stack to a newly created op. However, this function attempts to prune the returned control dependencies by observing that nodes created within the same `with control_dependencies(...):` block may have data dependencies that make the explicit approach redundant. Args: input_ops: The data input ops for an op to be created. Returns: A list of control inputs for the op to be created. """ ret = [] for controller in self._control_dependencies_stack: # If any of the input_ops already depends on the inputs from controller, # we say that the new op is dominated (by that input), and we therefore # do not need to add control dependencies for this controller's inputs. dominated = False for op in input_ops: if controller.op_in_group(op): dominated = True break if not dominated: # Don't add a control input if we already have a data dependency on i. # NOTE(mrry): We do not currently track transitive data dependencies, # so we may add redundant control inputs. ret.extend([c for c in controller.control_inputs if c not in input_ops]) return ret def _record_op_seen_by_control_dependencies(self, op): """Record that the given op depends on all registered control dependencies. Args: op: An Operation. """ for controller in self._control_dependencies_stack: controller.add_op(op) def control_dependencies(self, control_inputs): """Returns a context manager that specifies control dependencies. 
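    To make the pruning performed by `_control_dependencies_for_inputs()` above
    concrete, here is a small sketch (hypothetical op names):

    ```python
    with tf.Graph().as_default() as g:
      a = tf.constant(1.0, name="a")
      with g.control_dependencies([a]):
        b = tf.identity(a, name="b")    # data dependency on `a`: no control edge
        c = tf.constant(2.0, name="c")  # no data dependency: control edge added
    assert not b.op.control_inputs
    assert a.op in c.op.control_inputs
    ```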
Use with the `with` keyword to specify that all operations constructed within the context should have control dependencies on `control_inputs`. For example: ```python with g.control_dependencies([a, b, c]): # `d` and `e` will only run after `a`, `b`, and `c` have executed. d = ... e = ... ``` Multiple calls to `control_dependencies()` can be nested, and in that case a new `Operation` will have control dependencies on the union of `control_inputs` from all active contexts. ```python with g.control_dependencies([a, b]): # Ops constructed here run after `a` and `b`. with g.control_dependencies([c, d]): # Ops constructed here run after `a`, `b`, `c`, and `d`. ``` You can pass None to clear the control dependencies: ```python with g.control_dependencies([a, b]): # Ops constructed here run after `a` and `b`. with g.control_dependencies(None): # Ops constructed here run normally, not waiting for either `a` or `b`. with g.control_dependencies([c, d]): # Ops constructed here run after `c` and `d`, also not waiting # for either `a` or `b`. ``` *N.B.* The control dependencies context applies *only* to ops that are constructed within the context. Merely using an op or tensor in the context does not add a control dependency. The following example illustrates this point: ```python # WRONG def my_func(pred, tensor): t = tf.matmul(tensor, tensor) with tf.control_dependencies([pred]): # The matmul op is created outside the context, so no control # dependency will be added. return t # RIGHT def my_func(pred, tensor): with tf.control_dependencies([pred]): # The matmul op is created in the context, so a control dependency # will be added. return tf.matmul(tensor, tensor) ``` Also note that though execution of ops created under this scope will trigger execution of the dependencies, the ops created under this scope might still be pruned from a normal tensorflow graph. For example, in the following snippet of code the dependencies are never executed: ```python loss = model.loss() with tf.control_dependencies(dependencies): loss = loss + tf.constant(1) # note: dependencies ignored in the # backward pass return tf.gradients(loss, model.variables) ``` This is because evaluating the gradient graph does not require evaluating the constant(1) op created in the forward pass. Args: control_inputs: A list of `Operation` or `Tensor` objects which must be executed or computed before running the operations defined in the context. Can also be `None` to clear the control dependencies. Returns: A context manager that specifies control dependencies for all operations constructed within the context. Raises: TypeError: If `control_inputs` is not a list of `Operation` or `Tensor` objects. """ if control_inputs is None: return self._ControlDependenciesController(self, None) # First convert the inputs to ops, and deduplicate them. # NOTE(mrry): Other than deduplication, we do not currently track direct # or indirect dependencies between control_inputs, which may result in # redundant control inputs. 
control_ops = [] current = self._current_control_dependencies() for c in control_inputs: if isinstance(c, IndexedSlices): c = c.op c = self.as_graph_element(c) if isinstance(c, Tensor): c = c.op elif not isinstance(c, Operation): raise TypeError("Control input must be Operation or Tensor: %s" % c) if c not in current: control_ops.append(c) current.add(c) return self._ControlDependenciesController(self, control_ops) # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def _attr_scope(self, attr_map): """EXPERIMENTAL: A context manager for setting attributes on operators. This context manager can be used to add additional attributes to operators within the scope of the context. For example: with ops.Graph().as_default() as g: f_1 = Foo() # No extra attributes with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}): f_2 = Foo() # Additional attribute _a=False with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}): f_3 = Foo() # Additional attribute _a=False with g._attr_scope({"_a": None}): f_4 = Foo() # No additional attributes. Args: attr_map: A dictionary mapping attr name strings to AttrValue protocol buffers or None. Returns: A context manager that sets the kernel label to be used for one or more ops created in that context. Raises: TypeError: If attr_map is not a dictionary mapping strings to AttrValue protobufs. """ if not isinstance(attr_map, dict): raise TypeError("attr_map must be a dictionary mapping " "strings to AttrValue protocol buffers") # The saved_attrs dictionary stores any currently-set labels that # will be overridden by this context manager. saved_attrs = {} # Install the given attribute for name, attr in attr_map.items(): if not (isinstance(name, six.string_types) and (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or callable(attr))): raise TypeError("attr_map must be a dictionary mapping " "strings to AttrValue protocol buffers or " "callables that emit AttrValue protocol buffers") try: saved_attrs[name] = self._attr_scope_map[name] except KeyError: pass if attr is None: del self._attr_scope_map[name] else: self._attr_scope_map[name] = attr try: yield # The code within the context runs here. finally: # Remove the attributes set for this context, and restore any saved # attributes. for name, attr in attr_map.items(): try: self._attr_scope_map[name] = saved_attrs[name] except KeyError: del self._attr_scope_map[name] # pylint: enable=g-doc-return-or-yield # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def _kernel_label_map(self, op_to_kernel_label_map): """EXPERIMENTAL: A context manager for setting kernel labels. This context manager can be used to select particular implementations of kernels within the scope of the context. For example: with ops.Graph().as_default() as g: f_1 = Foo() # Uses the default registered kernel for the Foo op. with g.kernel_label_map({"Foo": "v_2"}): f_2 = Foo() # Uses the registered kernel with label "v_2" # for the Foo op. with g.kernel_label_map({"Foo": "v_3"}): f_3 = Foo() # Uses the registered kernel with label "v_3" # for the Foo op. with g.kernel_label_map({"Foo": ""}): f_4 = Foo() # Uses the default registered kernel # for the Foo op. Args: op_to_kernel_label_map: A dictionary mapping op type strings to kernel label strings. Returns: A context manager that sets the kernel label to be used for one or more ops created in that context. Raises: TypeError: If op_to_kernel_label_map is not a dictionary mapping strings to strings. 
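    A related sketch for `_attr_scope()` above, showing that a scoped attribute
    ends up on the created op's `NodeDef` (the attribute name `_my_attr` is
    hypothetical; underscore-prefixed names avoid clashing with op-def attrs):

    ```python
    from tensorflow.core.framework import attr_value_pb2

    with tf.Graph().as_default() as g:
      # pylint: disable=protected-access
      with g._attr_scope({"_my_attr": attr_value_pb2.AttrValue(b=True)}):
        c = tf.constant(1.0, name="c")
    assert c.op.get_attr("_my_attr")  # the attribute was attached to the op
    ```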
""" if not isinstance(op_to_kernel_label_map, dict): raise TypeError("op_to_kernel_label_map must be a dictionary mapping " "strings to strings") # The saved_labels dictionary stores any currently-set labels that # will be overridden by this context manager. saved_labels = {} # Install the given label for op_type, label in op_to_kernel_label_map.items(): if not (isinstance(op_type, six.string_types) and isinstance(label, six.string_types)): raise TypeError("op_to_kernel_label_map must be a dictionary mapping " "strings to strings") try: saved_labels[op_type] = self._op_to_kernel_label_map[op_type] except KeyError: pass self._op_to_kernel_label_map[op_type] = label try: yield # The code within the context runs here. finally: # Remove the labels set for this context, and restore any saved labels. for op_type, label in op_to_kernel_label_map.items(): try: self._op_to_kernel_label_map[op_type] = saved_labels[op_type] except KeyError: del self._op_to_kernel_label_map[op_type] # pylint: enable=g-doc-return-or-yield # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def gradient_override_map(self, op_type_map): """EXPERIMENTAL: A context manager for overriding gradient functions. This context manager can be used to override the gradient function that will be used for ops within the scope of the context. For example: ```python @tf.RegisterGradient("CustomSquare") def _custom_square_grad(op, grad): # ... with tf.Graph().as_default() as g: c = tf.constant(5.0) s_1 = tf.square(c) # Uses the default gradient for tf.square. with g.gradient_override_map({"Square": "CustomSquare"}): s_2 = tf.square(s_2) # Uses _custom_square_grad to compute the # gradient of s_2. ``` Args: op_type_map: A dictionary mapping op type strings to alternative op type strings. Returns: A context manager that sets the alternative op type to be used for one or more ops created in that context. Raises: TypeError: If `op_type_map` is not a dictionary mapping strings to strings. """ if not isinstance(op_type_map, dict): raise TypeError("op_type_map must be a dictionary mapping " "strings to strings") # The saved_mappings dictionary stores any currently-set mappings that # will be overridden by this context manager. saved_mappings = {} # Install the given label for op_type, mapped_op_type in op_type_map.items(): if not (isinstance(op_type, six.string_types) and isinstance(mapped_op_type, six.string_types)): raise TypeError("op_type_map must be a dictionary mapping " "strings to strings") try: saved_mappings[op_type] = self._gradient_override_map[op_type] except KeyError: pass self._gradient_override_map[op_type] = mapped_op_type try: yield # The code within the context runs here. finally: # Remove the labels set for this context, and restore any saved labels. 
for op_type, mapped_op_type in op_type_map.items(): try: self._gradient_override_map[op_type] = saved_mappings[op_type] except KeyError: del self._gradient_override_map[op_type] # pylint: enable=g-doc-return-or-yield def prevent_feeding(self, tensor): """Marks the given `tensor` as unfeedable in this graph.""" self._unfeedable_tensors.add(tensor) def is_feedable(self, tensor): """Returns `True` if and only if `tensor` is feedable.""" return tensor not in self._unfeedable_tensors def prevent_fetching(self, op): """Marks the given `op` as unfetchable in this graph.""" self._unfetchable_ops.add(op) def is_fetchable(self, tensor_or_op): """Returns `True` if and only if `tensor_or_op` is fetchable.""" if isinstance(tensor_or_op, Tensor): return tensor_or_op.op not in self._unfetchable_ops else: return tensor_or_op not in self._unfetchable_ops def switch_to_thread_local(self): """Make device, colocation and dependencies stacks thread-local. Device, colocation and dependencies stacks are not thread-local be default. If multiple threads access them, then the state is shared. This means that one thread may affect the behavior of another thread. After this method is called, the stacks become thread-local. If multiple threads access them, then the state is not shared. Each thread uses its own value; a thread doesn't affect other threads by mutating such a stack. The initial value for every thread's stack is set to the current value of the stack when `switch_to_thread_local()` was first called. """ if not self._stack_state_is_thread_local: self._stack_state_is_thread_local = True @property def _device_function_stack(self): if self._stack_state_is_thread_local: # This may be called from a thread where device_function_stack doesn't yet # exist. if not hasattr(self._thread_local, "_device_function_stack"): self._thread_local._device_function_stack = ( self._graph_device_function_stack[:]) return self._thread_local._device_function_stack else: return self._graph_device_function_stack @_device_function_stack.setter def _device_function_stack(self, device_function_stack): if self._stack_state_is_thread_local: self._thread_local._device_function_stack = device_function_stack else: self._graph_device_function_stack = device_function_stack @property def _colocation_stack(self): if self._stack_state_is_thread_local: # This may be called from a thread where colocation_stack doesn't yet # exist. if not hasattr(self._thread_local, "_colocation_stack"): self._thread_local._colocation_stack = self._graph_colocation_stack[:] return self._thread_local._colocation_stack else: return self._graph_colocation_stack @_colocation_stack.setter def _colocation_stack(self, colocation_stack): if self._stack_state_is_thread_local: self._thread_local._colocation_stack = colocation_stack else: self._graph_colocation_stack = colocation_stack @property def _control_dependencies_stack(self): if self._stack_state_is_thread_local: # This may be called from a thread where control_dependencies_stack # doesn't yet exist. 
if not hasattr(self._thread_local, "_control_dependencies_stack"): self._thread_local._control_dependencies_stack = ( self._graph_control_dependencies_stack[:]) return self._thread_local._control_dependencies_stack else: return self._graph_control_dependencies_stack @_control_dependencies_stack.setter def _control_dependencies_stack(self, control_dependencies): if self._stack_state_is_thread_local: self._thread_local._control_dependencies_stack = control_dependencies else: self._graph_control_dependencies_stack = control_dependencies # TODO(agarwal): currently device directives in an outer eager scope will not # apply to inner graph mode code. Fix that. @tf_export("device") def device(device_name_or_function): """Wrapper for `Graph.device()` using the default graph. See @{tf.Graph.device} for more details. Args: device_name_or_function: The device name or function to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If eager execution is enabled and a function is passed in. """ if context.executing_eagerly(): # TODO(agarwal): support device functions in EAGER mode. if callable(device_name_or_function): raise RuntimeError( "tf.device does not support functions when eager execution " "is enabled.") return context.device(device_name_or_function) else: return get_default_graph().device(device_name_or_function) @tf_export("container") def container(container_name): """Wrapper for `Graph.container()` using the default graph. Args: container_name: The container string to use in the context. Returns: A context manager that specifies the default container to use for newly created stateful ops. """ return get_default_graph().container(container_name) def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False): if context.executing_eagerly(): if op is not None: return device(op.device) else: return _NullContextmanager() else: default_graph = get_default_graph() if isinstance(op, EagerTensor): if default_graph.building_function: op = internal_convert_to_tensor(op) else: raise ValueError("Encountered an Eager-defined Tensor during graph " "construction, but a function was not being built.") return default_graph._colocate_with_for_gradient( op, gradient_uid=gradient_uid, ignore_existing=ignore_existing) @tf_export("colocate_with") def colocate_with(op, ignore_existing=False): return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing) @tf_export("control_dependencies") def control_dependencies(control_inputs): """Wrapper for `Graph.control_dependencies()` using the default graph. See @{tf.Graph.control_dependencies} for more details. When eager execution is enabled, any callable object in the `control_inputs` list will be called. Args: control_inputs: A list of `Operation` or `Tensor` objects which must be executed or computed before running the operations defined in the context. Can also be `None` to clear the control dependencies. If eager execution is enabled, any callable object in the `control_inputs` list will be called. Returns: A context manager that specifies control dependencies for all operations constructed within the context. """ if context.executing_eagerly(): if control_inputs: # Excute any pending callables. 
for control in control_inputs: if callable(control): control() return _NullContextmanager() else: return get_default_graph().control_dependencies(control_inputs) class _DefaultStack(threading.local): """A thread-local stack of objects for providing implicit defaults.""" def __init__(self): super(_DefaultStack, self).__init__() self._enforce_nesting = True self.stack = [] def get_default(self): return self.stack[-1] if len(self.stack) >= 1 else None def reset(self): self.stack = [] def is_cleared(self): return not self.stack @property def enforce_nesting(self): return self._enforce_nesting @enforce_nesting.setter def enforce_nesting(self, value): self._enforce_nesting = value @tf_contextlib.contextmanager def get_controller(self, default): """A context manager for manipulating a default stack.""" try: self.stack.append(default) yield default finally: # stack may be empty if reset() was called if self.stack: if self._enforce_nesting: if self.stack[-1] is not default: raise AssertionError( "Nesting violated for default stack of %s objects" % type(default)) self.stack.pop() else: self.stack.remove(default) _default_session_stack = _DefaultStack() # pylint: disable=protected-access def default_session(session): """Python "with" handler for defining a default session. This function provides a means of registering a session for handling Tensor.eval() and Operation.run() calls. It is primarily intended for use by session.Session, but can be used with any object that implements the Session.run() interface. Use with the "with" keyword to specify that Tensor.eval() and Operation.run() invocations within the scope of a block should be executed by a particular session. The default session applies to the current thread only, so it is always possible to inspect the call stack and determine the scope of a default session. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a "with ops.default_session(sess):" block in that thread's function. Example: The following code examples are equivalent: # 1. Using the Session object directly: sess = ... c = tf.constant(5.0) sess.run(c) # 2. Using default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) result = c.eval() # 3. Overriding default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) with ops.default_session(...): c.eval(session=sess) Args: session: The session to be installed as the default session. Returns: A context manager for the default session. """ return _default_session_stack.get_controller(session) @tf_export("get_default_session") def get_default_session(): """Returns the default session for the current thread. The returned `Session` will be the innermost session on which a `Session` or `Session.as_default()` context has been entered. NOTE: The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a `with sess.as_default():` in that thread's function. Returns: The default `Session` being used in the current thread. """ return _default_session_stack.get_default() def _eval_using_default_session(tensors, feed_dict, graph, session=None): """Uses the default session to evaluate one or more tensors. Args: tensors: A single Tensor, or a list of Tensor objects. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which the tensors are defined. 
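  As a sketch tying the helpers above together (note that the public
  `tf.Session` installs itself as the default session when used as a context
  manager):

  ```python
  with tf.Graph().as_default():
    c = tf.constant(5.0)
    with tf.Session() as sess:
      assert tf.get_default_session() is sess
      assert c.eval() == 5.0   # resolved through the default session
      assert sess.run(c) == 5.0
  ```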
session: (Optional) A different session to use to evaluate "tensors". Returns: Either a single numpy ndarray if "tensors" is a single tensor; or a list of numpy ndarrays that each correspond to the respective element in "tensors". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph. """ if session is None: session = get_default_session() if session is None: raise ValueError("Cannot evaluate tensor using `eval()`: No default " "session is registered. Use `with " "sess.as_default()` or pass an explicit session to " "`eval(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to evaluate tensor: " "the tensor's graph is different from the session's " "graph. Pass an explicit session to " "`eval(session=sess)`.") else: if session.graph is not graph: raise ValueError("Cannot use the given session to evaluate tensor: " "the tensor's graph is different from the session's " "graph.") return session.run(tensors, feed_dict) def _run_using_default_session(operation, feed_dict, graph, session=None): """Uses the default session to run "operation". Args: operation: The Operation to be run. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which "operation" is defined. session: (Optional) A different session to use to run "operation". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph. """ if session is None: session = get_default_session() if session is None: raise ValueError("Cannot execute operation using `run()`: No default " "session is registered. Use `with " "sess.as_default():` or pass an explicit session to " "`run(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to execute operation: " "the operation's graph is different from the " "session's graph. Pass an explicit session to " "run(session=sess).") else: if session.graph is not graph: raise ValueError("Cannot use the given session to execute operation: " "the operation's graph is different from the session's " "graph.") session.run(operation, feed_dict) class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access """A thread-local stack of objects for providing an implicit default graph.""" def __init__(self): super(_DefaultGraphStack, self).__init__() self._global_default_graph = None def get_default(self): """Override that returns a global default if the stack is empty.""" ret = super(_DefaultGraphStack, self).get_default() if ret is None: ret = self._GetGlobalDefaultGraph() return ret def _GetGlobalDefaultGraph(self): if self._global_default_graph is None: # TODO(mrry): Perhaps log that the default graph is being used, or set # provide some other feedback to prevent confusion when a mixture of # the global default graph and an explicit graph are combined in the # same process. 
self._global_default_graph = Graph() return self._global_default_graph def reset(self): super(_DefaultGraphStack, self).reset() self._global_default_graph = None @tf_contextlib.contextmanager def get_controller(self, default): try: if context.executing_eagerly(): # A Graph alone on the context stack would keep init_scope-wrapped # operations graph building when entered (assuming init_scope is called # in a graph building context). Instead, we push a context which first # enables eager execution and then re-enters the Graph. context.context().context_switches.push( default.building_function, functools.partial( _enter_context_and_graph, context.eager_mode, default.as_default)) else: # This Graph is being used from a graph building context. A lack of # context switch implies that the context is graph building. context.context().context_switches.push(default.building_function, default.as_default) with super(_DefaultGraphStack, self).get_controller(default) as g: yield g finally: context.context().context_switches.pop() @tf_contextlib.contextmanager def _enter_context_and_graph(context_fn, graph_fn): """Combines two context managers.""" with context_fn(), graph_fn(): yield _default_graph_stack = _DefaultGraphStack() # pylint: disable=g-doc-return-or-yield,line-too-long @tf_contextlib.contextmanager def init_scope(): """A context manager that lifts ops out of control-flow scopes and function-building graphs. There is often a need to lift variable initialization ops out of control-flow scopes, function-building graphs, and gradient tapes. Entering an `init_scope` is a mechanism for satisfying these desiderata. In particular, entering an `init_scope` has three effects: (1) All control dependencies are cleared the moment the scope is entered; this is equivalent to entering the context manager returned from `control_dependencies(None)`, which has the side-effect of exiting control-flow scopes like `tf.cond` and `tf.while_loop`. (2) All operations that are created while the scope is active are lifted into the lowest context on the `context_stack` that is not building a graph function. Here, a context is defined as either a graph or an eager context. Every context switch, i.e., every installation of a graph as the default graph and every switch into eager mode, is logged in a thread-local stack called `context_switches`; the log entry for a context switch is popped from the stack when the context is exited. Entering an `init_scope` is equivalent to crawling up `context_switches`, finding the first context that is not building a graph function, and entering it. A caveat is that if graph mode is enabled but the default graph stack is empty, then entering an `init_scope` will simply install a fresh graph as the default one. (3) The gradient tape is paused while the scope is active. """ # pylint: enable=g-doc-return-or-yield,line-too-long if context.executing_eagerly(): # Fastpath. with tape.stop_recording(): yield else: # Retrieve the active name scope: entering an `init_scope` preserves # the name scope of the current context. default_graph = get_default_graph() scope = default_graph.get_name_scope() if scope and scope[-1] != '/': # Names that end with trailing slashes are treated by `name_scope` as # absolute. scope = scope + '/' outer_context = None if not _default_graph_stack.stack: # If the default graph stack is empty, then we cannot be building a # function. Install the global graph (which, in this case, is also the # default graph) as the outer context. 
if default_graph.building_function: raise RuntimeError("The global graph is building a function.") outer_context = default_graph.as_default else: # Find a context that is not building a function. for stack_entry in reversed(context.context().context_switches.stack): if not stack_entry.is_building_function: outer_context = stack_entry.enter_context_fn break if outer_context is None: # As a last resort, obtain the global default graph; this graph doesn't # necessarily live on the graph stack (and hence it doesn't necessarily # live on the context stack), but it is stored in the graph stack's # encapsulating object. outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access if outer_context is None: # Sanity check; this shouldn't be triggered. raise RuntimeError("All graphs are building functions, and no " "eager context was previously active.") with outer_context(), name_scope(scope), control_dependencies( None), tape.stop_recording(): yield @tf_export("enable_eager_execution") def enable_eager_execution(config=None, device_policy=None, execution_mode=None): """Enables eager execution for the lifetime of this program. Eager execution provides an imperative interface to TensorFlow. With eager execution enabled, TensorFlow functions execute operations immediately (as opposed to adding to a graph to be executed later in a @{tf.Session}) and return concrete values (as opposed to symbolic references to a node in a computational graph). For example: ```python tf.enable_eager_execution() # After eager execution is enabled, operations are executed as they are # defined and Tensor objects hold concrete values, which can be accessed as # numpy.ndarray`s through the numpy() method. assert tf.multiply(6, 7).numpy() == 42 ``` Eager execution cannot be enabled after TensorFlow APIs have been used to create or execute graphs. It is typically recommended to invoke this function at program startup and not in a library (as most libraries should be usable both with and without eager execution). Args: config: (Optional.) A @{tf.ConfigProto} to use to configure the environment in which operations are executed. Note that @{tf.ConfigProto} is also used to configure graph execution (via @{tf.Session}) and many options within `tf.ConfigProto` are not implemented (or are irrelevant) when eager execution is enabled. device_policy: (Optional.) Policy controlling how operations requiring inputs on a specific device (e.g., a GPU 0) handle inputs on a different device (e.g. GPU 1 or CPU). When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Valid values: - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not correct. - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the right device but logs a warning. - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors. Note that this may hide performance problems as there is no notification provided when operations are blocked on the tensor being copied between devices. - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors, raising errors on the other ones. execution_mode: (Optional.) Policy controlling how operations dispatched are actually executed. When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Valid values: - tf.contrib.eager.SYNC: executes each operation synchronously. 
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These operations may return "non-ready" handles. Raises: ValueError: If eager execution is enabled after creating/executing a TensorFlow graph, or if options provided conflict with a previous call to this function. """ if config is not None and not isinstance(config, config_pb2.ConfigProto): raise TypeError( "config must be a tf.ConfigProto, but got %s" % type(config)) if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT, context.DEVICE_PLACEMENT_WARN, context.DEVICE_PLACEMENT_SILENT, context.DEVICE_PLACEMENT_SILENT_FOR_INT32): raise ValueError( "device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*" ) if execution_mode not in (None, context.SYNC, context.ASYNC): raise ValueError( "execution_mode must be one of None, tf.contrib.eager.SYNC, " "tf.contrib.eager.ASYNC") # pylint: disable=protected-access if context._default_mode == context.GRAPH_MODE: graph_mode_has_been_used = ( _default_session_stack.stack or _default_graph_stack._global_default_graph is not None) if graph_mode_has_been_used: raise ValueError( "tf.enable_eager_execution must be called at program startup.") context._default_mode = context.EAGER_MODE if context._context is None: context._context = context.Context( config=config, device_policy=device_policy, execution_mode=execution_mode) elif ((config is not None and config is not context._context._config) or (device_policy is not None and device_policy is not context._context._device_policy) or (execution_mode is not None and execution_mode is not context._context._execution_mode)): raise ValueError("Trying to change the options of an active eager" " execution. Context config: %s, specified config:" " %s. Context device policy: %s, specified device" " policy: %s. Context execution mode: %s, " " specified execution mode %s." % (context._context._config, config, context._context._device_policy, device_policy, context._context._execution_mode, execution_mode)) else: raise ValueError( "tf.enable_eager_execution must be called at program startup.") # Monkey patch to get rid of an unnecessary conditional since the context is # now initialized. context.context = context.context_safe def eager_run(main=None, argv=None): """Runs the program with an optional main function and argv list. The program will run with eager execution enabled. Example: ```python import tensorflow as tf # Import subject to future changes: from tensorflow.contrib.eager.python import tfe def main(_): u = tf.constant(6.0) v = tf.constant(7.0) print(u * v) if __name__ == "__main__": tfe.run() ``` Args: main: the main function to run. argv: the arguments to pass to it. """ enable_eager_execution() app.run(main, argv) @tf_export("reset_default_graph") def reset_default_graph(): """Clears the default graph stack and resets the global default graph. NOTE: The default graph is a property of the current thread. This function applies only to the current thread. Calling this function while a `tf.Session` or `tf.InteractiveSession` is active will result in undefined behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects after calling this function will result in undefined behavior. Raises: AssertionError: If this function is called within a nested graph. """ if not _default_graph_stack.is_cleared(): raise AssertionError("Do not use tf.reset_default_graph() to clear " "nested graphs. 
If you need a cleared graph, " "exit the nesting and create a new graph.") _default_graph_stack.reset() @tf_export("get_default_graph") def get_default_graph(): """Returns the default graph for the current thread. The returned graph will be the innermost graph on which a `Graph.as_default()` context has been entered, or a global default graph if none has been explicitly created. NOTE: The default graph is a property of the current thread. If you create a new thread, and wish to use the default graph in that thread, you must explicitly add a `with g.as_default():` in that thread's function. Returns: The default `Graph` being used in the current thread. """ return _default_graph_stack.get_default() def get_name_scope(): """Returns the current name scope in the default_graph. For example: ```python with tf.name_scope('scope1'): with tf.name_scope('scope2'): print(tf.get_name_scope()) ``` would print the string `scope1/scope2`. Returns: A string representing the current name scope. """ if context.executing_eagerly(): return context.context().scope_name.rstrip("/") return get_default_graph().get_name_scope() def _assert_same_graph(original_item, item): """Fail if the 2 items are from different graphs. Args: original_item: Original item to check against. item: Item to check. Raises: ValueError: if graphs do not match. """ if original_item.graph is not item.graph: raise ValueError("%s must be from the same graph as %s." % (item, original_item)) def _get_graph_from_inputs(op_input_list, graph=None): """Returns the appropriate graph to use for the given inputs. This library method provides a consistent algorithm for choosing the graph in which an Operation should be constructed: 1. If the default graph is being used to construct a function, we use the default graph. 2. If the "graph" is specified explicitly, we validate that all of the inputs in "op_input_list" are compatible with that graph. 3. Otherwise, we attempt to select a graph from the first Operation- or Tensor-valued input in "op_input_list", and validate that all other such inputs are in the same graph. 4. If the graph was not specified and it could not be inferred from "op_input_list", we attempt to use the default graph. Args: op_input_list: A list of inputs to an operation, which may include `Tensor`, `Operation`, and other objects that may be converted to a graph element. graph: (Optional) The explicit graph to use. Raises: TypeError: If op_input_list is not a list or tuple, or if graph is not a Graph. ValueError: If a graph is explicitly passed and not all inputs are from it, or if the inputs are from multiple graphs, or we could not find a graph and there was no default graph. Returns: The appropriate graph to use for the given inputs. """ if get_default_graph().building_function: return get_default_graph() op_input_list = tuple(op_input_list) # Handle generators correctly if graph and not isinstance(graph, Graph): raise TypeError("Input graph needs to be a Graph: %s" % graph) # 1. We validate that all of the inputs are from the same graph. This is # either the supplied graph parameter, or the first one selected from one # the graph-element-valued inputs. In the latter case, we hold onto # that input in original_graph_element so we can provide a more # informative error if a mismatch is found. original_graph_element = None for op_input in op_input_list: # Determine if this is a valid graph_element. # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this # up. 
graph_element = None if (isinstance(op_input, (Operation, _TensorLike)) and ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck graph_element = op_input else: graph_element = _as_graph_element(op_input) if graph_element is not None: if not graph: original_graph_element = graph_element graph = graph_element.graph elif original_graph_element is not None: _assert_same_graph(original_graph_element, graph_element) elif graph_element.graph is not graph: raise ValueError("%s is not from the passed-in graph." % graph_element) # 2. If all else fails, we use the default graph, which is always there. return graph or get_default_graph() @tf_export("GraphKeys") class GraphKeys(object): """Standard names to use for graph collections. The standard library uses various well-known names to collect and retrieve values associated with a graph. For example, the `tf.Optimizer` subclasses default to optimizing the variables collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is specified, but it is also possible to pass an explicit list of variables. The following standard keys are defined: * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared across distributed environment (model variables are subset of these). See @{tf.global_variables} for more details. Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`, and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`. * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each machine. Usually used for temporarily variables, like counters. Note: use `tf.contrib.framework.local_variable` to add to this collection. * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the model for inference (feed forward). Note: use `tf.contrib.framework.model_variable` to add to this collection. * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will be trained by an optimizer. See @{tf.trainable_variables} for more details. * `SUMMARIES`: the summary `Tensor` objects that have been created in the graph. See @{tf.summary.merge_all} for more details. * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to produce input for a computation. See @{tf.train.start_queue_runners} for more details. * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also keep moving averages. See @{tf.moving_average_variables} for more details. * `REGULARIZATION_LOSSES`: regularization losses collected during graph construction. The following standard keys are _defined_, but their collections are **not** automatically populated as many of the others are: * `WEIGHTS` * `BIASES` * `ACTIVATIONS` """ # Key to collect Variable objects that are global (shared across machines). # Default collection for all variables, except local ones. GLOBAL_VARIABLES = "variables" # Key to collect local variables that are local to the machine and are not # saved/restored. LOCAL_VARIABLES = "local_variables" # Key to collect local variables which are used to accumulate interal state # to be used in tf.metrics.*. METRIC_VARIABLES = "metric_variables" # Key to collect model variables defined by layers. MODEL_VARIABLES = "model_variables" # Key to collect Variable objects that will be trained by the # optimizers. TRAINABLE_VARIABLES = "trainable_variables" # Key to collect summaries. SUMMARIES = "summaries" # Key to collect QueueRunners. QUEUE_RUNNERS = "queue_runners" # Key to collect table initializers. 
TABLE_INITIALIZERS = "table_initializer" # Key to collect asset filepaths. An asset represents an external resource # like a vocabulary file. ASSET_FILEPATHS = "asset_filepaths" # Key to collect Variable objects that keep moving averages. MOVING_AVERAGE_VARIABLES = "moving_average_variables" # Key to collect regularization losses at graph construction. REGULARIZATION_LOSSES = "regularization_losses" # Key to collect concatenated sharded variables. CONCATENATED_VARIABLES = "concatenated_variables" # Key to collect savers. SAVERS = "savers" # Key to collect weights WEIGHTS = "weights" # Key to collect biases BIASES = "biases" # Key to collect activations ACTIVATIONS = "activations" # Key to collect update_ops UPDATE_OPS = "update_ops" # Key to collect losses LOSSES = "losses" # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing. SAVEABLE_OBJECTS = "saveable_objects" # Key to collect all shared resources used by the graph which need to be # initialized once per cluster. RESOURCES = "resources" # Key to collect all shared resources used in this graph which need to be # initialized once per session. LOCAL_RESOURCES = "local_resources" # Trainable resource-style variables. TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables" # Key to indicate various ops. INIT_OP = "init_op" LOCAL_INIT_OP = "local_init_op" READY_OP = "ready_op" READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op" SUMMARY_OP = "summary_op" GLOBAL_STEP = "global_step" # Used to count the number of evaluations performed during a single evaluation # run. EVAL_STEP = "eval_step" TRAIN_OP = "train_op" # Key for control flow context. COND_CONTEXT = "cond_context" WHILE_CONTEXT = "while_context" # Used to store v2 summary names. _SUMMARY_COLLECTION = "_SUMMARY_V2" # List of all collections that keep track of variables. _VARIABLE_COLLECTIONS = [ GLOBAL_VARIABLES, LOCAL_VARIABLES, METRIC_VARIABLES, MODEL_VARIABLES, TRAINABLE_VARIABLES, MOVING_AVERAGE_VARIABLES, CONCATENATED_VARIABLES, TRAINABLE_RESOURCE_VARIABLES, ] # Key for streaming model ports. # NOTE(yuanbyu): internal and experimental. _STREAMING_MODEL_PORTS = "streaming_model_ports" @decorator_utils.classproperty def VARIABLES(cls): # pylint: disable=no-self-argument logging.log_first_n(logging.WARN, "VARIABLES collection name is deprecated, please use " "GLOBAL_VARIABLES instead; VARIABLES will be removed " "after 2017-03-02.", 1) return cls.GLOBAL_VARIABLES @tf_export("add_to_collection") def add_to_collection(name, value): """Wrapper for `Graph.add_to_collection()` using the default graph. See @{tf.Graph.add_to_collection} for more details. Args: name: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. value: The value to add to the collection. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility """ get_default_graph().add_to_collection(name, value) @tf_export("add_to_collections") def add_to_collections(names, value): """Wrapper for `Graph.add_to_collections()` using the default graph. See @{tf.Graph.add_to_collections} for more details. Args: names: The key for the collections. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collections. @compatibility(eager) Collections are not supported when eager execution is enabled. 
@end_compatibility """ get_default_graph().add_to_collections(names, value) @tf_export("get_collection_ref") def get_collection_ref(key): """Wrapper for `Graph.get_collection_ref()` using the default graph. See @{tf.Graph.get_collection_ref} for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. Note that this returns the collection list itself, which can be modified in place to change the collection. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility """ return get_default_graph().get_collection_ref(key) @tf_export("get_collection") def get_collection(key, scope=None): """Wrapper for `Graph.get_collection()` using the default graph. See @{tf.Graph.get_collection} for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. scope: (Optional.) If supplied, the resulting list is filtered to include only items whose `name` attribute matches using `re.match`. Items without a `name` attribute are never returned if a scope is supplied and the choice or `re.match` means that a `scope` without special tokens filters by prefix. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility """ return get_default_graph().get_collection(key, scope) def get_all_collection_keys(): """Returns a list of collections used in the default graph.""" return get_default_graph().get_all_collection_keys() name_scope_cache = {} # Named like a function for backwards compatibility with the # @tf_contextlib.contextmanager version, which was switched to a class to avoid # some object creation overhead. @tf_export("name_scope", "keras.backend.name_scope") class name_scope(object): # pylint: disable=invalid-name """A context manager for use when defining a Python op. This context manager validates that the given `values` are from the same graph, makes that graph the default graph, and pushes a name scope in that graph (see @{tf.Graph.name_scope} for more details on that). For example, to define a new Python op called `my_op`: ```python def my_op(a, b, c, name=None): with tf.name_scope(name, "MyOp", [a, b, c]) as scope: a = tf.convert_to_tensor(a, name="a") b = tf.convert_to_tensor(b, name="b") c = tf.convert_to_tensor(c, name="c") # Define some computation that uses `a`, `b`, and `c`. return foo_op(..., name=scope) ``` """ @property def name(self): return self._name def __init__(self, name, default_name=None, values=None): """Initialize the context manager. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the `name` argument is `None`. values: The list of `Tensor` arguments that are passed to the op function. """ self._name = default_name if name is None else name self._default_name = default_name self._values = values self._ctx = context.context() self._in_eager_mode = self._ctx.executing_eagerly() def __enter__(self): """Start the scope block. Returns: The scope name. Raises: ValueError: if neither `name` nor `default_name` is provided but `values` are. 
""" if self._in_eager_mode: self._old_name = self._ctx.scope_name if not self._name: scope_name = "" else: cache_key = self._name, self._old_name, self._default_name if cache_key in name_scope_cache: self._ctx.scope_name = name_scope_cache[cache_key] return self._ctx.scope_name elif self._name[-1] == "/": # A trailing slash breaks out of nested name scopes, indicating a # fully specified scope name, for compatibility with Graph.name_scope. scope_name = self._name else: name_with_trailing_slash = self._name + "/" scope_name = ( self._old_name + name_with_trailing_slash if self._old_name else name_with_trailing_slash) name_scope_cache[cache_key] = scope_name self._ctx.scope_name = scope_name return scope_name else: if self._name is None and self._values is not None: # We only raise an error if values is not None (provided) because # currently tf.name_scope(None) (values=None then) is sometimes used as # an idiom to reset to top scope. raise ValueError( "At least one of name (%s) and default_name (%s) must be provided." % (self._name, self._default_name)) if self._values is None: self._values = [] g = _get_graph_from_inputs(self._values) self._g_manager = g.as_default() self._g_manager.__enter__() try: self._name_scope = g.name_scope(self._name) return self._name_scope.__enter__() except: self._g_manager.__exit__(*sys.exc_info()) raise def __exit__(self, type_arg, value_arg, traceback_arg): if self._in_eager_mode: self._ctx.scope_name = self._old_name else: self._name_scope.__exit__(type_arg, value_arg, traceback_arg) self._g_manager.__exit__(type_arg, value_arg, traceback_arg) return False # False values do not suppress exceptions def strip_name_scope(name, export_scope): """Removes name scope from a name. Args: name: A `string` name. export_scope: Optional `string`. Name scope to remove. Returns: Name with name scope removed, or the original name if export_scope is None. """ if export_scope: if export_scope[-1] == "/": export_scope = export_scope[:-1] try: # Strips export_scope/, export_scope///, # ^export_scope/, loc:@export_scope/. str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)" return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name def prepend_name_scope(name, import_scope): """Prepends name scope to a name. Args: name: A `string` name. import_scope: Optional `string`. Name scope to add. Returns: Name with name scope added, or the original name if import_scope is None. """ if import_scope: if import_scope[-1] == "/": import_scope = import_scope[:-1] try: str_to_replace = r"([\^]|loc:@|^)(.*)" return re.sub(str_to_replace, r"\1" + import_scope + r"/\2", compat.as_str(name)) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name # pylint: disable=g-doc-return-or-yield # pylint: disable=not-context-manager @tf_export("op_scope") @tf_contextlib.contextmanager def op_scope(values, name, default_name=None): """DEPRECATED. 
Same as name_scope above, just different argument order.""" logging.warn("tf.op_scope(values, name, default_name) is deprecated," " use tf.name_scope(name, default_name, values)") with name_scope(name, default_name=default_name, values=values) as scope: yield scope _proto_function_registry = registry.Registry("proto functions") def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None): """Registers `to_proto` and `from_proto` functions for collection_name. `to_proto` function converts a Python object to the corresponding protocol buffer, and returns the protocol buffer. `from_proto` function converts protocol buffer into a Python object, and returns the object.. Args: collection_name: Name of the collection. proto_type: Protobuf type, such as `saver_pb2.SaverDef`, `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.. to_proto: Function that implements Python object to protobuf conversion. from_proto: Function that implements protobuf to Python object conversion. """ if to_proto and not callable(to_proto): raise TypeError("to_proto must be callable.") if from_proto and not callable(from_proto): raise TypeError("from_proto must be callable.") _proto_function_registry.register((proto_type, to_proto, from_proto), collection_name) def get_collection_proto_type(collection_name): """Returns the proto_type for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[0] except LookupError: return None def get_to_proto_function(collection_name): """Returns the to_proto function for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[1] except LookupError: return None def get_from_proto_function(collection_name): """Returns the from_proto function for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[2] except LookupError: return None def _assert_collection_is_ok(collection_name): if context.executing_eagerly(): if collection_name in GraphKeys._VARIABLE_COLLECTIONS: # pylint: disable=protected-access raise ValueError( "variable collections are not supported when eager execution is enabled." ) def _operation_conversion_error(op, dtype=None, name=None, as_ref=False): """Produce a nice error if someone converts an Operation to a Tensor.""" raise TypeError(("Can't convert Operation '%s' to Tensor " "(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype, name, as_ref)) register_tensor_conversion_function(Operation, _operation_conversion_error)
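The graph-utility wrappers defined above (`name_scope`, the collection helpers, and `reset_default_graph`) are easiest to see together in a small snippet. A minimal sketch, assuming a TF 1.x-style graph build with eager execution not enabled:

import tensorflow as tf

# Nested name scopes prefix the names of ops created inside them.
with tf.name_scope("outer"):
    with tf.name_scope("inner"):
        print(tf.get_default_graph().get_name_scope())  # -> "outer/inner"
        c = tf.constant(1.0, name="c")                   # tensor name: "outer/inner/c:0"

# add_to_collection/get_collection are thin wrappers around the default
# graph's collection map.
tf.add_to_collection("my_things", c)
assert tf.get_collection("my_things") == [c]

# reset_default_graph() swaps in a fresh graph for this thread, so the
# collection is empty again (the old tensor `c` must not be used afterwards).
tf.reset_default_graph()
assert tf.get_collection("my_things") == []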
eaplatanios/tensorflow
tensorflow/python/framework/ops.py
Python
apache-2.0
221,991
# Copyright 2008-2015 Nokia Networks # Copyright 2016- Robot Framework Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from robot.errors import ExecutionFailed, DataError, PassExecution from robot.model import SuiteVisitor from robot.result import TestSuite, Result from robot.utils import get_timestamp, is_list_like, NormalizedDict, unic from robot.variables import VariableScopes from .context import EXECUTION_CONTEXTS from .steprunner import StepRunner from .namespace import IMPORTER, Namespace from .status import SuiteStatus, TestStatus from .timeouts import TestTimeout # Some 'extract method' love needed here. Perhaps even 'extract class'. class Runner(SuiteVisitor): def __init__(self, output, settings): self.result = None self._output = output self._settings = settings self._variables = VariableScopes(settings) self._suite = None self._suite_status = None self._executed_tests = None @property def _context(self): return EXECUTION_CONTEXTS.current def start_suite(self, suite): self._output.library_listeners.new_suite_scope() result = TestSuite(source=suite.source, name=suite.name, doc=suite.doc, metadata=suite.metadata, starttime=get_timestamp()) if not self.result: result.set_criticality(self._settings.critical_tags, self._settings.non_critical_tags) self.result = Result(root_suite=result) self.result.configure(status_rc=self._settings.status_rc, stat_config=self._settings.statistics_config) else: self._suite.suites.append(result) self._suite = result self._suite_status = SuiteStatus(self._suite_status, self._settings.exit_on_failure, self._settings.exit_on_error, self._settings.skip_teardown_on_exit) ns = Namespace(self._variables, result, suite.resource) ns.start_suite() ns.variables.set_from_variable_table(suite.resource.variables) EXECUTION_CONTEXTS.start_suite(result, ns, self._output, self._settings.dry_run) self._context.set_suite_variables(result) if not self._suite_status.failures: ns.handle_imports() ns.variables.resolve_delayed() result.doc = self._resolve_setting(result.doc) result.metadata = [(self._resolve_setting(n), self._resolve_setting(v)) for n, v in result.metadata.items()] self._context.set_suite_variables(result) self._output.start_suite(ModelCombiner(suite, result, tests=suite.tests, suites=suite.suites, test_count=suite.test_count)) self._output.register_error_listener(self._suite_status.error_occurred) self._run_setup(suite.keywords.setup, self._suite_status) self._executed_tests = NormalizedDict(ignore='_') def _resolve_setting(self, value): if is_list_like(value): return self._variables.replace_list(value, ignore_errors=True) return self._variables.replace_string(value, ignore_errors=True) def end_suite(self, suite): self._suite.message = self._suite_status.message self._context.report_suite_status(self._suite.status, self._suite.full_message) with self._context.suite_teardown(): failure = self._run_teardown(suite.keywords.teardown, self._suite_status) if failure: self._suite.suite_teardown_failed(unic(failure)) if self._suite.statistics.critical.failed: 
self._suite_status.critical_failure_occurred() self._suite.endtime = get_timestamp() self._suite.message = self._suite_status.message self._context.end_suite(ModelCombiner(suite, self._suite)) self._suite = self._suite.parent self._suite_status = self._suite_status.parent self._output.library_listeners.discard_suite_scope() if not suite.parent: IMPORTER.close_global_library_listeners() def visit_test(self, test): if test.name in self._executed_tests: self._output.warn("Multiple test cases with name '%s' executed in " "test suite '%s'." % (test.name, self._suite.longname)) self._executed_tests[test.name] = True result = self._suite.tests.create(name=test.name, doc=self._resolve_setting(test.doc), tags=self._resolve_setting(test.tags), starttime=get_timestamp(), timeout=self._get_timeout(test)) self._context.start_test(result) self._output.start_test(ModelCombiner(test, result)) status = TestStatus(self._suite_status, result.critical) if not status.failures and not test.name: status.test_failed('Test case name cannot be empty.') if not status.failures and not test.keywords.normal: status.test_failed('Test case contains no keywords.') if status.exit: self._add_exit_combine() result.tags.add('robot-exit') self._run_setup(test.keywords.setup, status, result) try: if not status.failures: StepRunner(self._context, test.template).run_steps(test.keywords.normal) else: status.test_failed(status.message) except PassExecution as exception: err = exception.earlier_failures if err: status.test_failed(err) else: result.message = exception.message except ExecutionFailed as err: status.test_failed(err) result.status = status.status result.message = status.message or result.message if status.teardown_allowed: with self._context.test_teardown(result): failure = self._run_teardown(test.keywords.teardown, status, result) if failure and result.critical: status.critical_failure_occurred() if not status.failures and result.timeout and result.timeout.timed_out(): status.test_failed(result.timeout.get_message()) result.message = status.message result.status = status.status result.endtime = get_timestamp() self._output.end_test(ModelCombiner(test, result)) self._context.end_test(result) def _add_exit_combine(self): exit_combine = ('NOT robot-exit', '') if exit_combine not in self._settings['TagStatCombine']: self._settings['TagStatCombine'].append(exit_combine) def _get_timeout(self, test): if not test.timeout: return None return TestTimeout(test.timeout.value, test.timeout.message, self._variables) def _run_setup(self, setup, status, result=None): if not status.failures: exception = self._run_setup_or_teardown(setup) status.setup_executed(exception) if result and isinstance(exception, PassExecution): result.message = exception.message def _run_teardown(self, teardown, status, result=None): if status.teardown_allowed: exception = self._run_setup_or_teardown(teardown) status.teardown_executed(exception) failed = not isinstance(exception, PassExecution) if result and exception: result.message = status.message if failed else exception.message return exception if failed else None def _run_setup_or_teardown(self, data): if not data: return None try: name = self._variables.replace_string(data.name) except DataError as err: if self._settings.dry_run: return None return err if name.upper() in ('', 'NONE'): return None try: StepRunner(self._context).run_step(data, name=name) except ExecutionFailed as err: return err class ModelCombiner(object): def __init__(self, data, result, **priority): self.data = data self.result = 
result self.priority = priority def __getattr__(self, name): if name in self.priority: return self.priority[name] if hasattr(self.result, name): return getattr(self.result, name) if hasattr(self.data, name): return getattr(self.data, name) raise AttributeError(name)
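ModelCombiner's `__getattr__` resolves attributes in a fixed order: explicit `priority` keyword arguments first, then the result object, then the data object. A small illustration with hypothetical stand-in objects (not real Robot Framework models):

class FakeData(object):
    name = "data-name"
    source = "suite.robot"

class FakeResult(object):
    name = "result-name"

combiner = ModelCombiner(FakeData(), FakeResult(), tests=["t1"])
print(combiner.tests)   # from the priority kwargs         -> ['t1']
print(combiner.name)    # found on the result object first -> 'result-name'
print(combiner.source)  # falls back to the data object    -> 'suite.robot'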
alexandrul-ci/robotframework
src/robot/running/runner.py
Python
apache-2.0
9,678
# Copyright 2015, Doug Wiegley (dougwig), A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import mock from neutron import context from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2 with mock.patch.dict(sys.modules, {'a10_neutron_lbaas': mock.Mock()}): from neutron_lbaas.drivers.a10networks import driver_v2 class FakeModel(object): def __init__(self, id): self.id = id self.address = '1.1.1.1' self.tenant_id = "tennant-was-a-great-doctor" class ManagerTest(object): def __init__(self, parent, manager, model, mocked_root): self.parent = parent self.context = parent.context self.driver = parent.driver self.manager = manager self.model = model self.mocked_root = mocked_root self.create(model) self.update(model, model) self.delete(model) def create(self, model): self.manager.create(self.context, model) self.mocked_root.create.assert_called_with(self.context, model) def update(self, old_model, model): self.manager.update(self.context, old_model, model) self.mocked_root.update.assert_called_with(self.context, old_model, model) def delete(self, model): self.manager.delete(self.context, model) self.mocked_root.delete.assert_called_with(self.context, model) def refresh(self): self.manager.refresh(self.context, self.model) self.mocked_root.refresh.assert_called_with(self.context, self.model) def stats(self): self.manager.stats(self.context, self.model) self.mocked_root.stats.assert_called_with(self.context, self.model) class TestA10ThunderDriver(test_db_loadbalancerv2.LbaasPluginDbTestCase): def setUp(self): super(TestA10ThunderDriver, self).setUp() self.context = context.get_admin_context() self.plugin = mock.Mock() self.driver = driver_v2.ThunderDriver(self.plugin) self.driver.a10 = mock.Mock() def test_load_balancer_ops(self): m = ManagerTest(self, self.driver.load_balancer, FakeModel("loadbalancer-a10"), self.driver.a10.lb) m.refresh() m.stats() def test_listener_ops(self): ManagerTest(self, self.driver.listener, FakeModel("listener-a10"), self.driver.a10.listener) def test_pool_ops(self): ManagerTest(self, self.driver.pool, FakeModel("pool-10"), self.driver.a10.pool) def test_member_ops(self): ManagerTest(self, self.driver.member, FakeModel("member-a10"), self.driver.a10.member) def test_health_monitor_ops(self): ManagerTest(self, self.driver.health_monitor, FakeModel("hm-a10"), self.driver.a10.hm)
gandelman-a/neutron-lbaas
neutron_lbaas/tests/unit/drivers/a10networks/test_driver_v2.py
Python
apache-2.0
3,412
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helpers to convert variables to constants in TensorFlow 2.0.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import tensor_shape_pb2 from tensorflow.core.framework import variable_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.eager import wrap_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util from tensorflow.python.grappler import tf_optimizer from tensorflow.python.ops import array_ops from tensorflow.python.util import object_identity from tensorflow.python.training.saver import export_meta_graph _CONDITIONAL_OPS = set(["If", "StatelessIf"]) _LOOP_OPS = set(["While", "StatelessWhile"]) _CONTROL_FLOW_OPS = _CONDITIONAL_OPS.union(_LOOP_OPS) def disable_lower_using_switch_merge(graph_def): """Set '_lower_using_switch_merge' attributes to False. Sets the attribute to False in the NodeDefs in the main graph and the NodeDefs in each function's graph. Args: graph_def: GraphDef proto. Returns: GraphDef """ output_graph_def = graph_pb2.GraphDef() output_graph_def.CopyFrom(graph_def) def disable_control_flow_lowering(node): if node.op in _CONTROL_FLOW_OPS: node.attr["_lower_using_switch_merge"].b = False for node in output_graph_def.node: disable_control_flow_lowering(node) if output_graph_def.library: for func in output_graph_def.library.function: for node in func.node_def: disable_control_flow_lowering(node) return output_graph_def def _run_inline_graph_optimization(func, lower_control_flow): """Apply function inline optimization to the graph. Returns the GraphDef after Grappler's function inlining optimization is applied. This optimization does not work on models with control flow. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) Returns: GraphDef """ graph_def = func.graph.as_graph_def() if not lower_control_flow: graph_def = disable_lower_using_switch_merge(graph_def) # In some cases, a secondary implementation of the function (e.g. for GPU) is # written to the "api_implements" attribute. (e.g. `tf.keras.layers.LSTM` in # TF2 produces a CuDNN-based RNN for GPU). # This function suppose to inline all functions calls, but "api_implements" # prevents this from happening. Removing the attribute solves the problem. 
# To learn more about "api_implements", see: # tensorflow/core/grappler/optimizers/implementation_selector.h for function in graph_def.library.function: if "api_implements" in function.attr: del function.attr["api_implements"] meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph) # Clear the initializer_name for the variables collections, since they are not # needed after saved to saved_model. for name in [ "variables", "model_variables", "trainable_variables", "local_variables" ]: raw_list = [] for raw in meta_graph.collection_def["variables"].bytes_list.value: variable = variable_pb2.VariableDef() variable.ParseFromString(raw) variable.ClearField("initializer_name") raw_list.append(variable.SerializeToString()) meta_graph.collection_def[name].bytes_list.value[:] = raw_list # Add a collection 'train_op' so that Grappler knows the outputs. fetch_collection = meta_graph_pb2.CollectionDef() for array in func.inputs + func.outputs: fetch_collection.node_list.value.append(array.name) meta_graph.collection_def["train_op"].CopyFrom(fetch_collection) # Initialize RewriterConfig with everything disabled except function inlining. config = config_pb2.ConfigProto() rewrite_options = config.graph_options.rewrite_options rewrite_options.min_graph_nodes = -1 # do not skip small graphs rewrite_options.optimizers.append("function") return tf_optimizer.OptimizeGraph(config, meta_graph) def _get_tensor_name(name): """Returns the name of the input tensor. Args: name: str Returns: str """ return name.split(":")[0] def _get_new_function_name(name): """Returns the function name with '_frozen' appended. Args: name: str Returns: str """ return name + "_frozen" def _get_node_defs_list(graph_def): """Returns a list of NodeDefs in the GraphDef. This list consists of all NodeDefs in the main graph as well as all control flow NodeDefs in the functions. The remaining NodeDefs in the functions are not included because the op names are not unique and the variables are handled differently than the main graph. The control flow ops need to be extracted because they are need their attributes to be updated similar to the control flow ops in the main graph. Args: graph_def: GraphDef proto. Returns: [NodeDef] """ node_defs = list(graph_def.node) if graph_def.library: for func in graph_def.library.function: node_defs.extend( [node for node in func.node_def if node.op in _CONTROL_FLOW_OPS]) return node_defs def _get_tensor_data(func): """Gets the tensor data for all Placeholders in the model. Returns a dictionary that maps the tensor name to a dictionary containing: data: numpy data index: int index in func.graph.captures is_variable: bool indicating whether the tensor is a variable or not Args: func: ConcreteFunction. Returns: Dict """ tensor_data = {} map_index_to_variable = {} for var in func.graph.variables: for idx, captured_input in enumerate(func.captured_inputs): if var.handle is captured_input: # pylint: disable=protected-access map_index_to_variable[idx] = var break # Iterates through all captures which are represented as Placeholders. 
for idx, (val_tensor, name_tensor) in enumerate(func.graph.captures): tensor_name = _get_tensor_name(name_tensor.name) is_variable = idx in map_index_to_variable if is_variable: data = map_index_to_variable[idx].numpy() else: data = val_tensor.numpy() tensor_data[tensor_name] = { "data": data, "index": idx, "is_variable": is_variable, } return tensor_data def _get_control_flow_function_data(node_defs, tensor_data, name_to_node): """Gets the types and shapes for the parameters to the function. Creates a map from function name to a list of types and a list of shapes that correspond with the function arguments. The data is primarily determined from the corresponding "If" or "While" op. If the argument is a resource variable, then the type is determined from the type of the data contained within the Tensor. The shape data is only determined in the case of the "While" op. `is_also_output_type` is used to identify the "While" bodies that require the output types to be updated at the same time the input types are updated. Args: node_defs: List of NodeDefs. tensor_data: {str name : Tensor}. name_to_node: Dictionary mapping node name to node object. Returns: {str function name : {"types" : [int representing DataType], "shapes" : [[int] representing TensorShape]], "is_also_output_type" : bool} """ func_data = {} def get_source_node_name_through_identities(node_name): # Trace the source node along with a chain of Identity nodes. # For example, given Plaecholder -> Identity -> Identity -> node_name # The function will return the name of the Placeholder. while name_to_node[node_name].op == "Identity": node_name = _get_tensor_name(name_to_node[node_name].input[0]) return node_name def get_resource_type(node_name): node_name = get_source_node_name_through_identities(node_name) numpy_type = tensor_data[node_name]["data"].dtype return dtypes.as_dtype(numpy_type).as_datatype_enum def get_resource_shape(node_name): node_name = get_source_node_name_through_identities(node_name) return tensor_shape_pb2.TensorShapeProto(dim=[ tensor_shape_pb2.TensorShapeProto.Dim(size=dim) for dim in tensor_data[node_name]["data"].shape ]) def add_value(func_name, arg_types, output_shapes, is_also_output_type): func_data[func_name] = { "types": arg_types, "shapes": output_shapes, "is_also_output_type": is_also_output_type } for node in node_defs: if node.op in _CONDITIONAL_OPS: arg_types = [dtype for dtype in node.attr["Tin"].list.type] for idx in range(len(arg_types)): if arg_types[idx] == dtypes.resource: # Skip first index which represents the condition. arg_types[idx] = get_resource_type(node.input[idx + 1]) add_value(node.attr["then_branch"].func.name, arg_types, None, False) add_value(node.attr["else_branch"].func.name, arg_types, None, False) elif node.op in _LOOP_OPS: arg_types = [dtype for dtype in node.attr["T"].list.type] output_shapes = [shape for shape in node.attr["output_shapes"].list.shape] for idx in range(len(arg_types)): if arg_types[idx] == dtypes.resource: input_name = node.input[idx] arg_types[idx] = get_resource_type(input_name) output_shapes[idx] = get_resource_shape(input_name) add_value(node.attr["body"].func.name, arg_types, output_shapes, True) add_value(node.attr["cond"].func.name, arg_types, output_shapes, False) return func_data def _populate_const_op(output_node, node_name, dtype, data, data_shape): """Creates a Const op. Args: output_node: TensorFlow NodeDef. node_name: str node name. dtype: AttrValue with a populated .type field. data: numpy data value. 
data_shape: Tuple of integers containing data shape. """ output_node.op = "Const" output_node.name = node_name output_node.attr["dtype"].CopyFrom(dtype) tensor = tensor_util.make_tensor_proto( data, dtype=dtype.type, shape=data_shape) output_node.attr["value"].tensor.CopyFrom(tensor) def _populate_identity_op(output_node, input_node): """Creates an Identity op from a ReadVariable op. Args: output_node: TensorFlow NodeDef. input_node: TensorFlow NodeDef. """ output_node.op = "Identity" output_node.name = input_node.name output_node.input.append(input_node.input[0]) output_node.attr["T"].CopyFrom(input_node.attr["dtype"]) if "_class" in input_node.attr: output_node.attr["_class"].CopyFrom(input_node.attr["_class"]) def _populate_if_op(output_node, input_node, function_data): """Updates the type attributes and function names of If or StatelessIf. Args: output_node: TensorFlow NodeDef. input_node: TensorFlow NodeDef. function_data: Map of function names to the list of types and shapes that correspond with the function arguments. """ output_node.CopyFrom(input_node) then_func = input_node.attr["then_branch"].func.name output_node.attr["then_branch"].func.name = _get_new_function_name(then_func) output_node.attr["else_branch"].func.name = _get_new_function_name( input_node.attr["else_branch"].func.name) output_node.attr["Tin"].list.CopyFrom( attr_value_pb2.AttrValue.ListValue( type=function_data[then_func]["types"])) def _populate_while_op(output_node, input_node, function_data): """Updates the type attributes and function names of While or StatelessWhile. Args: output_node: TensorFlow NodeDef. input_node: TensorFlow NodeDef. function_data: Map of function names to the list of types and shapes that correspond with the function arguments. """ output_node.CopyFrom(input_node) cond_func = input_node.attr["cond"].func.name output_node.attr["cond"].func.name = _get_new_function_name(cond_func) output_node.attr["body"].func.name = _get_new_function_name( input_node.attr["body"].func.name) output_node.attr["T"].list.CopyFrom( attr_value_pb2.AttrValue.ListValue( type=function_data[cond_func]["types"])) output_node.attr["output_shapes"].list.CopyFrom( attr_value_pb2.AttrValue.ListValue( shape=function_data[cond_func]["shapes"])) def _construct_concrete_function(func, output_graph_def, converted_input_indices): """Constructs a concrete function from the `output_graph_def`. Args: func: ConcreteFunction output_graph_def: GraphDef proto. converted_input_indices: Set of integers of input indices that were converted to constants. Returns: ConcreteFunction. """ # Create a ConcreteFunction from the new GraphDef. input_tensors = func.graph.internal_captures converted_inputs = object_identity.ObjectIdentitySet( [input_tensors[index] for index in converted_input_indices]) not_converted_inputs = [ tensor for tensor in func.inputs if tensor not in converted_inputs] not_converted_inputs_map = { tensor.name: tensor for tensor in not_converted_inputs } new_input_names = [tensor.name for tensor in not_converted_inputs] new_output_names = [tensor.name for tensor in func.outputs] new_func = wrap_function.function_from_graph_def(output_graph_def, new_input_names, new_output_names) # Manually propagate shape for input tensors where the shape is not correctly # propagated. Scalars shapes are lost when wrapping the function. 
for input_tensor in new_func.inputs: input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape) return new_func def _convert_variables_to_constants_v2_impl(func, lower_control_flow=True): """Replaces all the variables in a graph with constants of the same values. TensorFlow 2.0 function for converting all Variable ops into Const ops holding the same values. This makes it possible to describe the network fully with a single GraphDef file, and allows the removal of a lot of ops related to loading and saving the variables. This function runs Grappler's function inlining optimization in order to return a single subgraph. The current implementation only works for graphs that do not contain any control flow or embedding related ops. Note that the NodeDefs in the returned GraphDef contains the original node names if they are created by the graph optimization. Converting the GraphDef to concrete function will lose these debug information. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) Returns: GraphDef containing a simplified version of the original and converted input indices that were converted to constants. """ # Inline the graph in order to remove functions when possible. graph_def = _run_inline_graph_optimization(func, lower_control_flow) # Gets list of all node defs include those in the library. node_defs = _get_node_defs_list(graph_def) # Get mapping from node name to node. name_to_node = {_get_tensor_name(node.name): node for node in node_defs} # Get mapping from node name to variable value. tensor_data = _get_tensor_data(func) # Get mapping from function name to argument types. function_data = _get_control_flow_function_data( node_defs, tensor_data, name_to_node) # Get variable data for all nodes in `node_defs`. reference_variables = {} resource_identities = {} placeholders = {} converted_input_indices = set() def _save_placeholder(node_name, dtype): placeholders[node_name] = { "dtype": dtype, "data": tensor_data[node_name]["data"], } converted_input_indices.add(tensor_data[node_name]["index"]) for node in node_defs: if node.op in _CONDITIONAL_OPS: # Get dtype and data for resource Placeholders. then_func = node.attr["then_branch"].func.name arg_types = function_data[then_func]["types"] for idx, input_tensor in enumerate(node.input[1:]): input_name = _get_tensor_name(input_tensor) if input_name in tensor_data: dtype = attr_value_pb2.AttrValue(type=arg_types[idx]) _save_placeholder(_get_tensor_name(input_tensor), dtype) elif node.op in _LOOP_OPS: # Get dtype and data for resource Placeholders. cond_func = node.attr["cond"].func.name arg_types = function_data[cond_func]["types"] for idx, input_tensor in enumerate(node.input): input_name = _get_tensor_name(input_tensor) if input_name in tensor_data: dtype = attr_value_pb2.AttrValue(type=arg_types[idx]) _save_placeholder(_get_tensor_name(input_tensor), dtype) elif (node.op == "Identity" and node.attr["T"].type == dtypes.resource and name_to_node[_get_tensor_name(node.input[0])].op in _LOOP_OPS): # Store the dtype for Identity resource ops that are outputs of While ops. 
while_node = name_to_node[_get_tensor_name(node.input[0])] body_func = while_node.attr["body"].func.name input_data = node.input[0].split(":") idx = 0 if len(input_data) == 1 else int(input_data[1]) dtype = attr_value_pb2.AttrValue( type=function_data[body_func]["types"][idx]) resource_identities[node.name] = dtype elif node.op == "VariableV2": # Get data for VariableV2 ops (reference variables) that cannot be lifted. with func.graph.as_default(): identity_node = array_ops.identity( func.graph.as_graph_element(node.name + ":0")) reference_variables[node.name] = ( func.prune([], [identity_node.name])()[0]) elif node.name in tensor_data and not tensor_data[node.name]["is_variable"]: # Get dtype and data for non-variable Placeholders (ex. values for 1.X # Const ops that are loaded as Placeholders in 2.0) _save_placeholder(node.name, node.attr["dtype"]) elif node.op in ["ReadVariableOp", "ResourceGather"]: # Get dtype and data for Placeholder ops associated with ReadVariableOp # and ResourceGather ops. There can be an Identity in between the # resource op and Placeholder. Store the dtype for the Identity ops. input_name = _get_tensor_name(node.input[0]) while name_to_node[input_name].op == "Identity": resource_identities[input_name] = node.attr["dtype"] input_name = _get_tensor_name(name_to_node[input_name].input[0]) if name_to_node[input_name].op != "Placeholder": raise ValueError("Cannot find the Placeholder op that is an input " "to the ReadVariableOp.") _save_placeholder(input_name, node.attr["dtype"]) # Reconstruct the graph with constants in place of variables. output_graph_def = graph_pb2.GraphDef() for input_node in graph_def.node: output_node = output_graph_def.node.add() # Convert VariableV2 ops to Const ops. if input_node.name in reference_variables: data = reference_variables[input_node.name] dtype = attr_value_pb2.AttrValue(type=data.dtype.as_datatype_enum) _populate_const_op(output_node, input_node.name, dtype, data.numpy(), data.shape) # Convert Placeholder ops to Const ops. elif input_node.name in placeholders: data = placeholders[input_node.name]["data"] dtype = placeholders[input_node.name]["dtype"] _populate_const_op(output_node, input_node.name, dtype, data, data.shape) # Update the dtype for Identity ops that are inputs to ReadVariableOps. elif input_node.name in resource_identities: output_node.CopyFrom(input_node) output_node.attr["T"].CopyFrom(resource_identities[input_node.name]) # Convert ReadVariableOps to Identity ops. elif input_node.op == "ReadVariableOp": _populate_identity_op(output_node, input_node) # Convert ResourceGather to Gather ops with a Const axis feeding into it. 
elif input_node.op == "ResourceGather": if input_node.attr["batch_dims"].i != 0: raise ValueError("batch_dims != 0 is not supported by freeze_graph.") output_axis_node = output_graph_def.node.add() axis_node_name = input_node.name + "/axis" axis_dtype = input_node.attr["Tindices"] axis_data = np.array(input_node.attr["batch_dims"].i) _populate_const_op(output_axis_node, axis_node_name, axis_dtype, axis_data, axis_data.shape) output_node.op = "GatherV2" output_node.name = input_node.name output_node.input.extend( [input_node.input[0], input_node.input[1], axis_node_name]) output_node.attr["Tparams"].CopyFrom(input_node.attr["dtype"]) output_node.attr["Tindices"].CopyFrom(input_node.attr["Tindices"]) output_node.attr["Taxis"].CopyFrom(axis_dtype) if "_class" in input_node.attr: output_node.attr["_class"].CopyFrom(input_node.attr["_class"]) # Update the function names and argument types for the conditional ops. elif input_node.op in _CONDITIONAL_OPS: _populate_if_op(output_node, input_node, function_data) elif input_node.op in _LOOP_OPS: _populate_while_op(output_node, input_node, function_data) else: output_node.CopyFrom(input_node) # Add functions to reconstructed graph. if graph_def.library: library = output_graph_def.library for input_library_func in graph_def.library.function: orig_func_name = input_library_func.signature.name new_func_name = _get_new_function_name(orig_func_name) # Do not copy any functions that aren't being used in the graph. Any # functions that are not used by control flow should have been inlined. if orig_func_name not in function_data: continue output_library_func = library.function.add() for key, value in input_library_func.ret.items(): output_library_func.ret[key] = value for key, value in input_library_func.control_ret.items(): output_library_func.control_ret[key] = value # Update the input types in the function signature. Update the output # types for functions that are while loop bodies. output_library_func.signature.CopyFrom(input_library_func.signature) output_library_func.signature.name = new_func_name for dtype, arg in zip(function_data[orig_func_name]["types"], output_library_func.signature.input_arg): arg.type = dtype if function_data[orig_func_name]["is_also_output_type"]: for dtype, arg in zip(function_data[orig_func_name]["types"], output_library_func.signature.output_arg): arg.type = dtype # Update the NodeDefs. func_variables = { node.name: node.input[0] for node in input_library_func.node_def if node.op == "ReadVariableOp" } for input_node in input_library_func.node_def: output_node = output_library_func.node_def.add() # Convert ReadVariableOps to Identity ops. if input_node.op == "ReadVariableOp": _populate_identity_op(output_node, input_node) # Update the function names and argument types for the conditional ops. elif input_node.op in _CONDITIONAL_OPS: _populate_if_op(output_node, input_node, function_data) elif input_node.op in _LOOP_OPS: _populate_while_op(output_node, input_node, function_data) else: output_node.CopyFrom(input_node) # Convert :value to :output for ops that use the ReadVariableOp. 
for idx, full_name in enumerate(input_node.input): input_name = _get_tensor_name(full_name) if input_name in func_variables: full_name_parts = full_name.split(":") full_name_parts[1] = "output" input_name = ":".join(full_name_parts) output_node.input[idx] = input_name output_graph_def.versions.CopyFrom(graph_def.versions) return (output_graph_def, converted_input_indices) def convert_variables_to_constants_v2(func, lower_control_flow=True): """Replaces all the variables in a graph with constants of the same values. TensorFlow 2.0 function for converting all Variable ops into Const ops holding the same values. This makes it possible to describe the network fully with a single GraphDef file, and allows the removal of a lot of ops related to loading and saving the variables. This function runs Grappler's function inlining optimization in order to return a single subgraph. The current implementation only works for graphs that do not contain any control flow or embedding related ops. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) Returns: ConcreteFunction containing a simplified version of the original. """ output_graph_def, converted_inputs = _convert_variables_to_constants_v2_impl( func, lower_control_flow) return _construct_concrete_function(func, output_graph_def, converted_inputs) def convert_variables_to_constants_v2_as_graph(func, lower_control_flow=True): """Replaces all the variables in a graph with constants of the same values. This function works as same as convert_variables_to_constants_v2, but it returns the intermediate `GraphDef` as well. This `GraphDef` contains all the debug information after all the transformations in the frozen phase. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) Returns: ConcreteFunction containing a simplified version of the original, and also the intermediate GraphDef containing the node debug information for the transformations in the frozen phase. """ graph_def, converted_inputs = _convert_variables_to_constants_v2_impl( func, lower_control_flow) frozen_func = _construct_concrete_function(func, graph_def, converted_inputs) return frozen_func, graph_def
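A minimal usage sketch for `convert_variables_to_constants_v2`, assuming a TF 2.x environment where this module is importable and using a small Keras model as a stand-in for a real network:

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import (
    convert_variables_to_constants_v2)

# A tiny model whose kernel/bias variables get folded into Const nodes.
model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])

# Freeze a ConcreteFunction traced for a fixed input signature.
func = tf.function(lambda x: model(x)).get_concrete_function(
    tf.TensorSpec([None, 3], tf.float32))
frozen_func = convert_variables_to_constants_v2(func)

# The frozen graph no longer contains ReadVariableOp/VariableV2 nodes.
print(sorted({node.op for node in frozen_func.graph.as_graph_def().node}))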
ppwwyyxx/tensorflow
tensorflow/python/framework/convert_to_constants.py
Python
apache-2.0
27,106
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for CTC (Connectionist Temporal Classification). @@ctc_loss @@ctc_greedy_decoder @@ctc_beam_search_decoder """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import,wildcard-import from tensorflow.contrib.ctc.ctc_ops import *
shakamunyi/tensorflow
tensorflow/contrib/ctc/__init__.py
Python
apache-2.0
1,014
#!/usr/bin/python # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example adds campaigns. To get campaigns, run get_campaigns.py. The LoadFromStorage method is pulling credentials and properties from a "googleads.yaml" file. By default, it looks for this file in your home directory. For more information, see the "Caching authentication information" section of our README. Tags: CampaignService.mutate Tags: BudgetService.mutate """ __author__ = ('api.kwinter@gmail.com (Kevin Winter)' 'Joseph DiLallo') import datetime import uuid from googleads import adwords def main(client): # Initialize appropriate services. campaign_service = client.GetService('CampaignService', version='v201506') budget_service = client.GetService('BudgetService', version='v201506') # Create a budget, which can be shared by multiple campaigns. budget = { 'name': 'Interplanetary budget #%s' % uuid.uuid4(), 'amount': { 'microAmount': '50000000' }, 'deliveryMethod': 'STANDARD', 'period': 'DAILY' } budget_operations = [{ 'operator': 'ADD', 'operand': budget }] # Add the budget. budget_id = budget_service.mutate(budget_operations)['value'][0][ 'budgetId'] # Construct operations and add campaigns. operations = [{ 'operator': 'ADD', 'operand': { 'name': 'Interplanetary Cruise #%s' % uuid.uuid4(), 'status': 'PAUSED', 'advertisingChannelType': 'SEARCH', 'biddingStrategyConfiguration': { 'biddingStrategyType': 'MANUAL_CPC', }, 'endDate': (datetime.datetime.now() + datetime.timedelta(365)).strftime('%Y%m%d'), # Note that only the budgetId is required 'budget': { 'budgetId': budget_id }, 'networkSetting': { 'targetGoogleSearch': 'true', 'targetSearchNetwork': 'true', 'targetContentNetwork': 'false', 'targetPartnerSearchNetwork': 'false' }, # Optional fields 'startDate': (datetime.datetime.now() + datetime.timedelta(1)).strftime('%Y%m%d'), 'adServingOptimizationStatus': 'ROTATE', 'frequencyCap': { 'impressions': '5', 'timeUnit': 'DAY', 'level': 'ADGROUP' }, 'settings': [ { 'xsi_type': 'GeoTargetTypeSetting', 'positiveGeoTargetType': 'DONT_CARE', 'negativeGeoTargetType': 'DONT_CARE' } ] } }, { 'operator': 'ADD', 'operand': { 'name': 'Interplanetary Cruise banner #%s' % uuid.uuid4(), 'status': 'PAUSED', 'biddingStrategyConfiguration': { 'biddingStrategyType': 'MANUAL_CPC' }, 'endDate': (datetime.datetime.now() + datetime.timedelta(365)).strftime('%Y%m%d'), # Note that only the budgetId is required 'budget': { 'budgetId': budget_id }, 'advertisingChannelType': 'DISPLAY' } }] campaigns = campaign_service.mutate(operations) # Display results. for campaign in campaigns['value']: print ('Campaign with name \'%s\' and id \'%s\' was added.' % (campaign['name'], campaign['id'])) if __name__ == '__main__': # Initialize client object. adwords_client = adwords.AdWordsClient.LoadFromStorage() main(adwords_client)
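A hypothetical follow-up inside `main()`, in the spirit of the `get_campaigns.py` example the docstring points to: after the mutate call, the same `campaign_service` can read the campaigns back with a selector. The field names below follow the v201506 CampaignService API; the snippet is an illustration, not part of the original example.

# Hypothetical follow-up: read back campaigns with a selector.
selector = {
    'fields': ['Id', 'Name', 'Status'],
    'paging': {'startIndex': '0', 'numberResults': '50'},
}
page = campaign_service.get(selector)
if 'entries' in page:
    for campaign in page['entries']:
        print ('Found campaign \'%s\' with id \'%s\'.'
               % (campaign['name'], campaign['id']))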
coxmediagroup/googleads-python-lib
examples/adwords/v201506/basic_operations/add_campaigns.py
Python
apache-2.0
4,119
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from horizon import tabs class OverviewTab(tabs.Tab): name = _("Overview") slug = "overview" template_name = "project/cg_snapshots/_detail_overview.html" def get_context_data(self, request): cg_snapshot = self.tab_group.kwargs['cg_snapshot'] return {"cg_snapshot": cg_snapshot} def get_redirect_url(self): return reverse('horizon:project:cg_snapshots:index') class CGSnapshotsDetailTabs(tabs.TabGroup): slug = "cg_snapshots_details" tabs = (OverviewTab,)
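A hedged sketch of how a tab group like this is typically attached to a Horizon view; the view class and template path below are illustrative assumptions, not code from this repository:

from horizon import tabs as horizon_tabs

from openstack_dashboard.dashboards.project.cg_snapshots \
    import tabs as cg_snapshot_tabs


class DetailView(horizon_tabs.TabView):
    # Assumed wiring: TabView renders the tab group declared above.
    tab_group_class = cg_snapshot_tabs.CGSnapshotsDetailTabs
    template_name = 'project/cg_snapshots/detail.html'  # hypothetical template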
BiznetGIO/horizon
openstack_dashboard/dashboards/project/cg_snapshots/tabs.py
Python
apache-2.0
1,188
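A hedged sketch, not taken from the repository: one common way a TabGroup such as CGSnapshotsDetailTabs is consumed is through a horizon.tabs.TabView subclass in the dashboard's views module. The DetailView class, the template path, and the get_data helper below are illustrative placeholders; only TabView, tab_group_class, template_name and get_tabs are standard Horizon pieces.

from horizon import tabs as horizon_tabs

from openstack_dashboard.dashboards.project.cg_snapshots \
    import tabs as cg_snapshot_tabs


class DetailView(horizon_tabs.TabView):
    tab_group_class = cg_snapshot_tabs.CGSnapshotsDetailTabs
    template_name = 'project/cg_snapshots/detail.html'  # assumed template path

    def get_data(self, request, **kwargs):
        # Hypothetical helper: a real dashboard would look the cg_snapshot up
        # via its API client using the id from the URL kwargs.
        return kwargs.get('cg_snapshot_id')

    def get_tabs(self, request, *args, **kwargs):
        # The tab group receives the cg_snapshot via kwargs, which is what
        # OverviewTab.get_context_data() reads back out in the file above.
        cg_snapshot = self.get_data(request, **kwargs)
        return self.tab_group_class(request, cg_snapshot=cg_snapshot, **kwargs)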
#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Simplified chat demo for websockets. Authentication, error handling, etc are left as an exercise for the reader :) """ import os.path import uuid import sys import time from collections import defaultdict from twisted.python import log from twisted.internet import reactor, task import cyclone.escape import cyclone.web import cyclone.websocket class Application(cyclone.web.Application): def __init__(self): stats = Stats() handlers = [ (r"/", MainHandler, dict(stats=stats)), (r"/stats", StatsPageHandler), (r"/statssocket", StatsSocketHandler, dict(stats=stats)), (r"/chatsocket", ChatSocketHandler, dict(stats=stats)), ] settings = dict( cookie_secret="43oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=", template_path=os.path.join(os.path.dirname(__file__), "templates"), static_path=os.path.join(os.path.dirname(__file__), "static"), xsrf_cookies=True, autoescape=None, ) cyclone.web.Application.__init__(self, handlers, **settings) class MainHandler(cyclone.web.RequestHandler): def initialize(self, stats): self.stats = stats def get(self): self.stats.newVisit() self.render("index.html", messages=ChatSocketHandler.cache) class ChatSocketHandler(cyclone.websocket.WebSocketHandler): waiters = set() cache = [] cache_size = 200 def initialize(self, stats): self.stats = stats def connectionMade(self): ChatSocketHandler.waiters.add(self) self.stats.newChatter() def connectionLost(self, reason): ChatSocketHandler.waiters.remove(self) self.stats.lostChatter() @classmethod def update_cache(cls, chat): cls.cache.append(chat) if len(cls.cache) > cls.cache_size: cls.cache = cls.cache[-cls.cache_size:] @classmethod def send_updates(cls, chat): log.msg("sending message to %d waiters" % len(cls.waiters)) for waiter in cls.waiters: try: waiter.sendMessage(chat) except Exception, e: log.err("Error sending message. 
%s" % str(e)) def messageReceived(self, message): log.msg("got message %s" % message) parsed = cyclone.escape.json_decode(message) chat = { "id": str(uuid.uuid4()), "body": parsed["body"], } chat["html"] = self.render_string("message.html", message=chat) ChatSocketHandler.update_cache(chat) ChatSocketHandler.send_updates(chat) class StatsSocketHandler(cyclone.websocket.WebSocketHandler): def initialize(self, stats): self.stats = stats self._updater = task.LoopingCall(self._sendData) def connectionMade(self): self._updater.start(2) def connectionLost(self, reason): self._updater.stop() def _sendData(self): data = dict(visits=self.stats.todaysVisits(), chatters=self.stats.chatters) self.sendMessage(cyclone.escape.json_encode(data)) class Stats(object): def __init__(self): self.visits = defaultdict(int) self.chatters = 0 def todaysVisits(self): today = time.localtime() key = time.strftime('%Y%m%d', today) return self.visits[key] def newChatter(self): self.chatters += 1 def lostChatter(self): self.chatters -= 1 def newVisit(self): today = time.localtime() key = time.strftime('%Y%m%d', today) self.visits[key] += 1 class StatsPageHandler(cyclone.web.RequestHandler): def get(self): self.render("stats.html") def main(): reactor.listenTCP(8888, Application()) reactor.run() if __name__ == "__main__": log.startLogging(sys.stdout) main()
lextoumbourou/cyclone
demos/websocket/chat/chatdemo.py
Python
apache-2.0
4,474
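A minimal sketch, assuming only the cyclone APIs already used in the chat demo above: a bare echo handler that exercises the same connectionMade / messageReceived / connectionLost lifecycle without the chat cache, templates or stats plumbing. The EchoSocketHandler name, the /echosocket route and port 8889 are illustrative.

import sys

from twisted.internet import reactor
from twisted.python import log

import cyclone.web
import cyclone.websocket


class EchoSocketHandler(cyclone.websocket.WebSocketHandler):
    clients = set()

    def connectionMade(self):
        EchoSocketHandler.clients.add(self)

    def connectionLost(self, reason):
        EchoSocketHandler.clients.discard(self)

    def messageReceived(self, message):
        # Echo the payload back to every connected client.
        for client in EchoSocketHandler.clients:
            client.sendMessage(message)


def main():
    app = cyclone.web.Application([(r"/echosocket", EchoSocketHandler)])
    reactor.listenTCP(8889, app)
    reactor.run()


if __name__ == "__main__":
    log.startLogging(sys.stdout)
    main()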
#
# Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#

import os
import stat
import argparse
import shutil
import subprocess
import platform


def ParseArguments():
    argMap = {}

    parser = argparse.ArgumentParser(description="AWSNativeSDK Run all Integration Tests")
    parser.add_argument("--buildDir", action="store")
    parser.add_argument("--configuration", action="store")

    args = vars( parser.parse_args() )
    argMap[ "buildDir" ] = args[ "buildDir" ] or "./build"
    argMap[ "configuration" ] = args[ "configuration" ] or "Debug"

    return argMap


def AddExecutableBit(file):
    st = os.stat(file)
    os.chmod(file, st.st_mode | stat.S_IEXEC)


def Main():
    arguments = ParseArguments()

    configDir = ""
    exeExtension = ""

    #Visual Studio puts executables into a configuration sub-dir, so append that.
    if platform.system() == "Windows":
        configDir = arguments["configuration"]
        exeExtension = ".exe"

    dynamoDbTest = arguments["buildDir"] + "/aws-cpp-sdk-dynamodb-integration-tests/" + configDir + "/runDynamoDBIntegrationTests" + exeExtension
    AddExecutableBit(dynamoDbTest)
    subprocess.check_call(dynamoDbTest)

    sqsTest = arguments["buildDir"] + "/aws-cpp-sdk-sqs-integration-tests/" + configDir + "/runSqsIntegrationTests" + exeExtension
    AddExecutableBit(sqsTest)
    subprocess.check_call(sqsTest)

    s3Test = arguments["buildDir"] + "/aws-cpp-sdk-s3-integration-tests/" + configDir + "/runS3IntegrationTests" + exeExtension
    AddExecutableBit(s3Test)
    subprocess.check_call(s3Test)

    lambdaTest = arguments["buildDir"] + "/aws-cpp-sdk-lambda-integration-tests/" + configDir + "/runLambdaIntegrationTests" + exeExtension
    AddExecutableBit(lambdaTest)
    subprocess.check_call(lambdaTest)

    cognitoTest = arguments["buildDir"] + "/aws-cpp-sdk-cognitoidentity-integration-tests/" + configDir + "/runCognitoIntegrationTests" + exeExtension
    AddExecutableBit(cognitoTest)
    subprocess.check_call(cognitoTest)

    transferTest = arguments["buildDir"] + "/aws-cpp-sdk-transfer-tests/" + configDir + "/runTransferIntegrationTests" + exeExtension
    AddExecutableBit(transferTest)
    subprocess.check_call(transferTest)

    #These will cost you lots of money, don't run them unless you decide you want to test this functionality
    #cloudFrontTests = arguments["buildDir"] + "/aws-cpp-sdk-cloudfront-integration-tests/" + configDir + "/runCloudfrontIntegrationTests" + exeExtension
    #AddExecutableBit(cloudFrontTests)
    #subprocess.check_call(cloudFrontTests)

    #redshiftTests = arguments["buildDir"] + "/aws-cpp-sdk-redshift-integration-tests/" + configDir + "/runRedshiftIntegrationTests" + exeExtension
    #AddExecutableBit(redshiftTests)
    #subprocess.check_call(redshiftTests)


# Run from powershell; make sure msbuild is in PATH environment variable
Main()
zeliard/aws-sdk-cpp
scripts/run_integration_tests.py
Python
apache-2.0
3,508
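A hedged sketch of an alternative layout for the script above: the per-test "build path, chmod, check_call" block repeats six times, so the same behaviour can be expressed as a loop over a table of (directory, binary) pairs. The run_integration_tests function name is new; the directory and binary names are copied from the script, and the commented-out CloudFront/Redshift tests are intentionally left out.

import os
import platform
import stat
import subprocess


def add_executable_bit(path):
    st = os.stat(path)
    os.chmod(path, st.st_mode | stat.S_IEXEC)


def run_integration_tests(build_dir, configuration):
    # Visual Studio puts executables into a configuration sub-dir.
    config_dir = configuration if platform.system() == "Windows" else ""
    exe_ext = ".exe" if platform.system() == "Windows" else ""

    tests = [
        ("aws-cpp-sdk-dynamodb-integration-tests", "runDynamoDBIntegrationTests"),
        ("aws-cpp-sdk-sqs-integration-tests", "runSqsIntegrationTests"),
        ("aws-cpp-sdk-s3-integration-tests", "runS3IntegrationTests"),
        ("aws-cpp-sdk-lambda-integration-tests", "runLambdaIntegrationTests"),
        ("aws-cpp-sdk-cognitoidentity-integration-tests", "runCognitoIntegrationTests"),
        ("aws-cpp-sdk-transfer-tests", "runTransferIntegrationTests"),
    ]
    for test_dir, binary in tests:
        test_path = os.path.join(build_dir, test_dir, config_dir, binary + exe_ext)
        add_executable_bit(test_path)
        subprocess.check_call(test_path)


if __name__ == "__main__":
    run_integration_tests("./build", "Debug")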
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import io import copy import logging.config import os import shutil import tempfile import unittest import sys import json from urllib.parse import quote_plus from werkzeug.test import Client from airflow import models, configuration, settings from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG from airflow.models import DAG, DagRun, TaskInstance from airflow.operators.dummy_operator import DummyOperator from airflow.settings import Session from airflow.utils.timezone import datetime from airflow.www import app as application from airflow import configuration as conf class TestChartModelView(unittest.TestCase): CREATE_ENDPOINT = '/admin/chart/new/?url=/admin/chart/' @classmethod def setUpClass(cls): super(TestChartModelView, cls).setUpClass() session = Session() session.query(models.Chart).delete() session.query(models.User).delete() session.commit() user = models.User(username='airflow') session.add(user) session.commit() session.close() def setUp(self): super(TestChartModelView, self).setUp() configuration.load_test_config() app = application.create_app(testing=True) app.config['WTF_CSRF_METHODS'] = [] self.app = app.test_client() self.session = Session() self.chart = { 'label': 'chart', 'owner': 'airflow', 'conn_id': 'airflow_ci', } def tearDown(self): self.session.query(models.Chart).delete() self.session.commit() self.session.close() super(TestChartModelView, self).tearDown() @classmethod def tearDownClass(cls): session = Session() session.query(models.User).delete() session.commit() session.close() super(TestChartModelView, cls).tearDownClass() def test_create_chart(self): response = self.app.post( self.CREATE_ENDPOINT, data=self.chart, follow_redirects=True, ) self.assertEqual(response.status_code, 200) self.assertEqual(self.session.query(models.Chart).count(), 1) def test_get_chart(self): response = self.app.get( '/admin/chart?sort=3', follow_redirects=True, ) self.assertEqual(response.status_code, 200) self.assertIn('Sort by Owner', response.data.decode('utf-8')) class TestVariableView(unittest.TestCase): CREATE_ENDPOINT = '/admin/variable/new/?url=/admin/variable/' @classmethod def setUpClass(cls): super(TestVariableView, cls).setUpClass() session = Session() session.query(models.Variable).delete() session.commit() session.close() def setUp(self): super(TestVariableView, self).setUp() configuration.load_test_config() app = application.create_app(testing=True) app.config['WTF_CSRF_METHODS'] = [] self.app = app.test_client() self.session = Session() self.variable = { 'key': 'test_key', 'val': 'text_val', 'is_encrypted': True } def tearDown(self): self.session.query(models.Variable).delete() self.session.commit() self.session.close() super(TestVariableView, 
self).tearDown() def test_can_handle_error_on_decrypt(self): # create valid variable response = self.app.post( self.CREATE_ENDPOINT, data=self.variable, follow_redirects=True, ) self.assertEqual(response.status_code, 200) # update the variable with a wrong value, given that is encrypted Var = models.Variable (self.session.query(Var) .filter(Var.key == self.variable['key']) .update({ 'val': 'failed_value_not_encrypted' }, synchronize_session=False)) self.session.commit() # retrieve Variables page, should not fail and contain the Invalid # label for the variable response = self.app.get('/admin/variable', follow_redirects=True) self.assertEqual(response.status_code, 200) self.assertEqual(self.session.query(models.Variable).count(), 1) self.assertIn('<span class="label label-danger">Invalid</span>', response.data.decode('utf-8')) def test_xss_prevention(self): xss = "/admin/airflow/variables/asdf<img%20src=''%20onerror='alert(1);'>" response = self.app.get( xss, follow_redirects=True, ) self.assertEqual(response.status_code, 404) self.assertNotIn("<img src='' onerror='alert(1);'>", response.data.decode("utf-8")) class TestKnownEventView(unittest.TestCase): CREATE_ENDPOINT = '/admin/knownevent/new/?url=/admin/knownevent/' @classmethod def setUpClass(cls): super(TestKnownEventView, cls).setUpClass() session = Session() session.query(models.KnownEvent).delete() session.query(models.User).delete() session.commit() user = models.User(username='airflow') session.add(user) session.commit() cls.user_id = user.id session.close() def setUp(self): super(TestKnownEventView, self).setUp() configuration.load_test_config() app = application.create_app(testing=True) app.config['WTF_CSRF_METHODS'] = [] self.app = app.test_client() self.session = Session() self.known_event = { 'label': 'event-label', 'event_type': '1', 'start_date': '2017-06-05 12:00:00', 'end_date': '2017-06-05 13:00:00', 'reported_by': self.user_id, 'description': '', } def tearDown(self): self.session.query(models.KnownEvent).delete() self.session.commit() self.session.close() super(TestKnownEventView, self).tearDown() @classmethod def tearDownClass(cls): session = Session() session.query(models.User).delete() session.commit() session.close() super(TestKnownEventView, cls).tearDownClass() def test_create_known_event(self): response = self.app.post( self.CREATE_ENDPOINT, data=self.known_event, follow_redirects=True, ) self.assertEqual(response.status_code, 200) self.assertEqual(self.session.query(models.KnownEvent).count(), 1) def test_create_known_event_with_end_data_earlier_than_start_date(self): self.known_event['end_date'] = '2017-06-05 11:00:00' response = self.app.post( self.CREATE_ENDPOINT, data=self.known_event, follow_redirects=True, ) self.assertIn( 'Field must be greater than or equal to Start Date.', response.data.decode('utf-8'), ) self.assertEqual(self.session.query(models.KnownEvent).count(), 0) class TestPoolModelView(unittest.TestCase): CREATE_ENDPOINT = '/admin/pool/new/?url=/admin/pool/' @classmethod def setUpClass(cls): super(TestPoolModelView, cls).setUpClass() session = Session() session.query(models.Pool).delete() session.commit() session.close() def setUp(self): super(TestPoolModelView, self).setUp() configuration.load_test_config() app = application.create_app(testing=True) app.config['WTF_CSRF_METHODS'] = [] self.app = app.test_client() self.session = Session() self.pool = { 'pool': 'test-pool', 'slots': 777, 'description': 'test-pool-description', } def tearDown(self): self.session.query(models.Pool).delete() 
self.session.commit() self.session.close() super(TestPoolModelView, self).tearDown() def test_create_pool(self): response = self.app.post( self.CREATE_ENDPOINT, data=self.pool, follow_redirects=True, ) self.assertEqual(response.status_code, 200) self.assertEqual(self.session.query(models.Pool).count(), 1) def test_create_pool_with_same_name(self): # create test pool self.app.post( self.CREATE_ENDPOINT, data=self.pool, follow_redirects=True, ) # create pool with the same name response = self.app.post( self.CREATE_ENDPOINT, data=self.pool, follow_redirects=True, ) self.assertIn('Already exists.', response.data.decode('utf-8')) self.assertEqual(self.session.query(models.Pool).count(), 1) def test_create_pool_with_empty_name(self): self.pool['pool'] = '' response = self.app.post( self.CREATE_ENDPOINT, data=self.pool, follow_redirects=True, ) self.assertIn('This field is required.', response.data.decode('utf-8')) self.assertEqual(self.session.query(models.Pool).count(), 0) class TestLogView(unittest.TestCase): DAG_ID = 'dag_for_testing_log_view' TASK_ID = 'task_for_testing_log_view' DEFAULT_DATE = datetime(2017, 9, 1) ENDPOINT = '/admin/airflow/log?dag_id={dag_id}&task_id={task_id}&execution_date={execution_date}'.format( dag_id=DAG_ID, task_id=TASK_ID, execution_date=DEFAULT_DATE, ) @classmethod def setUpClass(cls): super(TestLogView, cls).setUpClass() session = Session() session.query(TaskInstance).filter( TaskInstance.dag_id == cls.DAG_ID and TaskInstance.task_id == cls.TASK_ID and TaskInstance.execution_date == cls.DEFAULT_DATE).delete() session.commit() session.close() def setUp(self): super(TestLogView, self).setUp() # Create a custom logging configuration configuration.load_test_config() logging_config = copy.deepcopy(DEFAULT_LOGGING_CONFIG) current_dir = os.path.dirname(os.path.abspath(__file__)) logging_config['handlers']['task']['base_log_folder'] = os.path.normpath( os.path.join(current_dir, 'test_logs')) logging_config['handlers']['task']['filename_template'] = \ '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts | replace(":", ".") }}/{{ try_number }}.log' # Write the custom logging configuration to a file self.settings_folder = tempfile.mkdtemp() settings_file = os.path.join(self.settings_folder, "airflow_local_settings.py") new_logging_file = "LOGGING_CONFIG = {}".format(logging_config) with open(settings_file, 'w') as handle: handle.writelines(new_logging_file) sys.path.append(self.settings_folder) conf.set('core', 'logging_config_class', 'airflow_local_settings.LOGGING_CONFIG') app = application.create_app(testing=True) self.app = app.test_client() self.session = Session() from airflow.www.views import dagbag dag = DAG(self.DAG_ID, start_date=self.DEFAULT_DATE) task = DummyOperator(task_id=self.TASK_ID, dag=dag) dagbag.bag_dag(dag, parent_dag=dag, root_dag=dag) ti = TaskInstance(task=task, execution_date=self.DEFAULT_DATE) ti.try_number = 1 self.session.merge(ti) self.session.commit() def tearDown(self): logging.config.dictConfig(DEFAULT_LOGGING_CONFIG) self.session.query(TaskInstance).filter( TaskInstance.dag_id == self.DAG_ID and TaskInstance.task_id == self.TASK_ID and TaskInstance.execution_date == self.DEFAULT_DATE).delete() self.session.commit() self.session.close() sys.path.remove(self.settings_folder) shutil.rmtree(self.settings_folder) conf.set('core', 'logging_config_class', '') super(TestLogView, self).tearDown() def test_get_file_task_log(self): response = self.app.get( TestLogView.ENDPOINT, follow_redirects=True, ) self.assertEqual(response.status_code, 200) 
self.assertIn('Log by attempts', response.data.decode('utf-8')) def test_get_logs_with_metadata(self): url_template = "/admin/airflow/get_logs_with_metadata?dag_id={}&" \ "task_id={}&execution_date={}&" \ "try_number={}&metadata={}" response = \ self.app.get(url_template.format(self.DAG_ID, self.TASK_ID, quote_plus(self.DEFAULT_DATE.isoformat()), 1, json.dumps({}))) self.assertIn('"message":', response.data.decode('utf-8')) self.assertIn('"metadata":', response.data.decode('utf-8')) self.assertIn('Log for testing.', response.data.decode('utf-8')) self.assertEqual(200, response.status_code) def test_get_logs_with_null_metadata(self): url_template = "/admin/airflow/get_logs_with_metadata?dag_id={}&" \ "task_id={}&execution_date={}&" \ "try_number={}&metadata=null" response = \ self.app.get(url_template.format(self.DAG_ID, self.TASK_ID, quote_plus(self.DEFAULT_DATE.isoformat()), 1)) self.assertIn('"message":', response.data.decode('utf-8')) self.assertIn('"metadata":', response.data.decode('utf-8')) self.assertIn('Log for testing.', response.data.decode('utf-8')) self.assertEqual(200, response.status_code) class TestVarImportView(unittest.TestCase): IMPORT_ENDPOINT = '/admin/airflow/varimport' @classmethod def setUpClass(cls): super(TestVarImportView, cls).setUpClass() session = Session() session.query(models.User).delete() session.commit() user = models.User(username='airflow') session.add(user) session.commit() session.close() def setUp(self): super(TestVarImportView, self).setUp() configuration.load_test_config() app = application.create_app(testing=True) app.config['WTF_CSRF_METHODS'] = [] self.app = app.test_client() def tearDown(self): super(TestVarImportView, self).tearDown() @classmethod def tearDownClass(cls): session = Session() session.query(models.User).delete() session.commit() session.close() super(TestVarImportView, cls).tearDownClass() def test_import_variables(self): content = ('{"str_key": "str_value", "int_key": 60,' '"list_key": [1, 2], "dict_key": {"k_a": 2, "k_b": 3}}') try: # python 3+ bytes_content = io.BytesIO(bytes(content, encoding='utf-8')) except TypeError: # python 2.7 bytes_content = io.BytesIO(bytes(content)) response = self.app.post( self.IMPORT_ENDPOINT, data={'file': (bytes_content, 'test.json')}, follow_redirects=True ) self.assertEqual(response.status_code, 200) body = response.data.decode('utf-8') self.assertIn('str_key', body) self.assertIn('int_key', body) self.assertIn('list_key', body) self.assertIn('dict_key', body) self.assertIn('str_value', body) self.assertIn('60', body) self.assertIn('[1, 2]', body) # As dicts are not ordered, we may get any of the following cases. 
case_a_dict = '{&#34;k_a&#34;: 2, &#34;k_b&#34;: 3}' case_b_dict = '{&#34;k_b&#34;: 3, &#34;k_a&#34;: 2}' try: self.assertIn(case_a_dict, body) except AssertionError: self.assertIn(case_b_dict, body) class TestMountPoint(unittest.TestCase): def setUp(self): super(TestMountPoint, self).setUp() configuration.load_test_config() configuration.conf.set("webserver", "base_url", "http://localhost:8080/test") config = dict() config['WTF_CSRF_METHODS'] = [] # Clear cached app to remount base_url forcefully application.app = None app = application.cached_app(config=config, testing=True) self.client = Client(app) def test_mount(self): response, _, _ = self.client.get('/', follow_redirects=True) txt = b''.join(response) self.assertEqual(b"Apache Airflow is not at this location", txt) response, _, _ = self.client.get('/test', follow_redirects=True) resp_html = b''.join(response) self.assertIn(b"DAGs", resp_html) class ViewWithDateTimeAndNumRunsAndDagRunsFormTester: DAG_ID = 'dag_for_testing_dt_nr_dr_form' DEFAULT_DATE = datetime(2017, 9, 1) RUNS_DATA = [ ('dag_run_for_testing_dt_nr_dr_form_4', datetime(2018, 4, 4)), ('dag_run_for_testing_dt_nr_dr_form_3', datetime(2018, 3, 3)), ('dag_run_for_testing_dt_nr_dr_form_2', datetime(2018, 2, 2)), ('dag_run_for_testing_dt_nr_dr_form_1', datetime(2018, 1, 1)), ] def __init__(self, test, endpoint): self.test = test self.endpoint = endpoint def setUp(self): configuration.load_test_config() app = application.create_app(testing=True) app.config['WTF_CSRF_METHODS'] = [] self.app = app.test_client() self.session = Session() from airflow.www.views import dagbag from airflow.utils.state import State dag = DAG(self.DAG_ID, start_date=self.DEFAULT_DATE) dagbag.bag_dag(dag, parent_dag=dag, root_dag=dag) self.runs = [] for rd in self.RUNS_DATA: run = dag.create_dagrun( run_id=rd[0], execution_date=rd[1], state=State.SUCCESS, external_trigger=True ) self.runs.append(run) def tearDown(self): self.session.query(DagRun).filter( DagRun.dag_id == self.DAG_ID).delete() self.session.commit() self.session.close() def assertBaseDateAndNumRuns(self, base_date, num_runs, data): self.test.assertNotIn('name="base_date" value="{}"'.format(base_date), data) self.test.assertNotIn('<option selected="" value="{}">{}</option>'.format( num_runs, num_runs), data) def assertRunIsNotInDropdown(self, run, data): self.test.assertNotIn(run.execution_date.isoformat(), data) self.test.assertNotIn(run.run_id, data) def assertRunIsInDropdownNotSelected(self, run, data): self.test.assertIn('<option value="{}">{}</option>'.format( run.execution_date.isoformat(), run.run_id), data) def assertRunIsSelected(self, run, data): self.test.assertIn('<option selected value="{}">{}</option>'.format( run.execution_date.isoformat(), run.run_id), data) def test_with_default_parameters(self): """ Tests graph view with no URL parameter. Should show all dag runs in the drop down. Should select the latest dag run. Should set base date to current date (not asserted) """ response = self.app.get( self.endpoint ) self.test.assertEqual(response.status_code, 200) data = response.data.decode('utf-8') self.test.assertIn('Base date:', data) self.test.assertIn('Number of runs:', data) self.assertRunIsSelected(self.runs[0], data) self.assertRunIsInDropdownNotSelected(self.runs[1], data) self.assertRunIsInDropdownNotSelected(self.runs[2], data) self.assertRunIsInDropdownNotSelected(self.runs[3], data) def test_with_execution_date_parameter_only(self): """ Tests graph view with execution_date URL parameter. 
Scenario: click link from dag runs view. Should only show dag runs older than execution_date in the drop down. Should select the particular dag run. Should set base date to execution date. """ response = self.app.get( self.endpoint + '&execution_date={}'.format( self.runs[1].execution_date.isoformat()) ) self.test.assertEqual(response.status_code, 200) data = response.data.decode('utf-8') self.assertBaseDateAndNumRuns( self.runs[1].execution_date, configuration.getint('webserver', 'default_dag_run_display_number'), data) self.assertRunIsNotInDropdown(self.runs[0], data) self.assertRunIsSelected(self.runs[1], data) self.assertRunIsInDropdownNotSelected(self.runs[2], data) self.assertRunIsInDropdownNotSelected(self.runs[3], data) def test_with_base_date_and_num_runs_parmeters_only(self): """ Tests graph view with base_date and num_runs URL parameters. Should only show dag runs older than base_date in the drop down, limited to num_runs. Should select the latest dag run. Should set base date and num runs to submitted values. """ response = self.app.get( self.endpoint + '&base_date={}&num_runs=2'.format( self.runs[1].execution_date.isoformat()) ) self.test.assertEqual(response.status_code, 200) data = response.data.decode('utf-8') self.assertBaseDateAndNumRuns(self.runs[1].execution_date, 2, data) self.assertRunIsNotInDropdown(self.runs[0], data) self.assertRunIsSelected(self.runs[1], data) self.assertRunIsInDropdownNotSelected(self.runs[2], data) self.assertRunIsNotInDropdown(self.runs[3], data) def test_with_base_date_and_num_runs_and_execution_date_outside(self): """ Tests graph view with base_date and num_runs and execution-date URL parameters. Scenario: change the base date and num runs and press "Go", the selected execution date is outside the new range. Should only show dag runs older than base_date in the drop down. Should select the latest dag run within the range. Should set base date and num runs to submitted values. """ response = self.app.get( self.endpoint + '&base_date={}&num_runs=42&execution_date={}'.format( self.runs[1].execution_date.isoformat(), self.runs[0].execution_date.isoformat()) ) self.test.assertEqual(response.status_code, 200) data = response.data.decode('utf-8') self.assertBaseDateAndNumRuns(self.runs[1].execution_date, 42, data) self.assertRunIsNotInDropdown(self.runs[0], data) self.assertRunIsSelected(self.runs[1], data) self.assertRunIsInDropdownNotSelected(self.runs[2], data) self.assertRunIsInDropdownNotSelected(self.runs[3], data) def test_with_base_date_and_num_runs_and_execution_date_within(self): """ Tests graph view with base_date and num_runs and execution-date URL parameters. Scenario: change the base date and num runs and press "Go", the selected execution date is within the new range. Should only show dag runs older than base_date in the drop down. Should select the dag run with the execution date. Should set base date and num runs to submitted values. 
""" response = self.app.get( self.endpoint + '&base_date={}&num_runs=5&execution_date={}'.format( self.runs[2].execution_date.isoformat(), self.runs[3].execution_date.isoformat()) ) self.test.assertEqual(response.status_code, 200) data = response.data.decode('utf-8') self.assertBaseDateAndNumRuns(self.runs[2].execution_date, 5, data) self.assertRunIsNotInDropdown(self.runs[0], data) self.assertRunIsNotInDropdown(self.runs[1], data) self.assertRunIsInDropdownNotSelected(self.runs[2], data) self.assertRunIsSelected(self.runs[3], data) class TestGraphView(unittest.TestCase): GRAPH_ENDPOINT = '/admin/airflow/graph?dag_id={dag_id}'.format( dag_id=ViewWithDateTimeAndNumRunsAndDagRunsFormTester.DAG_ID ) @classmethod def setUpClass(cls): super(TestGraphView, cls).setUpClass() def setUp(self): super(TestGraphView, self).setUp() self.tester = ViewWithDateTimeAndNumRunsAndDagRunsFormTester( self, self.GRAPH_ENDPOINT) self.tester.setUp() def tearDown(self): self.tester.tearDown() super(TestGraphView, self).tearDown() @classmethod def tearDownClass(cls): super(TestGraphView, cls).tearDownClass() def test_dt_nr_dr_form_default_parameters(self): self.tester.test_with_default_parameters() def test_dt_nr_dr_form_with_execution_date_parameter_only(self): self.tester.test_with_execution_date_parameter_only() def test_dt_nr_dr_form_with_base_date_and_num_runs_parmeters_only(self): self.tester.test_with_base_date_and_num_runs_parmeters_only() def test_dt_nr_dr_form_with_base_date_and_num_runs_and_execution_date_outside(self): self.tester.test_with_base_date_and_num_runs_and_execution_date_outside() def test_dt_nr_dr_form_with_base_date_and_num_runs_and_execution_date_within(self): self.tester.test_with_base_date_and_num_runs_and_execution_date_within() class TestGanttView(unittest.TestCase): GANTT_ENDPOINT = '/admin/airflow/gantt?dag_id={dag_id}'.format( dag_id=ViewWithDateTimeAndNumRunsAndDagRunsFormTester.DAG_ID ) @classmethod def setUpClass(cls): super(TestGanttView, cls).setUpClass() def setUp(self): super(TestGanttView, self).setUp() self.tester = ViewWithDateTimeAndNumRunsAndDagRunsFormTester( self, self.GANTT_ENDPOINT) self.tester.setUp() def tearDown(self): self.tester.tearDown() super(TestGanttView, self).tearDown() @classmethod def tearDownClass(cls): super(TestGanttView, cls).tearDownClass() def test_dt_nr_dr_form_default_parameters(self): self.tester.test_with_default_parameters() def test_dt_nr_dr_form_with_execution_date_parameter_only(self): self.tester.test_with_execution_date_parameter_only() def test_dt_nr_dr_form_with_base_date_and_num_runs_parmeters_only(self): self.tester.test_with_base_date_and_num_runs_parmeters_only() def test_dt_nr_dr_form_with_base_date_and_num_runs_and_execution_date_outside(self): self.tester.test_with_base_date_and_num_runs_and_execution_date_outside() def test_dt_nr_dr_form_with_base_date_and_num_runs_and_execution_date_within(self): self.tester.test_with_base_date_and_num_runs_and_execution_date_within() if __name__ == '__main__': unittest.main()
CloverHealth/airflow
tests/www/test_views.py
Python
apache-2.0
28,088
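A condensed, hedged sketch of the test pattern the classes above repeat: build the Flask app with application.create_app(testing=True), clear WTF_CSRF_METHODS, post form data to the Flask-Admin create endpoint, and assert against the SQLAlchemy session. TestVariableCreateSmoke and its payload are illustrative and not part of the Airflow test suite.

import unittest

from airflow import configuration, models
from airflow.settings import Session
from airflow.www import app as application


class TestVariableCreateSmoke(unittest.TestCase):
    CREATE_ENDPOINT = '/admin/variable/new/?url=/admin/variable/'

    def setUp(self):
        configuration.load_test_config()
        app = application.create_app(testing=True)
        app.config['WTF_CSRF_METHODS'] = []   # skip CSRF checks in tests
        self.app = app.test_client()
        self.session = Session()

    def tearDown(self):
        self.session.query(models.Variable).delete()
        self.session.commit()
        self.session.close()

    def test_create_variable(self):
        response = self.app.post(
            self.CREATE_ENDPOINT,
            data={'key': 'smoke_key', 'val': 'smoke_val', 'is_encrypted': True},
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.session.query(models.Variable).count(), 1)


if __name__ == '__main__':
    unittest.main()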
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for preprocess_utils.""" import numpy as np import tensorflow as tf from tensorflow.python.framework import errors from deeplab.core import preprocess_utils class PreprocessUtilsTest(tf.test.TestCase): def testNoFlipWhenProbIsZero(self): numpy_image = np.dstack([[[5., 6.], [9., 0.]], [[4., 3.], [3., 5.]]]) image = tf.convert_to_tensor(numpy_image) with self.test_session(): actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=0) self.assertAllEqual(numpy_image, actual.eval()) self.assertAllEqual(False, is_flipped.eval()) actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=1) self.assertAllEqual(numpy_image, actual.eval()) self.assertAllEqual(False, is_flipped.eval()) actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=2) self.assertAllEqual(numpy_image, actual.eval()) self.assertAllEqual(False, is_flipped.eval()) def testFlipWhenProbIsOne(self): numpy_image = np.dstack([[[5., 6.], [9., 0.]], [[4., 3.], [3., 5.]]]) dim0_flipped = np.dstack([[[9., 0.], [5., 6.]], [[3., 5.], [4., 3.]]]) dim1_flipped = np.dstack([[[6., 5.], [0., 9.]], [[3., 4.], [5., 3.]]]) dim2_flipped = np.dstack([[[4., 3.], [3., 5.]], [[5., 6.], [9., 0.]]]) image = tf.convert_to_tensor(numpy_image) with self.test_session(): actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=0) self.assertAllEqual(dim0_flipped, actual.eval()) self.assertAllEqual(True, is_flipped.eval()) actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=1) self.assertAllEqual(dim1_flipped, actual.eval()) self.assertAllEqual(True, is_flipped.eval()) actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=2) self.assertAllEqual(dim2_flipped, actual.eval()) self.assertAllEqual(True, is_flipped.eval()) def testFlipMultipleImagesConsistentlyWhenProbIsOne(self): numpy_image = np.dstack([[[5., 6.], [9., 0.]], [[4., 3.], [3., 5.]]]) numpy_label = np.dstack([[[0., 1.], [2., 3.]]]) image_dim1_flipped = np.dstack([[[6., 5.], [0., 9.]], [[3., 4.], [5., 3.]]]) label_dim1_flipped = np.dstack([[[1., 0.], [3., 2.]]]) image = tf.convert_to_tensor(numpy_image) label = tf.convert_to_tensor(numpy_label) with self.test_session() as sess: image, label, is_flipped = preprocess_utils.flip_dim( [image, label], prob=1, dim=1) actual_image, actual_label = sess.run([image, label]) self.assertAllEqual(image_dim1_flipped, actual_image) self.assertAllEqual(label_dim1_flipped, actual_label) self.assertEqual(True, is_flipped.eval()) def testReturnRandomFlipsOnMultipleEvals(self): numpy_image = np.dstack([[[5., 6.], [9., 0.]], [[4., 3.], [3., 5.]]]) dim1_flipped = np.dstack([[[6., 5.], [0., 9.]], [[3., 4.], [5., 3.]]]) image = tf.convert_to_tensor(numpy_image) tf.set_random_seed(53) with self.test_session() as sess: actual, is_flipped = preprocess_utils.flip_dim( [image], prob=0.5, dim=1) actual_image, actual_is_flipped = 
sess.run([actual, is_flipped]) self.assertAllEqual(numpy_image, actual_image) self.assertEqual(False, actual_is_flipped) actual_image, actual_is_flipped = sess.run([actual, is_flipped]) self.assertAllEqual(dim1_flipped, actual_image) self.assertEqual(True, actual_is_flipped) def testReturnCorrectCropOfSingleImage(self): np.random.seed(0) height, width = 10, 20 image = np.random.randint(0, 256, size=(height, width, 3)) crop_height, crop_width = 2, 4 image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) [cropped] = preprocess_utils.random_crop([image_placeholder], crop_height, crop_width) with self.test_session(): cropped_image = cropped.eval(feed_dict={image_placeholder: image}) # Ensure we can find the cropped image in the original: is_found = False for x in range(0, width - crop_width + 1): for y in range(0, height - crop_height + 1): if np.isclose(image[y:y+crop_height, x:x+crop_width, :], cropped_image).all(): is_found = True break self.assertTrue(is_found) def testRandomCropMaintainsNumberOfChannels(self): np.random.seed(0) crop_height, crop_width = 10, 20 image = np.random.randint(0, 256, size=(100, 200, 3)) tf.set_random_seed(37) image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) [cropped] = preprocess_utils.random_crop( [image_placeholder], crop_height, crop_width) with self.test_session(): cropped_image = cropped.eval(feed_dict={image_placeholder: image}) self.assertTupleEqual(cropped_image.shape, (crop_height, crop_width, 3)) def testReturnDifferentCropAreasOnTwoEvals(self): tf.set_random_seed(0) crop_height, crop_width = 2, 3 image = np.random.randint(0, 256, size=(100, 200, 3)) image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) [cropped] = preprocess_utils.random_crop( [image_placeholder], crop_height, crop_width) with self.test_session(): crop0 = cropped.eval(feed_dict={image_placeholder: image}) crop1 = cropped.eval(feed_dict={image_placeholder: image}) self.assertFalse(np.isclose(crop0, crop1).all()) def testReturnConsistenCropsOfImagesInTheList(self): tf.set_random_seed(0) height, width = 10, 20 crop_height, crop_width = 2, 3 labels = np.linspace(0, height * width-1, height * width) labels = labels.reshape((height, width, 1)) image = np.tile(labels, (1, 1, 3)) image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) label_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1)) [cropped_image, cropped_label] = preprocess_utils.random_crop( [image_placeholder, label_placeholder], crop_height, crop_width) with self.test_session() as sess: cropped_image, cropped_labels = sess.run([cropped_image, cropped_label], feed_dict={ image_placeholder: image, label_placeholder: labels}) for i in range(3): self.assertAllEqual(cropped_image[:, :, i], cropped_labels.squeeze()) def testDieOnRandomCropWhenImagesWithDifferentWidth(self): crop_height, crop_width = 2, 3 image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3)) image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1)) cropped = preprocess_utils.random_crop( [image1, image2], crop_height, crop_width) with self.test_session() as sess: with self.assertRaises(errors.InvalidArgumentError): sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3), image2: np.random.rand(4, 6, 1)}) def testDieOnRandomCropWhenImagesWithDifferentHeight(self): crop_height, crop_width = 2, 3 image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3)) image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1)) cropped = 
preprocess_utils.random_crop( [image1, image2], crop_height, crop_width) with self.test_session() as sess: with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, 'Wrong height for tensor'): sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3), image2: np.random.rand(3, 5, 1)}) def testDieOnRandomCropWhenCropSizeIsGreaterThanImage(self): crop_height, crop_width = 5, 9 image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3)) image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1)) cropped = preprocess_utils.random_crop( [image1, image2], crop_height, crop_width) with self.test_session() as sess: with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, 'Crop size greater than the image size.'): sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3), image2: np.random.rand(4, 5, 1)}) def testReturnPaddedImageWithNonZeroPadValue(self): for dtype in [np.int32, np.int64, np.float32, np.float64]: image = np.dstack([[[5, 6], [9, 0]], [[4, 3], [3, 5]]]).astype(dtype) expected_image = np.dstack([[[255, 255, 255, 255, 255], [255, 255, 255, 255, 255], [255, 5, 6, 255, 255], [255, 9, 0, 255, 255], [255, 255, 255, 255, 255]], [[255, 255, 255, 255, 255], [255, 255, 255, 255, 255], [255, 4, 3, 255, 255], [255, 3, 5, 255, 255], [255, 255, 255, 255, 255]]]).astype(dtype) with self.test_session(): image_placeholder = tf.placeholder(tf.float32) padded_image = preprocess_utils.pad_to_bounding_box( image_placeholder, 2, 1, 5, 5, 255) self.assertAllClose(padded_image.eval( feed_dict={image_placeholder: image}), expected_image) def testReturnOriginalImageWhenTargetSizeIsEqualToImageSize(self): image = np.dstack([[[5, 6], [9, 0]], [[4, 3], [3, 5]]]) with self.test_session(): image_placeholder = tf.placeholder(tf.float32) padded_image = preprocess_utils.pad_to_bounding_box( image_placeholder, 0, 0, 2, 2, 255) self.assertAllClose(padded_image.eval( feed_dict={image_placeholder: image}), image) def testDieOnTargetSizeGreaterThanImageSize(self): image = np.dstack([[[5, 6], [9, 0]], [[4, 3], [3, 5]]]) with self.test_session(): image_placeholder = tf.placeholder(tf.float32) padded_image = preprocess_utils.pad_to_bounding_box( image_placeholder, 0, 0, 2, 1, 255) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, 'target_width must be >= width'): padded_image.eval(feed_dict={image_placeholder: image}) padded_image = preprocess_utils.pad_to_bounding_box( image_placeholder, 0, 0, 1, 2, 255) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, 'target_height must be >= height'): padded_image.eval(feed_dict={image_placeholder: image}) def testDieIfTargetSizeNotPossibleWithGivenOffset(self): image = np.dstack([[[5, 6], [9, 0]], [[4, 3], [3, 5]]]) with self.test_session(): image_placeholder = tf.placeholder(tf.float32) padded_image = preprocess_utils.pad_to_bounding_box( image_placeholder, 3, 0, 4, 4, 255) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, 'target size not possible with the given target offsets'): padded_image.eval(feed_dict={image_placeholder: image}) def testDieIfImageTensorRankIsNotThree(self): image = np.vstack([[5, 6], [9, 0]]) with self.test_session(): image_placeholder = tf.placeholder(tf.float32) padded_image = preprocess_utils.pad_to_bounding_box( image_placeholder, 0, 0, 2, 2, 255) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, 'Wrong image tensor rank'): padded_image.eval(feed_dict={image_placeholder: image}) def testResizeTensorsToRange(self): 
test_shapes = [[60, 40], [15, 30], [15, 50]] min_size = 50 max_size = 100 factor = None expected_shape_list = [(75, 50, 3), (50, 100, 3), (30, 100, 3)] for i, test_shape in enumerate(test_shapes): image = tf.random_normal([test_shape[0], test_shape[1], 3]) new_tensor_list = preprocess_utils.resize_to_range( image=image, label=None, min_size=min_size, max_size=max_size, factor=factor, align_corners=True) with self.test_session() as session: resized_image = session.run(new_tensor_list[0]) self.assertEqual(resized_image.shape, expected_shape_list[i]) def testResizeTensorsToRangeWithFactor(self): test_shapes = [[60, 40], [15, 30], [15, 50]] min_size = 50 max_size = 98 factor = 8 expected_image_shape_list = [(81, 57, 3), (49, 97, 3), (33, 97, 3)] expected_label_shape_list = [(81, 57, 1), (49, 97, 1), (33, 97, 1)] for i, test_shape in enumerate(test_shapes): image = tf.random_normal([test_shape[0], test_shape[1], 3]) label = tf.random_normal([test_shape[0], test_shape[1], 1]) new_tensor_list = preprocess_utils.resize_to_range( image=image, label=label, min_size=min_size, max_size=max_size, factor=factor, align_corners=True) with self.test_session() as session: new_tensor_list = session.run(new_tensor_list) self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) def testResizeTensorsToRangeWithFactorAndLabelShapeCHW(self): test_shapes = [[60, 40], [15, 30], [15, 50]] min_size = 50 max_size = 98 factor = 8 expected_image_shape_list = [(81, 57, 3), (49, 97, 3), (33, 97, 3)] expected_label_shape_list = [(5, 81, 57), (5, 49, 97), (5, 33, 97)] for i, test_shape in enumerate(test_shapes): image = tf.random_normal([test_shape[0], test_shape[1], 3]) label = tf.random_normal([5, test_shape[0], test_shape[1]]) new_tensor_list = preprocess_utils.resize_to_range( image=image, label=label, min_size=min_size, max_size=max_size, factor=factor, align_corners=True, label_layout_is_chw=True) with self.test_session() as session: new_tensor_list = session.run(new_tensor_list) self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) def testResizeTensorsToRangeWithSimilarMinMaxSizes(self): test_shapes = [[60, 40], [15, 30], [15, 50]] # Values set so that one of the side = 97. min_size = 96 max_size = 98 factor = 8 expected_image_shape_list = [(97, 65, 3), (49, 97, 3), (33, 97, 3)] expected_label_shape_list = [(97, 65, 1), (49, 97, 1), (33, 97, 1)] for i, test_shape in enumerate(test_shapes): image = tf.random_normal([test_shape[0], test_shape[1], 3]) label = tf.random_normal([test_shape[0], test_shape[1], 1]) new_tensor_list = preprocess_utils.resize_to_range( image=image, label=label, min_size=min_size, max_size=max_size, factor=factor, align_corners=True) with self.test_session() as session: new_tensor_list = session.run(new_tensor_list) self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) if __name__ == '__main__': tf.test.main()
jiaphuan/models
research/deeplab/core/preprocess_utils_test.py
Python
apache-2.0
17,975
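A hedged usage sketch assembled only from the preprocess_utils calls exercised in the tests above: flip an image/label pair together with flip_dim, then take a matching random_crop, evaluated in a TF1 session. The tensor shapes, the crop size, and the main function are illustrative.

import numpy as np
import tensorflow as tf

from deeplab.core import preprocess_utils


def main():
  # Toy image/label pair; shapes are illustrative.
  image = tf.constant(np.random.rand(10, 20, 3), dtype=tf.float32)
  label = tf.constant(np.random.rand(10, 20, 1), dtype=tf.float32)

  # Flip image and label consistently along the width dimension half the time.
  flipped_image, flipped_label, is_flipped = preprocess_utils.flip_dim(
      [image, label], prob=0.5, dim=1)

  # Take the same random 4x6 crop from both tensors.
  cropped_image, cropped_label = preprocess_utils.random_crop(
      [flipped_image, flipped_label], 4, 6)

  with tf.Session() as sess:
    out_image, out_label, flipped = sess.run(
        [cropped_image, cropped_label, is_flipped])
    print('flipped:', flipped,
          'image shape:', out_image.shape,
          'label shape:', out_label.shape)


if __name__ == '__main__':
  main()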
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Environment configuration object for Estimators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import json import os import six from tensorflow.core.protobuf import config_pb2 from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import server_lib from tensorflow.python.util import compat_internal from tensorflow.python.util import function_utils from tensorflow.python.util.tf_export import tf_export _USE_DEFAULT = object() _VALID_DEVICE_FN_ARGS = set(['op']) # A list of the property names in RunConfig that the user is allowed to change. _DEFAULT_REPLACEABLE_LIST = [ 'model_dir', 'tf_random_seed', 'save_summary_steps', 'save_checkpoints_steps', 'save_checkpoints_secs', 'session_config', 'keep_checkpoint_max', 'keep_checkpoint_every_n_hours', 'log_step_count_steps', 'train_distribute', 'device_fn' ] _SAVE_CKPT_ERR = ( '`save_checkpoints_steps` and `save_checkpoints_secs` cannot be both set.' ) _TF_CONFIG_ENV = 'TF_CONFIG' _TASK_ENV_KEY = 'task' _TASK_TYPE_KEY = 'type' _TASK_ID_KEY = 'index' _CLUSTER_KEY = 'cluster' _SERVICE_KEY = 'service' _SESSION_MASTER_KEY = 'session_master' _EVAL_SESSION_MASTER_KEY = 'eval_session_master' _MODEL_DIR_KEY = 'model_dir' _LOCAL_MASTER = '' _GRPC_SCHEME = 'grpc://' def _get_session_master(cluster_spec, task_type, task_id, tf_config): """Returns the appropriate address for TensorFlow master. The order of precedence to deteremine the TF session master is as follows: 1. If `tf_session_master` is set in TF_CONFIG environment variable, takes it. 2. If the cluster has only one node, returns empty string ''. 3. Returns the grpc address according to the task type and id in the cluster. This is between-graph replication. Note: task_type and task_id must be validated. Typically, validated using `_validate_task_type_and_task_id`. Args: cluster_spec: A `ClusterSpec` instance. task_type: String. Task type for current node. task_id: Int. Task id for current node. tf_config: Dict. Python dict for the TF_CONFIG environment variable. Raises: RuntimeError: If `cluster_spec` is not set. """ if _SESSION_MASTER_KEY in tf_config: return tf_config[_SESSION_MASTER_KEY] if not cluster_spec: raise RuntimeError('Internal error: `_get_session_master` ' 'does not expect empty cluster_spec.') jobs = cluster_spec.jobs # If there is only one node in the cluster, do things locally by setting # master to ''. If a service or user sets TF_CONFIG with a single node, it's # more performant to use a direct master rather than an RPC service. if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1: return _LOCAL_MASTER # Lookup the master in cluster_spec using task_type and task_id, # if possible. 
addresses = cluster_spec.job_tasks(task_type) return _GRPC_SCHEME + addresses[task_id] def _get_eval_session_master(task_type, tf_config): """Returns the appropriate address for TensorFlow evaluation master.""" if task_type == TaskType.EVALUATOR: return tf_config.get(_EVAL_SESSION_MASTER_KEY, _LOCAL_MASTER) if _EVAL_SESSION_MASTER_KEY in tf_config: raise ValueError('Key ({}) should not be set for task type other than {}. ' 'Task type: {}'.format(_EVAL_SESSION_MASTER_KEY, TaskType.EVALUATOR, task_type)) return _LOCAL_MASTER def _count_ps(cluster_spec): """Counts the number of parameter servers in cluster_spec.""" if not cluster_spec: raise RuntimeError( 'Internal error: `_count_ps` does not expect empty cluster_spec.') return len(cluster_spec.as_dict().get(TaskType.PS, [])) def _count_worker(cluster_spec, chief_task_type): """Counts the number of workers (including chief) in cluster_spec.""" if not cluster_spec: raise RuntimeError( 'Internal error: `_count_worker` does not expect empty cluster_spec.') return (len(cluster_spec.as_dict().get(TaskType.WORKER, [])) + len(cluster_spec.as_dict().get(chief_task_type, []))) def _validate_service(service): """Validates the service key.""" if service is not None and not isinstance(service, dict): raise TypeError( 'If "service" is set in TF_CONFIG, it must be a dict. Given %s' % type(service)) return service def _validate_task_type_and_task_id(cluster_spec, task_env, chief_task_type): """Validates the task type and index in `task_env` according to cluster.""" if chief_task_type not in cluster_spec.jobs: raise ValueError( 'If "cluster" is set in TF_CONFIG, it must have one "%s" node.' % chief_task_type) if len(cluster_spec.job_tasks(chief_task_type)) > 1: raise ValueError( 'The "cluster" in TF_CONFIG must have only one "%s" node.' % chief_task_type) task_type = task_env.get(_TASK_TYPE_KEY, None) task_id = task_env.get(_TASK_ID_KEY, None) if not task_type: raise ValueError( 'If "cluster" is set in TF_CONFIG, task type must be set.') if task_id is None: raise ValueError( 'If "cluster" is set in TF_CONFIG, task index must be set.') task_id = int(task_id) # Check the task id bounds. Upper bound is not necessary as # - for evaluator, there is no upper bound. # - for non-evaluator, task id is upper bounded by the number of jobs in # cluster spec, which will be checked later (when retrieving the `master`) if task_id < 0: raise ValueError('Task index must be non-negative number.') # Evaluator is not part of the training cluster. if task_type == TaskType.EVALUATOR: return task_type, task_id if task_type not in cluster_spec.jobs: raise ValueError( '%s is not a valid task_type in the cluster_spec:\n' '%s\n\n' 'Note that these values may be coming from the TF_CONFIG environment ' 'variable.' % (task_type, cluster_spec)) addresses = cluster_spec.job_tasks(task_type) if not 0 <= task_id < len(addresses): raise ValueError( '%d is not a valid task_id for task_type %s in the cluster_spec:\n' '%s\n\n' 'Note that these values may be coming from the TF_CONFIG environment ' 'variable.' % (task_id, task_type, cluster_spec)) return task_type, task_id def _get_global_id_in_cluster( cluster_spec, task_type, task_id, chief_task_type): """Returns the global id in cluster.""" # Note: This is implementation details, which user should not rely on. # The first id is 0, which is always for the `chief` node. All other nodes, # except `ps`, are ordered alphabetical based on task type (alphabetically) # and task id (ascendingly). `ps` are ordered last. 
# Sort task names in cluster task_type_ordered_list = [chief_task_type] task_type_ordered_list.extend([ t for t in sorted(cluster_spec.jobs) if t != chief_task_type and t != TaskType.PS ]) if TaskType.PS in cluster_spec.jobs: task_type_ordered_list.append(TaskType.PS) next_global_id = 0 for t in task_type_ordered_list: if t == task_type: return next_global_id + task_id next_global_id += len(cluster_spec.job_tasks(t)) # This should never happen. raise RuntimeError('Internal Error: `task_type` ({}) is not in ' 'cluster_spec ({}).'.format(task_type, cluster_spec)) def _validate_save_ckpt_with_replaced_keys(new_copy, replaced_keys): """Validates the save ckpt properties.""" # Ensure one (and only one) of save_steps and save_secs is not None. # Also, if user sets one save ckpt property, say steps, the other one (secs) # should be set as None to improve usability. save_steps = new_copy.save_checkpoints_steps save_secs = new_copy.save_checkpoints_secs if ('save_checkpoints_steps' in replaced_keys and 'save_checkpoints_secs' in replaced_keys): # If user sets both properties explicitly, we need to error out if both # are set or neither of them are set. if save_steps is not None and save_secs is not None: raise ValueError(_SAVE_CKPT_ERR) elif 'save_checkpoints_steps' in replaced_keys and save_steps is not None: new_copy._save_checkpoints_secs = None # pylint: disable=protected-access elif 'save_checkpoints_secs' in replaced_keys and save_secs is not None: new_copy._save_checkpoints_steps = None # pylint: disable=protected-access def _validate_properties(run_config): """Validates the properties.""" def _validate(property_name, cond, message): property_value = getattr(run_config, property_name) if property_value is not None and not cond(property_value): raise ValueError(message) _validate('model_dir', lambda dir: dir, message='model_dir should be non-empty') _validate('save_summary_steps', lambda steps: steps >= 0, message='save_summary_steps should be >= 0') _validate('save_checkpoints_steps', lambda steps: steps >= 0, message='save_checkpoints_steps should be >= 0') _validate('save_checkpoints_secs', lambda secs: secs >= 0, message='save_checkpoints_secs should be >= 0') _validate('session_config', lambda sc: isinstance(sc, config_pb2.ConfigProto), message='session_config must be instance of ConfigProto') _validate('keep_checkpoint_max', lambda keep_max: keep_max >= 0, message='keep_checkpoint_max should be >= 0') _validate('keep_checkpoint_every_n_hours', lambda keep_hours: keep_hours > 0, message='keep_checkpoint_every_n_hours should be > 0') _validate('log_step_count_steps', lambda num_steps: num_steps > 0, message='log_step_count_steps should be > 0') _validate('tf_random_seed', lambda seed: isinstance(seed, six.integer_types), message='tf_random_seed must be integer.') _validate('device_fn', lambda device_fn: six.callable(device_fn) and set(function_utils.fn_args(device_fn)) == _VALID_DEVICE_FN_ARGS, message='device_fn must be callable with exactly' ' one argument "op".') class TaskType(object): MASTER = 'master' PS = 'ps' WORKER = 'worker' CHIEF = 'chief' EVALUATOR = 'evaluator' @tf_export('estimator.RunConfig') class RunConfig(object): """This class specifies the configurations for an `Estimator` run.""" def __init__(self, model_dir=None, tf_random_seed=None, save_summary_steps=100, save_checkpoints_steps=_USE_DEFAULT, save_checkpoints_secs=_USE_DEFAULT, session_config=None, keep_checkpoint_max=5, keep_checkpoint_every_n_hours=10000, log_step_count_steps=100, train_distribute=None, 
device_fn=None): """Constructs a RunConfig. All distributed training related properties `cluster_spec`, `is_chief`, `master` , `num_worker_replicas`, `num_ps_replicas`, `task_id`, and `task_type` are set based on the `TF_CONFIG` environment variable, if the pertinent information is present. The `TF_CONFIG` environment variable is a JSON object with attributes: `cluster` and `task`. `cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from `server_lib.py`, mapping task types (usually one of the `TaskType` enums) to a list of task addresses. `task` has two attributes: `type` and `index`, where `type` can be any of the task types in `cluster`. When `TF_CONFIG` contains said information, the following properties are set on this class: * `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}. If present, must have one and only one node in the `chief` attribute of `cluster_spec`. * `task_type` is set to `TF_CONFIG['task']['type']`. Must set if `cluster_spec` is present; must be `worker` (the default value) if `cluster_spec` is not set. * `task_id` is set to `TF_CONFIG['task']['index']`. Must set if `cluster_spec` is present; must be 0 (the default value) if `cluster_spec` is not set. * `master` is determined by looking up `task_type` and `task_id` in the `cluster_spec`. Defaults to ''. * `num_ps_replicas` is set by counting the number of nodes listed in the `ps` attribute of `cluster_spec`. Defaults to 0. * `num_worker_replicas` is set by counting the number of nodes listed in the `worker` and `chief` attributes of `cluster_spec`. Defaults to 1. * `is_chief` is determined based on `task_type` and `cluster`. There is a special node with `task_type` as `evaluator`, which is not part of the (training) `cluster_spec`. It handles the distributed evaluation job. 
Example of non-chief node: ``` cluster = {'chief': ['host0:2222'], 'ps': ['host1:2222', 'host2:2222'], 'worker': ['host3:2222', 'host4:2222', 'host5:2222']} os.environ['TF_CONFIG'] = json.dumps( {'cluster': cluster, 'task': {'type': 'worker', 'index': 1}}) config = RunConfig() assert config.master == 'host4:2222' assert config.task_id == 1 assert config.num_ps_replicas == 2 assert config.num_worker_replicas == 4 assert config.cluster_spec == server_lib.ClusterSpec(cluster) assert config.task_type == 'worker' assert not config.is_chief ``` Example of chief node: ``` cluster = {'chief': ['host0:2222'], 'ps': ['host1:2222', 'host2:2222'], 'worker': ['host3:2222', 'host4:2222', 'host5:2222']} os.environ['TF_CONFIG'] = json.dumps( {'cluster': cluster, 'task': {'type': 'chief', 'index': 0}}) config = RunConfig() assert config.master == 'host0:2222' assert config.task_id == 0 assert config.num_ps_replicas == 2 assert config.num_worker_replicas == 4 assert config.cluster_spec == server_lib.ClusterSpec(cluster) assert config.task_type == 'chief' assert config.is_chief ``` Example of evaluator node (evaluator is not part of training cluster): ``` cluster = {'chief': ['host0:2222'], 'ps': ['host1:2222', 'host2:2222'], 'worker': ['host3:2222', 'host4:2222', 'host5:2222']} os.environ['TF_CONFIG'] = json.dumps( {'cluster': cluster, 'task': {'type': 'evaluator', 'index': 0}}) config = RunConfig() assert config.master == '' assert config.evaluator_master == '' assert config.task_id == 0 assert config.num_ps_replicas == 0 assert config.num_worker_replicas == 0 assert config.cluster_spec == {} assert config.task_type == 'evaluator' assert not config.is_chief ``` N.B.: If `save_checkpoints_steps` or `save_checkpoints_secs` is set, `keep_checkpoint_max` might need to be adjusted accordingly, especially in distributed training. For example, setting `save_checkpoints_secs` as 60 without adjusting `keep_checkpoint_max` (defaults to 5) leads to situation that checkpoint would be garbage collected after 5 minutes. In distributed training, the evaluation job starts asynchronously and might fail to load or find the checkpoint due to race condition. Args: model_dir: directory where model parameters, graph, etc are saved. If `PathLike` object, the path will be resolved. If `None`, will use a default value set by the Estimator. tf_random_seed: Random seed for TensorFlow initializers. Setting this value allows consistency between reruns. save_summary_steps: Save summaries every this many steps. save_checkpoints_steps: Save checkpoints every this many steps. Can not be specified with `save_checkpoints_secs`. save_checkpoints_secs: Save checkpoints every this many seconds. Can not be specified with `save_checkpoints_steps`. Defaults to 600 seconds if both `save_checkpoints_steps` and `save_checkpoints_secs` are not set in constructor. If both `save_checkpoints_steps` and `save_checkpoints_secs` are None, then checkpoints are disabled. session_config: a ConfigProto used to set session parameters, or None. keep_checkpoint_max: The maximum number of recent checkpoint files to keep. As new files are created, older files are deleted. If None or 0, all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent checkpoint files are kept.) keep_checkpoint_every_n_hours: Number of hours between each checkpoint to be saved. The default value of 10,000 hours effectively disables the feature. log_step_count_steps: The frequency, in number of global steps, that the global step/sec and the loss will be logged during training. 
train_distribute: an optional instance of `tf.contrib.distribute.DistributionStrategy`. If specified, then Estimator will distribute the user's model during training, according to the policy specified by that strategy. device_fn: A callable invoked for every `Operation` that takes the `Operation` and returns the device string. If `None`, defaults to the device function returned by `tf.train.replica_device_setter` with round-robin strategy. Raises: ValueError: If both `save_checkpoints_steps` and `save_checkpoints_secs` are set. """ if (save_checkpoints_steps == _USE_DEFAULT and save_checkpoints_secs == _USE_DEFAULT): save_checkpoints_steps = None save_checkpoints_secs = 600 elif save_checkpoints_secs == _USE_DEFAULT: save_checkpoints_secs = None elif save_checkpoints_steps == _USE_DEFAULT: save_checkpoints_steps = None elif (save_checkpoints_steps is not None and save_checkpoints_secs is not None): raise ValueError(_SAVE_CKPT_ERR) tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV, '{}')) if tf_config: logging.info('TF_CONFIG environment variable: %s', tf_config) model_dir = _get_model_dir(tf_config, compat_internal.path_to_str(model_dir)) RunConfig._replace( self, allowed_properties_list=_DEFAULT_REPLACEABLE_LIST, model_dir=model_dir, tf_random_seed=tf_random_seed, save_summary_steps=save_summary_steps, save_checkpoints_steps=save_checkpoints_steps, save_checkpoints_secs=save_checkpoints_secs, session_config=session_config, keep_checkpoint_max=keep_checkpoint_max, keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours, log_step_count_steps=log_step_count_steps, train_distribute=train_distribute, device_fn=device_fn) self._init_distributed_setting_from_environment_var(tf_config) def _init_distributed_setting_from_environment_var(self, tf_config): """Initialize distributed properties based on `tf_config`.""" self._service = _validate_service(tf_config.get(_SERVICE_KEY)) self._cluster_spec = server_lib.ClusterSpec(tf_config.get(_CLUSTER_KEY, {})) task_env = tf_config.get(_TASK_ENV_KEY, {}) if self._cluster_spec and TaskType.MASTER in self._cluster_spec.jobs: return self._init_distributed_setting_from_environment_var_with_master( tf_config) if self._cluster_spec: # Distributed mode. self._task_type, self._task_id = _validate_task_type_and_task_id( self._cluster_spec, task_env, TaskType.CHIEF) self._evaluation_master = _get_eval_session_master( self._task_type, tf_config) if self._task_type != TaskType.EVALUATOR: self._master = _get_session_master(self._cluster_spec, self._task_type, self._task_id, tf_config) self._num_ps_replicas = _count_ps(self._cluster_spec) self._num_worker_replicas = _count_worker( self._cluster_spec, chief_task_type=TaskType.CHIEF) self._global_id_in_cluster = _get_global_id_in_cluster( self._cluster_spec, self._task_type, self._task_id, chief_task_type=TaskType.CHIEF) else: # Evaluator is not part of the training cluster. self._cluster_spec = server_lib.ClusterSpec({}) self._master = _LOCAL_MASTER self._num_ps_replicas = 0 self._num_worker_replicas = 0 self._global_id_in_cluster = None # undefined self._is_chief = self._task_type == TaskType.CHIEF else: # Local mode. 
self._task_type = task_env.get(_TASK_TYPE_KEY, TaskType.WORKER) self._task_id = int(task_env.get(_TASK_ID_KEY, 0)) self._global_id_in_cluster = 0 if self._task_type != TaskType.WORKER: raise ValueError( 'If "cluster" is not set in TF_CONFIG, task type must be WORKER.') if self._task_id != 0: raise ValueError( 'If "cluster" is not set in TF_CONFIG, task index must be 0.') self._master = tf_config.get(_SESSION_MASTER_KEY, _LOCAL_MASTER) self._evaluation_master = tf_config.get(_EVAL_SESSION_MASTER_KEY, _LOCAL_MASTER) self._is_chief = True self._num_ps_replicas = 0 self._num_worker_replicas = 1 def _init_distributed_setting_from_environment_var_with_master(self, tf_config): """Initialize distributed properties for legacy cluster with `master`.""" # There is no tech reason, why user cannot have chief and master in the same # cluster, but it is super confusing (which is really the chief?). So, block # this case. if TaskType.CHIEF in self._cluster_spec.jobs: raise ValueError('If `master` node exists in `cluster`, job ' '`chief` is not supported.') task_env = tf_config.get(_TASK_ENV_KEY, {}) self._task_type, self._task_id = _validate_task_type_and_task_id( self._cluster_spec, task_env, TaskType.MASTER) if self._task_type == TaskType.EVALUATOR: raise ValueError('If `master` node exists in `cluster`, task_type ' '`evaluator` is not supported.') self._global_id_in_cluster = _get_global_id_in_cluster( self._cluster_spec, self._task_type, self._task_id, chief_task_type=TaskType.MASTER) self._master = _get_session_master(self._cluster_spec, self._task_type, self._task_id, tf_config) self._evaluation_master = _get_eval_session_master(self._task_type, tf_config) self._num_ps_replicas = _count_ps(self._cluster_spec) self._num_worker_replicas = _count_worker( self._cluster_spec, chief_task_type=TaskType.MASTER) self._is_chief = self._task_type == TaskType.MASTER @property def cluster_spec(self): return self._cluster_spec @property def device_fn(self): """Returns the device_fn. If device_fn is not `None`, it overrides the default device function used in `Estimator`. Otherwise the default one is used. """ return self._device_fn @property def evaluation_master(self): return self._evaluation_master @property def is_chief(self): return self._is_chief @property def master(self): return self._master @property def num_ps_replicas(self): return self._num_ps_replicas @property def num_worker_replicas(self): return self._num_worker_replicas @property def task_id(self): return self._task_id @property def global_id_in_cluster(self): """The global id in the training cluster. All global ids in the training cluster are assigned from an increasing sequence of consecutive integers. The first id is 0. Note: Task id (the property field `task_id`) is tracking the index of the node among all nodes with the SAME task type. For example, given the cluster definition as follows: ``` cluster = {'chief': ['host0:2222'], 'ps': ['host1:2222', 'host2:2222'], 'worker': ['host3:2222', 'host4:2222', 'host5:2222']} ``` Nodes with task type `worker` can have id 0, 1, 2. Nodes with task type `ps` can have id, 0, 1. So, `task_id` is not unique, but the pair (`task_type`, `task_id`) can uniquely determine a node in the cluster. Global id, i.e., this field, is tracking the index of the node among ALL nodes in the cluster. It is uniquely assigned. 
For example, for the cluster spec given above, the global ids are assigned as: ``` task_type | task_id | global_id -------------------------------- chief | 0 | 0 worker | 0 | 1 worker | 1 | 2 worker | 2 | 3 ps | 0 | 4 ps | 1 | 5 ``` Returns: An integer id. """ return self._global_id_in_cluster @property def task_type(self): return self._task_type @property def tf_random_seed(self): return self._tf_random_seed @property def save_summary_steps(self): return self._save_summary_steps @property def save_checkpoints_secs(self): return self._save_checkpoints_secs @property def session_config(self): return self._session_config @property def save_checkpoints_steps(self): return self._save_checkpoints_steps @property def keep_checkpoint_max(self): return self._keep_checkpoint_max @property def keep_checkpoint_every_n_hours(self): return self._keep_checkpoint_every_n_hours @property def log_step_count_steps(self): return self._log_step_count_steps @property def model_dir(self): return self._model_dir @property def service(self): """Returns the platform defined (in TF_CONFIG) service dict.""" return self._service @property def train_distribute(self): """Returns the optional `tf.contrib.distribute.DistributionStrategy` object. """ return self._train_distribute def replace(self, **kwargs): """Returns a new instance of `RunConfig` replacing specified properties. Only the properties in the following list are allowed to be replaced: - `model_dir`, - `tf_random_seed`, - `save_summary_steps`, - `save_checkpoints_steps`, - `save_checkpoints_secs`, - `session_config`, - `keep_checkpoint_max`, - `keep_checkpoint_every_n_hours`, - `log_step_count_steps`, - `train_distribute`, - `device_fn`. In addition, either `save_checkpoints_steps` or `save_checkpoints_secs` can be set (should not be both). Args: **kwargs: keyword named properties with new values. Raises: ValueError: If any property name in `kwargs` does not exist or is not allowed to be replaced, or both `save_checkpoints_steps` and `save_checkpoints_secs` are set. Returns: a new instance of `RunConfig`. """ return RunConfig._replace( copy.deepcopy(self), allowed_properties_list=_DEFAULT_REPLACEABLE_LIST, **kwargs) @staticmethod def _replace(config, allowed_properties_list=None, **kwargs): """See `replace`. N.B.: This implementation assumes that for key named "foo", the underlying property the RunConfig holds is "_foo" (with one leading underscore). Args: config: The RunConfig to replace the values of. allowed_properties_list: The property name list allowed to be replaced. **kwargs: keyword named properties with new values. Raises: ValueError: If any property name in `kwargs` does not exist or is not allowed to be replaced, or both `save_checkpoints_steps` and `save_checkpoints_secs` are set. Returns: a new instance of `RunConfig`. """ allowed_properties_list = allowed_properties_list or [] for key, new_value in six.iteritems(kwargs): if key in allowed_properties_list: setattr(config, '_' + key, new_value) continue raise ValueError( 'Replacing {} is not supported. Allowed properties are {}.'.format( key, allowed_properties_list)) _validate_save_ckpt_with_replaced_keys(config, kwargs.keys()) _validate_properties(config) return config def _get_model_dir(tf_config, model_dir): """Returns `model_dir` based user provided `tf_config` or `model_dir`.""" # pylint: disable=g-explicit-bool-comparison # Empty string is treated as False in Python condition check, which triggers # some confusing error messages. 
For example, 'a or b' returns None if a is '' # and b is None. `None` is allowed for model_dir but '' is not allowed. Here, # explicitly check empty string to provide clear error message. if model_dir == '': raise ValueError('model_dir should be non-empty.') model_dir_in_tf_config = tf_config.get('model_dir') if model_dir_in_tf_config == '': raise ValueError('model_dir in TF_CONFIG should be non-empty.') if model_dir_in_tf_config: if model_dir and model_dir_in_tf_config != model_dir: raise ValueError( '`model_dir` provided in RunConfig construct, if set, ' 'must have the same value as the model_dir in TF_CONFIG. ' 'model_dir: {}\nTF_CONFIG["model_dir"]: {}.\n'.format( model_dir, model_dir_in_tf_config)) logging.info('Using model_dir in TF_CONFIG: %s', model_dir_in_tf_config) return model_dir or model_dir_in_tf_config
nburn42/tensorflow
tensorflow/python/estimator/run_config.py
Python
apache-2.0
30,509
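The RunConfig docstring above walks through how TF_CONFIG determines the session master, task type, replica counts and chief-ness. A minimal, framework-free sketch of that derivation, assuming the documented cluster/task layout (the helper name and return dict below are my own, not TensorFlow's API):

```python
import json
import os


def parse_tf_config(environ=os.environ):
    """Derive basic distributed properties from a TF_CONFIG-style environment.

    Mirrors the behaviour documented above: chief/worker tasks get their
    session master from the cluster spec, the evaluator gets an empty one,
    and 'chief' counts as one of the worker replicas.
    """
    tf_config = json.loads(environ.get('TF_CONFIG', '{}'))
    cluster = tf_config.get('cluster', {})
    task = tf_config.get('task', {})
    task_type = task.get('type', 'worker')
    task_id = int(task.get('index', 0))

    if not cluster or task_type == 'evaluator':
        # Local mode, or an evaluator that is not part of the training cluster.
        return dict(master='', task_type=task_type, task_id=task_id,
                    num_ps_replicas=0, num_worker_replicas=0,
                    is_chief=bool(not cluster and task_type != 'evaluator'))

    return dict(master=cluster[task_type][task_id],
                task_type=task_type, task_id=task_id,
                num_ps_replicas=len(cluster.get('ps', [])),
                num_worker_replicas=(len(cluster.get('worker', [])) +
                                     len(cluster.get('chief', []))),
                is_chief=task_type == 'chief')


if __name__ == '__main__':
    os.environ['TF_CONFIG'] = json.dumps({
        'cluster': {'chief': ['host0:2222'],
                    'ps': ['host1:2222', 'host2:2222'],
                    'worker': ['host3:2222', 'host4:2222', 'host5:2222']},
        'task': {'type': 'worker', 'index': 1}})
    config = parse_tf_config()
    assert config['master'] == 'host4:2222'
    assert config['num_worker_replicas'] == 4
    assert not config['is_chief']
```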
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import uuid

from st2common import log as logging
from st2common.runners.base import ActionRunner
from st2common.runners.base import get_metadata as get_runner_metadata
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
import st2common.util.jsonify as jsonify

__all__ = ["NoopRunner", "get_runner", "get_metadata"]

LOG = logging.getLogger(__name__)


class NoopRunner(ActionRunner):
    """
    Runner which does absolutely nothing. No-op action.
    """

    KEYS_TO_TRANSFORM = ["stdout", "stderr"]

    def __init__(self, runner_id):
        super(NoopRunner, self).__init__(runner_id=runner_id)

    def pre_run(self):
        super(NoopRunner, self).pre_run()

    def run(self, action_parameters):
        LOG.info("Executing action via NoopRunner: %s", self.runner_id)
        LOG.info(
            "[Action info] name: %s, Id: %s", self.action_name, str(self.execution_id)
        )

        result = {
            "failed": False,
            "succeeded": True,
            "return_code": 0,
        }

        status = LIVEACTION_STATUS_SUCCEEDED
        return (status, jsonify.json_loads(result, NoopRunner.KEYS_TO_TRANSFORM), None)


def get_runner():
    return NoopRunner(str(uuid.uuid4()))


def get_metadata():
    return get_runner_metadata("noop_runner")[0]
StackStorm/st2
contrib/runners/noop_runner/noop_runner/noop_runner.py
Python
apache-2.0
1,964
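The noop runner above shows the minimal contract a runner fulfils: pre_run(), then run() returning a (status, result, context) triple. A dependency-free sketch of that shape, with stand-ins for st2's base class and constants (all names here are illustrative, not st2's API):

```python
import uuid

# Stand-in for st2common's status constant (assumed here for illustration).
LIVEACTION_STATUS_SUCCEEDED = "succeeded"


class MinimalRunner(object):
    """Illustrates the runner contract: pre_run(), then run() -> (status, result, context)."""

    def __init__(self, runner_id):
        self.runner_id = runner_id

    def pre_run(self):
        # Real runners validate runner/action parameters here.
        pass

    def run(self, action_parameters):
        result = {"failed": False, "succeeded": True, "return_code": 0}
        return (LIVEACTION_STATUS_SUCCEEDED, result, None)


def get_runner():
    return MinimalRunner(str(uuid.uuid4()))


if __name__ == "__main__":
    runner = get_runner()
    runner.pre_run()
    status, result, context = runner.run({})
    print(status, result)
```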
from urllib.parse import urljoin from django.db import models from django.urls import resolve, reverse from django.utils.translation import gettext from django.utils.translation import gettext_lazy as _ from mezzanine.conf import settings from mezzanine.core.models import ( ContentTyped, Displayable, Orderable, RichText, wrapped_manager, ) from mezzanine.pages.fields import MenusField from mezzanine.pages.managers import PageManager from mezzanine.utils.sites import override_current_site_id from mezzanine.utils.urls import path_to_slug class BasePage(Orderable, Displayable): """ Exists solely to store ``PageManager`` as the main manager. If it's defined on ``Page``, a concrete model, then each ``Page`` subclass loses the custom manager. """ objects = wrapped_manager(PageManager) class Meta: abstract = True class Page(BasePage, ContentTyped): """ A page in the page tree. This is the base class that custom content types need to subclass. """ parent = models.ForeignKey( "Page", on_delete=models.CASCADE, blank=True, null=True, related_name="children" ) in_menus = MenusField(_("Show in menus"), blank=True, null=True) titles = models.CharField(editable=False, max_length=1000, null=True) login_required = models.BooleanField( _("Login required"), default=False, help_text=_("If checked, only logged in users can view this page"), ) class Meta: verbose_name = _("Page") verbose_name_plural = _("Pages") ordering = ("titles",) order_with_respect_to = "parent" def __str__(self): return self.titles def get_absolute_url(self): """ URL for a page - for ``Link`` page types, simply return its slug since these don't have an actual URL pattern. Also handle the special case of the homepage being a page object. """ slug = self.slug if self.content_model == "link": # Ensure the URL is absolute. slug = urljoin("/", slug) return slug if slug == "/": return reverse("home") else: return reverse("page", kwargs={"slug": slug}) def save(self, *args, **kwargs): """ Create the titles field using the titles up the parent chain and set the initial value for ordering. """ self.set_content_model() titles = [self.title] parent = self.parent while parent is not None: titles.insert(0, parent.title) parent = parent.parent self.titles = " / ".join(titles) super().save(*args, **kwargs) def description_from_content(self): """ Override ``Displayable.description_from_content`` to load the content type subclass for when ``save`` is called directly on a ``Page`` instance, so that all fields defined on the subclass are available for generating the description. """ if self.__class__ == Page: if self.content_model: return self.get_content_model().description_from_content() return super().description_from_content() def get_ascendants(self, for_user=None): """ Returns the ascendants for the page. Ascendants are cached in the ``_ascendants`` attribute, which is populated when the page is loaded via ``Page.objects.with_ascendants_for_slug``. """ if not self.parent_id: # No parents at all, bail out. return [] if not hasattr(self, "_ascendants"): # _ascendants has not been either page.get_ascendants or # Page.objects.assigned by with_ascendants_for_slug, so # run it to see if we can retrieve all parents in a single # query, which will occur if the slugs for each of the pages # have not been customised. 
if self.slug: kwargs = {"for_user": for_user} with override_current_site_id(self.site_id): pages = Page.objects.with_ascendants_for_slug(self.slug, **kwargs) self._ascendants = pages[0]._ascendants else: self._ascendants = [] if not self._ascendants: # Page has a parent but with_ascendants_for_slug failed to # find them due to custom slugs, so retrieve the parents # recursively. child = self while child.parent_id is not None: self._ascendants.append(child.parent) child = child.parent return self._ascendants def get_slug(self): """ Recursively build the slug from the chain of parents. """ slug = super().get_slug() if self.parent is not None: return f"{self.parent.slug}/{slug}" return slug def set_slug(self, new_slug): """ Changes this page's slug, and all other pages whose slugs start with this page's slug. """ slug_prefix = "%s/" % self.slug for page in Page.objects.filter(slug__startswith=slug_prefix): if not page.overridden(): page.slug = new_slug + page.slug[len(self.slug) :] page.save() self.slug = new_slug self.save() def set_parent(self, new_parent): """ Change the parent of this page, changing this page's slug to match the new parent if necessary. """ self_slug = self.slug old_parent_slug = self.parent.slug if self.parent else "" new_parent_slug = new_parent.slug if new_parent else "" # Make sure setting the new parent won't cause a cycle. parent = new_parent while parent is not None: if parent.pk == self.pk: raise AttributeError( "You can't set a page or its child as" " a parent." ) parent = parent.parent self.parent = new_parent self.save() if self_slug and not ( self.content_model == "link" and self.slug.startswith("http") ): if not old_parent_slug: self.set_slug("/".join((new_parent_slug, self.slug))) elif self.slug.startswith(old_parent_slug): new_slug = self.slug.replace(old_parent_slug, new_parent_slug, 1) self.set_slug(new_slug.strip("/")) def overridden(self): """ Returns ``True`` if the page's slug has an explicitly defined urlpattern and is therefore considered to be overridden. """ from mezzanine.pages.views import page page_url = reverse("page", kwargs={"slug": self.slug}) resolved_view = resolve(page_url)[0] return resolved_view != page def can_add(self, request): """ Dynamic ``add`` permission for content types to override. """ return self.slug != "/" def can_change(self, request): """ Dynamic ``change`` permission for content types to override. """ return True def can_delete(self, request): """ Dynamic ``delete`` permission for content types to override. """ return True def can_move(self, request, new_parent): """ Dynamic ``move`` permission for content types to override. Controls whether a given page move in the page tree is permitted. When the permission is denied, raises a ``PageMoveException`` with a single argument (message explaining the reason). """ pass def set_helpers(self, context): """ Called from the ``page_menu`` template tag and assigns a handful of properties based on the current page, that are used within the various types of menus. """ current_page = context["_current_page"] current_page_id = getattr(current_page, "id", None) current_parent_id = getattr(current_page, "parent_id", None) # Am I a child of the current page? self.is_current_child = self.parent_id == current_page_id self.is_child = self.is_current_child # Backward compatibility # Is my parent the same as the current page's? self.is_current_sibling = self.parent_id == current_parent_id # Am I the current page? 
try: request = context["request"] except KeyError: # No request context, most likely when tests are run. self.is_current = False else: self.is_current = self.slug == path_to_slug(request.path_info) # Is the current page me or any page up the parent chain? def is_c_or_a(page_id): parent_id = context.get("_parent_page_ids", {}).get(page_id) return self.id == page_id or (parent_id and is_c_or_a(parent_id)) self.is_current_or_ascendant = lambda: bool(is_c_or_a(current_page_id)) self.is_current_parent = self.id == current_parent_id # Am I a primary page? self.is_primary = self.parent_id is None # What's an ID I can use in HTML? self.html_id = self.slug.replace("/", "-") # Default branch level - gets assigned in the page_menu tag. self.branch_level = 0 def in_menu_template(self, template_name): if self.in_menus is not None: for i, l, t in settings.PAGE_MENU_TEMPLATES: if not str(i) in self.in_menus and t == template_name: return False return True def get_template_name(self): """ Subclasses can implement this to provide a template to use in ``mezzanine.pages.views.page``. """ return None class RichTextPage(Page, RichText): """ Implements the default type of page with a single Rich Text content field. """ class Meta: verbose_name = _("Rich text page") verbose_name_plural = _("Rich text pages") class Link(Page): """ A general content type for creating external links in the page menu. """ class Meta: verbose_name = _("Link") verbose_name_plural = _("Links") class PageMoveException(Exception): """ Raised by ``can_move()`` when the move permission is denied. Takes an optinal single argument: a message explaining the denial. """ def __init__(self, msg=None): self.msg = msg or gettext("Illegal page move") def __str__(self): return self.msg __unicode__ = __str__
stephenmcd/mezzanine
mezzanine/pages/models.py
Python
bsd-2-clause
10,686
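Page.save() above builds the `titles` breadcrumb by walking the parent chain, and set_parent()/set_slug() rewrite slugs by replacing the old parent prefix. A standalone sketch of that chain-walking and prefix-replacement logic, using plain objects rather than Django models (all names below are illustrative):

```python
class Node(object):
    """Tiny stand-in for a Page: just a title, a slug and an optional parent."""

    def __init__(self, title, slug, parent=None):
        self.title, self.slug, self.parent = title, slug, parent


def breadcrumb_titles(page):
    """Join titles up the parent chain, as Page.save() does for `titles`."""
    titles = [page.title]
    parent = page.parent
    while parent is not None:
        titles.insert(0, parent.title)
        parent = parent.parent
    return " / ".join(titles)


def reparent_slug(slug, old_parent_slug, new_parent_slug):
    """Swap the leading parent prefix, as Page.set_parent() does before set_slug()."""
    if old_parent_slug and slug.startswith(old_parent_slug):
        return slug.replace(old_parent_slug, new_parent_slug, 1).strip("/")
    return slug


docs = Node("Docs", "docs")
install = Node("Install", "docs/install", parent=docs)
assert breadcrumb_titles(install) == "Docs / Install"
assert reparent_slug(install.slug, "docs", "guides") == "guides/install"
```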
# -*- coding: utf-8 -*- # # Django Classy Settings documentation build configuration file, created by # sphinx-quickstart on Thu Jul 24 13:53:10 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Django Classy Settings' copyright = u'2014, Curtis Maloney' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'classic' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'DjangoClassySettingsdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'DjangoClassySettings.tex', u'Django Classy Settings Documentation', u'Curtis Maloney', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'djangoclassysettings', u'Django Classy Settings Documentation', [u'Curtis Maloney'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'DjangoClassySettings', u'Django Classy Settings Documentation', u'Curtis Maloney', 'DjangoClassySettings', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
pombredanne/django-classy-settings
docs/conf.py
Python
bsd-2-clause
8,289
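Most of the conf.py above is sphinx-quickstart boilerplate with the defaults commented out; only a handful of settings drive a minimal HTML build. A pared-down sketch of such a configuration, keeping the project's own values where they appear above:

```python
# Minimal Sphinx conf.py: the settings a basic HTML build actually relies on.
project = 'Django Classy Settings'
copyright = '2014, Curtis Maloney'
version = '1.0'              # short X.Y version
release = '1.0.0'            # full version, including alpha/beta/rc tags
master_doc = 'index'         # the root toctree document
source_suffix = '.rst'
extensions = []              # e.g. add 'sphinx.ext.autodoc' when needed
templates_path = ['_templates']
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'classic'
html_static_path = ['_static']
htmlhelp_basename = 'DjangoClassySettingsdoc'
```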
from __future__ import absolute_import from django.utils import timezone from mock import Mock, patch from sentry.testutils import AcceptanceTestCase class OrganizationRateLimitsTest(AcceptanceTestCase): def setUp(self): super(OrganizationRateLimitsTest, self).setUp() self.user = self.create_user('foo@example.com') self.org = self.create_organization( name='Rowdy Tiger', owner=None, ) self.team = self.create_team( organization=self.org, name='Mariachi Band' ) self.project = self.create_project( organization=self.org, team=self.team, name='Bengal', ) self.create_member( user=self.user, organization=self.org, role='owner', teams=[self.team], ) self.login_as(self.user) self.path = '/organizations/{}/rate-limits/'.format(self.org.slug) @patch('sentry.app.quotas.get_organization_quota', Mock(return_value=100)) def test_with_rate_limits(self): self.project.update(first_event=timezone.now()) self.browser.get(self.path) self.browser.wait_until('.organization-home') self.browser.wait_until_not('.loading-indicator') self.browser.snapshot('organization rate limits with quota') assert self.browser.element_exists('.ref-rate-limit-editor') @patch('sentry.app.quotas.get_organization_quota', Mock(return_value=0)) def test_without_rate_limits(self): self.project.update(first_event=timezone.now()) self.browser.get(self.path) self.browser.wait_until('.organization-home') self.browser.wait_until_not('.loading-indicator') self.browser.snapshot('organization rate limits without quota') assert self.browser.element_exists('.ref-no-rate-limits')
alexm92/sentry
tests/acceptance/test_organization_rate_limits.py
Python
bsd-3-clause
1,897
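The acceptance test above exercises both branches of the rate-limits page by patching the quota lookup with a Mock return value. A self-contained sketch of that patching pattern, with a made-up view function standing in for the real page (nothing below is Sentry's API):

```python
from unittest import TestCase, main
from unittest.mock import Mock, patch


def get_organization_quota(org):
    """Stand-in for the quota lookup that the real test patches out."""
    raise RuntimeError("would hit the real quota service")


def render_rate_limits_page(org):
    quota = get_organization_quota(org)
    return "editor" if quota > 0 else "no-rate-limits"


class RateLimitsPageTest(TestCase):
    @patch(__name__ + ".get_organization_quota", Mock(return_value=100))
    def test_with_quota(self):
        self.assertEqual(render_rate_limits_page("rowdy-tiger"), "editor")

    @patch(__name__ + ".get_organization_quota", Mock(return_value=0))
    def test_without_quota(self):
        self.assertEqual(render_rate_limits_page("rowdy-tiger"), "no-rate-limits")


if __name__ == "__main__":
    main()
```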
""" """ __docformat__ = "restructuredtext" # Se agrega tabla tagcounts para sumarizar los tags de un usuario migration = [ ("""\ CREATE TABLE tagcounts ( user_id INTEGER NOT NULL, tag_id INTEGER NOT NULL, count INTEGER NOT NULL, PRIMARY KEY (user_id, tag_id), CONSTRAINT tagcounts_user_id_fk FOREIGN KEY(user_id) REFERENCES users (id) ON DELETE cascade, CONSTRAINT tagcounts_tag_id_fk FOREIGN KEY(tag_id) REFERENCES tags (id) ON DELETE restrict ); """, """\ DROP TABLE tagcounts; """), ]
santisiri/popego
popego/popserver/popserver/db/migration_027.py
Python
bsd-3-clause
593
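The migration above is just a list of (upgrade_sql, downgrade_sql) pairs. A sketch of how such pairs might be applied or rolled back over a DB-API connection; the runner below is an assumption for illustration, not the project's actual migration machinery, and it uses SQLite purely as a stand-in:

```python
import sqlite3


def apply_migration(conn, migration, downgrade=False):
    """Run the up (index 0) or down (index 1) SQL of each pair, in one transaction."""
    index = 1 if downgrade else 0
    steps = reversed(migration) if downgrade else migration
    with conn:
        for pair in steps:
            conn.executescript(pair[index])


demo_migration = [
    ("CREATE TABLE demo (id INTEGER PRIMARY KEY);",
     "DROP TABLE demo;"),
]

conn = sqlite3.connect(":memory:")
apply_migration(conn, demo_migration)                  # upgrade
apply_migration(conn, demo_migration, downgrade=True)  # rollback
```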
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Denis Engemann <denis.engemann@gmail.com> # # License: BSD (3-clause) import os.path as op from nose.tools import assert_true from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_equal) from nose.tools import assert_raises import numpy as np from scipy import linalg import warnings import itertools as itt from mne.cov import (regularize, whiten_evoked, _estimate_rank_meeg_cov, _auto_low_rank_model, _apply_scaling_cov, _undo_scaling_cov, prepare_noise_cov) from mne import (read_cov, write_cov, Epochs, merge_events, find_events, compute_raw_covariance, compute_covariance, read_evokeds, compute_proj_raw, pick_channels_cov, pick_channels, pick_types, pick_info, make_ad_hoc_cov) from mne.io import read_raw_fif, RawArray, read_info from mne.tests.common import assert_naming, assert_snr from mne.utils import (_TempDir, slow_test, requires_sklearn_0_15, run_tests_if_main) from mne.io.proc_history import _get_sss_rank from mne.io.pick import channel_type, _picks_by_type warnings.simplefilter('always') # enable b/c these tests throw warnings base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') cov_fname = op.join(base_dir, 'test-cov.fif') cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz') cov_km_fname = op.join(base_dir, 'test-km-cov.fif') raw_fname = op.join(base_dir, 'test_raw.fif') ave_fname = op.join(base_dir, 'test-ave.fif') erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif') hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif') def test_cov_mismatch(): """Test estimation with MEG<->Head mismatch.""" raw = read_raw_fif(raw_fname, add_eeg_ref=False).crop(0, 5).load_data() events = find_events(raw, stim_channel='STI 014') raw.pick_channels(raw.ch_names[:5]) raw.add_proj([], remove_existing=True) epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True, add_eeg_ref=False) for kind in ('shift', 'None'): epochs_2 = epochs.copy() # This should be fine with warnings.catch_warnings(record=True) as w: compute_covariance([epochs, epochs_2]) assert_equal(len(w), 0) if kind == 'shift': epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001 else: # None epochs_2.info['dev_head_t'] = None assert_raises(ValueError, compute_covariance, [epochs, epochs_2]) assert_equal(len(w), 0) compute_covariance([epochs, epochs_2], on_mismatch='ignore') assert_equal(len(w), 0) compute_covariance([epochs, epochs_2], on_mismatch='warn') assert_raises(ValueError, compute_covariance, epochs, on_mismatch='x') assert_true(any('transform mismatch' in str(ww.message) for ww in w)) # This should work epochs.info['dev_head_t'] = None epochs_2.info['dev_head_t'] = None compute_covariance([epochs, epochs_2], method=None) def test_cov_order(): """Test covariance ordering.""" info = read_info(raw_fname) # add MEG channel with low enough index number to affect EEG if # order is incorrect info['bads'] += ['MEG 0113'] ch_names = [info['ch_names'][pick] for pick in pick_types(info, meg=False, eeg=True)] cov = read_cov(cov_fname) # no avg ref present warning prepare_noise_cov(cov, info, ch_names, verbose='error') def test_ad_hoc_cov(): """Test ad hoc cov creation and I/O.""" tempdir = _TempDir() out_fname = op.join(tempdir, 'test-cov.fif') evoked = read_evokeds(ave_fname)[0] cov = make_ad_hoc_cov(evoked.info) cov.save(out_fname) assert_true('Covariance' in repr(cov)) cov2 = read_cov(out_fname) assert_array_almost_equal(cov['data'], cov2['data']) def test_io_cov(): """Test IO for noise covariance matrices.""" 
tempdir = _TempDir() cov = read_cov(cov_fname) cov['method'] = 'empirical' cov['loglik'] = -np.inf cov.save(op.join(tempdir, 'test-cov.fif')) cov2 = read_cov(op.join(tempdir, 'test-cov.fif')) assert_array_almost_equal(cov.data, cov2.data) assert_equal(cov['method'], cov2['method']) assert_equal(cov['loglik'], cov2['loglik']) assert_true('Covariance' in repr(cov)) cov2 = read_cov(cov_gz_fname) assert_array_almost_equal(cov.data, cov2.data) cov2.save(op.join(tempdir, 'test-cov.fif.gz')) cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz')) assert_array_almost_equal(cov.data, cov2.data) cov['bads'] = ['EEG 039'] cov_sel = pick_channels_cov(cov, exclude=cov['bads']) assert_true(cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))) assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])) cov_sel.save(op.join(tempdir, 'test-cov.fif')) cov2 = read_cov(cov_gz_fname) assert_array_almost_equal(cov.data, cov2.data) cov2.save(op.join(tempdir, 'test-cov.fif.gz')) cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz')) assert_array_almost_equal(cov.data, cov2.data) # test warnings on bad filenames with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') cov_badname = op.join(tempdir, 'test-bad-name.fif.gz') write_cov(cov_badname, cov) read_cov(cov_badname) assert_naming(w, 'test_cov.py', 2) def test_cov_estimation_on_raw(): """Test estimation from raw (typically empty room).""" tempdir = _TempDir() raw = read_raw_fif(raw_fname, preload=True, add_eeg_ref=False) cov_mne = read_cov(erm_cov_fname) # The pure-string uses the more efficient numpy-based method, the # the list gets triaged to compute_covariance (should be equivalent # but use more memory) for method in (None, ['empirical']): # None is cast to 'empirical' cov = compute_raw_covariance(raw, tstep=None, method=method) assert_equal(cov.ch_names, cov_mne.ch_names) assert_equal(cov.nfree, cov_mne.nfree) assert_snr(cov.data, cov_mne.data, 1e4) cov = compute_raw_covariance(raw, method=method) # tstep=0.2 (default) assert_equal(cov.nfree, cov_mne.nfree - 119) # cutoff some samples assert_snr(cov.data, cov_mne.data, 1e2) # test IO when computation done in Python cov.save(op.join(tempdir, 'test-cov.fif')) # test saving cov_read = read_cov(op.join(tempdir, 'test-cov.fif')) assert_true(cov_read.ch_names == cov.ch_names) assert_true(cov_read.nfree == cov.nfree) assert_array_almost_equal(cov.data, cov_read.data) # test with a subset of channels picks = pick_channels(raw.ch_names, include=raw.ch_names[:5]) raw_pick = raw.copy().pick_channels( [raw.ch_names[pick] for pick in picks]) raw_pick.info.normalize_proj() cov = compute_raw_covariance(raw_pick, picks=picks, tstep=None, method=method) assert_true(cov_mne.ch_names[:5] == cov.ch_names) assert_snr(cov.data, cov_mne.data[picks][:, picks], 1e4) cov = compute_raw_covariance(raw_pick, picks=picks, method=method) assert_snr(cov.data, cov_mne.data[picks][:, picks], 90) # cutoff samps # make sure we get a warning with too short a segment raw_2 = read_raw_fif(raw_fname, add_eeg_ref=False).crop(0, 1, copy=False) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') cov = compute_raw_covariance(raw_2, method=method) assert_true(any('Too few samples' in str(ww.message) for ww in w)) # no epochs found due to rejection assert_raises(ValueError, compute_raw_covariance, raw, tstep=None, method='empirical', reject=dict(eog=200e-6)) # but this should work cov = compute_raw_covariance(raw.copy().crop(0, 10., copy=False), tstep=None, method=method, 
reject=dict(eog=1000e-6)) @slow_test @requires_sklearn_0_15 def test_cov_estimation_on_raw_reg(): """Test estimation from raw with regularization.""" raw = read_raw_fif(raw_fname, preload=True, add_eeg_ref=False) raw.info['sfreq'] /= 10. raw = RawArray(raw._data[:, ::10].copy(), raw.info) # decimate for speed cov_mne = read_cov(erm_cov_fname) with warnings.catch_warnings(record=True): # too few samples warnings.simplefilter('always') # XXX don't use "shrunk" here, for some reason it makes Travis 2.7 # hang... "diagonal_fixed" is much faster. Use long epochs for speed. cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed') assert_snr(cov.data, cov_mne.data, 5) @slow_test def test_cov_estimation_with_triggers(): """Test estimation from raw with triggers.""" tempdir = _TempDir() raw = read_raw_fif(raw_fname, preload=False, add_eeg_ref=False) raw.set_eeg_reference() events = find_events(raw, stim_channel='STI 014') event_ids = [1, 2, 3, 4] reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6) # cov with merged events and keep_sample_mean=True events_merged = merge_events(events, event_ids, 1234) epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True, add_eeg_ref=False) cov = compute_covariance(epochs, keep_sample_mean=True) cov_mne = read_cov(cov_km_fname) assert_true(cov_mne.ch_names == cov.ch_names) assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') / linalg.norm(cov.data, ord='fro')) < 0.005) # Test with tmin and tmax (different but not too much) cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01) assert_true(np.all(cov.data != cov_tmin_tmax.data)) assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') / linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05) # cov using a list of epochs and keep_sample_mean=True epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, add_eeg_ref=False) for ev_id in event_ids] cov2 = compute_covariance(epochs, keep_sample_mean=True) assert_array_almost_equal(cov.data, cov2.data) assert_true(cov.ch_names == cov2.ch_names) # cov with keep_sample_mean=False using a list of epochs cov = compute_covariance(epochs, keep_sample_mean=False) cov_mne = read_cov(cov_fname) assert_true(cov_mne.ch_names == cov.ch_names) assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') / linalg.norm(cov.data, ord='fro')) < 0.005) method_params = {'empirical': {'assume_centered': False}} assert_raises(ValueError, compute_covariance, epochs, keep_sample_mean=False, method_params=method_params) assert_raises(ValueError, compute_covariance, epochs, keep_sample_mean=False, method='factor_analysis') # test IO when computation done in Python cov.save(op.join(tempdir, 'test-cov.fif')) # test saving cov_read = read_cov(op.join(tempdir, 'test-cov.fif')) assert_true(cov_read.ch_names == cov.ch_names) assert_true(cov_read.nfree == cov.nfree) assert_true((linalg.norm(cov.data - cov_read.data, ord='fro') / linalg.norm(cov.data, ord='fro')) < 1e-5) # cov with list of epochs with different projectors epochs = [Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, add_eeg_ref=False), Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=False, reject=reject, add_eeg_ref=False)] # these should fail assert_raises(ValueError, compute_covariance, epochs) assert_raises(ValueError, compute_covariance, epochs, projs=None) # these should work, but 
won't be equal to above with warnings.catch_warnings(record=True) as w: # too few samples warning warnings.simplefilter('always') cov = compute_covariance(epochs, projs=epochs[0].info['projs']) cov = compute_covariance(epochs, projs=[]) assert_true(len(w) == 2) # test new dict support epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, add_eeg_ref=False) compute_covariance(epochs) def test_arithmetic_cov(): """Test arithmetic with noise covariance matrices.""" cov = read_cov(cov_fname) cov_sum = cov + cov assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree) assert_array_almost_equal(2 * cov.data, cov_sum.data) assert_true(cov.ch_names == cov_sum.ch_names) cov += cov assert_array_almost_equal(cov_sum.nfree, cov.nfree) assert_array_almost_equal(cov_sum.data, cov.data) assert_true(cov_sum.ch_names == cov.ch_names) def test_regularize_cov(): """Test cov regularization.""" raw = read_raw_fif(raw_fname, preload=False, add_eeg_ref=False) raw.info['bads'].append(raw.ch_names[0]) # test with bad channels noise_cov = read_cov(cov_fname) # Regularize noise cov reg_noise_cov = regularize(noise_cov, raw.info, mag=0.1, grad=0.1, eeg=0.1, proj=True, exclude='bads') assert_true(noise_cov['dim'] == reg_noise_cov['dim']) assert_true(noise_cov['data'].shape == reg_noise_cov['data'].shape) assert_true(np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08) def test_whiten_evoked(): """Test whitening of evoked data.""" evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0), proj=True) cov = read_cov(cov_fname) ########################################################################### # Show result picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False, exclude='bads') noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1, exclude='bads') evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True) whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0] mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1) assert_true(np.all(mean_baseline < 1.)) assert_true(np.all(mean_baseline > 0.2)) # degenerate cov_bad = pick_channels_cov(cov, include=evoked.ch_names[:10]) assert_raises(RuntimeError, whiten_evoked, evoked, cov_bad, picks) @slow_test def test_rank(): """Test cov rank estimation.""" # Test that our rank estimation works properly on a simple case evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0), proj=False) cov = read_cov(cov_fname) ch_names = [ch for ch in evoked.info['ch_names'] if '053' not in ch and ch.startswith('EEG')] cov = prepare_noise_cov(cov, evoked.info, ch_names, None) assert_equal(cov['eig'][0], 0.) 
# avg projector should set this to zero assert_true((cov['eig'][1:] > 0).all()) # all else should be > 0 # Now do some more comprehensive tests raw_sample = read_raw_fif(raw_fname, add_eeg_ref=False) raw_sss = read_raw_fif(hp_fif_fname, add_eeg_ref=False) raw_sss.add_proj(compute_proj_raw(raw_sss)) cov_sample = compute_raw_covariance(raw_sample) cov_sample_proj = compute_raw_covariance( raw_sample.copy().apply_proj()) cov_sss = compute_raw_covariance(raw_sss) cov_sss_proj = compute_raw_covariance( raw_sss.copy().apply_proj()) picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True) picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True) info_sample = pick_info(raw_sample.info, picks_all_sample) picks_stack_sample = [('eeg', pick_types(info_sample, meg=False, eeg=True))] picks_stack_sample += [('meg', pick_types(info_sample, meg=True))] picks_stack_sample += [('all', pick_types(info_sample, meg=True, eeg=True))] info_sss = pick_info(raw_sss.info, picks_all_sss) picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))] picks_stack_somato += [('meg', pick_types(info_sss, meg=True))] picks_stack_somato += [('all', pick_types(info_sss, meg=True, eeg=True))] iter_tests = list(itt.product( [(cov_sample, picks_stack_sample, info_sample), (cov_sample_proj, picks_stack_sample, info_sample), (cov_sss, picks_stack_somato, info_sss), (cov_sss_proj, picks_stack_somato, info_sss)], # sss [dict(mag=1e15, grad=1e13, eeg=1e6)] )) for (cov, picks_list, this_info), scalings in iter_tests: for ch_type, picks in picks_list: this_very_info = pick_info(this_info, picks) # compute subset of projs this_projs = [c['active'] and len(set(c['data']['col_names']) .intersection(set(this_very_info['ch_names']))) > 0 for c in cov['projs']] n_projs = sum(this_projs) # count channel types ch_types = [channel_type(this_very_info, idx) for idx in range(len(picks))] n_eeg, n_mag, n_grad = [ch_types.count(k) for k in ['eeg', 'mag', 'grad']] n_meg = n_mag + n_grad if ch_type in ('all', 'eeg'): n_projs_eeg = 1 else: n_projs_eeg = 0 # check sss if 'proc_history' in this_very_info: mf = this_very_info['proc_history'][0]['max_info'] n_free = _get_sss_rank(mf) if 'mag' not in ch_types and 'grad' not in ch_types: n_free = 0 # - n_projs XXX clarify expected_rank = n_free + n_eeg if n_projs > 0 and ch_type in ('all', 'eeg'): expected_rank -= n_projs_eeg else: expected_rank = n_meg + n_eeg - n_projs C = cov['data'][np.ix_(picks, picks)] est_rank = _estimate_rank_meeg_cov(C, this_very_info, scalings=scalings) assert_equal(expected_rank, est_rank) def test_cov_scaling(): """Test rescaling covs""" evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0), proj=True) cov = read_cov(cov_fname)['data'] cov2 = read_cov(cov_fname)['data'] assert_array_equal(cov, cov2) evoked.pick_channels([evoked.ch_names[k] for k in pick_types( evoked.info, meg=True, eeg=True )]) picks_list = _picks_by_type(evoked.info) scalings = dict(mag=1e15, grad=1e13, eeg=1e6) _apply_scaling_cov(cov2, picks_list, scalings=scalings) _apply_scaling_cov(cov, picks_list, scalings=scalings) assert_array_equal(cov, cov2) assert_true(cov.max() > 1) _undo_scaling_cov(cov2, picks_list, scalings=scalings) _undo_scaling_cov(cov, picks_list, scalings=scalings) assert_array_equal(cov, cov2) assert_true(cov.max() < 1) @requires_sklearn_0_15 def test_auto_low_rank(): """Test probabilistic low rank estimators.""" n_samples, n_features, rank = 400, 20, 10 sigma = 0.1 def get_data(n_samples, n_features, rank, sigma): rng = np.random.RandomState(42) W = 
rng.randn(n_features, n_features) X = rng.randn(n_samples, rank) U, _, _ = linalg.svd(W.copy()) X = np.dot(X, U[:, :rank].T) sigmas = sigma * rng.rand(n_features) + sigma / 2. X += rng.randn(n_samples, n_features) * sigmas return X X = get_data(n_samples=n_samples, n_features=n_features, rank=rank, sigma=sigma) method_params = {'iter_n_components': [9, 10, 11]} cv = 3 n_jobs = 1 mode = 'factor_analysis' rescale = 1e8 X *= rescale est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs, method_params=method_params, cv=cv) assert_equal(info['best'], rank) X = get_data(n_samples=n_samples, n_features=n_features, rank=rank, sigma=sigma) method_params = {'iter_n_components': [n_features + 5]} msg = ('You are trying to estimate %i components on matrix ' 'with %i features.') with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs, method_params=method_params, cv=cv) assert_equal(len(w), 1) assert_equal(msg % (n_features + 5, n_features), '%s' % w[0].message) method_params = {'iter_n_components': [n_features + 5]} assert_raises(ValueError, _auto_low_rank_model, X, mode='foo', n_jobs=n_jobs, method_params=method_params, cv=cv) @slow_test @requires_sklearn_0_15 def test_compute_covariance_auto_reg(): """Test automated regularization.""" raw = read_raw_fif(raw_fname, preload=True, add_eeg_ref=False) raw.resample(100, npad='auto') # much faster estimation events = find_events(raw, stim_channel='STI 014') event_ids = [1, 2, 3, 4] reject = dict(mag=4e-12) # cov with merged events and keep_sample_mean=True events_merged = merge_events(events, event_ids, 1234) # we need a few channels for numerical reasons in PCA/FA picks = pick_types(raw.info, meg='mag', eeg=False)[:10] raw.pick_channels([raw.ch_names[pick] for pick in picks]) raw.info.normalize_proj() epochs = Epochs( raw, events_merged, 1234, tmin=-0.2, tmax=0, baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True, add_eeg_ref=False) epochs = epochs.crop(None, 0)[:10] method_params = dict(factor_analysis=dict(iter_n_components=[3]), pca=dict(iter_n_components=[3])) covs = compute_covariance(epochs, method='auto', method_params=method_params, projs=True, return_estimators=True) logliks = [c['loglik'] for c in covs] assert_true(np.diff(logliks).max() <= 0) # descending order methods = ['empirical', 'factor_analysis', 'ledoit_wolf', 'pca'] cov3 = compute_covariance(epochs, method=methods, method_params=method_params, projs=None, return_estimators=True) assert_equal(set([c['method'] for c in cov3]), set(methods)) # invalid prespecified method assert_raises(ValueError, compute_covariance, epochs, method='pizza') # invalid scalings assert_raises(ValueError, compute_covariance, epochs, method='shrunk', scalings=dict(misc=123)) run_tests_if_main()
jniediek/mne-python
mne/tests/test_cov.py
Python
bsd-3-clause
23,537
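Many of the assertions above reduce to checking that an estimated covariance is symmetric, positive semi-definite, and close to a reference in a Frobenius-norm SNR sense. A numpy-only sketch of those sanity checks (the SNR definition is an assumption, not MNE's exact assert_snr):

```python
import numpy as np


def empirical_cov(X):
    """Empirical covariance of X with shape (n_samples, n_channels)."""
    Xc = X - X.mean(axis=0, keepdims=True)
    return Xc.T @ Xc / (len(X) - 1)


def check_cov(C, C_ref, min_snr=10.0):
    assert np.allclose(C, C.T), "covariance must be symmetric"
    assert np.all(np.linalg.eigvalsh(C) > -1e-12), "must be positive semi-definite"
    snr = np.linalg.norm(C_ref, "fro") / np.linalg.norm(C - C_ref, "fro")
    assert snr > min_snr, "estimate too far from the reference"


rng = np.random.RandomState(0)
X = rng.randn(10000, 5)          # true covariance is the identity
check_cov(empirical_cov(X), np.eye(5))
```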
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import base64 import json import unittest from extensions_paths import SERVER2 from file_system import StatInfo from future import Future from gitiles_file_system import (_CreateStatInfo, _ParseGitilesJson, GitilesFileSystem) from path_util import IsDirectory from test_file_system import TestFileSystem from test_util import ReadFile _BASE_URL = '' _REAL_DATA_DIR = 'chrome/common/extensions/docs/templates/public/extensions/' _TEST_DATA = (SERVER2, 'test_data', 'gitiles_file_system', 'public_extensions') # GitilesFileSystem expects file content to be encoded in base64. _TEST_FS = { 'test1.txt': base64.b64encode('test1'), 'dir1': { 'test2.txt': base64.b64encode('test2'), 'dir2': { 'test3.txt': base64.b64encode('test3') } } } class _Response(object): def __init__(self, content=''): self.content = content self.status_code = 200 class DownloadError(Exception): pass class _FakeGitilesFetcher(object): def __init__(self, fs): self._fs = fs def FetchAsync(self, url): def resolve(): assert '?' in url if url == _BASE_URL + '?format=JSON': return _Response(json.dumps({'commit': 'a_commit'})) path, fmt = url.split('?') # Fetch urls are of the form <base_url>/<path>. We only want <path>. path = path.split('/', 1)[1] if path == _REAL_DATA_DIR: return _Response(ReadFile(*_TEST_DATA)) # ALWAYS skip not found here. content = self._fs.Read((path,), skip_not_found=True).Get().get(path, None) if content is None: # GitilesFS expects a DownloadError if the file wasn't found. raise DownloadError # GitilesFS expects directory content as a JSON string. if 'JSON' in fmt: content = json.dumps({ 'entries': [{ # GitilesFS expects directory names to not have a trailing '/'. 'name': name.rstrip('/'), 'type': 'tree' if IsDirectory(name) else 'blob' } for name in content] }) return _Response(content) return Future(callback=resolve) class GitilesFileSystemTest(unittest.TestCase): def setUp(self): fetcher = _FakeGitilesFetcher(TestFileSystem(_TEST_FS)) self._gitiles_fs = GitilesFileSystem(fetcher, _BASE_URL, 'master', None) def testParseGitilesJson(self): test_json = '\n'.join([ ')]}\'', json.dumps({'commit': 'blah'}) ]) self.assertEqual(_ParseGitilesJson(test_json), {'commit': 'blah'}) def testCreateStatInfo(self): test_json = '\n'.join([ ')]}\'', json.dumps({ 'id': 'some_long_string', 'entries': [ { 'mode': 33188, 'type': 'blob', 'id': 'long_id', 'name': '.gitignore' }, { 'mode': 33188, 'type': 'blob', 'id': 'another_long_id', 'name': 'PRESUBMIT.py' }, { 'mode': 33188, 'type': 'blob', 'id': 'yali', 'name': 'README' } ] }) ]) expected_stat_info = StatInfo('some_long_string', { '.gitignore': 'long_id', 'PRESUBMIT.py': 'another_long_id', 'README': 'yali' }) self.assertEqual(_CreateStatInfo(test_json), expected_stat_info) def testRead(self): # Read a top-level file. f = self._gitiles_fs.Read(['test1.txt']) self.assertEqual(f.Get(), {'test1.txt': 'test1'}) # Read a top-level directory. f = self._gitiles_fs.Read(['dir1/']) self.assertEqual(f.Get(), {'dir1/': sorted(['test2.txt', 'dir2/'])}) # Read a nested file. f = self._gitiles_fs.Read(['dir1/test2.txt']) self.assertEqual(f.Get(), {'dir1/test2.txt': 'test2'}) # Read a nested directory. f = self._gitiles_fs.Read(['dir1/dir2/']) self.assertEqual(f.Get(), {'dir1/dir2/': ['test3.txt']}) # Read multiple paths. 
f = self._gitiles_fs.Read(['test1.txt', 'dir1/test2.txt']) self.assertEqual(f.Get(), {'test1.txt': 'test1', 'dir1/test2.txt': 'test2'}) # Test skip not found. f = self._gitiles_fs.Read(['fakefile'], skip_not_found=True) self.assertEqual(f.Get(), {}) def testGetCommitID(self): self.assertEqual(self._gitiles_fs.GetCommitID().Get(), 'a_commit') def testStat(self): self.assertEqual(self._gitiles_fs.Stat(_REAL_DATA_DIR).version, 'ec21e736a3f00db2c0580e3cf71d91951656caec') def testGetIdentity(self): # Test that file systems at different commits still have the same identity. other_gitiles_fs = GitilesFileSystem.Create(commit='abcdefghijklmnop') self.assertEqual(self._gitiles_fs.GetIdentity(), other_gitiles_fs.GetIdentity()) yet_another_gitiles_fs = GitilesFileSystem.Create(branch='different') self.assertNotEqual(self._gitiles_fs.GetIdentity(), yet_another_gitiles_fs.GetIdentity()) if __name__ == '__main__': unittest.main()
7kbird/chrome
chrome/common/extensions/docs/server2/gitiles_file_system_test.py
Python
bsd-3-clause
5,240
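Gitiles prefixes its JSON responses with the anti-XSSI marker `)]}'`, which is why the fixtures above are built with that leading line. A small sketch of stripping the prefix before parsing (my own helper, not the module's _ParseGitilesJson):

```python
import json

_XSSI_PREFIX = ")]}'"


def parse_gitiles_json(raw):
    """Strip the leading anti-XSSI line, then parse the remaining JSON."""
    first_line, _, rest = raw.partition("\n")
    if first_line.strip() != _XSSI_PREFIX:
        raise ValueError("missing Gitiles XSSI prefix")
    return json.loads(rest)


payload = ")]}'\n" + json.dumps({"commit": "a_commit"})
assert parse_gitiles_json(payload) == {"commit": "a_commit"}
```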
#!/usr/bin/env python # # Copyright (c) 2018, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import time import wpan from wpan import verify #----------------------------------------------------------------------------------------------------------------------- # Test description: Test behavior of "Inform Previous Parent" feature # # With this feature enabled, when a child attaches to a new parent, it will send # an IP message (with empty payload and mesh-local IP address as the source # address) to its previous parent. Upon receiving this message the previous # parent would immediately remove the child from its child table. Without this # feature, the child entry on previous parent would stay (and parent would # continue to queue messages for the sleepy child) until the child is timed out # and removed from child table. # # # Test topology: # # `child` is first attached to `parent2`. It is then forced to switch to `parent1` # # parent1--- parent2 # . / # \ / # . / # child # # This test verifies the behavior of the child and parents under this feature. # # Note that `OPENTHREAD_CONFIG_INFORM_PREVIOUS_PARENT_ON_REATTACH` is enabled in # `openthread-core-toranj.config.h` header file. # test_name = __file__[:-3] if __file__.endswith('.py') else __file__ print '-' * 120 print 'Starting \'{}\''.format(test_name) #----------------------------------------------------------------------------------------------------------------------- # Creating `wpan.Nodes` instances speedup = 4 wpan.Node.set_time_speedup_factor(speedup) parent1 = wpan.Node() parent2 = wpan.Node() child = wpan.Node() #----------------------------------------------------------------------------------------------------------------------- # Init all nodes wpan.Node.init_all_nodes() #----------------------------------------------------------------------------------------------------------------------- # Build network topology # # `child` is first attached to `parent2`. It is then forced to switch to `parent1`. # # parent1--- parent2 # . / # \ / # . 
/ # child # parent1.whitelist_node(parent2) parent2.whitelist_node(parent1) parent2.whitelist_node(child) parent1.form("inform-parent") parent2.join_node(parent1, wpan.JOIN_TYPE_ROUTER) child.join_node(parent2, wpan.JOIN_TYPE_SLEEPY_END_DEVICE); child.set(wpan.WPAN_POLL_INTERVAL, '300') #----------------------------------------------------------------------------------------------------------------------- # Test implementation # CHILD_SUPERVISION_CHECK_TIMEOUT = 2 PARENT_SUPERVISION_INTERVAL = 1 # Verify the `child` is attached to `parent2`. child_table = wpan.parse_list(parent2.get(wpan.WPAN_THREAD_CHILD_TABLE)) verify(len(child_table) == 1) # Remove the `child` from whitelist of `parent2` and add it to whitelist of `parent1` instead. parent1.whitelist_node(child) parent2.un_whitelist_node(child) # Enable supervision check on the `child` and also on `parent1`. child.set(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, str(CHILD_SUPERVISION_CHECK_TIMEOUT)) parent1.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL)) # Since child supervision is not enabled on `parent2` and the `child` is # removed from whitelist on `parent2`, after the supervision check timeout # the `child` should realize that it can no longer talk to its current # parent (`parent2`) and try to reattach. All re-attach attempts to `parent2` # should fail (due to whitelist) and cause the `child` to get detached and # search for a new parent and then attach to `parent1`. # # To verify that the `child` does get detached and attach to a new parent, we # monitor the number of state changes using wpantund property "stat:ncp". child_num_state_changes = len(wpan.parse_list(child.get("stat:ncp"))) def check_child_is_reattached(): verify(len(wpan.parse_list(child.get("stat:ncp"))) > child_num_state_changes) child_is_in_parent2_table = (len(wpan.parse_list(parent2.get(wpan.WPAN_THREAD_CHILD_TABLE)))==1) verify(child.is_associated()) wpan.verify_within(check_child_is_reattached, CHILD_SUPERVISION_CHECK_TIMEOUT / speedup + 5) # Verify that the `child` is now attached to `parent1` child_table = wpan.parse_list(parent1.get(wpan.WPAN_THREAD_CHILD_TABLE)) verify(len(child_table) == 1) # Finally verify that the `child` is removed from previous parent's child # table (which indicates that the `child` did indeed inform its previous # parent). def check_child_is_removed_from_parent2_table(): child_table = wpan.parse_list(parent2.get(wpan.WPAN_THREAD_CHILD_TABLE)) verify(len(child_table) == 0) wpan.verify_within(check_child_is_removed_from_parent2_table, 1) #----------------------------------------------------------------------------------------------------------------------- # Test finished wpan.Node.finalize_all_nodes() print '\'{}\' passed.'.format(test_name)
erja-gp/openthread
tests/toranj/test-019-inform-previous-parent.py
Python
bsd-3-clause
6,478
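The test above leans on `wpan.verify_within()` to poll a check function until it stops failing or a wait time (scaled by the speedup factor) expires. A minimal self-contained sketch of that polling pattern follows; `poll_until` is a hypothetical stand-in for the toranj helper, not its real implementation, and `check_child_is_reattached` refers to the function defined in the test.

import time

def poll_until(check_func, wait_time, interval=0.1):
    """Illustrative stand-in for wpan.verify_within(): keep calling
    check_func until it stops raising, or fail after wait_time seconds."""
    deadline = time.time() + wait_time
    while True:
        try:
            check_func()          # raises (like verify()) while the condition is unmet
            return                # condition met
        except Exception:
            if time.time() >= deadline:
                raise             # surface the last failure to the caller
            time.sleep(interval)

# Usage mirroring the test:
# poll_until(check_child_is_reattached, CHILD_SUPERVISION_CHECK_TIMEOUT / speedup + 5)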
from django.conf import settings
from django.db import connections
from django.dispatch import receiver, Signal
from django.template import context

template_rendered = Signal(providing_args=["template", "context"])
setting_changed = Signal(providing_args=["setting", "value"])


@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
    if kwargs['setting'] == 'USE_TZ' and settings.TIME_ZONE != 'UTC':
        USE_TZ, TIME_ZONE = kwargs['value'], settings.TIME_ZONE
    elif kwargs['setting'] == 'TIME_ZONE' and not settings.USE_TZ:
        USE_TZ, TIME_ZONE = settings.USE_TZ, kwargs['value']
    else:
        # no need to change the database connections' time zones
        return
    tz = 'UTC' if USE_TZ else TIME_ZONE
    for conn in connections.all():
        tz_sql = conn.ops.set_time_zone_sql()
        if tz_sql:
            conn.cursor().execute(tz_sql, [tz])


@receiver(setting_changed)
def clear_context_processors_cache(**kwargs):
    if kwargs['setting'] == 'TEMPLATE_CONTEXT_PROCESSORS':
        context._standard_context_processors = None
svn2github/django
django/test/signals.py
Python
bsd-3-clause
1,072
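The `setting_changed` signal defined above is what Django's test utilities fire when a setting is overridden. A brief sketch of hooking into it from a test, assuming a Django version where `override_settings` sends the signal and a configured settings module; the test class and module-level list are illustrative only.

from django.dispatch import receiver
from django.test import SimpleTestCase, override_settings
from django.test.signals import setting_changed

changed = []

@receiver(setting_changed)
def record_setting_change(sender, setting, value, **kwargs):
    # Collect every (name, new value) pair so the test can assert on it.
    changed.append((setting, value))

class TimeZoneSettingTests(SimpleTestCase):
    @override_settings(TIME_ZONE='Europe/Stockholm')
    def test_setting_changed_is_sent(self):
        self.assertIn(('TIME_ZONE', 'Europe/Stockholm'), changed)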
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module provides the tools used to internally run the astropy test suite from the installed astropy. It makes use of the `pytest`_ testing framework. """ import os import sys import pickle import warnings import functools import pytest from astropy.units import allclose as quantity_allclose # noqa: F401 from astropy.utils.decorators import deprecated from astropy.utils.exceptions import (AstropyDeprecationWarning, AstropyPendingDeprecationWarning) # For backward-compatibility with affiliated packages from .runner import TestRunner # pylint: disable=W0611 # noqa __all__ = ['assert_follows_unicode_guidelines', 'assert_quantity_allclose', 'check_pickling_recovery', 'pickle_protocol', 'generic_recursive_equality_test'] def _save_coverage(cov, result, rootdir, testing_path): """ This method is called after the tests have been run in coverage mode to cleanup and then save the coverage data and report. """ from astropy.utils.console import color_print if result != 0: return # The coverage report includes the full path to the temporary # directory, so we replace all the paths with the true source # path. Note that this will not work properly for packages that still # rely on 2to3. try: # Coverage 4.0: _harvest_data has been renamed to get_data, the # lines dict is private cov.get_data() except AttributeError: # Coverage < 4.0 cov._harvest_data() lines = cov.data.lines else: lines = cov.data._lines for key in list(lines.keys()): new_path = os.path.relpath( os.path.realpath(key), os.path.realpath(testing_path)) new_path = os.path.abspath( os.path.join(rootdir, new_path)) lines[new_path] = lines.pop(key) color_print('Saving coverage data in .coverage...', 'green') cov.save() color_print('Saving HTML coverage report in htmlcov...', 'green') cov.html_report(directory=os.path.join(rootdir, 'htmlcov')) @deprecated('5.1', alternative='pytest.raises') class raises: """ A decorator to mark that a test should raise a given exception. Use as follows:: @raises(ZeroDivisionError) def test_foo(): x = 1/0 This can also be used a context manager, in which case it is just an alias for the ``pytest.raises`` context manager (because the two have the same name this help avoid confusion by being flexible). .. note:: Usage of ``pytest.raises`` is preferred. """ # pep-8 naming exception -- this is a decorator class def __init__(self, exc): self._exc = exc self._ctx = None def __call__(self, func): @functools.wraps(func) def run_raises_test(*args, **kwargs): pytest.raises(self._exc, func, *args, **kwargs) return run_raises_test def __enter__(self): self._ctx = pytest.raises(self._exc) return self._ctx.__enter__() def __exit__(self, *exc_info): return self._ctx.__exit__(*exc_info) # TODO: Remove these when deprecation period of things deprecated in PR 12633 are removed. _deprecations_as_exceptions = False _include_astropy_deprecations = True _modules_to_ignore_on_import = set([ r'compiler', # A deprecated stdlib module used by pytest r'scipy', r'pygments', r'ipykernel', r'IPython', # deprecation warnings for async and await r'setuptools']) _warnings_to_ignore_entire_module = set([]) _warnings_to_ignore_by_pyver = { None: set([ # Python version agnostic # https://github.com/astropy/astropy/pull/7372 (r"Importing from numpy\.testing\.decorators is deprecated, " r"import from numpy\.testing instead\.", DeprecationWarning), # inspect raises this slightly different warning on Python 3.7. # Keeping it since e.g. 
lxml as of 3.8.0 is still calling getargspec() (r"inspect\.getargspec\(\) is deprecated, use " r"inspect\.signature\(\) or inspect\.getfullargspec\(\)", DeprecationWarning), # https://github.com/astropy/pytest-doctestplus/issues/29 (r"split\(\) requires a non-empty pattern match", FutureWarning), # Package resolution warning that we can do nothing about (r"can't resolve package from __spec__ or __package__, " r"falling back on __name__ and __path__", ImportWarning)]), (3, 7): set([ # Deprecation warning for collections.abc, fixed in Astropy but still # used in lxml, and maybe others (r"Using or importing the ABCs from 'collections'", DeprecationWarning)]) } @deprecated('5.1', alternative='https://docs.pytest.org/en/stable/warnings.html') def enable_deprecations_as_exceptions(include_astropy_deprecations=True, modules_to_ignore_on_import=[], warnings_to_ignore_entire_module=[], warnings_to_ignore_by_pyver={}): """ Turn on the feature that turns deprecations into exceptions. Parameters ---------- include_astropy_deprecations : bool If set to `True`, ``AstropyDeprecationWarning`` and ``AstropyPendingDeprecationWarning`` are also turned into exceptions. modules_to_ignore_on_import : list of str List of additional modules that generate deprecation warnings on import, which are to be ignored. By default, these are already included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``, and ``setuptools``. warnings_to_ignore_entire_module : list of str List of modules with deprecation warnings to ignore completely, not just during import. If ``include_astropy_deprecations=True`` is given, ``AstropyDeprecationWarning`` and ``AstropyPendingDeprecationWarning`` are also ignored for the modules. warnings_to_ignore_by_pyver : dict Dictionary mapping tuple of ``(major, minor)`` Python version to a list of ``(warning_message, warning_class)`` to ignore. Python version-agnostic warnings should be mapped to `None` key. This is in addition of those already ignored by default (see ``_warnings_to_ignore_by_pyver`` values). """ global _deprecations_as_exceptions _deprecations_as_exceptions = True global _include_astropy_deprecations _include_astropy_deprecations = include_astropy_deprecations global _modules_to_ignore_on_import _modules_to_ignore_on_import.update(modules_to_ignore_on_import) global _warnings_to_ignore_entire_module _warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module) global _warnings_to_ignore_by_pyver for key, val in warnings_to_ignore_by_pyver.items(): if key in _warnings_to_ignore_by_pyver: _warnings_to_ignore_by_pyver[key].update(val) else: _warnings_to_ignore_by_pyver[key] = set(val) @deprecated('5.1', alternative='https://docs.pytest.org/en/stable/warnings.html') def treat_deprecations_as_exceptions(): """ Turn all DeprecationWarnings (which indicate deprecated uses of Python itself or Numpy, but not within Astropy, where we use our own deprecation warning class) into exceptions so that we find out about them early. This completely resets the warning filters and any "already seen" warning state. """ # First, totally reset the warning state. The modules may change during # this iteration thus we copy the original state to a list to iterate # on. See https://github.com/astropy/astropy/pull/5513. 
for module in list(sys.modules.values()): try: del module.__warningregistry__ except Exception: pass if not _deprecations_as_exceptions: return warnings.resetwarnings() # Hide the next couple of DeprecationWarnings warnings.simplefilter('ignore', DeprecationWarning) # Here's the wrinkle: a couple of our third-party dependencies # (pytest and scipy) are still using deprecated features # themselves, and we'd like to ignore those. Fortunately, those # show up only at import time, so if we import those things *now*, # before we turn the warnings into exceptions, we're golden. for m in _modules_to_ignore_on_import: try: __import__(m) except ImportError: pass # Now, start over again with the warning filters warnings.resetwarnings() # Now, turn these warnings into exceptions _all_warns = [DeprecationWarning, FutureWarning, ImportWarning] # Only turn astropy deprecation warnings into exceptions if requested if _include_astropy_deprecations: _all_warns += [AstropyDeprecationWarning, AstropyPendingDeprecationWarning] for w in _all_warns: warnings.filterwarnings("error", ".*", w) # This ignores all specified warnings from given module(s), # not just on import, for use of Astropy affiliated packages. for m in _warnings_to_ignore_entire_module: for w in _all_warns: warnings.filterwarnings('ignore', category=w, module=m) # This ignores only specified warnings by Python version, if applicable. for v in _warnings_to_ignore_by_pyver: if v is None or sys.version_info[:2] == v: for s in _warnings_to_ignore_by_pyver[v]: warnings.filterwarnings("ignore", s[0], s[1]) @deprecated('5.1', alternative='pytest.warns') class catch_warnings(warnings.catch_warnings): """ A high-powered version of warnings.catch_warnings to use for testing and to make sure that there is no dependence on the order in which the tests are run. This completely blitzes any memory of any warnings that have appeared before so that all warnings will be caught and displayed. ``*args`` is a set of warning classes to collect. If no arguments are provided, all warnings are collected. Use as follows:: with catch_warnings(MyCustomWarning) as w: do.something.bad() assert len(w) > 0 .. note:: Usage of :ref:`pytest.warns <pytest:warns>` is preferred. """ def __init__(self, *classes): super().__init__(record=True) self.classes = classes def __enter__(self): warning_list = super().__enter__() treat_deprecations_as_exceptions() if len(self.classes) == 0: warnings.simplefilter('always') else: warnings.simplefilter('ignore') for cls in self.classes: warnings.simplefilter('always', cls) return warning_list def __exit__(self, type, value, traceback): treat_deprecations_as_exceptions() @deprecated('5.1', alternative='pytest.mark.filterwarnings') class ignore_warnings(catch_warnings): """ This can be used either as a context manager or function decorator to ignore all warnings that occur within a function or block of code. An optional category option can be supplied to only ignore warnings of a certain category or categories (if a list is provided). 
""" def __init__(self, category=None): super().__init__() if isinstance(category, type) and issubclass(category, Warning): self.category = [category] else: self.category = category def __call__(self, func): @functools.wraps(func) def wrapper(*args, **kwargs): # Originally this just reused self, but that doesn't work if the # function is called more than once so we need to make a new # context manager instance for each call with self.__class__(category=self.category): return func(*args, **kwargs) return wrapper def __enter__(self): retval = super().__enter__() if self.category is not None: for category in self.category: warnings.simplefilter('ignore', category) else: warnings.simplefilter('ignore') return retval def assert_follows_unicode_guidelines( x, roundtrip=None): """ Test that an object follows our Unicode policy. See "Unicode guidelines" in the coding guidelines. Parameters ---------- x : object The instance to test roundtrip : module, optional When provided, this namespace will be used to evaluate ``repr(x)`` and ensure that it roundtrips. It will also ensure that ``__bytes__(x)`` roundtrip. If not provided, no roundtrip testing will be performed. """ from astropy import conf with conf.set_temp('unicode_output', False): bytes_x = bytes(x) unicode_x = str(x) repr_x = repr(x) assert isinstance(bytes_x, bytes) bytes_x.decode('ascii') assert isinstance(unicode_x, str) unicode_x.encode('ascii') assert isinstance(repr_x, str) if isinstance(repr_x, bytes): repr_x.decode('ascii') else: repr_x.encode('ascii') if roundtrip is not None: assert x.__class__(bytes_x) == x assert x.__class__(unicode_x) == x assert eval(repr_x, roundtrip) == x with conf.set_temp('unicode_output', True): bytes_x = bytes(x) unicode_x = str(x) repr_x = repr(x) assert isinstance(bytes_x, bytes) bytes_x.decode('ascii') assert isinstance(unicode_x, str) assert isinstance(repr_x, str) if isinstance(repr_x, bytes): repr_x.decode('ascii') else: repr_x.encode('ascii') if roundtrip is not None: assert x.__class__(bytes_x) == x assert x.__class__(unicode_x) == x assert eval(repr_x, roundtrip) == x @pytest.fixture(params=[0, 1, -1]) def pickle_protocol(request): """ Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). (Originally from astropy.table.tests.test_pickle) """ return request.param def generic_recursive_equality_test(a, b, class_history): """ Check if the attributes of a and b are equal. Then, check if the attributes of the attributes are equal. """ dict_a = a.__getstate__() if hasattr(a, '__getstate__') else a.__dict__ dict_b = b.__dict__ for key in dict_a: assert key in dict_b,\ f"Did not pickle {key}" if hasattr(dict_a[key], '__eq__'): eq = (dict_a[key] == dict_b[key]) if '__iter__' in dir(eq): eq = (False not in eq) assert eq, f"Value of {key} changed by pickling" if hasattr(dict_a[key], '__dict__'): if dict_a[key].__class__ in class_history: # attempt to prevent infinite recursion pass else: new_class_history = [dict_a[key].__class__] new_class_history.extend(class_history) generic_recursive_equality_test(dict_a[key], dict_b[key], new_class_history) def check_pickling_recovery(original, protocol): """ Try to pickle an object. If successful, make sure the object's attributes survived pickling and unpickling. 
""" f = pickle.dumps(original, protocol=protocol) unpickled = pickle.loads(f) class_history = [original.__class__] generic_recursive_equality_test(original, unpickled, class_history) def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None, **kwargs): """ Raise an assertion if two objects are not equal up to desired tolerance. This is a :class:`~astropy.units.Quantity`-aware version of :func:`numpy.testing.assert_allclose`. """ import numpy as np from astropy.units.quantity import _unquantify_allclose_arguments np.testing.assert_allclose(*_unquantify_allclose_arguments( actual, desired, rtol, atol), **kwargs)
pllim/astropy
astropy/tests/helper.py
Python
bsd-3-clause
16,362
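Of the helpers above, `assert_quantity_allclose` is the one most often used directly. A short example of the unit-aware comparison it provides; the tolerances shown are arbitrary illustrations.

import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose

# Quantities are converted to a common unit before comparison,
# so equivalent values expressed in different units pass.
assert_quantity_allclose(1000.0 * u.m, 1.0 * u.km)

# The absolute tolerance may itself carry units (here: within 2 cm).
assert_quantity_allclose(1.001 * u.m, 1.0 * u.m, atol=2 * u.cm)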
from __future__ import division

import numpy as np

from bokeh.plotting import *

N = 20

img = np.empty((N, N), dtype=np.uint32)
view = img.view(dtype=np.uint8).reshape((N, N, 4))
for i in range(N):
    for j in range(N):
        view[i, j, 0] = int(i / N * 255)
        view[i, j, 1] = 158
        view[i, j, 2] = int(j / N * 255)
        view[i, j, 3] = 255

output_file("image_rgba.html", title="image_rgba.py example")

image_rgba(image=[img], x=[0], y=[0], dw=[10], dh=[10],
           x_range=[0, 10], y_range=[0, 10],
           tools="pan,wheel_zoom,box_zoom,reset,previewsave",
           name="image_example")

show()  # open a browser
the13fools/Bokeh_Examples
plotting/file/image_rgba.py
Python
bsd-3-clause
618
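The example above uses Bokeh's long-removed module-level plotting interface (calling `image_rgba(...)` directly, with the old `previewsave` tool). In current Bokeh the same plot is built on a `figure` object, roughly as sketched below; this is an approximation and not tied to a specific release.

import numpy as np
from bokeh.plotting import figure, output_file, show

N = 20
img = np.empty((N, N), dtype=np.uint32)
view = img.view(dtype=np.uint8).reshape((N, N, 4))
for i in range(N):
    for j in range(N):
        view[i, j, 0] = int(i / N * 255)   # red ramps along one axis
        view[i, j, 1] = 158                # constant green
        view[i, j, 2] = int(j / N * 255)   # blue ramps along the other axis
        view[i, j, 3] = 255                # fully opaque

output_file("image_rgba.html", title="image_rgba.py example")

p = figure(x_range=(0, 10), y_range=(0, 10),
           tools="pan,wheel_zoom,box_zoom,reset,save")
p.image_rgba(image=[img], x=0, y=0, dw=10, dh=10)

show(p)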
#!/usr/bin/env python # #===- rename_check.py - clang-tidy check renamer -------------*- python -*--===# # # The LLVM Compiler Infrastructure # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # #===------------------------------------------------------------------------===# import argparse import glob import os import re def replaceInFile(fileName, sFrom, sTo): if sFrom == sTo: return txt = None with open(fileName, "r") as f: txt = f.read() if sFrom not in txt: return txt = txt.replace(sFrom, sTo) print("Replacing '%s' -> '%s' in '%s'..." % (sFrom, sTo, fileName)) with open(fileName, "w") as f: f.write(txt) def generateCommentLineHeader(filename): return ''.join(['//===--- ', os.path.basename(filename), ' - clang-tidy ', '-' * max(0, 42 - len(os.path.basename(filename))), '*- C++ -*-===//']) def generateCommentLineSource(filename): return ''.join(['//===--- ', os.path.basename(filename), ' - clang-tidy', '-' * max(0, 52 - len(os.path.basename(filename))), '-===//']) def fileRename(fileName, sFrom, sTo): if sFrom not in fileName or sFrom == sTo: return fileName newFileName = fileName.replace(sFrom, sTo) print("Renaming '%s' -> '%s'..." % (fileName, newFileName)) os.rename(fileName, newFileName) return newFileName def deleteMatchingLines(fileName, pattern): lines = None with open(fileName, "r") as f: lines = f.readlines() not_matching_lines = [l for l in lines if not re.search(pattern, l)] if len(not_matching_lines) == len(lines): return False print("Removing lines matching '%s' in '%s'..." % (pattern, fileName)) print(' ' + ' '.join([l for l in lines if re.search(pattern, l)])) with open(fileName, "w") as f: f.writelines(not_matching_lines) return True def getListOfFiles(clang_tidy_path): files = glob.glob(os.path.join(clang_tidy_path, '*')) for dirname in files: if os.path.isdir(dirname): files += glob.glob(os.path.join(dirname, '*')) files += glob.glob(os.path.join(clang_tidy_path, '..', 'test', 'clang-tidy', '*')) files += glob.glob(os.path.join(clang_tidy_path, '..', 'docs', 'clang-tidy', 'checks', '*')) return [filename for filename in files if os.path.isfile(filename)] # Adapts the module's CMakelist file. Returns 'True' if it could add a new entry # and 'False' if the entry already existed. def adapt_cmake(module_path, check_name_camel): filename = os.path.join(module_path, 'CMakeLists.txt') with open(filename, 'r') as f: lines = f.readlines() cpp_file = check_name_camel + '.cpp' # Figure out whether this check already exists. for line in lines: if line.strip() == cpp_file: return False print('Updating %s...' % filename) with open(filename, 'wb') as f: cpp_found = False file_added = False for line in lines: cpp_line = line.strip().endswith('.cpp') if (not file_added) and (cpp_line or cpp_found): cpp_found = True if (line.strip() > cpp_file) or (not cpp_line): f.write(' ' + cpp_file + '\n') file_added = True f.write(line) return True # Modifies the module to include the new check. def adapt_module(module_path, module, check_name, check_name_camel): modulecpp = filter(lambda p: p.lower() == module.lower() + 'tidymodule.cpp', os.listdir(module_path))[0] filename = os.path.join(module_path, modulecpp) with open(filename, 'r') as f: lines = f.readlines() print('Updating %s...' 
% filename) with open(filename, 'wb') as f: header_added = False header_found = False check_added = False check_decl = (' CheckFactories.registerCheck<' + check_name_camel + '>(\n "' + check_name + '");\n') for line in lines: if not header_added: match = re.search('#include "(.*)"', line) if match: header_found = True if match.group(1) > check_name_camel: header_added = True f.write('#include "' + check_name_camel + '.h"\n') elif header_found: header_added = True f.write('#include "' + check_name_camel + '.h"\n') if not check_added: if line.strip() == '}': check_added = True f.write(check_decl) else: match = re.search('registerCheck<(.*)>', line) if match and match.group(1) > check_name_camel: check_added = True f.write(check_decl) f.write(line) # Adds a release notes entry. def add_release_notes(clang_tidy_path, old_check_name, new_check_name): filename = os.path.normpath(os.path.join(clang_tidy_path, '../docs/ReleaseNotes.rst')) with open(filename, 'r') as f: lines = f.readlines() print('Updating %s...' % filename) with open(filename, 'wb') as f: note_added = False header_found = False for line in lines: if not note_added: match = re.search('Improvements to clang-tidy', line) if match: header_found = True elif header_found: if not line.startswith('----'): f.write(""" - The '%s' check was renamed to :doc:`%s <clang-tidy/checks/%s>` """ % (old_check_name, new_check_name, new_check_name)) note_added = True f.write(line) def main(): parser = argparse.ArgumentParser(description='Rename clang-tidy check.') parser.add_argument('old_check_name', type=str, help='Old check name.') parser.add_argument('new_check_name', type=str, help='New check name.') parser.add_argument('--check_class_name', type=str, help='Old name of the class implementing the check.') args = parser.parse_args() old_module = args.old_check_name.split('-')[0] new_module = args.new_check_name.split('-')[0] if args.check_class_name: check_name_camel = args.check_class_name else: check_name_camel = (''.join(map(lambda elem: elem.capitalize(), args.old_check_name.split('-')[1:])) + 'Check') new_check_name_camel = (''.join(map(lambda elem: elem.capitalize(), args.new_check_name.split('-')[1:])) + 'Check') clang_tidy_path = os.path.dirname(__file__) header_guard_variants = [ (old_module + '_' + new_check_name_camel).upper(), args.old_check_name.replace('-', '_').upper()] header_guard_new = (new_module + '_' + new_check_name_camel).upper() old_module_path = os.path.join(clang_tidy_path, old_module) new_module_path = os.path.join(clang_tidy_path, new_module) # Remove the check from the old module. cmake_lists = os.path.join(old_module_path, 'CMakeLists.txt') check_found = deleteMatchingLines(cmake_lists, '\\b' + check_name_camel) if not check_found: print("Check name '%s' not found in %s. Exiting." 
% (check_name_camel, cmake_lists)) return 1 modulecpp = filter( lambda p: p.lower() == old_module.lower() + 'tidymodule.cpp', os.listdir(old_module_path))[0] deleteMatchingLines(os.path.join(old_module_path, modulecpp), '\\b' + check_name_camel + '|\\b' + args.old_check_name) for filename in getListOfFiles(clang_tidy_path): originalName = filename filename = fileRename(filename, args.old_check_name, args.new_check_name) filename = fileRename(filename, check_name_camel, new_check_name_camel) replaceInFile(filename, generateCommentLineHeader(originalName), generateCommentLineHeader(filename)) replaceInFile(filename, generateCommentLineSource(originalName), generateCommentLineSource(filename)) for header_guard in header_guard_variants: replaceInFile(filename, header_guard, header_guard_new) if args.new_check_name + '.rst' in filename: replaceInFile( filename, args.old_check_name + '\n' + '=' * len(args.old_check_name) + '\n', args.new_check_name + '\n' + '=' * len(args.new_check_name) + '\n') replaceInFile(filename, args.old_check_name, args.new_check_name) replaceInFile(filename, old_module + '::' + check_name_camel, new_module + '::' + new_check_name_camel) replaceInFile(filename, old_module + '/' + check_name_camel, new_module + '/' + new_check_name_camel) replaceInFile(filename, check_name_camel, new_check_name_camel) if old_module != new_module: check_implementation_files = glob.glob( os.path.join(old_module_path, new_check_name_camel + '*')) for filename in check_implementation_files: # Move check implementation to the directory of the new module. filename = fileRename(filename, old_module_path, new_module_path) replaceInFile(filename, 'namespace ' + old_module, 'namespace ' + new_module) # Add check to the new module. adapt_cmake(new_module_path, new_check_name_camel) adapt_module(new_module_path, new_module, args.new_check_name, new_check_name_camel) os.system(os.path.join(clang_tidy_path, 'add_new_check.py') + ' --update-docs') add_release_notes(clang_tidy_path, args.old_check_name, args.new_check_name) if __name__ == '__main__': main()
youtube/cobalt
third_party/llvm-project/clang-tools-extra/clang-tidy/rename_check.py
Python
bsd-3-clause
9,659
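The most reusable piece of the script above is how it derives a CamelCase class name from a hyphenated check name. A small self-contained sketch of that derivation (mirroring, not importing, the script's logic); the check names and invocation paths are illustrative.

def check_class_name(check_name):
    """Derive the implementation class name the same way rename_check.py does:
    drop the module prefix, capitalize the remaining hyphen-separated words,
    and append 'Check'."""
    parts = check_name.split('-')[1:]          # strip the leading module, e.g. 'misc'
    return ''.join(word.capitalize() for word in parts) + 'Check'

# e.g. 'misc-unused-parameters' -> 'UnusedParametersCheck'
print(check_class_name('misc-unused-parameters'))

# Typical invocation of the script itself (names are illustrative):
#   python rename_check.py misc-unused-parameters readability-unused-parameters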
""" ============ Rank filters ============ Rank filters are non-linear filters using the local gray-level ordering to compute the filtered value. This ensemble of filters share a common base: the local gray-level histogram is computed on the neighborhood of a pixel (defined by a 2-D structuring element). If the filtered value is taken as the middle value of the histogram, we get the classical median filter. Rank filters can be used for several purposes such as: * image quality enhancement e.g. image smoothing, sharpening * image pre-processing e.g. noise reduction, contrast enhancement * feature extraction e.g. border detection, isolated point detection * post-processing e.g. small object removal, object grouping, contour smoothing Some well known filters are specific cases of rank filters [1]_ e.g. morphological dilation, morphological erosion, median filters. In this example, we will see how to filter a gray-level image using some of the linear and non-linear filters available in skimage. We use the `camera` image from `skimage.data` for all comparisons. .. [1] Pierre Soille, On morphological operators based on rank filters, Pattern Recognition 35 (2002) 527-535. """ import numpy as np import matplotlib.pyplot as plt from skimage import img_as_ubyte from skimage import data noisy_image = img_as_ubyte(data.camera()) hist = np.histogram(noisy_image, bins=np.arange(0, 256)) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3)) ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray) ax1.axis('off') ax2.plot(hist[1][:-1], hist[0], lw=2) ax2.set_title('Histogram of grey values') """ .. image:: PLOT2RST.current_figure Noise removal ============= Some noise is added to the image, 1% of pixels are randomly set to 255, 1% are randomly set to 0. The **median** filter is applied to remove the noise. """ from skimage.filters.rank import median from skimage.morphology import disk noise = np.random.random(noisy_image.shape) noisy_image = img_as_ubyte(data.camera()) noisy_image[noise > 0.99] = 255 noisy_image[noise < 0.01] = 0 fig, ax = plt.subplots(2, 2, figsize=(10, 7), sharex=True, sharey=True) ax1, ax2, ax3, ax4 = ax.ravel() ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray) ax1.set_title('Noisy image') ax1.axis('off') ax1.set_adjustable('box-forced') ax2.imshow(median(noisy_image, disk(1)), vmin=0, vmax=255, cmap=plt.cm.gray) ax2.set_title('Median $r=1$') ax2.axis('off') ax2.set_adjustable('box-forced') ax3.imshow(median(noisy_image, disk(5)), vmin=0, vmax=255, cmap=plt.cm.gray) ax3.set_title('Median $r=5$') ax3.axis('off') ax3.set_adjustable('box-forced') ax4.imshow(median(noisy_image, disk(20)), vmin=0, vmax=255, cmap=plt.cm.gray) ax4.set_title('Median $r=20$') ax4.axis('off') ax4.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure The added noise is efficiently removed, as the image defaults are small (1 pixel wide), a small filter radius is sufficient. As the radius is increasing, objects with bigger sizes are filtered as well, such as the camera tripod. The median filter is often used for noise removal because borders are preserved and e.g. salt and pepper noise typically does not distort the gray-level. Image smoothing ================ The example hereunder shows how a local **mean** filter smooths the camera man image. 
""" from skimage.filters.rank import mean fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7], sharex=True, sharey=True) loc_mean = mean(noisy_image, disk(10)) ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray) ax1.set_title('Original') ax1.axis('off') ax1.set_adjustable('box-forced') ax2.imshow(loc_mean, vmin=0, vmax=255, cmap=plt.cm.gray) ax2.set_title('Local mean $r=10$') ax2.axis('off') ax2.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure One may be interested in smoothing an image while preserving important borders (median filters already achieved this), here we use the **bilateral** filter that restricts the local neighborhood to pixel having a gray-level similar to the central one. .. note:: A different implementation is available for color images in `skimage.filters.denoise_bilateral`. """ from skimage.filters.rank import mean_bilateral noisy_image = img_as_ubyte(data.camera()) bilat = mean_bilateral(noisy_image.astype(np.uint16), disk(20), s0=10, s1=10) fig, ax = plt.subplots(2, 2, figsize=(10, 7), sharex='row', sharey='row') ax1, ax2, ax3, ax4 = ax.ravel() ax1.imshow(noisy_image, cmap=plt.cm.gray) ax1.set_title('Original') ax1.axis('off') ax1.set_adjustable('box-forced') ax2.imshow(bilat, cmap=plt.cm.gray) ax2.set_title('Bilateral mean') ax2.axis('off') ax2.set_adjustable('box-forced') ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray) ax3.axis('off') ax3.set_adjustable('box-forced') ax4.imshow(bilat[200:350, 350:450], cmap=plt.cm.gray) ax4.axis('off') ax4.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure One can see that the large continuous part of the image (e.g. sky) is smoothed whereas other details are preserved. Contrast enhancement ==================== We compare here how the global histogram equalization is applied locally. The equalized image [2]_ has a roughly linear cumulative distribution function for each pixel neighborhood. The local version [3]_ of the histogram equalization emphasizes every local gray-level variations. .. [2] http://en.wikipedia.org/wiki/Histogram_equalization .. [3] http://en.wikipedia.org/wiki/Adaptive_histogram_equalization """ from skimage import exposure from skimage.filters import rank noisy_image = img_as_ubyte(data.camera()) # equalize globally and locally glob = exposure.equalize_hist(noisy_image) * 255 loc = rank.equalize(noisy_image, disk(20)) # extract histogram for each image hist = np.histogram(noisy_image, bins=np.arange(0, 256)) glob_hist = np.histogram(glob, bins=np.arange(0, 256)) loc_hist = np.histogram(loc, bins=np.arange(0, 256)) fig, ax = plt.subplots(3, 2, figsize=(10, 10)) ax1, ax2, ax3, ax4, ax5, ax6 = ax.ravel() ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray) ax1.axis('off') ax2.plot(hist[1][:-1], hist[0], lw=2) ax2.set_title('Histogram of gray values') ax3.imshow(glob, interpolation='nearest', cmap=plt.cm.gray) ax3.axis('off') ax4.plot(glob_hist[1][:-1], glob_hist[0], lw=2) ax4.set_title('Histogram of gray values') ax5.imshow(loc, interpolation='nearest', cmap=plt.cm.gray) ax5.axis('off') ax6.plot(loc_hist[1][:-1], loc_hist[0], lw=2) ax6.set_title('Histogram of gray values') """ .. image:: PLOT2RST.current_figure Another way to maximize the number of gray-levels used for an image is to apply a local auto-leveling, i.e. the gray-value of a pixel is proportionally remapped between local minimum and local maximum. The following example shows how local auto-level enhances the camara man picture. 
""" from skimage.filters.rank import autolevel noisy_image = img_as_ubyte(data.camera()) auto = autolevel(noisy_image.astype(np.uint16), disk(20)) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7], sharex=True, sharey=True) ax1.imshow(noisy_image, cmap=plt.cm.gray) ax1.set_title('Original') ax1.axis('off') ax1.set_adjustable('box-forced') ax2.imshow(auto, cmap=plt.cm.gray) ax2.set_title('Local autolevel') ax2.axis('off') ax2.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure This filter is very sensitive to local outliers, see the little white spot in the left part of the sky. This is due to a local maximum which is very high comparing to the rest of the neighborhood. One can moderate this using the percentile version of the auto-level filter which uses given percentiles (one inferior, one superior) in place of local minimum and maximum. The example below illustrates how the percentile parameters influence the local auto-level result. """ from skimage.filters.rank import autolevel_percentile image = data.camera() selem = disk(20) loc_autolevel = autolevel(image, selem=selem) loc_perc_autolevel0 = autolevel_percentile(image, selem=selem, p0=.00, p1=1.0) loc_perc_autolevel1 = autolevel_percentile(image, selem=selem, p0=.01, p1=.99) loc_perc_autolevel2 = autolevel_percentile(image, selem=selem, p0=.05, p1=.95) loc_perc_autolevel3 = autolevel_percentile(image, selem=selem, p0=.1, p1=.9) fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(7, 8), sharex=True, sharey=True) ax0, ax1, ax2 = axes plt.gray() title_list = ['Original', 'auto_level', 'auto-level 0%', 'auto-level 1%', 'auto-level 5%', 'auto-level 10%'] image_list = [image, loc_autolevel, loc_perc_autolevel0, loc_perc_autolevel1, loc_perc_autolevel2, loc_perc_autolevel3] axes_list = axes.ravel().tolist() for i in range(0,len(image_list)): axes_list[i].imshow(image_list[i], cmap=plt.cm.gray, vmin=0, vmax=255) axes_list[i].set_title(title_list[i]) axes_list[i].axis('off') axes_list[i].set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure The morphological contrast enhancement filter replaces the central pixel by the local maximum if the original pixel value is closest to local maximum, otherwise by the minimum local. """ from skimage.filters.rank import enhance_contrast noisy_image = img_as_ubyte(data.camera()) enh = enhance_contrast(noisy_image, disk(5)) fig, ax = plt.subplots(2, 2, figsize=[10, 7], sharex='row', sharey='row') ax1, ax2, ax3, ax4 = ax.ravel() ax1.imshow(noisy_image, cmap=plt.cm.gray) ax1.set_title('Original') ax1.axis('off') ax1.set_adjustable('box-forced') ax2.imshow(enh, cmap=plt.cm.gray) ax2.set_title('Local morphological contrast enhancement') ax2.axis('off') ax2.set_adjustable('box-forced') ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray) ax3.axis('off') ax3.set_adjustable('box-forced') ax4.imshow(enh[200:350, 350:450], cmap=plt.cm.gray) ax4.axis('off') ax4.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure The percentile version of the local morphological contrast enhancement uses percentile *p0* and *p1* instead of the local minimum and maximum. 
""" from skimage.filters.rank import enhance_contrast_percentile noisy_image = img_as_ubyte(data.camera()) penh = enhance_contrast_percentile(noisy_image, disk(5), p0=.1, p1=.9) fig, ax = plt.subplots(2, 2, figsize=[10, 7], sharex='row', sharey='row') ax1, ax2, ax3, ax4 = ax.ravel() ax1.imshow(noisy_image, cmap=plt.cm.gray) ax1.set_title('Original') ax2.imshow(penh, cmap=plt.cm.gray) ax2.set_title('Local percentile morphological\n contrast enhancement') ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray) ax4.imshow(penh[200:350, 350:450], cmap=plt.cm.gray) for ax in ax.ravel(): ax.axis('off') ax.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure Image threshold =============== The Otsu threshold [1]_ method can be applied locally using the local gray- level distribution. In the example below, for each pixel, an "optimal" threshold is determined by maximizing the variance between two classes of pixels of the local neighborhood defined by a structuring element. The example compares the local threshold with the global threshold `skimage.filters.threshold_otsu`. .. note:: Local is much slower than global thresholding. A function for global Otsu thresholding can be found in : `skimage.filters.threshold_otsu`. .. [4] http://en.wikipedia.org/wiki/Otsu's_method """ from skimage.filters.rank import otsu from skimage.filters import threshold_otsu p8 = data.page() radius = 10 selem = disk(radius) # t_loc_otsu is an image t_loc_otsu = otsu(p8, selem) loc_otsu = p8 >= t_loc_otsu # t_glob_otsu is a scalar t_glob_otsu = threshold_otsu(p8) glob_otsu = p8 >= t_glob_otsu fig, ax = plt.subplots(2, 2, sharex=True, sharey=True) ax1, ax2, ax3, ax4 = ax.ravel() fig.colorbar(ax1.imshow(p8, cmap=plt.cm.gray), ax=ax1) ax1.set_title('Original') fig.colorbar(ax2.imshow(t_loc_otsu, cmap=plt.cm.gray), ax=ax2) ax2.set_title('Local Otsu ($r=%d$)' % radius) ax3.imshow(p8 >= t_loc_otsu, cmap=plt.cm.gray) ax3.set_title('Original >= local Otsu' % t_glob_otsu) ax4.imshow(glob_otsu, cmap=plt.cm.gray) ax4.set_title('Global Otsu ($t=%d$)' % t_glob_otsu) for ax in ax.ravel(): ax.axis('off') ax.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure The following example shows how local Otsu thresholding handles a global level shift applied to a synthetic image. """ n = 100 theta = np.linspace(0, 10 * np.pi, n) x = np.sin(theta) m = (np.tile(x, (n, 1)) * np.linspace(0.1, 1, n) * 128 + 128).astype(np.uint8) radius = 10 t = rank.otsu(m, disk(radius)) fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True) ax1.imshow(m) ax1.set_title('Original') ax1.axis('off') ax1.set_adjustable('box-forced') ax2.imshow(m >= t, interpolation='nearest') ax2.set_title('Local Otsu ($r=%d$)' % radius) ax2.axis('off') ax2.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure Image morphology ================ Local maximum and local minimum are the base operators for gray-level morphology. .. note:: `skimage.dilate` and `skimage.erode` are equivalent filters (see below for comparison). Here is an example of the classical morphological gray-level filters: opening, closing and morphological gradient. 
""" from skimage.filters.rank import maximum, minimum, gradient noisy_image = img_as_ubyte(data.camera()) closing = maximum(minimum(noisy_image, disk(5)), disk(5)) opening = minimum(maximum(noisy_image, disk(5)), disk(5)) grad = gradient(noisy_image, disk(5)) # display results fig, ax = plt.subplots(2, 2, figsize=[10, 7], sharex=True, sharey=True) ax1, ax2, ax3, ax4 = ax.ravel() ax1.imshow(noisy_image, cmap=plt.cm.gray) ax1.set_title('Original') ax2.imshow(closing, cmap=plt.cm.gray) ax2.set_title('Gray-level closing') ax3.imshow(opening, cmap=plt.cm.gray) ax3.set_title('Gray-level opening') ax4.imshow(grad, cmap=plt.cm.gray) ax4.set_title('Morphological gradient') for ax in ax.ravel(): ax.axis('off') ax.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure Feature extraction =================== Local histograms can be exploited to compute local entropy, which is related to the local image complexity. Entropy is computed using base 2 logarithm i.e. the filter returns the minimum number of bits needed to encode local gray-level distribution. `skimage.rank.entropy` returns the local entropy on a given structuring element. The following example shows applies this filter on 8- and 16-bit images. .. note:: to better use the available image bit, the function returns 10x entropy for 8-bit images and 1000x entropy for 16-bit images. """ from skimage import data from skimage.filters.rank import entropy from skimage.morphology import disk import numpy as np import matplotlib.pyplot as plt image = data.camera() fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), sharex=True, sharey=True) fig.colorbar(ax1.imshow(image, cmap=plt.cm.gray), ax=ax1) ax1.set_title('Image') ax1.axis('off') ax1.set_adjustable('box-forced') fig.colorbar(ax2.imshow(entropy(image, disk(5)), cmap=plt.cm.gray), ax=ax2) ax2.set_title('Entropy') ax2.axis('off') ax2.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure Implementation ============== The central part of the `skimage.rank` filters is build on a sliding window that updates the local gray-level histogram. This approach limits the algorithm complexity to O(n) where n is the number of image pixels. The complexity is also limited with respect to the structuring element size. In the following we compare the performance of different implementations available in `skimage`. 
""" from time import time from scipy.ndimage import percentile_filter from skimage.morphology import dilation from skimage.filters.rank import median, maximum def exec_and_timeit(func): """Decorator that returns both function results and execution time.""" def wrapper(*arg): t1 = time() res = func(*arg) t2 = time() ms = (t2 - t1) * 1000.0 return (res, ms) return wrapper @exec_and_timeit def cr_med(image, selem): return median(image=image, selem=selem) @exec_and_timeit def cr_max(image, selem): return maximum(image=image, selem=selem) @exec_and_timeit def cm_dil(image, selem): return dilation(image=image, selem=selem) @exec_and_timeit def ndi_med(image, n): return percentile_filter(image, 50, size=n * 2 - 1) """ Comparison between * `filters.rank.maximum` * `morphology.dilate` on increasing structuring element size: """ a = data.camera() rec = [] e_range = range(1, 20, 2) for r in e_range: elem = disk(r + 1) rc, ms_rc = cr_max(a, elem) rcm, ms_rcm = cm_dil(a, elem) rec.append((ms_rc, ms_rcm)) rec = np.asarray(rec) fig, ax = plt.subplots() ax.set_title('Performance with respect to element size') ax.set_ylabel('Time (ms)') ax.set_xlabel('Element radius') ax.plot(e_range, rec) ax.legend(['filters.rank.maximum', 'morphology.dilate']) """ .. image:: PLOT2RST.current_figure and increasing image size: """ r = 9 elem = disk(r + 1) rec = [] s_range = range(100, 1000, 100) for s in s_range: a = (np.random.random((s, s)) * 256).astype(np.uint8) (rc, ms_rc) = cr_max(a, elem) (rcm, ms_rcm) = cm_dil(a, elem) rec.append((ms_rc, ms_rcm)) rec = np.asarray(rec) fig, ax = plt.subplots() ax.set_title('Performance with respect to image size') ax.set_ylabel('Time (ms)') ax.set_xlabel('Image size') ax.plot(s_range, rec) ax.legend(['filters.rank.maximum', 'morphology.dilate']) """ .. image:: PLOT2RST.current_figure Comparison between: * `filters.rank.median` * `scipy.ndimage.percentile` on increasing structuring element size: """ a = data.camera() rec = [] e_range = range(2, 30, 4) for r in e_range: elem = disk(r + 1) rc, ms_rc = cr_med(a, elem) rndi, ms_ndi = ndi_med(a, r) rec.append((ms_rc, ms_ndi)) rec = np.asarray(rec) fig, ax = plt.subplots() ax.set_title('Performance with respect to element size') ax.plot(e_range, rec) ax.legend(['filters.rank.median', 'scipy.ndimage.percentile']) ax.set_ylabel('Time (ms)') ax.set_xlabel('Element radius') """ .. image:: PLOT2RST.current_figure Comparison of outcome of the three methods: """ fig, (ax0, ax1) = plt.subplots(ncols=2, sharex=True, sharey=True) ax0.set_title('filters.rank.median') ax0.imshow(rc) ax0.axis('off') ax0.set_adjustable('box-forced') ax1.set_title('scipy.ndimage.percentile') ax1.imshow(rndi) ax1.axis('off') ax1.set_adjustable('box-forced') """ .. image:: PLOT2RST.current_figure and increasing image size: """ r = 9 elem = disk(r + 1) rec = [] s_range = [100, 200, 500, 1000] for s in s_range: a = (np.random.random((s, s)) * 256).astype(np.uint8) (rc, ms_rc) = cr_med(a, elem) rndi, ms_ndi = ndi_med(a, r) rec.append((ms_rc, ms_ndi)) rec = np.asarray(rec) fig, ax = plt.subplots() ax.set_title('Performance with respect to image size') ax.plot(s_range, rec) ax.legend(['filters.rank.median', 'scipy.ndimage.percentile']) ax.set_ylabel('Time (ms)') ax.set_xlabel('Image size') """ .. image:: PLOT2RST.current_figure """ plt.show()
pratapvardhan/scikit-image
doc/examples/xx_applications/plot_rank_filters.py
Python
bsd-3-clause
19,382
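A condensed sketch of the core pattern the gallery script repeats, a rank filter applied over a disk-shaped structuring element. The structuring element is passed positionally to stay compatible across scikit-image's `selem` to `footprint` keyword rename; the noise fractions and radii are the same illustrative values used above.

import numpy as np
from skimage import data, img_as_ubyte
from skimage.filters.rank import median, mean
from skimage.morphology import disk

image = img_as_ubyte(data.camera())

# Add salt-and-pepper noise, then remove it with a small median filter.
noise = np.random.random(image.shape)
noisy = image.copy()
noisy[noise > 0.99] = 255
noisy[noise < 0.01] = 0

denoised = median(noisy, disk(1))   # middle value of the local histogram
smoothed = mean(noisy, disk(10))    # local average over a radius-10 disk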
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals


def execute(next_process, handler, dependencies, **kwargs):
    dependencies["_req"] = handler.request
    dependencies["_resp"] = handler.response
    dependencies["_handler"] = handler
    dependencies["_dependencies"] = dependencies
    next_process(dependencies, **kwargs)
silviolima/EstudoAppengine
tekton/tekton-master/src/tekton/gae/middleware/webapp2_dependencies.py
Python
mit
361
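The module above is one link in a middleware chain: each `execute` receives the next callable plus a shared dependencies dict. A minimal sketch of driving such a chain; `FakeHandler` and `final_process` are hypothetical stand-ins, not part of tekton or webapp2.

class FakeHandler(object):
    """Hypothetical stand-in for a webapp2 request handler."""
    request = {'path': '/hello'}
    response = {'status': 200}


def final_process(dependencies, **kwargs):
    # Last link in the chain: show what earlier middlewares injected.
    print(sorted(dependencies.keys()))   # ['_dependencies', '_handler', '_req', '_resp']


def execute(next_process, handler, dependencies, **kwargs):
    # Same shape as the webapp2_dependencies middleware above.
    dependencies["_req"] = handler.request
    dependencies["_resp"] = handler.response
    dependencies["_handler"] = handler
    dependencies["_dependencies"] = dependencies
    next_process(dependencies, **kwargs)


execute(final_process, FakeHandler(), {})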
import json
import logging

from google.appengine.api import mail

import tba_config


class OutgoingNotificationHelper(object):

    @classmethod
    def send_admin_alert_email(cls, subject, email_body):
        # Send an email to contact@ telling them to review this
        # Only do this on prod
        if tba_config.DEBUG:
            return
        mail.send_mail(sender="The Blue Alliance Contact <contact@thebluealliance.com>",
                       to="contact@thebluealliance.com",
                       subject=subject,
                       body=email_body)

    @classmethod
    def send_suggestion_result_email(cls, to, subject, email_body):
        # Send an alert to the user who submitted a suggestion updating them on the status
        if tba_config.DEBUG:
            return
        mail.send_mail(sender="The Blue Alliance Admin <contact@thebluealliance.com>",
                       to=to,
                       cc="contact@thebluealliance.com",
                       subject=subject,
                       body=email_body)

    @classmethod
    def send_slack_alert(cls, webhook_url, body_text, attachment_list=None):
        # Send an alert to a specified slack channel
        # Only do this on prod
        import urllib
        import urllib2

        if tba_config.DEBUG or not webhook_url:
            return

        post_dict = {
            'text': body_text,
        }
        if attachment_list:
            post_dict.update({
                'attachments': attachment_list,
            })

        post_data = urllib.urlencode({"payload": json.dumps(post_dict)})
        request = urllib2.Request(webhook_url, post_data)
        response = urllib2.urlopen(request)
        logging.info("Response from slack webhook {}".format(response.read()))
jaredhasenklein/the-blue-alliance
helpers/outgoing_notification_helper.py
Python
mit
1,786
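A sketch of calling the Slack helper above. The webhook URL and attachment fields are placeholders rather than real endpoints, and the call assumes `tba_config.DEBUG` is false so the request is actually sent.

from helpers.outgoing_notification_helper import OutgoingNotificationHelper

# Placeholder webhook URL; a real one comes from the Slack app configuration.
webhook_url = "https://hooks.slack.com/services/EXAMPLE/EXAMPLE/EXAMPLE"

OutgoingNotificationHelper.send_slack_alert(
    webhook_url,
    "A new suggestion is pending review",
    attachment_list=[{
        "title": "Suggestion queue",
        "text": "3 items waiting",
    }],
)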
import unittest import shutil import tempfile import os import random import string import bcbio.utils as utils import tests.generate_test_data as td from scilifelab.illumina.hiseq import HiSeqRun class TestHiSeqRun(unittest.TestCase): def setUp(self): self.rootdir = tempfile.mkdtemp(prefix="test_illumina_hiseq_") self.hiseq = HiSeqRun(self.rootdir) def tearDown(self): shutil.rmtree(self.rootdir) def test_parse_samplesheet(self): """Write and parse a csv-file """ # Assert non-existing file raises exception with self.assertRaises(IOError): HiSeqRun.parse_samplesheet(os.path.join(self.rootdir,'non-existing-samplesheet')) # Write a csv file with some bogus values sdata = td.generate_samplesheet_data() samplesheet = os.path.join(self.rootdir,'SampleSheet.csv') HiSeqRun.write_samplesheet(sdata,samplesheet) # Assert that the written data corresponds to the generated data with open(samplesheet) as fh: # Assert that header is correct self.assertListEqual(HiSeqRun._samplesheet_header(), fh.next().strip().split(","), "Written header does not match expected header") for entry in sdata: # Assert that all rows have the correct values in the correct columns self.assertListEqual([str(e) for e in entry], fh.next().strip().split(","), "Written data row does not match entry in generated samplesheet") # Assert that all rows from samplesheet has been consumed with self.assertRaises(StopIteration): fh.next() # Assert that the parsed data matches the generated data data = HiSeqRun.parse_samplesheet(samplesheet) self.assertEqual(len(sdata), len(data), "Number of parsed entries does not match number of generated entries") for d in data: self.assertListEqual([str(e) for e in sdata.pop(0)], [d[col] for col in HiSeqRun._samplesheet_header()], "Parsed data row does not match entry in generated samplesheet") # Assert that filtering on lane returns expected output lanes = list(set([d["Lane"] for d in data])) obs_lane_data = HiSeqRun.parse_samplesheet(samplesheet,lane=lanes[-1]) exp_lane_data = [d for d in data if str(d["Lane"]) == str(lanes[-1])] self.assertListEqual(sorted(obs_lane_data), sorted(exp_lane_data), "Parsed data row does not match entry in generated samplesheet") def test_get_project_names(self): """Get the projects from a samplesheet """ # Assert that an empty file returns an empty list fh, ssheet = tempfile.mkstemp(dir=self.rootdir, suffix=".csv") os.close(fh) self.assertListEqual([],HiSeqRun.get_project_names(ssheet), "The list of projects for an empty file is not empty") # Generate artificial samplesheet data data = td.generate_samplesheet_data() projects = {} for d in data: projects[d[-1]] = 1 # Write the data to a samplesheet td._write_samplesheet(data,ssheet) # Assert that the list of projects returned is the same that we generated self.assertListEqual(sorted(projects.keys()),sorted(HiSeqRun.get_project_names(ssheet)), "The list of projects does not match the original list") def test_get_project_sample_ids(self): """Test that getting the project samples from a samplesheet behaves as expected """ # Generate artificial samplesheet data data = td.generate_samplesheet_data() fh, ssheet = tempfile.mkstemp(dir=self.rootdir, suffix=".csv") os.close(fh) td._write_samplesheet(data,ssheet) # Assert that getting samples for a non-existing project returns an empty list self.assertListEqual([],HiSeqRun.get_project_sample_ids(ssheet,td.generate_project()), "Getting samples for a non-existing project returned unexpected output") # Iterate over the projects and assert that the returned samples are correct samples = 
{} for row in data: if row[9] not in samples: samples[row[9]] = [] samples[row[9]].append(row[2]) for proj, sample in samples.items(): self.assertListEqual(sorted(sample),sorted(HiSeqRun.get_project_sample_ids(ssheet,proj)), "The returned list of samples did not match the original")
SciLifeLab/scilifelab
tests/illumina/test_hiseq.py
Python
mit
5,072
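A sketch of the samplesheet round trip the tests exercise, using only the `HiSeqRun` class methods and test-data generator that appear in the test file; the temporary directory is a throwaway illustration.

import os
import tempfile

from scilifelab.illumina.hiseq import HiSeqRun
import tests.generate_test_data as td

rundir = tempfile.mkdtemp(prefix="hiseq_demo_")
samplesheet = os.path.join(rundir, "SampleSheet.csv")

# Write randomly generated rows, then read them back.
rows = td.generate_samplesheet_data()
HiSeqRun.write_samplesheet(rows, samplesheet)

parsed = HiSeqRun.parse_samplesheet(samplesheet)
print(len(parsed), "rows; projects:", HiSeqRun.get_project_names(samplesheet))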
import time

from config.config import config
from lib.irc import Irc
from lib.game import Game
from lib.misc import pbutton


class Bot:

    def __init__(self):
        self.config = config
        self.irc = Irc(config)
        self.game = Game()
        self.message_buffer = [{'username': '', 'button': ''}] * self.config['misc']['chat_height']

    def set_message_buffer(self, message):
        self.message_buffer.insert(self.config['misc']['chat_height'] - 1, message)
        self.message_buffer.pop(0)

    def run(self):
        throttle_timers = {button: 0 for button in config['throttled_buttons'].keys()}

        while True:
            new_messages = self.irc.recv_messages(1024)

            if not new_messages:
                continue

            for message in new_messages:
                button = message['message'].lower()
                username = message['username'].lower()

                if not self.game.is_valid_button(button):
                    continue

                if button in self.config['throttled_buttons']:
                    if time.time() - throttle_timers[button] < self.config['throttled_buttons'][button]:
                        continue
                    throttle_timers[button] = time.time()

                self.set_message_buffer({'username': username, 'button': button})
                pbutton(self.message_buffer)
                self.game.push_button(button)
aidanrt/twitch-plays
lib/bot.py
Python
mit
1,447
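An entry-point sketch for the bot above, assuming the repository's `config`, `lib.irc`, `lib.game`, and `lib.misc` modules are importable; the KeyboardInterrupt handling is an illustrative addition, not part of the original project.

from lib.bot import Bot

if __name__ == '__main__':
    bot = Bot()
    try:
        bot.run()                 # blocks, reading chat and pushing buttons
    except KeyboardInterrupt:
        print('Shutting down.')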