repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
DiamantiCom/kubernetes | refs/heads/master | hack/boilerplate/boilerplate_test.py | 630 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boilerplate
import unittest
import StringIO
import os
import sys
class TestBoilerplate(unittest.TestCase):
    """
    Note: run this test from the hack/boilerplate directory.

    $ python -m unittest boilerplate_test
    """

    def test_boilerplate(self):
        # The checker walks relative to rootdir, so run from the fixture dir.
        os.chdir("test/")

        class Args(object):
            def __init__(self):
                self.filenames = []
                self.rootdir = "."
                self.boilerplate_dir = "../"
                self.verbose = True

        # capture stdout
        old_stdout = sys.stdout
        sys.stdout = StringIO.StringIO()
        try:
            boilerplate.args = Args()
            boilerplate.main()
            output = sorted(sys.stdout.getvalue().split())
        finally:
            # Always restore stdout, even if boilerplate.main() raises;
            # otherwise later test output is swallowed.
            sys.stdout = old_stdout

        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(
            output, ['././fail.go', '././fail.py'])
|
firewalld/firewalld | refs/heads/master | src/firewall/core/fw_zone.py | 1 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2016 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <twoerner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
import copy
from firewall.core.base import SHORTCUTS, DEFAULT_ZONE_TARGET, SOURCE_IPSET_TYPES
from firewall.core.fw_transaction import FirewallTransaction
from firewall.core.io.policy import Policy
from firewall.core.logger import log
from firewall.core.rich import Rich_Service, Rich_Port, Rich_Protocol, Rich_SourcePort, Rich_ForwardPort, \
Rich_IcmpBlock, Rich_IcmpType, Rich_Masquerade, Rich_Mark, Rich_Tcp_Mss_Clamp
from firewall.functions import checkIPnMask, checkIP6nMask, check_mac
from firewall import errors
from firewall.errors import FirewallError
from firewall.fw_types import LastUpdatedOrderedDict
class FirewallZone(object):
ZONE_POLICY_PRIORITY = 0
def __init__(self, fw):
    # Back-reference to the Firewall core object.
    self._fw = fw
    # zone name -> zone configuration object (runtime .settings attached
    # later in add_zone()).
    self._zones = { }
    # zone name -> list of names of the policies derived from that zone
    self._zone_policies = { }

def __repr__(self):
    return '%s(%r)' % (self.__class__, self._zones)

def cleanup(self):
    # Drop all runtime zone state.
    self._zones.clear()
    self._zone_policies.clear()

def new_transaction(self):
    # Fresh transaction bound to the firewall core.
    return FirewallTransaction(self._fw)
def policy_name_from_zones(self, fromZone, toZone):
    """Return the name of the implicit policy handling fromZone -> toZone."""
    return "zone_%s_%s" % (fromZone, toZone)
# zones
def get_zones(self):
    # All known zone names, sorted.
    return sorted(self._zones.keys())

def get_active_zones(self):
    # A zone is active if at least one interface or source is bound to it.
    active_zones = []
    for zone in self.get_zones():
        if self.list_interfaces(zone) or self.list_sources(zone):
            active_zones.append(zone)
    return active_zones

def get_zone_of_interface(self, interface):
    interface_id = self.__interface_id(interface)
    for zone in self._zones:
        if interface_id in self._zones[zone].settings["interfaces"]:
            # an interface can only be part of one zone
            return zone
    return None

def get_zone_of_source(self, source):
    source_id = self.__source_id(source)
    for zone in self._zones:
        if source_id in self._zones[zone].settings["sources"]:
            # a source_id can only be part of one zone
            return zone
    return None

def get_zone(self, zone):
    # check_zone() validates the name (raising on unknown zones) and
    # resolves "" to the default zone.
    z = self._fw.check_zone(zone)
    return self._zones[z]
def policy_obj_from_zone_obj(self, z_obj, fromZone, toZone):
    """Build the Policy object implementing one direction of a zone.

    Each zone is decomposed into three policies: (zone -> HOST),
    (ANY -> zone) and (zone -> ANY).  Only the zone settings relevant
    for the given direction are copied into the policy.
    """
    p_obj = Policy()
    p_obj.derived_from_zone = z_obj.name
    p_obj.name = self.policy_name_from_zones(fromZone, toZone)
    p_obj.priority = self.ZONE_POLICY_PRIORITY
    p_obj.target = z_obj.target
    p_obj.ingress_zones = [fromZone]
    p_obj.egress_zones = [toZone]

    # copy zone permanent config to policy permanent config
    # WARN: This assumes the same attribute names.
    #
    for setting in ["services", "ports",
                    "masquerade", "forward_ports",
                    "source_ports",
                    "icmp_blocks", "rules",
                    "protocols"]:
        if fromZone == z_obj.name and toZone == "HOST" and \
           setting in ["services", "ports", "source_ports", "icmp_blocks", "protocols"]:
            # zone --> HOST
            setattr(p_obj, setting, copy.deepcopy(getattr(z_obj, setting)))
        elif fromZone == "ANY" and toZone == z_obj.name and setting in ["masquerade"]:
            # any zone --> zone
            setattr(p_obj, setting, copy.deepcopy(getattr(z_obj, setting)))
        elif fromZone == z_obj.name and toZone == "ANY" and \
             setting in ["forward_ports"]:
            # zone --> any zone
            setattr(p_obj, setting, copy.deepcopy(getattr(z_obj, setting)))
        elif setting in ["rules"]:
            # Rich rules are routed per-rule: only copy rules whose
            # action/element maps to this particular policy direction.
            p_obj.rules = []
            for rule in z_obj.rules:
                current_policy = self.policy_name_from_zones(fromZone, toZone)
                if current_policy in self._rich_rule_to_policies(z_obj.name, rule):
                    p_obj.rules.append(copy.deepcopy(rule))

    return p_obj
def add_zone(self, obj):
    """Register a zone object and create its three derived policies."""
    # Runtime settings attached to the zone object, keyed by setting
    # type; insertion order is preserved.
    obj.settings = { x : LastUpdatedOrderedDict()
                     for x in ["interfaces", "sources",
                               "icmp_block_inversion",
                               "forward"] }

    self._zones[obj.name] = obj
    self._zone_policies[obj.name] = []

    # Create policy objects, will need many:
    # - (zone --> HOST) - ports, service, etc
    # - (any zone --> zone) - masquerade
    # - (zone --> any zone)
    #   - also includes forward-ports because it works on (nat,
    #     PREROUTING) and therefore applies to redirects to the local
    #     host or dnat to a different host.
    #   - also includes rich rule "mark" action for the same reason
    #
    for fromZone,toZone in [(obj.name, "HOST"),
                            ("ANY", obj.name), (obj.name, "ANY")]:
        p_obj = self.policy_obj_from_zone_obj(obj, fromZone, toZone)
        self._fw.policy.add_policy(p_obj)
        self._zone_policies[obj.name].append(p_obj.name)

    self.copy_permanent_to_runtime(obj.name)
def copy_permanent_to_runtime(self, zone):
    """Seed the runtime settings from the zone's permanent configuration.

    allow_apply=False: nothing is pushed to the backends here, the
    bindings are only registered.
    """
    obj = self._zones[zone]

    for arg in obj.interfaces:
        self.add_interface(zone, arg, allow_apply=False)
    for arg in obj.sources:
        self.add_source(zone, arg, allow_apply=False)
    if obj.forward:
        self.add_forward(zone)
    if obj.icmp_block_inversion:
        self.add_icmp_block_inversion(zone)
def remove_zone(self, zone):
    """Forget a zone, unapplying its backend rules first if needed."""
    obj = self._zones[zone]
    if obj.applied:
        self.unapply_zone_settings(zone)
    obj.settings.clear()
    del self._zones[zone]
    del self._zone_policies[zone]

def apply_zones(self, use_transaction=None):
    # Only zones with at least one interface or source binding are
    # applied to the backends.
    for zone in self.get_zones():
        z_obj = self._zones[zone]
        if len(z_obj.interfaces) > 0 or len(z_obj.sources) > 0:
            log.debug1("Applying zone '%s'", zone)
            self.apply_zone_settings(zone, use_transaction=use_transaction)

def set_zone_applied(self, zone, applied):
    # Used as a transaction failure hook to roll back the applied flag.
    obj = self._zones[zone]
    obj.applied = applied
# zone from chain
def zone_from_chain(self, chain):
    """Map a backend chain name back to (zone, base_chain), or None.

    Chain names look like "<shortcut>_<zone>" with an optional
    "_pre"/"_log"/"_deny"/"_allow"/"_post" suffix.
    """
    if "_" not in chain:
        # no zone chain
        return None
    splits = chain.split("_")
    if len(splits) < 2:
        return None
    _chain = None
    # Reverse-map the shortcut prefix to the full chain name.
    for x in SHORTCUTS:
        if splits[0] == SHORTCUTS[x]:
            _chain = x
    if _chain is not None:
        # next part needs to be zone name
        if splits[1] not in self.get_zones():
            return None
        if len(splits) == 2 or \
           (len(splits) == 3 and splits[2] in [ "pre", "log", "deny", "allow", "post" ]):
            return (splits[1], _chain)
    return None
def policy_from_chain(self, chain):
    """Map a backend chain name to (policy_name, base_chain), or None.

    Raises FirewallError(INVALID_CHAIN) for a zone chain that does not
    correspond to any policy direction.
    """
    x = self.zone_from_chain(chain)
    if x is None:
        return None

    (zone, _chain) = x
    # derived from _get_table_chains_for_zone_dispatch()
    if _chain in ["PREROUTING", "FORWARD"]:
        fromZone = zone
        toZone = "ANY"
    elif _chain in ["INPUT"]:
        fromZone = zone
        toZone = "HOST"
    elif _chain in ["POSTROUTING"]:
        fromZone = "ANY"
        toZone = zone
    else:
        raise FirewallError(errors.INVALID_CHAIN, "chain '%s' can't be mapped to a policy" % (chain))

    return (self.policy_name_from_zones(fromZone, toZone), _chain)
def create_zone_base_by_chain(self, ipv, table, chain,
                              use_transaction=None):
    """Create zone base chains if the chain is reserved for a zone.

    No-op for non-IP families and for chains that do not map to a
    zone-derived policy.
    """
    if ipv in [ "ipv4", "ipv6" ]:
        x = self.policy_from_chain(chain)
        if x is not None:
            # Reuse the already-computed mapping instead of resolving
            # the chain a second time (the original called
            # policy_from_chain() twice).
            (policy, _chain) = x
            if use_transaction is None:
                transaction = self.new_transaction()
            else:
                transaction = use_transaction

            self._fw.policy.gen_chain_rules(policy, True, table, _chain,
                                            transaction)

            if use_transaction is None:
                transaction.execute(True)
# settings
# generate settings record with sender, timeout
# settings

# generate settings record with sender, timeout
def __gen_settings(self, timeout, sender):
    # "date" records when the binding was made (epoch seconds).
    ret = {
        "date": time.time(),
        "sender": sender,
        "timeout": timeout,
    }
    return ret

def get_settings(self, zone):
    # Runtime settings dict of the (validated) zone.
    return self.get_zone(zone).settings
def _zone_settings(self, enable, zone, transaction):
    """Add rules to (enable=True) or remove rules from (False) the
    transaction for every runtime binding of the zone."""
    settings = self.get_settings(zone)
    for key in settings:
        for args in settings[key]:
            if key == "interfaces":
                self._interface(enable, zone, args, transaction)
            elif key == "sources":
                # args is a (ipv, source) tuple, see __source_id()
                self._source(enable, zone, args[0], args[1], transaction)
            elif key == "icmp_block_inversion":
                continue
            elif key == "forward":
                # no need to call this when applying the zone as the rules
                # will be generated when adding the interfaces/sources
                pass
            else:
                log.warning("Zone '%s': Unknown setting '%s:%s', "
                            "unable to apply", zone, key, args)
    # ICMP-block-inversion is always applied
    if enable:
        self._icmp_block_inversion(enable, zone, transaction)
def apply_zone_settings(self, zone, use_transaction=None):
    """Push the zone's derived policies and runtime bindings to the
    backends.  Idempotent: a second call on an applied zone is a no-op."""
    _zone = self._fw.check_zone(zone)
    obj = self._zones[_zone]
    if obj.applied:
        return
    obj.applied = True

    if use_transaction is None:
        transaction = self.new_transaction()
    else:
        transaction = use_transaction

    for policy in self._zone_policies[_zone]:
        log.debug1("Applying policy (%s) derived from zone '%s'", policy, zone)
        self._fw.policy.apply_policy_settings(policy, use_transaction=transaction)

    self._zone_settings(True, _zone, transaction)

    if use_transaction is None:
        transaction.execute(True)

def unapply_zone_settings(self, zone, use_transaction=None):
    """Reverse of apply_zone_settings(); no-op if the zone is not applied."""
    _zone = self._fw.check_zone(zone)
    obj = self._zones[_zone]
    if not obj.applied:
        return

    if use_transaction is None:
        transaction = self.new_transaction()
    else:
        transaction = use_transaction

    for policy in self._zone_policies[_zone]:
        self._fw.policy.unapply_policy_settings(policy, use_transaction=transaction)

    self._zone_settings(False, _zone, transaction)

    if use_transaction is None:
        transaction.execute(True)
def get_config_with_settings(self, zone):
    """
    :return: exported config updated with runtime settings,
             as the legacy 16-element tuple
    """
    obj = self.get_zone(zone)
    conf_dict = self.get_config_with_settings_dict(zone)
    conf_list = []
    for i in range(16): # tuple based API has 16 elements
        if obj.IMPORT_EXPORT_STRUCTURE[i][0] not in conf_dict:
            # old API needs the empty elements as well. Grab it from the
            # class otherwise we don't know the type.
            conf_list.append(copy.deepcopy(getattr(obj, obj.IMPORT_EXPORT_STRUCTURE[i][0])))
        else:
            conf_list.append(conf_dict[obj.IMPORT_EXPORT_STRUCTURE[i][0]])
    return tuple(conf_list)

def get_config_with_settings_dict(self, zone):
    """
    :return: exported config updated with runtime settings
    """
    permanent = self.get_zone(zone).export_config_dict()
    if permanent["target"] == DEFAULT_ZONE_TARGET:
        # expose the internal default-target placeholder as "default"
        permanent["target"] = "default"
    runtime = { "services": self.list_services(zone),
                "ports": self.list_ports(zone),
                "icmp_blocks": self.list_icmp_blocks(zone),
                "masquerade": self.query_masquerade(zone),
                "forward_ports": self.list_forward_ports(zone),
                "interfaces": self.list_interfaces(zone),
                "sources": self.list_sources(zone),
                "rules_str": self.list_rules(zone),
                "protocols": self.list_protocols(zone),
                "source_ports": self.list_source_ports(zone),
                "icmp_block_inversion": self.query_icmp_block_inversion(zone),
                "forward": self.query_forward(zone),
              }
    return self._fw.combine_runtime_with_permanent_settings(permanent, runtime)
def set_config_with_settings_dict(self, zone, settings, sender):
    """Apply a complete settings dict to a zone at runtime.

    Computes the delta against the current runtime configuration and
    removes/adds only the changed entries via the per-setting
    add/remove functions.
    """
    # stupid wrappers to convert rich rule string to rich rule object
    from firewall.core.rich import Rich_Rule
    def add_rule_wrapper(zone, rule_str, timeout=0, sender=None):
        # Forward the caller-supplied timeout; the original hard-coded
        # timeout=0, silently discarding the argument.
        self.add_rule(zone, Rich_Rule(rule_str=rule_str), timeout=timeout, sender=sender)
    def remove_rule_wrapper(zone, rule_str):
        self.remove_rule(zone, Rich_Rule(rule_str=rule_str))

    # setting name -> (add function, remove function)
    setting_to_fn = {
        "services": (self.add_service, self.remove_service),
        "ports": (self.add_port, self.remove_port),
        "icmp_blocks": (self.add_icmp_block, self.remove_icmp_block),
        "masquerade": (self.add_masquerade, self.remove_masquerade),
        "forward_ports": (self.add_forward_port, self.remove_forward_port),
        "interfaces": (self.add_interface, self.remove_interface),
        "sources": (self.add_source, self.remove_source),
        "rules_str": (add_rule_wrapper, remove_rule_wrapper),
        "protocols": (self.add_protocol, self.remove_protocol),
        "source_ports": (self.add_source_port, self.remove_source_port),
        "icmp_block_inversion": (self.add_icmp_block_inversion, self.remove_icmp_block_inversion),
        "forward": (self.add_forward, self.remove_forward),
    }

    old_settings = self.get_config_with_settings_dict(zone)
    (add_settings, remove_settings) = self._fw.get_added_and_removed_settings(old_settings, settings)

    # Remove first so re-added (changed) entries do not conflict.
    for key in remove_settings:
        if isinstance(remove_settings[key], list):
            for args in remove_settings[key]:
                if isinstance(args, tuple):
                    setting_to_fn[key][1](zone, *args)
                else:
                    setting_to_fn[key][1](zone, args)
        else: # bool
            setting_to_fn[key][1](zone)

    for key in add_settings:
        if isinstance(add_settings[key], list):
            for args in add_settings[key]:
                if key in ["interfaces", "sources"]:
                    # no timeout arg
                    setting_to_fn[key][0](zone, args, sender=sender)
                else:
                    if isinstance(args, tuple):
                        setting_to_fn[key][0](zone, *args, timeout=0, sender=sender)
                    else:
                        setting_to_fn[key][0](zone, args, timeout=0, sender=sender)
        else: # bool
            if key in ["icmp_block_inversion"]:
                # no timeout arg
                setting_to_fn[key][0](zone, sender=sender)
            else:
                setting_to_fn[key][0](zone, timeout=0, sender=sender)
# INTERFACES
def check_interface(self, interface):
    # Delegates validation; raises via the core on invalid names.
    self._fw.check_interface(interface)

def interface_get_sender(self, zone, interface):
    """Return the D-Bus sender that bound the interface, or None."""
    _zone = self._fw.check_zone(zone)
    _obj = self._zones[_zone]
    interface_id = self.__interface_id(interface)

    if interface_id in _obj.settings["interfaces"]:
        settings = _obj.settings["interfaces"][interface_id]
        if "sender" in settings and settings["sender"] is not None:
            return settings["sender"]

    return None

def __interface_id(self, interface):
    # For interfaces the id is the (validated) name itself.
    self.check_interface(interface)
    return interface
def add_interface(self, zone, interface, sender=None,
                  use_transaction=None, allow_apply=True):
    """Bind an interface to a zone and (if allow_apply) generate its rules.

    Raises ZONE_ALREADY_SET if already bound to this zone, ZONE_CONFLICT
    if bound to another zone.  Returns the resolved zone name.
    """
    self._fw.check_panic()
    _zone = self._fw.check_zone(zone)
    _obj = self._zones[_zone]

    interface_id = self.__interface_id(interface)

    if interface_id in _obj.settings["interfaces"]:
        raise FirewallError(errors.ZONE_ALREADY_SET,
                            "'%s' already bound to '%s'" % (interface,
                                                            zone))
    zoi = self.get_zone_of_interface(interface)
    if zoi is not None:
        raise FirewallError(errors.ZONE_CONFLICT,
                            "'%s' already bound to '%s'" % (interface,
                                                            zoi))

    log.debug1("Setting zone of interface '%s' to '%s'" % (interface,
                                                           _zone))

    if use_transaction is None:
        transaction = self.new_transaction()
    else:
        transaction = use_transaction

    if not _obj.applied and allow_apply:
        # First interface/source: bring the whole zone up; roll back the
        # applied flag if the transaction fails.
        self.apply_zone_settings(zone,
                                 use_transaction=transaction)
        transaction.add_fail(self.set_zone_applied, _zone, False)

    if allow_apply:
        self._interface(True, _zone, interface, transaction)

    self.__register_interface(_obj, interface_id, zone, sender)
    transaction.add_fail(self.__unregister_interface, _obj,
                         interface_id)

    if use_transaction is None:
        transaction.execute(True)

    return _zone

def __register_interface(self, _obj, interface_id, zone, sender):
    _obj.settings["interfaces"][interface_id] = \
        self.__gen_settings(0, sender)
    # add information whether we add to default or specific zone
    _obj.settings["interfaces"][interface_id]["__default__"] = \
        (not zone or zone == "")
def change_zone_of_interface(self, zone, interface, sender=None):
    """Move an interface to another zone (remove then add).

    Returns the zone the interface ends up in; no-op if unchanged.
    """
    self._fw.check_panic()
    _old_zone = self.get_zone_of_interface(interface)
    _new_zone = self._fw.check_zone(zone)

    if _new_zone == _old_zone:
        return _old_zone

    if _old_zone is not None:
        self.remove_interface(_old_zone, interface)

    _zone = self.add_interface(zone, interface, sender)

    return _zone

def change_default_zone(self, old_zone, new_zone, use_transaction=None):
    """Rebind the catch-all "+" interface from old_zone to new_zone."""
    self._fw.check_panic()

    if use_transaction is None:
        transaction = self.new_transaction()
    else:
        transaction = use_transaction

    self.apply_zone_settings(new_zone, transaction)
    # "+" is the wildcard interface used for the default zone;
    # append=True places the dispatch rule last.
    self._interface(True, new_zone, "+", transaction, append=True)
    if old_zone is not None and old_zone != "":
        self._interface(False, old_zone, "+", transaction, append=True)

    if use_transaction is None:
        transaction.execute(True)
def remove_interface(self, zone, interface,
                     use_transaction=None):
    """Unbind an interface; zone == "" means "whatever zone it is in".

    Raises UNKNOWN_INTERFACE if unbound, ZONE_CONFLICT if bound to a
    different zone than requested.  Returns the zone it was removed from.
    """
    self._fw.check_panic()
    zoi = self.get_zone_of_interface(interface)
    if zoi is None:
        raise FirewallError(errors.UNKNOWN_INTERFACE,
                            "'%s' is not in any zone" % interface)

    _zone = zoi if zone == "" else self._fw.check_zone(zone)
    if zoi != _zone:
        raise FirewallError(errors.ZONE_CONFLICT,
                            "remove_interface(%s, %s): zoi='%s'" % \
                            (zone, interface, zoi))

    if use_transaction is None:
        transaction = self.new_transaction()
    else:
        transaction = use_transaction

    _obj = self._zones[_zone]
    interface_id = self.__interface_id(interface)
    # Deregister only after the transaction succeeded.
    transaction.add_post(self.__unregister_interface, _obj, interface_id)

    self._interface(False, _zone, interface, transaction)

    if use_transaction is None:
        transaction.execute(True)

    return _zone

def __unregister_interface(self, _obj, interface_id):
    if interface_id in _obj.settings["interfaces"]:
        del _obj.settings["interfaces"][interface_id]

def query_interface(self, zone, interface):
    # True if the interface is bound to this zone.
    return self.__interface_id(interface) in self.get_settings(zone)["interfaces"]

def list_interfaces(self, zone):
    return self.get_settings(zone)["interfaces"].keys()
# SOURCES
def check_source(self, source, applied=False):
    """Validate a source and return its family.

    Returns "ipv4"/"ipv6" for addresses, "" for MAC addresses, and the
    ipset family for "ipset:<name>" sources.  Raises
    FirewallError(INVALID_ADDR) otherwise.
    """
    if checkIPnMask(source):
        return "ipv4"
    elif checkIP6nMask(source):
        return "ipv6"
    elif check_mac(source):
        return ""
    elif source.startswith("ipset:"):
        # strip the "ipset:" prefix to get the ipset name
        self._check_ipset_type_for_source(source[6:])
        if applied:
            self._check_ipset_applied(source[6:])
        return self._ipset_family(source[6:])
    else:
        raise FirewallError(errors.INVALID_ADDR, source)

def __source_id(self, source, applied=False):
    # Sources are keyed by (family, source) tuples.
    ipv = self.check_source(source, applied=applied)
    return (ipv, source)
def add_source(self, zone, source, sender=None, use_transaction=None,
               allow_apply=True):
    """Bind a source (address, MAC or ipset) to a zone.

    Mirrors add_interface(); MAC addresses are normalized to upper case.
    Returns the resolved zone name.
    """
    self._fw.check_panic()
    _zone = self._fw.check_zone(zone)
    _obj = self._zones[_zone]

    if check_mac(source):
        source = source.upper()

    source_id = self.__source_id(source, applied=allow_apply)

    if source_id in _obj.settings["sources"]:
        raise FirewallError(errors.ZONE_ALREADY_SET,
                            "'%s' already bound to '%s'" % (source, _zone))
    if self.get_zone_of_source(source) is not None:
        raise FirewallError(errors.ZONE_CONFLICT,
                            "'%s' already bound to a zone" % source)

    if use_transaction is None:
        transaction = self.new_transaction()
    else:
        transaction = use_transaction

    if not _obj.applied and allow_apply:
        # First binding: bring the whole zone up; roll back the applied
        # flag if the transaction fails.
        self.apply_zone_settings(zone,
                                 use_transaction=transaction)
        transaction.add_fail(self.set_zone_applied, _zone, False)

    if allow_apply:
        self._source(True, _zone, source_id[0], source_id[1], transaction)

    self.__register_source(_obj, source_id, zone, sender)
    transaction.add_fail(self.__unregister_source, _obj, source_id)

    if use_transaction is None:
        transaction.execute(True)

    return _zone

def __register_source(self, _obj, source_id, zone, sender):
    _obj.settings["sources"][source_id] = \
        self.__gen_settings(0, sender)
    # add information whether we add to default or specific zone
    _obj.settings["sources"][source_id]["__default__"] = (not zone or zone == "")
def change_zone_of_source(self, zone, source, sender=None):
    """Move a source to another zone (remove then add); no-op if unchanged."""
    self._fw.check_panic()
    _old_zone = self.get_zone_of_source(source)
    _new_zone = self._fw.check_zone(zone)

    if _new_zone == _old_zone:
        return _old_zone

    if check_mac(source):
        source = source.upper()

    if _old_zone is not None:
        self.remove_source(_old_zone, source)

    _zone = self.add_source(zone, source, sender)

    return _zone

def remove_source(self, zone, source,
                  use_transaction=None):
    """Unbind a source; zone == "" means "whatever zone it is in".

    Raises UNKNOWN_SOURCE / ZONE_CONFLICT analogous to remove_interface().
    """
    self._fw.check_panic()
    if check_mac(source):
        source = source.upper()
    zos = self.get_zone_of_source(source)
    if zos is None:
        raise FirewallError(errors.UNKNOWN_SOURCE,
                            "'%s' is not in any zone" % source)
    _zone = zos if zone == "" else self._fw.check_zone(zone)
    if zos != _zone:
        raise FirewallError(errors.ZONE_CONFLICT,
                            "remove_source(%s, %s): zos='%s'" % \
                            (zone, source, zos))

    if use_transaction is None:
        transaction = self.new_transaction()
    else:
        transaction = use_transaction

    _obj = self._zones[_zone]
    source_id = self.__source_id(source)
    # Deregister only after the transaction succeeded.
    transaction.add_post(self.__unregister_source, _obj, source_id)

    self._source(False, _zone, source_id[0], source_id[1], transaction)

    if use_transaction is None:
        transaction.execute(True)

    return _zone

def __unregister_source(self, _obj, source_id):
    if source_id in _obj.settings["sources"]:
        del _obj.settings["sources"][source_id]

def query_source(self, zone, source):
    if check_mac(source):
        source = source.upper()
    return self.__source_id(source) in self.get_settings(zone)["sources"]

def list_sources(self, zone):
    # keys are (ipv, source) tuples; return only the source strings
    return [ k[1] for k in self.get_settings(zone)["sources"].keys() ]
def _interface(self, enable, zone, interface, transaction, append=False):
    """Queue backend rules that dispatch traffic from an interface into
    the zone's policies, plus intra-zone forward rules, and refresh any
    non-derived policies that reference this zone."""
    for backend in self._fw.enabled_backends():
        if not backend.policies_supported:
            continue

        for policy in self._zone_policies[zone]:
            for (table, chain) in self._fw.policy._get_table_chains_for_zone_dispatch(policy):
                rules = backend.build_zone_source_interface_rules(enable,
                                zone, policy, interface, table, chain, append)
                transaction.add_rules(backend, rules)

        # intra zone forward
        policy = self.policy_name_from_zones(zone, "ANY")
        # Skip adding wildcard/catch-all interface (for default
        # zone). Otherwise it would allow forwarding from interface
        # in default zone -> interface not in default zone (but in
        # a different zone).
        if self.get_settings(zone)["forward"] and interface not in ["+", "*"]:
            rules = backend.build_zone_forward_rules(enable, zone, policy, "filter", interface=interface)
            transaction.add_rules(backend, rules)

    # update policy dispatch for any policy using this zone in ingress
    # or egress
    for policy in self._fw.policy.get_policies_not_derived_from_zone():
        if zone not in self._fw.policy.list_ingress_zones(policy) and \
           zone not in self._fw.policy.list_egress_zones(policy):
            continue
        if policy in self._fw.policy.get_active_policies_not_derived_from_zone() and self._fw.policy.get_policy(policy).applied:
            # first remove the old set of interfaces using the current zone
            # settings.
            if not enable and len(self.list_interfaces(zone)) == 1:
                # removing the last interface deactivates the policy
                self._fw.policy.unapply_policy_settings(policy, use_transaction=transaction)
            else:
                self._fw.policy._ingress_egress_zones(False, policy, transaction)
                # after the transaction ends and therefore the interface
                # has been added to the zone's settings, update the
                # dependent policies
                # (policy is passed as the lambda argument to avoid the
                # late-binding-closure pitfall)
                transaction.add_post(lambda p: (p in self._fw.policy.get_active_policies_not_derived_from_zone()) and \
                                               self._fw.policy._ingress_egress_zones_transaction(True, p), policy)
        elif enable:
            transaction.add_post(lambda p: (p in self._fw.policy.get_active_policies_not_derived_from_zone()) and \
                                           self._fw.policy.apply_policy_settings(p), policy)
# IPSETS
# IPSETS

def _ipset_family(self, name):
    # hash:mac ipsets have no IP family.
    if self._ipset_type(name) == "hash:mac":
        return None
    return self._fw.ipset.get_family(name, applied=False)

def _ipset_type(self, name):
    return self._fw.ipset.get_type(name, applied=False)

def _ipset_match_flags(self, name, flag):
    # One flag per ipset dimension, comma separated (e.g. "src,src").
    return ",".join([flag] * self._fw.ipset.get_dimension(name))

def _check_ipset_applied(self, name):
    return self._fw.ipset.check_applied(name)

def _check_ipset_type_for_source(self, name):
    # Only ipset types listed in SOURCE_IPSET_TYPES may be used as a
    # zone source.
    _type = self._ipset_type(name)
    if _type not in SOURCE_IPSET_TYPES:
        raise FirewallError(
            errors.INVALID_IPSET,
            "ipset '%s' with type '%s' not usable as source" % \
            (name, _type))
def _source(self, enable, zone, ipv, source, transaction):
    """Queue backend rules that dispatch traffic from a source address
    into the zone's policies; mirrors _interface() for sources."""
    # For mac source bindings ipv is an empty string, the mac source will
    # be added for ipv4 and ipv6
    for backend in [self._fw.get_backend_by_ipv(ipv)] if ipv else self._fw.enabled_backends():
        if not backend.policies_supported:
            continue

        for policy in self._zone_policies[zone]:
            for (table, chain) in self._fw.policy._get_table_chains_for_zone_dispatch(policy):
                rules = backend.build_zone_source_address_rules(enable, zone,
                                        policy, source, table, chain)
                transaction.add_rules(backend, rules)

        # intra zone forward
        policy = self.policy_name_from_zones(zone, "ANY")
        if self.get_settings(zone)["forward"]:
            rules = backend.build_zone_forward_rules(enable, zone, policy, "filter", source=source)
            transaction.add_rules(backend, rules)

    # update policy dispatch for any policy using this zone in ingress
    # or egress
    for policy in self._fw.policy.get_policies_not_derived_from_zone():
        if zone not in self._fw.policy.list_ingress_zones(policy) and \
           zone not in self._fw.policy.list_egress_zones(policy):
            continue
        if policy in self._fw.policy.get_active_policies_not_derived_from_zone() and self._fw.policy.get_policy(policy).applied:
            # first remove the old set of sources using the current zone
            # settings.
            if not enable and len(self.list_sources(zone)) == 1:
                # removing the last source deactivates the policy
                self._fw.policy.unapply_policy_settings(policy, use_transaction=transaction)
            else:
                self._fw.policy._ingress_egress_zones(False, policy, transaction)
                # after the transaction ends and therefore the sources
                # has been added to the zone's settings, update the
                # dependent policies
                # (policy is passed as the lambda argument to avoid the
                # late-binding-closure pitfall)
                transaction.add_post(lambda p: (p in self._fw.policy.get_active_policies_not_derived_from_zone()) and \
                                               self._fw.policy._ingress_egress_zones_transaction(True, p), policy)
        elif enable:
            transaction.add_post(lambda p: (p in self._fw.policy.get_active_policies_not_derived_from_zone()) and \
                                           self._fw.policy.apply_policy_settings(p), policy)
# SERVICES / PORTS / SOURCE PORTS
# Thin wrappers: zone-level settings for host-bound traffic live in the
# derived (zone -> HOST) policy; each wrapper resolves the policy name
# and delegates to the policy layer, returning the validated zone name.

def add_service(self, zone, service, timeout=0, sender=None):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    self._fw.policy.add_service(p_name, service, timeout, sender)
    return zone

def remove_service(self, zone, service):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    self._fw.policy.remove_service(p_name, service)
    return zone

def query_service(self, zone, service):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    return self._fw.policy.query_service(p_name, service)

def list_services(self, zone):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    return self._fw.policy.list_services(p_name)

def add_port(self, zone, port, protocol, timeout=0, sender=None):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    self._fw.policy.add_port(p_name, port, protocol, timeout, sender)
    return zone

def remove_port(self, zone, port, protocol):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    self._fw.policy.remove_port(p_name, port, protocol)
    return zone

def query_port(self, zone, port, protocol):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    return self._fw.policy.query_port(p_name, port, protocol)

def list_ports(self, zone):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    return self._fw.policy.list_ports(p_name)

def add_source_port(self, zone, source_port, protocol, timeout=0, sender=None):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    self._fw.policy.add_source_port(p_name, source_port, protocol, timeout, sender)
    return zone

def remove_source_port(self, zone, source_port, protocol):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    self._fw.policy.remove_source_port(p_name, source_port, protocol)
    return zone

def query_source_port(self, zone, source_port, protocol):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    return self._fw.policy.query_source_port(p_name, source_port, protocol)

def list_source_ports(self, zone):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    return self._fw.policy.list_source_ports(p_name)
def _rich_rule_to_policies(self, zone, rule):
    """Return the list of derived policy names a rich rule belongs to.

    The mapping mirrors policy_obj_from_zone_obj(): host-bound elements
    go to (zone -> HOST), masquerade to (ANY -> zone), forward-ports /
    mark / tcp-mss-clamp to (zone -> ANY).

    Raises FirewallError(INVALID_RULE) for unhandled rule types.
    """
    zone = self._fw.check_zone(zone)
    if type(rule.action) == Rich_Mark:
        return [self.policy_name_from_zones(zone, "ANY")]
    elif type(rule.element) in [Rich_Service, Rich_Port, Rich_Protocol,
                                Rich_SourcePort, Rich_IcmpBlock, Rich_IcmpType]:
        return [self.policy_name_from_zones(zone, "HOST")]
    elif type(rule.element) in [Rich_ForwardPort]:
        return [self.policy_name_from_zones(zone, "ANY")]
    elif type(rule.element) in [Rich_Masquerade]:
        return [self.policy_name_from_zones("ANY", zone)]
    elif type(rule.element) in [Rich_Tcp_Mss_Clamp]:
        return [self.policy_name_from_zones(zone, "ANY")]
    elif rule.element is None:
        # element-less rules (e.g. source-only) apply to host-bound traffic
        return [self.policy_name_from_zones(zone, "HOST")]
    else:
        # The original passed only a message, so the message string was
        # consumed as the FirewallError *code*; supply a proper code as
        # every other raise in this module does.
        raise FirewallError(errors.INVALID_RULE,
                            "Rich rule type (%s) not handled." % (type(rule.element)))
def add_rule(self, zone, rule, timeout=0, sender=None):
    # A rich rule may map to one policy; add it to each.
    for p_name in self._rich_rule_to_policies(zone, rule):
        self._fw.policy.add_rule(p_name, rule, timeout, sender)
    return zone

def remove_rule(self, zone, rule):
    for p_name in self._rich_rule_to_policies(zone, rule):
        self._fw.policy.remove_rule(p_name, rule)
    return zone

def query_rule(self, zone, rule):
    # True only if the rule is present in every policy it maps to.
    ret = True
    for p_name in self._rich_rule_to_policies(zone, rule):
        ret = ret and self._fw.policy.query_rule(p_name, rule)
    return ret

def list_rules(self, zone):
    # Union of the rules of all three derived policies, deduplicated.
    zone = self._fw.check_zone(zone)
    ret = set()
    for p_name in [self.policy_name_from_zones(zone, "ANY"),
                   self.policy_name_from_zones(zone, "HOST"),
                   self.policy_name_from_zones("ANY", zone)]:
        ret.update(set(self._fw.policy.list_rules(p_name)))
    return list(ret)
# PROTOCOLS / MASQUERADE / FORWARD PORTS
# Delegation wrappers; protocols live in (zone -> HOST), masquerade in
# (ANY -> zone), forward-ports in (zone -> ANY).

def add_protocol(self, zone, protocol, timeout=0, sender=None):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    self._fw.policy.add_protocol(p_name, protocol, timeout, sender)
    return zone

def remove_protocol(self, zone, protocol):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    self._fw.policy.remove_protocol(p_name, protocol)
    return zone

def query_protocol(self, zone, protocol):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    return self._fw.policy.query_protocol(p_name, protocol)

def list_protocols(self, zone):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "HOST")
    return self._fw.policy.list_protocols(p_name)

def add_masquerade(self, zone, timeout=0, sender=None):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones("ANY", zone)
    self._fw.policy.add_masquerade(p_name, timeout, sender)
    return zone

def remove_masquerade(self, zone):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones("ANY", zone)
    self._fw.policy.remove_masquerade(p_name)
    return zone

def query_masquerade(self, zone):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones("ANY", zone)
    return self._fw.policy.query_masquerade(p_name)

def add_forward_port(self, zone, port, protocol, toport=None,
                     toaddr=None, timeout=0, sender=None):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "ANY")
    self._fw.policy.add_forward_port(p_name, port, protocol, toport, toaddr,
                                     timeout, sender)
    return zone

def remove_forward_port(self, zone, port, protocol, toport=None,
                        toaddr=None):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "ANY")
    self._fw.policy.remove_forward_port(p_name, port, protocol, toport, toaddr)
    return zone

def query_forward_port(self, zone, port, protocol, toport=None,
                       toaddr=None):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "ANY")
    return self._fw.policy.query_forward_port(p_name, port, protocol, toport,
                                              toaddr)

def list_forward_ports(self, zone):
    zone = self._fw.check_zone(zone)
    p_name = self.policy_name_from_zones(zone, "ANY")
    return self._fw.policy.list_forward_ports(p_name)
    def add_icmp_block(self, zone, icmp, timeout=0, sender=None):
        """Block the ICMP type *icmp* in *zone* (via the (zone, "HOST")
        derived policy).  Returns the canonical zone name."""
        zone = self._fw.check_zone(zone)
        p_name = self.policy_name_from_zones(zone, "HOST")
        self._fw.policy.add_icmp_block(p_name, icmp, timeout, sender)
        return zone
    def remove_icmp_block(self, zone, icmp):
        """Unblock the ICMP type *icmp* in *zone*."""
        zone = self._fw.check_zone(zone)
        p_name = self.policy_name_from_zones(zone, "HOST")
        self._fw.policy.remove_icmp_block(p_name, icmp)
        return zone
    def query_icmp_block(self, zone, icmp):
        """Return True if the ICMP type *icmp* is blocked in *zone*."""
        zone = self._fw.check_zone(zone)
        p_name_host = self.policy_name_from_zones(zone, "HOST")
        return self._fw.policy.query_icmp_block(p_name_host, icmp)
    def list_icmp_blocks(self, zone):
        """Return the sorted, de-duplicated ICMP blocks of *zone*."""
        zone = self._fw.check_zone(zone)
        p_name_host = self.policy_name_from_zones(zone, "HOST")
        return sorted(set(self._fw.policy.list_icmp_blocks(p_name_host)))
    def add_icmp_block_inversion(self, zone, sender=None):
        """Enable ICMP block inversion for *zone* (the icmp-block list then
        acts as an allow list).  Returns the canonical zone name."""
        zone = self._fw.check_zone(zone)
        p_name = self.policy_name_from_zones(zone, "HOST")
        self._fw.policy.add_icmp_block_inversion(p_name, sender)
        return zone
    def _icmp_block_inversion(self, enable, zone, transaction):
        # Internal: queue enable/disable rules on the given transaction
        # instead of applying immediately.
        zone = self._fw.check_zone(zone)
        p_name = self.policy_name_from_zones(zone, "HOST")
        self._fw.policy._icmp_block_inversion(enable, p_name, transaction)
    def remove_icmp_block_inversion(self, zone):
        """Disable ICMP block inversion for *zone*."""
        zone = self._fw.check_zone(zone)
        p_name = self.policy_name_from_zones(zone, "HOST")
        self._fw.policy.remove_icmp_block_inversion(p_name)
        return zone
    def query_icmp_block_inversion(self, zone):
        """Return True if ICMP block inversion is enabled for *zone*."""
        zone = self._fw.check_zone(zone)
        p_name_host = self.policy_name_from_zones(zone, "HOST")
        return self._fw.policy.query_icmp_block_inversion(p_name_host)
    def _forward(self, enable, zone, transaction):
        """Queue backend rules that enable/disable forwarding for *zone*.

        Rules are generated for every interface and every source currently
        bound to the zone; backends without policy support are skipped."""
        p_name = self.policy_name_from_zones(zone, "ANY")
        for interface in self._zones[zone].settings["interfaces"]:
            for backend in self._fw.enabled_backends():
                if not backend.policies_supported:
                    continue
                rules = backend.build_zone_forward_rules(enable, zone, p_name, "filter", interface=interface)
                transaction.add_rules(backend, rules)
        for ipv,source in self._zones[zone].settings["sources"]:
            # A source bound to a specific ip version only concerns that
            # backend; otherwise all enabled backends get rules.
            for backend in [self._fw.get_backend_by_ipv(ipv)] if ipv else self._fw.enabled_backends():
                if not backend.policies_supported:
                    continue
                rules = backend.build_zone_forward_rules(enable, zone, p_name, "filter", source=source)
                transaction.add_rules(backend, rules)
    def __forward_id(self):
        # "forward" is a singleton per-zone setting, so a constant is used
        # as its key in the settings registry.
        return True
    def add_forward(self, zone, timeout=0, sender=None,
                    use_transaction=None):
        """Enable intra-zone forwarding for *zone*.

        Raises FirewallError(ALREADY_ENABLED) if already on.  When
        *use_transaction* is given, rules are queued on it and the caller
        is responsible for executing; otherwise a fresh transaction is
        executed immediately.  Returns the canonical zone name."""
        _zone = self._fw.check_zone(zone)
        self._fw.check_timeout(timeout)
        self._fw.check_panic()
        _obj = self._zones[_zone]
        forward_id = self.__forward_id()
        if forward_id in _obj.settings["forward"]:
            raise FirewallError(errors.ALREADY_ENABLED,
                                "forward already enabled in '%s'" % _zone)
        if use_transaction is None:
            transaction = self.new_transaction()
        else:
            transaction = use_transaction
        if _obj.applied:
            self._forward(True, _zone, transaction)
        # Register eagerly, but roll back the registration if the
        # transaction fails to apply.
        self.__register_forward(_obj, forward_id, timeout, sender)
        transaction.add_fail(self.__unregister_forward, _obj, forward_id)
        if use_transaction is None:
            transaction.execute(True)
        return _zone
    def __register_forward(self, _obj, forward_id, timeout, sender):
        # Record the setting together with its timeout/sender metadata.
        _obj.settings["forward"][forward_id] = \
            self.__gen_settings(timeout, sender)
    def remove_forward(self, zone, use_transaction=None):
        """Disable intra-zone forwarding for *zone*.

        Raises FirewallError(NOT_ENABLED) if not enabled.  Mirrors
        add_forward()'s transaction handling.  Returns the canonical
        zone name."""
        _zone = self._fw.check_zone(zone)
        self._fw.check_panic()
        _obj = self._zones[_zone]
        forward_id = self.__forward_id()
        if forward_id not in _obj.settings["forward"]:
            raise FirewallError(errors.NOT_ENABLED,
                                "forward not enabled in '%s'" % _zone)
        if use_transaction is None:
            transaction = self.new_transaction()
        else:
            transaction = use_transaction
        if _obj.applied:
            self._forward(False, _zone, transaction)
        # Only unregister after the rules were removed successfully.
        transaction.add_post(self.__unregister_forward, _obj, forward_id)
        if use_transaction is None:
            transaction.execute(True)
        return _zone
    def __unregister_forward(self, _obj, forward_id):
        if forward_id in _obj.settings["forward"]:
            del _obj.settings["forward"][forward_id]
    def query_forward(self, zone):
        """Return True if intra-zone forwarding is enabled for *zone*."""
        return self.__forward_id() in self.get_settings(zone)["forward"]
|
robovm/robovm-studio | refs/heads/master | python/testData/refactoring/move/moveSymbolFromStatementList/before/src/a.py | 79 | if True:
def func():
pass
while True:
for _ in range(10):
# comment
class C:
pass |
haeusser/tensorflow | refs/heads/master | tensorflow/contrib/graph_editor/tests/reroute_test.py | 17 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import graph_editor as ge
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class RerouteTest(test.TestCase):
  """Tests for graph_editor's tensor rerouting helpers (swap/reroute)."""
  def setUp(self):
    # Three independent add ops: c0/c1 share shape [2], c2 uses [3] so it
    # is deliberately incompatible with the others.
    self.graph = ops.Graph()
    with self.graph.as_default():
      self.a0 = constant_op.constant(1.0, shape=[2], name="a0")
      self.b0 = constant_op.constant(2.0, shape=[2], name="b0")
      self.c0 = math_ops.add(self.a0, self.b0, name="c0")
      self.a1 = constant_op.constant(3.0, shape=[2], name="a1")
      self.b1 = constant_op.constant(4.0, shape=[2], name="b1")
      self.c1 = math_ops.add(self.a1, self.b1, name="c1")
      self.a2 = constant_op.constant(3.0, shape=[3], name="a2")
      self.b2 = constant_op.constant(4.0, shape=[3], name="b2")
      self.c2 = math_ops.add(self.a2, self.b2, name="c2")
  def test_swap(self):
    """swap_ts exchanges consumers: c0 and c1 swap their inputs."""
    ge.swap_ts([self.a0, self.b0], [self.a1, self.b1])
    self.assertTrue(ge.OpMatcher("c0").input_ops("a1", "b1")(self.c0.op))
    self.assertTrue(ge.OpMatcher("c1").input_ops("a0", "b0")(self.c1.op))
  def test_multiswap(self):
    """swap_ios with a remapped subgraph view feeds a3 to both adds."""
    with self.graph.as_default():
      a3 = constant_op.constant(3.0, shape=[2], name="a3")
    ge.swap_ios(ge.sgv(a3.op).remap_outputs([0, 0]),
                ge.sgv(self.a0.op, self.a1.op))
    self.assertTrue(ge.OpMatcher("c0").input_ops("a3", "b0")(self.c0.op))
    self.assertTrue(ge.OpMatcher("c1").input_ops("a3", "b1")(self.c1.op))
  def test_reroute(self):
    """reroute_ts is directional: only the second list's consumers move."""
    ge.reroute_ts([self.a0, self.b0], [self.a1, self.b1])
    self.assertTrue(ge.OpMatcher("c0").input_ops("a0", "b0")(self.c0.op))
    self.assertTrue(ge.OpMatcher("c1").input_ops("a0", "b0")(self.c1.op))
    ge.reroute_ts([self.a1, self.b1], [self.a0, self.b0])
    self.assertTrue(ge.OpMatcher("c0").input_ops("a1", "b1")(self.c0.op))
    self.assertTrue(ge.OpMatcher("c1").input_ops("a1", "b1")(self.c1.op))
  def test_compatibility(self):
    """Rerouting between incompatible shapes ([2] vs [3]) must raise."""
    with self.assertRaises(ValueError):
      ge.reroute_ts([self.a0, self.b0], [self.a2, self.b2])
  def test_reroute_can_modify(self):
    """swap_outputs works even when a tensor is both input and output."""
    graph = ops.Graph()
    # create a special graph where "a" is an ambiguous tensor. That is
    # it is both an input and an output of the ops in sgv0.
    with graph.as_default():
      a = constant_op.constant(1.0, shape=[2], name="a")
      b = constant_op.constant(2.0, shape=[2], name="b")
      c = math_ops.add(a, b, name="c")
      d = math_ops.add(a, c, name="d")
      e = constant_op.constant(1.0, shape=[2], name="e")
      f = constant_op.constant(2.0, shape=[2], name="f")
      g = math_ops.add(e, f, name="g")
    sgv0 = ge.sgv(a.op, b.op, c.op)
    sgv1 = ge.sgv(e.op, f.op)
    ge.swap_outputs(sgv0, sgv1)
    self.assertTrue(
        ge.OpMatcher("g").input_ops("a", ge.OpMatcher("c").input_ops("a", "b"))(
            g.op))
    self.assertTrue(ge.OpMatcher("d").input_ops("e", "f")(d.op))
if __name__ == "__main__":
  test.main()
|
LeadPipeSoftware/LeadPipe.RabbitHole | refs/heads/master | RabbitHole/__init__.py | 1 | __program_name__ = 'RabbitHole'
__version__ = '1.0.0'
|
jeremiahyan/odoo | refs/heads/master | addons/sale_stock/tests/test_sale_stock_report.py | 1 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
from odoo.tests.common import Form
from odoo.addons.stock.tests.test_report import TestReportsCommon
class TestSaleStockReports(TestReportsCommon):
    """Forecast-report tests for sale orders interacting with stock."""
    def test_report_forecast_1_sale_order_replenishment(self):
        """ Create and confirm two sale orders: one for the next week and one
        for tomorrow. Then check in the report it's the most urgent who is
        linked to the qty. on stock.
        """
        # make sure first picking doesn't auto-assign
        self.picking_type_out.reservation_method = 'manual'
        today = datetime.today()
        # Put some quantity in stock.
        quant_vals = {
            'product_id': self.product.id,
            'product_uom_id': self.product.uom_id.id,
            'location_id': self.stock_location.id,
            'quantity': 5,
            'reserved_quantity': 0,
        }
        self.env['stock.quant'].create(quant_vals)
        # Create a first SO for the next week.
        so_form = Form(self.env['sale.order'])
        so_form.partner_id = self.partner
        # so_form.validity_date = today + timedelta(days=7)
        with so_form.order_line.new() as so_line:
            so_line.product_id = self.product
            so_line.product_uom_qty = 5
        so_1 = so_form.save()
        so_1.action_confirm()
        so_1.picking_ids.scheduled_date = today + timedelta(days=7)
        # Create a second SO for tomorrow.
        so_form = Form(self.env['sale.order'])
        so_form.partner_id = self.partner
        # so_form.validity_date = today + timedelta(days=1)
        with so_form.order_line.new() as so_line:
            so_line.product_id = self.product
            so_line.product_uom_qty = 5
        so_2 = so_form.save()
        so_2.action_confirm()
        so_2.picking_ids.scheduled_date = today + timedelta(days=1)
        # The urgent order (tomorrow) must consume the on-hand quantity;
        # the later order stays unfilled.
        report_values, docs, lines = self.get_report_forecast(product_template_ids=self.product_template.ids)
        self.assertEqual(len(lines), 2)
        line_1 = lines[0]
        line_2 = lines[1]
        self.assertEqual(line_1['quantity'], 5)
        self.assertTrue(line_1['replenishment_filled'])
        self.assertEqual(line_1['document_out'].id, so_2.id)
        self.assertEqual(line_2['quantity'], 5)
        self.assertEqual(line_2['replenishment_filled'], False)
        self.assertEqual(line_2['document_out'].id, so_1.id)
    def test_report_forecast_2_report_line_corresponding_to_so_line_highlighted(self):
        """ When accessing the report from a SO line, checks if the correct SO line is highlighted in the report
        """
        # We create 2 identical SO
        so_form = Form(self.env['sale.order'])
        so_form.partner_id = self.partner
        with so_form.order_line.new() as line:
            line.product_id = self.product
            line.product_uom_qty = 5
        so1 = so_form.save()
        so1.action_confirm()
        so2 = so1.copy()
        so2.action_confirm()
        # Check for both SO if the highlight (is_matched) corresponds to the correct SO
        for so in [so1, so2]:
            context = {"move_to_match_ids": so.order_line.move_ids.ids}
            _, _, lines = self.get_report_forecast(product_template_ids=self.product_template.ids, context=context)
            for line in lines:
                if line['document_out'] == so:
                    self.assertTrue(line['is_matched'], "The corresponding SO line should be matched in the forecast report.")
                else:
                    self.assertFalse(line['is_matched'], "A line of the forecast report not linked to the SO shoud not be matched.")
|
kbourgoin/hiicart | refs/heads/master | hiicart/gateway/comp/settings.py | 2 | """Settings for comp gateway
**Optional Settings:**
* *ALLOW_RECURRING_COMP* -- Recurring line items are set to Recurring.
Otherwise, all carts will be set to PendCancel. [default: False]
"""
# Default values for the optional settings documented in the module
# docstring above; a deployment overrides these as needed.
SETTINGS = {
    "ALLOW_RECURRING_COMP": False
}
|
vishnu-kumar/ec2-api | refs/heads/master | ec2api/tests/unit/test_snapshot.py | 2 | # Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from ec2api.tests.unit import base
from ec2api.tests.unit import fakes
from ec2api.tests.unit import matchers
from ec2api.tests.unit import tools
class SnapshotTestCase(base.ApiTestCase):
    """Tests for the EC2 snapshot API backed by mocked Cinder/DB layers."""
    def test_describe_snapshots(self):
        """DescribeSnapshots lists, filters, and honors tags."""
        self.cinder.volume_snapshots.list.return_value = [
            fakes.OSSnapshot(fakes.OS_SNAPSHOT_1),
            fakes.OSSnapshot(fakes.OS_SNAPSHOT_2)]
        self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2,
                               fakes.DB_VOLUME_2)
        resp = self.execute('DescribeSnapshots', {})
        self.assertThat(resp, matchers.DictMatches(
            {'snapshotSet': [fakes.EC2_SNAPSHOT_1, fakes.EC2_SNAPSHOT_2]},
            orderless_lists=True))
        self.db_api.get_items.assert_any_call(mock.ANY, 'vol')
        self.db_api.get_items_by_ids = tools.CopyingMock(
            return_value=[fakes.DB_SNAPSHOT_1])
        resp = self.execute('DescribeSnapshots',
                            {'SnapshotId.1': fakes.ID_EC2_SNAPSHOT_1})
        self.assertThat(resp, matchers.DictMatches(
            {'snapshotSet': [fakes.EC2_SNAPSHOT_1]},
            orderless_lists=True))
        self.db_api.get_items_by_ids.assert_called_once_with(
            mock.ANY, set([fakes.ID_EC2_SNAPSHOT_1]))
        self.check_filtering(
            'DescribeSnapshots', 'snapshotSet',
            [
                # TODO(ft): declare a constant for the description in fakes
                ('description', 'fake description'),
                ('owner-id', fakes.ID_OS_PROJECT),
                ('progress', '100%'),
                ('snapshot-id', fakes.ID_EC2_SNAPSHOT_1),
                ('start-time', fakes.TIME_CREATE_SNAPSHOT_2),
                ('status', 'completed'),
                ('volume-id', fakes.ID_EC2_VOLUME_2),
                # TODO(ft): declare a constant for the volume size in fakes
                ('volume-size', 1)
            ])
        self.check_tag_support(
            'DescribeSnapshots', 'snapshotSet',
            fakes.ID_EC2_SNAPSHOT_1, 'snapshotId')
    def test_describe_snapshots_auto_remove(self):
        """DB entries whose OS snapshot vanished are purged on describe."""
        self.cinder.volume_snapshots.list.return_value = []
        self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_VOLUME_2)
        resp = self.execute('DescribeSnapshots', {})
        self.assertThat(resp, matchers.DictMatches(
            {'snapshotSet': []},
            orderless_lists=True))
        self.db_api.get_items.assert_any_call(mock.ANY, 'vol')
        self.db_api.get_items.assert_any_call(mock.ANY, 'snap')
        self.db_api.delete_item.assert_any_call(mock.ANY,
                                                fakes.ID_EC2_SNAPSHOT_1)
    def test_describe_snapshots_invalid_parameters(self):
        """Unknown or stale snapshot ids produce InvalidSnapshot.NotFound."""
        self.cinder.volume_snapshots.list.return_value = [
            fakes.OSSnapshot(fakes.OS_SNAPSHOT_1),
            fakes.OSSnapshot(fakes.OS_SNAPSHOT_2)]
        self.assert_execution_error(
            'InvalidSnapshot.NotFound', 'DescribeSnapshots',
            {'SnapshotId.1': fakes.random_ec2_id('snap')})
        self.cinder.volume_snapshots.list.side_effect = lambda: []
        self.assert_execution_error(
            'InvalidSnapshot.NotFound', 'DescribeSnapshots',
            {'SnapshotId.1': fakes.ID_EC2_SNAPSHOT_1})
    def test_create_snapshot_from_volume(self):
        """CreateSnapshot stores a DB item and forces a Cinder snapshot."""
        self.cinder.volume_snapshots.create.return_value = (
            fakes.OSSnapshot(fakes.OS_SNAPSHOT_1))
        self.db_api.add_item.side_effect = (
            tools.get_db_api_add_item(fakes.ID_EC2_SNAPSHOT_1))
        self.set_mock_db_items(fakes.DB_VOLUME_2)
        self.cinder.volumes.get.side_effect = (
            lambda vol_id: (
                fakes.OSVolume(fakes.OS_VOLUME_2)
                if vol_id == fakes.ID_OS_VOLUME_2
                else None))
        resp = self.execute(
            'CreateSnapshot',
            {'VolumeId': fakes.ID_EC2_VOLUME_2})
        self.assertThat(fakes.EC2_SNAPSHOT_1, matchers.DictMatches(resp))
        self.db_api.add_item.assert_called_once_with(
            mock.ANY, 'snap',
            tools.purge_dict(fakes.DB_SNAPSHOT_1, ('id',)))
        self.cinder.volume_snapshots.create.assert_called_once_with(
            fakes.ID_OS_VOLUME_2, force=True)
    def test_format_snapshot_maps_status(self):
        """Cinder statuses map to EC2 ones; unknown values pass through."""
        fake_snapshot = fakes.OSSnapshot(fakes.OS_SNAPSHOT_1)
        self.cinder.volume_snapshots.list.return_value = [fake_snapshot]
        self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_VOLUME_2)
        fake_snapshot.status = 'new'
        resp = self.execute('DescribeSnapshots', {})
        self.assertEqual('pending', resp['snapshotSet'][0]['status'])
        fake_snapshot.status = 'creating'
        resp = self.execute('DescribeSnapshots', {})
        self.assertEqual('pending', resp['snapshotSet'][0]['status'])
        fake_snapshot.status = 'available'
        resp = self.execute('DescribeSnapshots', {})
        self.assertEqual('completed', resp['snapshotSet'][0]['status'])
        fake_snapshot.status = 'active'
        resp = self.execute('DescribeSnapshots', {})
        self.assertEqual('completed', resp['snapshotSet'][0]['status'])
        fake_snapshot.status = 'deleting'
        resp = self.execute('DescribeSnapshots', {})
        self.assertEqual('pending', resp['snapshotSet'][0]['status'])
        fake_snapshot.status = 'error'
        resp = self.execute('DescribeSnapshots', {})
        self.assertEqual('error', resp['snapshotSet'][0]['status'])
        fake_snapshot.status = 'banana'
        resp = self.execute('DescribeSnapshots', {})
        self.assertEqual('banana', resp['snapshotSet'][0]['status'])
|
Qalthos/ansible | refs/heads/devel | lib/ansible/plugins/doc_fragments/aci.py | 20 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
# Copyright: (c) 2017, Swetha Chunduri (@schunduri)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Shared DOCUMENTATION fragment with the standard Cisco ACI
    connection options (host, credentials, TLS and output behaviour).

    Fixes: "mutual exclusive" -> "mutually exclusive" (twice) and
    "should only set to" -> "should only be set to" in the rendered
    user-facing documentation text.
    """
    # Standard files documentation fragment
    DOCUMENTATION = r'''
options:
  host:
    description:
    - IP Address or hostname of APIC resolvable by Ansible control host.
    type: str
    required: yes
    aliases: [ hostname ]
  port:
    description:
    - Port number to be used for REST connection.
    - The default value depends on parameter C(use_ssl).
    type: int
  username:
    description:
    - The username to use for authentication.
    type: str
    default: admin
    aliases: [ user ]
  password:
    description:
    - The password to use for authentication.
    - This option is mutually exclusive with C(private_key). If C(private_key) is provided too, it will be used instead.
    type: str
    required: yes
  private_key:
    description:
    - Either a PEM-formatted private key file or the private key content used for signature-based authentication.
    - This value also influences the default C(certificate_name) that is used.
    - This option is mutually exclusive with C(password). If C(password) is provided too, it will be ignored.
    type: str
    required: yes
    aliases: [ cert_key ]
  certificate_name:
    description:
    - The X.509 certificate name attached to the APIC AAA user used for signature-based authentication.
    - If a C(private_key) filename was provided, this defaults to the C(private_key) basename, without extension.
    - If PEM-formatted content was provided for C(private_key), this defaults to the C(username) value.
    type: str
    aliases: [ cert_name ]
  output_level:
    description:
    - Influence the output of this ACI module.
    - C(normal) means the standard output, incl. C(current) dict
    - C(info) adds informational output, incl. C(previous), C(proposed) and C(sent) dicts
    - C(debug) adds debugging output, incl. C(filter_string), C(method), C(response), C(status) and C(url) information
    type: str
    choices: [ debug, info, normal ]
    default: normal
  timeout:
    description:
    - The socket level timeout in seconds.
    type: int
    default: 30
  use_proxy:
    description:
    - If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
    type: bool
    default: yes
  use_ssl:
    description:
    - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
    type: bool
    default: yes
  validate_certs:
    description:
    - If C(no), SSL certificates will not be validated.
    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
    type: bool
    default: yes
seealso:
- ref: aci_guide
  description: Detailed information on how to manage your ACI infrastructure using Ansible.
- ref: aci_dev_guide
  description: Detailed guide on how to write your own Cisco ACI modules to contribute.
'''
|
jlegendary/servo | refs/heads/master | tests/wpt/web-platform-tests/dom/nodes/encoding.py | 272 | from cgi import escape
def main(request, response):
    """WPT handler: echo a minimal document declaring the requested charset.

    The encoding label comes straight from the query string and is
    interpolated into a double-quoted attribute value, so it must be
    escaped with quote escaping enabled -- cgi.escape() without its second
    argument leaves '"' intact, which would let a crafted label break out
    of the charset attribute.
    """
    label = request.GET.first('label')
    return """<!doctype html><meta charset="%s">""" % escape(label, True)
|
TeamExodus/external_chromium_org | refs/heads/EXODUS-5.1 | build/android/gyp/util/build_utils.py | 26 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ast
import contextlib
import errno
import fnmatch
import json
import os
import pipes
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import zipfile
# Absolute path of the Chromium source root: four directory levels above
# this file (build/android/gyp/util/).
CHROMIUM_SRC = os.path.normpath(
    os.path.join(os.path.dirname(__file__),
                 os.pardir, os.pardir, os.pardir, os.pardir))
# Bundled colorama sources, used by scripts that colorize their output.
COLORAMA_ROOT = os.path.join(CHROMIUM_SRC,
    'third_party', 'colorama', 'src')
@contextlib.contextmanager
def TempDir():
  """Context manager yielding a fresh temporary directory.

  The directory and everything in it is removed on exit, whether the
  managed block finishes normally or raises.
  """
  dirname = tempfile.mkdtemp()
  try:
    yield dirname
  finally:
    shutil.rmtree(dirname)
def MakeDirectory(dir_path):
  """Create dir_path and any missing parent directories.

  An already-existing directory is not an error.  Unlike the previous
  blanket 'except OSError: pass', any other failure (e.g. permission
  denied, read-only filesystem) is re-raised instead of being silently
  swallowed.
  """
  try:
    os.makedirs(dir_path)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise
def DeleteDirectory(dir_path):
  """Recursively delete dir_path; silently a no-op if it does not exist."""
  if not os.path.exists(dir_path):
    return
  shutil.rmtree(dir_path)
def Touch(path, fail_if_missing=False):
  """Create path (and its parent dirs) if needed and bump its mtime.

  With fail_if_missing=True, raise instead of creating a file that does
  not already exist.
  """
  if fail_if_missing and not os.path.exists(path):
    raise Exception(path + ' doesn\'t exist.')
  MakeDirectory(os.path.dirname(path))
  # Opening in append mode creates the file without truncating it;
  # os.utime(path, None) sets the mtime to "now".
  with open(path, 'a'):
    os.utime(path, None)
def FindInDirectory(directory, filename_filter):
  """Recursively collect paths under |directory| whose basename matches
  the fnmatch pattern |filename_filter|."""
  matches = []
  for root, _dirnames, filenames in os.walk(directory):
    matches.extend(
        os.path.join(root, name)
        for name in fnmatch.filter(filenames, filename_filter))
  return matches
def FindInDirectories(directories, filename_filter):
  """Like FindInDirectory(), but searching several root directories."""
  found = []
  for directory in directories:
    found += FindInDirectory(directory, filename_filter)
  return found
def ParseGnList(gn_string):
  """Parse a GN-style list literal (e.g. '["a", "b"]') into a Python list."""
  return ast.literal_eval(gn_string)
def ParseGypList(gyp_string):
  """Parse a GYP list value into a Python list.

  The ninja generator doesn't support $ in strings, so ## is used to
  represent $ (see https://code.google.com/p/gyp/issues/detail?id=327);
  that substitution is undone first.  A value starting with '[' is a
  GN-style literal; anything else is split shell-style.
  """
  unescaped = gyp_string.replace('##', '$')
  if unescaped.startswith('['):
    return ParseGnList(unescaped)
  return shlex.split(unescaped)
def CheckOptions(options, parser, required=None):
  """Fail via parser.error() if any option named in |required| is unset.

  Option attribute names use underscores; the error message converts
  them back to the dashed command-line form.
  """
  if not required:
    return
  for option_name in required:
    if getattr(options, option_name) is None:
      parser.error('--%s is required' % option_name.replace('_', '-'))
def WriteJson(obj, path, only_if_changed=False):
  """Serialize obj as canonical JSON (sorted keys, 2-space indent) to path.

  With only_if_changed=True the file is left untouched when its current
  contents already equal the new serialization, preserving its mtime for
  timestamp-based build steps.
  """
  new_dump = json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
  if only_if_changed and os.path.exists(path):
    with open(path, 'r') as oldfile:
      if oldfile.read() == new_dump:
        return
  with open(path, 'w') as outfile:
    outfile.write(new_dump)
def ReadJson(path):
  """Deserialize and return the JSON contents of the file at path."""
  with open(path, 'r') as jsonfile:
    return json.load(jsonfile)
class CalledProcessError(Exception):
  """This exception is raised when the process run by CheckOutput
  exits with a non-zero exit code."""
  def __init__(self, cwd, args, output):
    # cwd: working directory the command ran in.
    # args: argv list of the failed command.
    # output: combined stdout + stderr of the failed run.
    super(CalledProcessError, self).__init__()
    self.cwd = cwd
    self.args = args
    self.output = output
  def __str__(self):
    # A user should be able to simply copy and paste the command that failed
    # into their shell.
    copyable_command = '( cd {}; {} )'.format(os.path.abspath(self.cwd),
        ' '.join(map(pipes.quote, self.args)))
    return 'Command failed: {}\n{}'.format(copyable_command, self.output)
# This can be used in most cases like subprocess.check_output(). The output,
# particularly when the command fails, better highlights the command's failure.
# If the command fails, raises a build_utils.CalledProcessError.
def CheckOutput(args, cwd=None,
                print_stdout=False, print_stderr=True,
                stdout_filter=None,
                stderr_filter=None,
                fail_func=lambda returncode, stderr: returncode != 0):
  """Run args and return its stdout.

  stdout_filter/stderr_filter, when given, transform the captured streams
  before the failure check and any echoing.  fail_func decides, from the
  return code and (filtered) stderr, whether to raise CalledProcessError.
  """
  if not cwd:
    cwd = os.getcwd()
  child = subprocess.Popen(args,
      stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
  stdout, stderr = child.communicate()
  if stdout_filter is not None:
    stdout = stdout_filter(stdout)
  if stderr_filter is not None:
    stderr = stderr_filter(stderr)
  if fail_func(child.returncode, stderr):
    raise CalledProcessError(cwd, args, stdout + stderr)
  if print_stdout:
    sys.stdout.write(stdout)
  if print_stderr:
    sys.stderr.write(stderr)
  return stdout
def GetModifiedTime(path):
  """Return the effective mtime of path.

  For a symlink this is the greater of the link's own mtime and the
  target's mtime.
  """
  return max(os.lstat(path).st_mtime, os.stat(path).st_mtime)
def IsTimeStale(output, inputs):
  """Return True if output is missing or older than any path in inputs."""
  if not os.path.exists(output):
    return True
  output_time = GetModifiedTime(output)
  return any(GetModifiedTime(path) > output_time for path in inputs)
def IsDeviceReady():
  """Return True if `adb get-state` reports an attached, ready device."""
  device_state = CheckOutput(['adb', 'get-state'])
  return device_state.strip() == 'device'
def CheckZipPath(name):
  """Reject zip member names that could escape the extraction root.

  Raises for paths that are not already in normalized form (e.g. contain
  '..' or redundant separators) and for absolute paths.
  """
  canonical = os.path.normpath(name)
  if canonical != name:
    raise Exception('Non-canonical zip path: %s' % name)
  if os.path.isabs(name):
    raise Exception('Absolute zip path: %s' % name)
def ExtractAll(zip_path, path=None, no_clobber=True, pattern=None):
  """Extract members of zip_path into path (default: cwd).

  Directory entries are skipped; with a fnmatch |pattern| only matching
  members are extracted.  Member names are validated via CheckZipPath()
  so archives cannot write outside the destination.  With no_clobber,
  raise instead of overwriting an existing file.
  """
  if path is None:
    path = os.getcwd()
  elif not os.path.exists(path):
    MakeDirectory(path)
  with zipfile.ZipFile(zip_path) as z:
    for name in z.namelist():
      if name.endswith('/'):
        continue
      if pattern is not None:
        if not fnmatch.fnmatch(name, pattern):
          continue
      CheckZipPath(name)
      if no_clobber:
        output_path = os.path.join(path, name)
        if os.path.exists(output_path):
          raise Exception(
              'Path already exists from zip: %s %s %s'
              % (zip_path, name, output_path))
  z.extractall(path=path)
def DoZip(inputs, output, base_dir):
  """Zip the given files into output, archived relative to base_dir."""
  with zipfile.ZipFile(output, 'w') as outfile:
    for f in inputs:
      # Validate the relative archive name before writing it.
      CheckZipPath(os.path.relpath(f, base_dir))
      outfile.write(f, os.path.relpath(f, base_dir))
def ZipDir(output, base_dir):
  """Zip the entire tree under base_dir into output, paths relative to it."""
  with zipfile.ZipFile(output, 'w') as outfile:
    for root, _, files in os.walk(base_dir):
      for f in files:
        path = os.path.join(root, f)
        archive_path = os.path.relpath(path, base_dir)
        CheckZipPath(archive_path)
        outfile.write(path, archive_path)
def MergeZips(output, inputs, exclude_patterns=None):
  """Combine several zip archives into output.

  Members matching any fnmatch pattern in exclude_patterns are dropped.
  Note: on duplicate member names, later inputs append another entry
  rather than replacing the earlier one.
  """
  def Allow(name):
    if exclude_patterns is not None:
      for p in exclude_patterns:
        if fnmatch.fnmatch(name, p):
          return False
    return True
  with zipfile.ZipFile(output, 'w') as out_zip:
    for in_file in inputs:
      with zipfile.ZipFile(in_file, 'r') as in_zip:
        for name in in_zip.namelist():
          if Allow(name):
            out_zip.writestr(name, in_zip.read(name))
# NOTE: Python 2 print statements below; this module predates py3 porting.
def PrintWarning(message):
  print 'WARNING: ' + message
def PrintBigWarning(message):
  # Same as PrintWarning, framed by banner lines so it stands out in logs.
  print '***** ' * 8
  PrintWarning(message)
  print '***** ' * 8
def GetSortedTransitiveDependencies(top, deps_func):
  """Gets the list of all transitive dependencies in sorted order.

  There should be no cycles in the dependency graph.

  Args:
    top: a list of the top level nodes
    deps_func: A function that takes a node and returns a set of its direct
      dependencies.
  Returns:
    A list of all transitive dependencies of nodes in top, in order (a node
    will appear in the list at a higher index than all of its dependencies).

  Note: the previous implementation deleted entries from a dict while
  iterating its .items() view, which raises RuntimeError on Python 3; it
  also called deps_func twice per node.  This version collects each
  round's ready nodes first, then removes them.
  """
  # First: discover the full transitive closure, breadth-first.
  all_deps = set(top)
  to_visit = list(top)
  while to_visit:
    node = to_visit.pop()
    undiscovered = deps_func(node).difference(all_deps)
    all_deps.update(undiscovered)
    to_visit.extend(undiscovered)
  # Then: simple (quadratic) topological sort - repeatedly emit every node
  # whose dependencies have all been emitted already.
  remaining = dict((node, deps_func(node)) for node in all_deps)
  sorted_deps = []
  while remaining:
    ready = [node for node, dependencies in remaining.items()
             if not dependencies.intersection(remaining)]
    for node in ready:
      sorted_deps.append(node)
      del remaining[node]
  return sorted_deps
def GetPythonDependencies():
  """Gets the paths of imported non-system python modules.

  A path is assumed to be a "system" import if it is outside of chromium's
  src/. The paths will be relative to the current directory.
  """
  module_paths = (m.__file__ for m in sys.modules.itervalues()
                  if m is not None and hasattr(m, '__file__'))
  abs_module_paths = map(os.path.abspath, module_paths)
  non_system_module_paths = [
      p for p in abs_module_paths if p.startswith(CHROMIUM_SRC)]
  def ConvertPycToPy(s):
    # Depfiles should reference sources, not compiled bytecode.
    if s.endswith('.pyc'):
      return s[:-1]
    return s
  non_system_module_paths = map(ConvertPycToPy, non_system_module_paths)
  non_system_module_paths = map(os.path.relpath, non_system_module_paths)
  return sorted(set(non_system_module_paths))
def AddDepfileOption(parser):
  """Register the standard --depfile optparse option on parser."""
  parser.add_option('--depfile',
                    help='Path to depfile. This must be specified as the '
                    'action\'s first output.')
def WriteDepfile(path, dependencies):
  """Write a Make-style depfile: '<path>: dep1 dep2 ...' plus newline."""
  with open(path, 'w') as depfile:
    depfile.write('%s: %s\n' % (path, ' '.join(dependencies)))
def ExpandFileArgs(args):
  """Replaces file-arg placeholders in args.

  These placeholders have the form:
    @FileArg(filename:key1:key2:...:keyn)

  The value of such a placeholder is calculated by reading 'filename' as json.
  And then extracting the value at [key1][key2]...[keyn].

  Note: This intentionally does not return the list of files that appear in such
  placeholders. An action that uses file-args *must* know the paths of those
  files prior to the parsing of the arguments (typically by explicitly listing
  them in the action's inputs in build files).
  """
  new_args = list(args)
  # Cache each referenced json file so it is parsed at most once.
  file_jsons = dict()
  r = re.compile('@FileArg\((.*?)\)')
  for i, arg in enumerate(args):
    match = r.search(arg)
    if not match:
      continue
    if match.end() != len(arg):
      raise Exception('Unexpected characters after FileArg: ' + arg)
    lookup_path = match.group(1).split(':')
    file_path = lookup_path[0]
    if not file_path in file_jsons:
      file_jsons[file_path] = ReadJson(file_path)
    # Walk the remaining keys down into the parsed json structure.
    expansion = file_jsons[file_path]
    for k in lookup_path[1:]:
      expansion = expansion[k]
    # Keep any prefix before the placeholder, replace the placeholder itself.
    new_args[i] = arg[:match.start()] + str(expansion)
  return new_args
|
DarthThanatos/citySimNG | refs/heads/master | citySimNGView/CreatorView/SheetView.py | 1 | from wx import wx
from wx.lib.scrolledpanel import ScrolledPanel
from CreatorView import Consts
from CreatorView.NumberFillingChecker import NumberFillingChecker
from CreatorView.RestorableView import RestorableNameInput, RestorableDescriptionArea, RestorableImageBmp, RestorableLogArea, RestorableStdSelector
from utils.ButtonsFactory import ButtonsFactory
from utils.OnShowUtil import OnShowUtil
from utils.RelativePaths import relative_textures_path
from viewmodel.SheetEntityChecker import AddModeSheetEntityChecker, EDIT_MODE, EditModeSheetEntityChecker
class SheetView(ScrolledPanel):
    def __init__(self,parent, size, frame, currentDependencies):
        """Build a scrollable creator sheet.

        Note: getDefaultIconRelativePath() and getSheetName() are called
        here, so concrete subclasses must override them before this
        constructor runs.
        """
        ScrolledPanel.__init__(self, size = size, parent = parent, style = wx.SIMPLE_BORDER)
        self.Bind(wx.EVT_SHOW, self.onShow, self)
        self.SetupScrolling()
        self.size = size
        self.frame = frame
        self.currentDependencies = currentDependencies
        self.wakeUpData = None          # data restored when the sheet is shown again
        self.restorableViews = []       # widgets whose state can be saved/restored
        self.childrenCheckers = []      # validators for child entity fields
        self.entityIconRelativePath = self.getDefaultIconRelativePath()
        self.sheetChecker = AddModeSheetEntityChecker(self)
        self.sheet_name = self.getSheetName()
        # White-on-black color scheme for the whole sheet.
        self.SetBackgroundColour((0,0,0))
        self.SetForegroundColour((255,255,255))
    def onShow(self, event):
        """Delegate show/hide handling (state restore etc.) to OnShowUtil."""
        OnShowUtil().onCreatorSheetShow(self, event)
def getSheetName(self):
raise Exception("getSheetName not implemented")
def getDefaultIconRelativePath(self):
raise Exception("getDefaultIconRealtivePath not implemented")
def getEntityType(self):
raise Exception("getEntityType not implemented")
def getEntityNameKey(self):
raise Exception("getEntityNameKey not implemented")
def submit(self, event):
raise Exception("submit not implemented")
def getEntityChecker(self):
raise Exception("getEntityChecker not implemented")
def getValidKeySet(self):
raise Exception("getValidKeySet not implemented")
    def addToSizerWithSpace(self, sizer, view, space = 10, alignment = wx.CENTER):
        """Append view to sizer followed by a spacer of |space| pixels."""
        sizer.Add(view, 0, alignment)
        sizer.AddSpacer(space)
    def newBasicCharacteristicsVerticalSizer(self):
        """Build the column shared by all sheets: description, predecessor
        and successor pickers, and the entity icon."""
        basicCharacteristicsVerticalSizer = wx.BoxSizer(wx.VERTICAL)
        self.addToSizerWithSpace(basicCharacteristicsVerticalSizer, self.newDescriptionAreaVerticalSizer())
        self.addToSizerWithSpace(basicCharacteristicsVerticalSizer, self.newPredecessorPickerHorizontalSizer(), space = 5)
        self.addToSizerWithSpace(basicCharacteristicsVerticalSizer, self.newSuccesorPickerHorizontalSizer())
        self.addToSizerWithSpace(basicCharacteristicsVerticalSizer, self.newEntityIconHorizontalSizer())
        return basicCharacteristicsVerticalSizer
    def newRootSizer(self, characteristicVerticalSizer, topPadding):
        """Assemble the sheet's top-level layout: name row, separator, main
        content, separator, buttons."""
        rootSizer = wx.BoxSizer(wx.VERTICAL)
        rootSizer.AddSpacer(topPadding)
        self.addToSizerWithSpace(rootSizer, self.newEntityNameHorizontalSizer(self.getEntityType()))
        self.addToSizerWithSpace(rootSizer, self.newLine(), space= 10, alignment=wx.EXPAND)
        self.addToSizerWithSpace(rootSizer, self.newMainSheetPartHorizontalSizer(characteristicVerticalSizer))
        self.addToSizerWithSpace(rootSizer, self.newLine(), space=10, alignment=wx.EXPAND)
        self.addToSizerWithSpace(rootSizer, self.newButtonsPanelHorizontalSizer(self.submit), space = 75)
        return rootSizer
    def initRootSizer(self, characteristicVerticalSizer, topPadding = 10):
        """Create the root sizer, attach it and size it to the sheet."""
        rootSizer = self.newRootSizer(characteristicVerticalSizer, topPadding)
        self.SetSizer(rootSizer)
        rootSizer.SetDimension(0, 0, self.size[0], self.size[1])
    def newEntityNameHorizontalSizer(self, defaultName= None):
        """Build the 'Name of this <type>' label + text input row."""
        entityNameHorizontalSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.addToSizerWithSpace(entityNameHorizontalSizer,self.newNameFieldLabel(self.getEntityType()))
        entityNameHorizontalSizer.Add(self.newNameInput(defaultName if defaultName is not None else self.getEntityType()))
        return entityNameHorizontalSizer
    def newNameFieldLabel(self, entity_type):
        """Return the static label for the name input."""
        return wx.StaticText(self,-1, "Name of this " + entity_type)
    def newNameInput(self, defaultName):
        """Create the name TextCtrl and register it as restorable."""
        self.NameInput = wx.TextCtrl(self, -1, defaultName)
        self.restorableViews.append(RestorableNameInput(self, self.NameInput))
        return self.NameInput
    def newLine(self, style = wx.HORIZONTAL):
        """Return a separator line (horizontal by default)."""
        return wx.StaticLine(self, -1, style = style)
def newMainSheetPartHorizontalSizer(self, entityCharacteristicsVerticalSizer):
mainSheetPartHorizontalSizer = wx.BoxSizer(wx.HORIZONTAL)
self.addToSizerWithSpace(mainSheetPartHorizontalSizer, entityCharacteristicsVerticalSizer, space=50, alignment=wx.LEFT)
self.addToSizerWithSpace(mainSheetPartHorizontalSizer,self.newLine(wx.VERTICAL), space = 50, alignment=wx.EXPAND)
mainSheetPartHorizontalSizer.Add(self.newLogAreaVerticalSizer(),0,wx.RIGHT)
return mainSheetPartHorizontalSizer
def newLogAreaVerticalSizer(self):
log_area_vertical_sizer = wx.BoxSizer(wx.VERTICAL)
log_area_vertical_sizer.Add(self.newLogAreaLabel(),0,wx.CENTER)
self.addToSizerWithSpace(log_area_vertical_sizer, self.newLogArea(), space=50)
return log_area_vertical_sizer
def newLogAreaLabel(self):
return wx.StaticText(self,-1,"Below lies logging area, showing error msgs")
def newLogArea(self):
self.log_area = wx.TextCtrl(self, -1, size = (500,350), style=wx.TE_MULTILINE | wx.TE_READONLY)
self.restorableViews.append(RestorableLogArea(self, self.log_area))
return self.log_area
def newButtonsPanelHorizontalSizer(self, onSubmit):
buttons_panel_horizontal_sizer = wx.BoxSizer(wx.HORIZONTAL)
buttons_panel_horizontal_sizer.Add(self.newCreateEntityButton(onSubmit))
buttons_panel_horizontal_sizer.Add(self.newCancelButton(self.moveToMainPanel))
return buttons_panel_horizontal_sizer
def newCreateEntityButton(self, onSubmit):
return ButtonsFactory().newButton(self, "Submit", onSubmit)
def newCancelButton(self, onCancel):
return ButtonsFactory().newButton(self, "Cancel", onCancel)
def newDescriptionAreaVerticalSizer(self):
descriptionAreaVerticalSizer = wx.BoxSizer(wx.VERTICAL)
descriptionAreaVerticalSizer.Add(self.newDescriptionFieldLabel(self.getEntityType()), 0, wx.CENTER)
descriptionAreaVerticalSizer.Add(self.newDescriptionArea(), 0, wx.CENTER)
return descriptionAreaVerticalSizer
def newDescriptionFieldLabel(self, entity_type):
return wx.StaticText(self, -1, "Description of " + entity_type + "for Tutorial module")
def newDescriptionArea(self):
self.descriptionArea = wx.TextCtrl(self, -1,size=(400, 200), style=wx.TE_MULTILINE)
self.restorableViews.append(RestorableDescriptionArea(self, self.descriptionArea))
return self.descriptionArea
def newPredecessorPickerHorizontalSizer(self,):
predecessor_picker_horizontal_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.addToSizerWithSpace(predecessor_picker_horizontal_sizer, self.newPredeccesorLabel(self.getEntityType()))
predecessor_picker_horizontal_sizer.Add(self.newPredecessorSelector())
return predecessor_picker_horizontal_sizer
def newSelector(self):
return wx.ComboBox(self, choices=["None"], size = (125,-1), style=wx.CB_READONLY)
def newPredecessorSelector(self):
self.predecessorSelector = self.newSelector()
self.restorableViews.append(
RestorableStdSelector(self, self.predecessorSelector, Consts.PREDECESSOR)
)
return self.predecessorSelector
def newSuccesorPickerHorizontalSizer(self):
successor_picker_horizontal_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.addToSizerWithSpace(successor_picker_horizontal_sizer, self.newSuccesorLabel(self.getEntityType()))
successor_picker_horizontal_sizer.Add(self.newSuccessorSelector())
return successor_picker_horizontal_sizer
def newSuccessorSelector(self):
self.successorSelector = self.newSelector()
self.restorableViews.append(
RestorableStdSelector(self, self.successorSelector, Consts.SUCCESSOR)
)
return self.successorSelector
def newPredeccesorLabel(self, entity_type):
return wx.StaticText(self, -1, "Predecessor " + entity_type +":", size = (150, -1))
def newSuccesorLabel(self,entity_type):
return wx.StaticText(self, -1, "Successor " + entity_type + ":", size = (125,-1))
def newEntityIconHorizontalSizer(self):
entityIconHorizontalSizer = wx.BoxSizer(wx.HORIZONTAL)
self.addToSizerWithSpace(entityIconHorizontalSizer, self.newImageInfoLabel())
self.addToSizerWithSpace(entityIconHorizontalSizer,self.newEntityBmp())
entityIconHorizontalSizer.Add(self.newImageSelectorButton())
return entityIconHorizontalSizer
def newImageInfoLabel(self):
return wx.StaticText(self, -1, "Your texture: ")
def newEntityBmp(self):
self.imageBitmap = self.newScaledImgBitmap(relative_textures_path + "DefaultBuilding.jpg")
self.restorableViews.append(RestorableImageBmp(self, self.imageBitmap))
return self.imageBitmap
def newScaledImg(self, non_relative_path):
image = wx.Image(name = non_relative_path) #"..\\..\\resources\\Textures\\DefaultBuilding.jpg"
return image.Scale(32,32)
def newScaledImgBitmap(self, non_relative_path):
return wx.StaticBitmap(self, wx.ID_ANY, wx.BitmapFromImage(self.newScaledImg(non_relative_path)), size = (32,32))
def newImageSelectorButton(self):
return ButtonsFactory().newButton(self, "Choose another texture", self.selectImage, size = (-1, 32))
def selectImage(self, event):
dlg = self.newImgDialog()
self.onImageSelected(dlg)
def newImgDialog(self):
return wx.FileDialog(
self,
defaultDir= relative_textures_path, #"..\\..\\resources\\Textures\\",
message="Choose an image",
wildcard="*.png;*.jpg",
style=wx.FD_OPEN
)
def onImageSelected(self, dlg):
if dlg.ShowModal() == wx.ID_OK:
import os
abs_path_to_res = os.path.abspath(relative_textures_path)
abs_path_to_img = os.path.abspath(dlg.GetPath())
path_relative_to_resdir = os.path.relpath(abs_path_to_img, abs_path_to_res)
path = dlg.GetPath()
# self.entityIconRelativePath = dlg.GetFilename()
self.entityIconRelativePath = path_relative_to_resdir
self.imageBitmap.SetBitmap(wx.BitmapFromImage(self.newScaledImg(path)))
def moveToMainPanel(self,event):
self.frame.showPanel("main_panel",initDataForSearchedPanel={"CleanGraph": False})
def restoreRestorables(self, edit_element_name):
for restorableView in self.restorableViews:
restorableView.restoreView(edit_element_name)
def restoreChildrenCheckers(self, sheetMode, edit_element_name):
for child in self.childrenCheckers:
child.fillWithEntries(edit_element_name if sheetMode.getMode() == EDIT_MODE else None)
def setupSheetMode(self, sheetMode, edit_element_name = None):
self.sheetChecker = sheetMode
self.restoreRestorables(edit_element_name)
self.restoreChildrenCheckers(sheetMode, edit_element_name)
def setUpEditMode(self, edit_element_name):
self.setupSheetMode(EditModeSheetEntityChecker(self), edit_element_name)
def setUpAddMode(self):
self.setupSheetMode(AddModeSheetEntityChecker(self))
def newNumberFillingChecker(self, value_desc_label_txt, intro_label_txt, json_key):
numberFillingChecker = NumberFillingChecker(
self,
key_label_txt="Resource:",
value_desc_label_txt=value_desc_label_txt,
intro_label_txt=intro_label_txt,
json_key=json_key
)
self.childrenCheckers.append(numberFillingChecker)
return numberFillingChecker
def newResourcesConsumedChecker(self):
return self.newNumberFillingChecker(
"Consumed in quantity:",
"Consumed resources",
Consts.CONSUMES
) |
clovertrail/cloudinit-bis | refs/heads/master | cloudinit/cloud.py | 6 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import os
from cloudinit import log as logging
from cloudinit.reporting import events
LOG = logging.getLogger(__name__)
# This class is the high level wrapper that provides
# access to cloud-init objects without exposing the stage objects
# to handler and or module manipulation. It allows for cloud
# init to restrict what those types of user facing code may see
# and or adjust (which helps avoid code messing with each other)
#
# It also provides util functions that avoid having to know
# how to get a certain member from this submembers as well
# as providing a backwards compatible object that can be maintained
# while the stages/other objects can be worked on independently...
class Cloud(object):
    """High level wrapper around cloud-init stage objects.

    Gives handlers/modules access to the datasource, paths, distro and
    config without exposing the stage objects themselves for
    manipulation.
    """
    def __init__(self, datasource, paths, cfg, distro, runners, reporter=None):
        self.datasource = datasource
        self.paths = paths
        self.distro = distro
        self._cfg = cfg
        self._runners = runners
        if reporter is None:
            # Fall back to a disabled event stack so callers can always
            # rely on self.reporter existing.
            reporter = events.ReportEventStack(
                name="unnamed-cloud-reporter",
                description="unnamed-cloud-reporter",
                reporting_enabled=False)
        self.reporter = reporter
    # If a 'user' manipulates logging or logging services
    # it is typically useful to cause the logging to be
    # setup again.
    def cycle_logging(self):
        """Reset and re-apply logging setup from the current config."""
        logging.resetLogging()
        logging.setupLogging(self.cfg)
    @property
    def cfg(self):
        # Ensure that not indirectly modified
        return copy.deepcopy(self._cfg)
    def run(self, name, functor, args, freq=None, clear_on_fail=False):
        """Run *functor* through the runners, honoring frequency semantics."""
        return self._runners.run(name, functor, args, freq, clear_on_fail)
    def get_template_filename(self, name):
        """Return the path of template *name*, or None (with a warning) if absent."""
        fn = self.paths.template_tpl % (name)
        if not os.path.isfile(fn):
            LOG.warn("No template found at %s for template named %s", fn, name)
            return None
        return fn
    # The rest of these are just useful proxies
    def get_userdata(self, apply_filter=True):
        return self.datasource.get_userdata(apply_filter)
    def get_instance_id(self):
        return self.datasource.get_instance_id()
    @property
    def launch_index(self):
        return self.datasource.launch_index
    def get_public_ssh_keys(self):
        return self.datasource.get_public_ssh_keys()
    def get_locale(self):
        return self.datasource.get_locale()
    def get_hostname(self, fqdn=False):
        return self.datasource.get_hostname(fqdn=fqdn)
    def device_name_to_device(self, name):
        return self.datasource.device_name_to_device(name)
    def get_ipath_cur(self, name=None):
        return self.paths.get_ipath_cur(name)
    def get_cpath(self, name=None):
        return self.paths.get_cpath(name)
    def get_ipath(self, name=None):
        return self.paths.get_ipath(name)
|
xydinesh/webdev | refs/heads/master | werkzeug/contrib/sessions.py | 77 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.
Application Integration
=======================
::
from werkzeug.contrib.sessions import SessionMiddleware, \
FilesystemSessionStore
app = SessionMiddleware(app, FilesystemSessionStore())
The current session will then appear in the WSGI environment as
`werkzeug.session`. However it's recommended to not use the middleware
but the stores directly in the application. However for very simple
scripts a middleware for sessions could be sufficient.
This module does not implement methods or ways to check if a session is
expired. That should be done by a cronjob and storage specific. For
example to prune unused filesystem sessions one could check the modified
    time of the files.  If sessions are stored in the database the new()
method should add an expiration timestamp for the session.
For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::
session_store = FilesystemSessionStore()
def application(environ, start_response):
request = Request(environ)
sid = request.cookies.get('cookie_name')
if sid is None:
request.session = session_store.new()
else:
request.session = session_store.get(sid)
response = get_the_response_object(request)
if request.session.should_save:
session_store.save(request.session)
response.set_cookie('cookie_name', request.session.sid)
return response(environ, start_response)
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import tempfile
from os import path
from time import time
from random import random
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from cPickle import dump, load, HIGHEST_PROTOCOL
from werkzeug.datastructures import CallbackDict
from werkzeug.utils import dump_cookie, parse_cookie
from werkzeug.wsgi import ClosingIterator
from werkzeug.posixemulation import rename
_sha1_re = re.compile(r'^[a-f0-9]{40}$')
def _urandom():
if hasattr(os, 'urandom'):
return os.urandom(30)
return random()
def generate_key(salt=None):
    """Return a fresh 40-character sha1 hex key derived from *salt*, the
    current time and OS-level randomness."""
    return sha1('%s%s%s' % (salt, time(), _urandom())).hexdigest()
class ModificationTrackingDict(CallbackDict):
    """Dict that flips :attr:`modified` to True on any direct change.

    Changes inside mutable values are not detected; callers must set
    ``modified`` by hand in that case.
    """
    __slots__ = ('modified',)
    def __init__(self, *args, **kwargs):
        def on_update(self):
            self.modified = True
        # Seed the flag before the initial update so construction itself
        # does not count as a modification.
        self.modified = False
        CallbackDict.__init__(self, on_update=on_update)
        dict.update(self, *args, **kwargs)
    def copy(self):
        """Create a flat copy of the dict."""
        missing = object()
        # Bypass __init__ and copy only the slot attributes that are set.
        result = object.__new__(self.__class__)
        for name in self.__slots__:
            val = getattr(self, name, missing)
            if val is not missing:
                setattr(result, name, val)
        return result
    def __copy__(self):
        return self.copy()
class Session(ModificationTrackingDict):
    """Subclass of a dict that keeps track of direct object changes.  Changes
    in mutable structures are not tracked, for those you have to set
    `modified` to `True` by hand.

    :param data: initial session payload.
    :param sid: the session id string.
    :param new: True if the session was freshly created by the store.
    """
    __slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')
    def __init__(self, data, sid, new=False):
        ModificationTrackingDict.__init__(self, data)
        self.sid = sid
        self.new = new
    def __repr__(self):
        # The trailing '*' marks a session with unsaved changes.
        return '<%s %s%s>' % (
            self.__class__.__name__,
            dict.__repr__(self),
            self.should_save and '*' or ''
        )
    @property
    def should_save(self):
        """True if the session should be saved.

        .. versionchanged:: 0.6
           By default the session is now only saved if the session is
           modified, not if it is new like it was before.
        """
        return self.modified
class SessionStore(object):
    """Baseclass for all session stores.  The Werkzeug contrib module does not
    implement any useful stores besides the filesystem store, application
    developers are encouraged to create their own stores.

    :param session_class: The session class to use.  Defaults to
                          :class:`Session`.
    """
    def __init__(self, session_class=None):
        if session_class is None:
            session_class = Session
        self.session_class = session_class
    def is_valid_key(self, key):
        """Check if a key has the correct format."""
        # Keys are 40-character lowercase sha1 hex digests.
        return _sha1_re.match(key) is not None
    def generate_key(self, salt=None):
        """Simple function that generates a new session key."""
        return generate_key(salt)
    def new(self):
        """Generate a new session."""
        return self.session_class({}, self.generate_key(), True)
    def save(self, session):
        """Save a session."""
        # Intentionally a no-op; concrete stores override this.
    def save_if_modified(self, session):
        """Save if a session class wants an update."""
        if session.should_save:
            self.save(session)
    def delete(self, session):
        """Delete a session."""
        # Intentionally a no-op; concrete stores override this.
    def get(self, sid):
        """Get a session for this sid or a new session object.  This method
        has to check if the session key is valid and create a new session if
        that wasn't the case.
        """
        return self.session_class({}, sid, True)
#: used for temporary files by the filesystem session store
_fs_transaction_suffix = '.__wz_sess'
class FilesystemSessionStore(SessionStore):
    """Simple example session store that saves sessions on the filesystem.
    This store works best on POSIX systems and Windows Vista / Windows
    Server 2008 and newer.

    .. versionchanged:: 0.6
       `renew_missing` was added.  Previously this was considered `True`,
       now the default changed to `False` and it can be explicitly
       deactivated.

    :param path: the path to the folder used for storing the sessions.
                 If not provided the default temporary directory is used.
    :param filename_template: a string template used to give the session
                              a filename.  ``%s`` is replaced with the
                              session id.
    :param session_class: The session class to use.  Defaults to
                          :class:`Session`.
    :param renew_missing: set to `True` if you want the store to
                          give the user a new sid if the session was
                          not yet saved.
    :param mode: the octal file permission applied to saved sessions.
    """
    def __init__(self, path=None, filename_template='werkzeug_%s.sess',
                 session_class=None, renew_missing=False, mode=0644):
        SessionStore.__init__(self, session_class)
        if path is None:
            path = tempfile.gettempdir()
        self.path = path
        if isinstance(filename_template, unicode):
            filename_template = filename_template.encode(
                sys.getfilesystemencoding() or 'utf-8')
        # The suffix is reserved for in-flight temporary files (see save()).
        assert not filename_template.endswith(_fs_transaction_suffix), \
            'filename templates may not end with %s' % _fs_transaction_suffix
        self.filename_template = filename_template
        self.renew_missing = renew_missing
        self.mode = mode
    def get_session_filename(self, sid):
        # out of the box, this should be a strict ASCII subset but
        # you might reconfigure the session object to have a more
        # arbitrary string.
        if isinstance(sid, unicode):
            sid = sid.encode(sys.getfilesystemencoding() or 'utf-8')
        return path.join(self.path, self.filename_template % sid)
    def save(self, session):
        """Persist *session* atomically: write to a temp file, then rename."""
        fn = self.get_session_filename(session.sid)
        fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix,
                                   dir=self.path)
        f = os.fdopen(fd, 'wb')
        try:
            dump(dict(session), f, HIGHEST_PROTOCOL)
        finally:
            f.close()
        try:
            # rename() is atomic on POSIX, so readers never see a partial file.
            rename(tmp, fn)
            os.chmod(fn, self.mode)
        except (IOError, OSError):
            pass
    def delete(self, session):
        fn = self.get_session_filename(session.sid)
        try:
            os.unlink(fn)
        except OSError:
            # Already gone; deletion is best-effort.
            pass
    def get(self, sid):
        """Load the session for *sid*, falling back to an empty one on any
        missing, unreadable or corrupt session file."""
        if not self.is_valid_key(sid):
            return self.new()
        try:
            f = open(self.get_session_filename(sid), 'rb')
        except IOError:
            if self.renew_missing:
                return self.new()
            data = {}
        else:
            try:
                try:
                    data = load(f)
                except Exception:
                    # Corrupt pickle data: start the session over.
                    data = {}
            finally:
                f.close()
        return self.session_class(data, sid, False)
    def list(self):
        """Lists all sessions in the store.

        .. versionadded:: 0.6
        """
        before, after = self.filename_template.split('%s', 1)
        filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
                                                    re.escape(after)))
        result = []
        for filename in os.listdir(self.path):
            #: this is a session that is still being saved.
            if filename.endswith(_fs_transaction_suffix):
                continue
            match = filename_re.match(filename)
            if match is not None:
                result.append(match.group(1))
        return result
class SessionMiddleware(object):
    """A simple middleware that puts the session object of a store provided
    into the WSGI environ.  It automatically sets cookies and restores
    sessions.

    However a middleware is not the preferred solution because it won't be as
    fast as sessions managed by the application itself and will put a key into
    the WSGI environment only relevant for the application which is against
    the concept of WSGI.

    The cookie parameters are the same as for the :func:`~dump_cookie`
    function just prefixed with ``cookie_``.  Additionally `max_age` is
    called `cookie_age` and not `cookie_max_age` because of backwards
    compatibility.
    """
    def __init__(self, app, store, cookie_name='session_id',
                 cookie_age=None, cookie_expires=None, cookie_path='/',
                 cookie_domain=None, cookie_secure=None,
                 cookie_httponly=False, environ_key='werkzeug.session'):
        self.app = app
        self.store = store
        self.cookie_name = cookie_name
        self.cookie_age = cookie_age
        self.cookie_expires = cookie_expires
        self.cookie_path = cookie_path
        self.cookie_domain = cookie_domain
        self.cookie_secure = cookie_secure
        self.cookie_httponly = cookie_httponly
        self.environ_key = environ_key
    def __call__(self, environ, start_response):
        # Restore the session from the request cookie (or create a new one).
        cookie = parse_cookie(environ.get('HTTP_COOKIE', ''))
        sid = cookie.get(self.cookie_name, None)
        if sid is None:
            session = self.store.new()
        else:
            session = self.store.get(sid)
        environ[self.environ_key] = session
        def injecting_start_response(status, headers, exc_info=None):
            # Persist the session and emit the cookie just before the
            # response headers are sent.
            if session.should_save:
                self.store.save(session)
                headers.append(('Set-Cookie', dump_cookie(self.cookie_name,
                                                          session.sid, self.cookie_age,
                                                          self.cookie_expires, self.cookie_path,
                                                          self.cookie_domain, self.cookie_secure,
                                                          self.cookie_httponly)))
            return start_response(status, headers, exc_info)
        # save_if_modified runs after the response iterable is exhausted to
        # catch sessions modified during response generation.
        return ClosingIterator(self.app(environ, injecting_start_response),
                               lambda: self.store.save_if_modified(session))
|
mbareta/edx-platform-ft | refs/heads/open-release/eucalyptus.master | common/lib/i18n_tests/test_extract_and_generate.py | 16 | """
This test tests that i18n extraction (`paver i18n_extract -v`) works properly.
"""
from datetime import datetime, timedelta
import os
import random
import re
import sys
import string
import subprocess
from unittest import TestCase
from mock import patch
from polib import pofile
from pytz import UTC
from i18n import extract
from i18n import generate
from i18n import dummy
from i18n.config import CONFIGURATION
class TestGenerate(TestCase):
    """
    Tests functionality of i18n/generate.py
    """
    # Merged .po files that extraction is expected to produce.
    generated_files = ('django-partial.po', 'djangojs-partial.po', 'mako.po')
    @classmethod
    def setUpClass(cls):
        # Extraction and dummy generation are expensive, so run them once
        # for the whole class rather than per test.
        super(TestGenerate, cls).setUpClass()
        sys.stderr.write(
            "\nThis test tests that i18n extraction (`paver i18n_extract`) works properly. "
            "If you experience failures, please check that all instances of `gettext` and "
            "`ngettext` are used correctly. You can also try running `paver i18n_extract -v` "
            "locally for more detail.\n"
        )
        sys.stderr.write(
            "\nExtracting i18n strings and generating dummy translations; "
            "this may take a few minutes\n"
        )
        sys.stderr.flush()
        extract.main(verbosity=0)
        dummy.main(verbosity=0)
    @classmethod
    def tearDownClass(cls):
        # Clear the Esperanto & RTL directories of any test artifacts
        cmd = "git checkout conf/locale/eo conf/locale/rtl"
        sys.stderr.write("Cleaning up dummy language directories: " + cmd)
        sys.stderr.flush()
        returncode = subprocess.call(cmd, shell=True)
        assert returncode == 0
        super(TestGenerate, cls).tearDownClass()
    def setUp(self):
        super(TestGenerate, self).setUp()
        # Subtract 1 second to help comparisons with file-modify time succeed,
        # since os.path.getmtime() is not millisecond-accurate
        self.start_time = datetime.now(UTC) - timedelta(seconds=1)
    def test_merge(self):
        """
        Tests merge script on English source files.
        """
        filename = os.path.join(CONFIGURATION.source_messages_dir, random_name())
        generate.merge(CONFIGURATION.source_locale, target=filename)
        self.assertTrue(os.path.exists(filename))
        os.remove(filename)
    # Patch dummy_locales to not have esperanto present
    @patch.object(CONFIGURATION, 'dummy_locales', ['fake2'])
    def test_main(self):
        """
        Runs generate.main() which should merge source files,
        then compile all sources in all configured languages.
        Validates output by checking all .mo files in all configured languages.
        .mo files should exist, and be recently created (modified
        after start of test suite)
        """
        generate.main(verbosity=0, strict=False)
        for locale in CONFIGURATION.translated_locales:
            for filename in ('django', 'djangojs'):
                mofile = filename + '.mo'
                path = os.path.join(CONFIGURATION.get_messages_dir(locale), mofile)
                exists = os.path.exists(path)
                self.assertTrue(exists, msg='Missing file in locale %s: %s' % (locale, mofile))
                self.assertGreaterEqual(
                    datetime.fromtimestamp(os.path.getmtime(path), UTC),
                    self.start_time,
                    msg='File not recently modified: %s' % path
                )
            # Segmenting means that the merge headers don't work they way they
            # used to, so don't make this check for now. I'm not sure if we'll
            # get the merge header back eventually, or delete this code eventually.
            # self.assert_merge_headers(locale)
    def assert_merge_headers(self, locale):
        """
        This is invoked by test_main to ensure that it runs after
        calling generate.main().

        There should be exactly three merge comment headers
        in our merged .po file. This counts them to be sure.
        A merge comment looks like this:
        # #-#-#-#-#  django-partial.po (0.1a)  #-#-#-#-#
        """
        path = os.path.join(CONFIGURATION.get_messages_dir(locale), 'django.po')
        pof = pofile(path)
        pattern = re.compile('^#-#-#-#-#', re.M)
        match = pattern.findall(pof.header)
        self.assertEqual(
            len(match),
            3,
            msg="Found %s (should be 3) merge comments in the header for %s" % (len(match), path)
        )
def random_name(size=6):
    """Return a random test filename such as ``test-4BZ81W``.

    The suffix is *size* characters drawn from uppercase letters and digits.
    """
    alphabet = string.ascii_uppercase + string.digits
    suffix = [random.choice(alphabet) for _ in range(size)]
    return 'test-%s' % ''.join(suffix)
|
thedep2/CouchPotatoServer | refs/heads/develop | libs/pyasn1/type/constraint.py | 382 | #
# ASN.1 subtype constraints classes.
#
# Constraints are relatively rare, but every ASN1 object
# is doing checks all the time for whether they have any
# constraints and whether they are applicable to the object.
#
# What we're going to do is define objects/functions that
# can be called unconditionally if they are present, and that
# are simply not present if there are no constraints.
#
# Original concept and code by Mike C. Fletcher.
#
import sys
from pyasn1.type import error
class AbstractConstraint:
    """Abstract base-class for constraint objects

       Constraints should be stored in a simple sequence in the
       namespace of their client Asn1Item sub-classes.
    """
    def __init__(self, *values):
        # Maps ancestor constraints to 1; used by the sub/supertype checks.
        self._valueMap = {}
        self._setValues(values)
        self.__hashedValues = None
    def __call__(self, value, idx=None):
        """Apply the constraint to *value*, re-raising with context on failure."""
        try:
            self._testValue(value, idx)
        except error.ValueConstraintError:
            raise error.ValueConstraintError(
                '%s failed at: \"%s\"' % (self, sys.exc_info()[1])
                )
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join([repr(x) for x in self._values])
        )
    # Identity short-circuits; otherwise the stored value tuple is compared
    # directly against *other*.
    def __eq__(self, other):
        return self is other and True or self._values == other
    def __ne__(self, other): return self._values != other
    def __lt__(self, other): return self._values < other
    def __le__(self, other): return self._values <= other
    def __gt__(self, other): return self._values > other
    def __ge__(self, other): return self._values >= other
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self._values)
    else:
        def __bool__(self): return bool(self._values)
    def __hash__(self):
        # Hash is cached; constraints are immutable after construction.
        if self.__hashedValues is None:
            self.__hashedValues = hash((self.__class__.__name__, self._values))
        return self.__hashedValues
    def _setValues(self, values): self._values = values
    def _testValue(self, value, idx):
        # Default implementation rejects everything; subclasses override.
        raise error.ValueConstraintError(value)
    # Constraints derivation logic
    def getValueMap(self): return self._valueMap
    def isSuperTypeOf(self, otherConstraint):
        return self in otherConstraint.getValueMap() or \
               otherConstraint is self or otherConstraint == self
    def isSubTypeOf(self, otherConstraint):
        return otherConstraint in self._valueMap or \
               otherConstraint is self or otherConstraint == self
class SingleValueConstraint(AbstractConstraint):
    """Value must be part of defined values constraint"""
    def _testValue(self, value, idx):
        # XXX index vals for performance?
        if value not in self._values:
            raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
    """Value must satisfy all of defined set of constraints"""
    def _testValue(self, value, idx):
        # Each stored constraint is itself callable; any failure propagates.
        for c in self._values:
            c(value, idx)
class ValueRangeConstraint(AbstractConstraint):
    """Value must be within start and stop values (inclusive)"""
    def _testValue(self, value, idx):
        if value < self.start or value > self.stop:
            raise error.ValueConstraintError(value)
    def _setValues(self, values):
        # Exactly two values are expected: (start, stop), start <= stop.
        if len(values) != 2:
            raise error.PyAsn1Error(
                '%s: bad constraint values' % (self.__class__.__name__,)
                )
        self.start, self.stop = values
        if self.start > self.stop:
            raise error.PyAsn1Error(
                '%s: screwed constraint values (start > stop): %s > %s' % (
                    self.__class__.__name__,
                    self.start, self.stop
                )
            )
        AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
    """len(value) must be within start and stop values (inclusive)"""
    def _testValue(self, value, idx):
        l = len(value)
        if l < self.start or l > self.stop:
            raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
    """Every character of the value must belong to the permitted alphabet."""
    def _setValues(self, values):
        # Flatten the given strings/sequences into one tuple of characters.
        self._values = ()
        for v in values:
            self._values = self._values + tuple(v)
    def _testValue(self, value, idx):
        for v in value:
            if v not in self._values:
                raise error.ValueConstraintError(value)
# This is a bit kludgy, meaning two op modes within a single constraint
class InnerTypeConstraint(AbstractConstraint):
    """Value must satisfy type and presence constraints.

    Operates in one of two modes: a single inner-type constraint applied
    to every value, or a per-index mapping of (constraint, presence).
    """
    def _testValue(self, value, idx):
        if self.__singleTypeConstraint:
            self.__singleTypeConstraint(value)
        elif self.__multipleTypeConstraint:
            if idx not in self.__multipleTypeConstraint:
                raise error.ValueConstraintError(value)
            constraint, status = self.__multipleTypeConstraint[idx]
            if status == 'ABSENT':   # XXX presence is not checked!
                raise error.ValueConstraintError(value)
            constraint(value)
    def _setValues(self, values):
        self.__multipleTypeConstraint = {}
        self.__singleTypeConstraint = None
        for v in values:
            # Tuples are (idx, constraint, presence); anything else is a
            # single constraint applied to all values.
            if isinstance(v, tuple):
                self.__multipleTypeConstraint[v[0]] = v[1], v[2]
            else:
                self.__singleTypeConstraint = v
        AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
    """Value must not fit the single constraint"""
    def _testValue(self, value, idx):
        # Invert the wrapped constraint: failure there means success here.
        try:
            self._values[0](value, idx)
        except error.ValueConstraintError:
            return
        else:
            raise error.ValueConstraintError(value)
    def _setValues(self, values):
        if len(values) != 1:
            raise error.PyAsn1Error('Single constraint expected')
        AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
    """Base class for constraints that combine a sequence of sub-constraints.

    Behaves like a sequence of the contained constraints; `+` produces a
    new set of the same class combining both operands.
    """
    def __getitem__(self, idx): return self._values[idx]
    def __add__(self, value): return self.__class__(self, value)
    def __radd__(self, value): return self.__class__(self, value)
    def __len__(self): return len(self._values)
    # Constraints inclusion in sets
    def _setValues(self, values):
        self._values = values
        # Register each sub-constraint (and its ancestors) in the value map
        # so sub/supertype relationships can be derived.
        for v in values:
            self._valueMap[v] = 1
            self._valueMap.update(v.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
    """Value must satisfy all constraints"""
    def _testValue(self, value, idx):
        for v in self._values:
            v(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
    """Value must satisfy at least one constraint"""
    def _testValue(self, value, idx):
        # First constraint that accepts the value wins.
        for v in self._values:
            try:
                v(value, idx)
            except error.ValueConstraintError:
                pass
            else:
                return
        raise error.ValueConstraintError(
            'all of %s failed for \"%s\"' % (self._values, value)
            )
# XXX
# add tests for type check
|
edx/lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/django/contrib/admindocs/models.py | 634 | # Empty models.py to allow for specifying admindocs as a test label.
|
afoss925/kaggle_schizophrenia_2014 | refs/heads/master | utils.py | 2 | '''Helper utilities to load training / testing data and write predictions.
'''
import csv
from itertools import izip
import numpy as np
def load_data(train=True, sbm_only=False, fnc_only=False):
    """Load the FNC/SBM feature CSVs (plus labels for the training set).

    Returns ``(ids, data)`` when ``train`` is False, otherwise
    ``(ids, <features>, labels)`` where the features are the SBM block
    horizontally stacked with the FNC block. ``sbm_only``/``fnc_only``
    restrict the returned features -- note they only take effect when
    ``train`` is True, because the test branch returns earlier.
    (Python 2 code: relies on list-returning ``map`` and ``f.next()``.)
    """
    if train:
        fnc="Train/train_FNC.csv"
        sbm="Train/train_SBM.csv"
    else:
        fnc="Test/test_FNC.csv"
        sbm="Test/test_SBM.csv"
    with open(fnc,'r') as f:
        train_fnc = list(csv.reader(f))
    fnc_header = train_fnc[0]
    # Skip the header row; column 0 of the FNC file is the subject id.
    fnc_data = np.array([np.array(map(float,i)) for i in train_fnc[1:]])
    ids = np.array(fnc_data[:,0],dtype=int)
    with open(sbm,'r') as f:
        train_sbm = list(csv.reader(f))
    sbm_header = train_sbm[0]
    sbm_data = np.array([np.array(map(float,i)) for i in train_sbm[1:]])
    # Drop the id column from both feature blocks before stacking.
    fnc_data = fnc_data[:,1:]
    sbm_data = sbm_data[:,1:]
    data = np.column_stack((sbm_data,fnc_data))
    if not train:
        return ids, data
    # NOTE(review): directory case differs from the feature files above
    # ("train/" vs "Train/") -- confirm on a case-sensitive filesystem.
    with open("train/train_labels.csv",'r') as f:
        f.next()
        labels = np.array([int(i[1]) for i in csv.reader(f)])
    if sbm_only:
        return ids,sbm_data,labels
    elif fnc_only:
        return ids,fnc_data,labels
    else:
        return ids, data, labels
def write_predictions(clf):
    """Score the test set with *clf* and write a Kaggle submission CSV.

    NOTE(review): ``get_name`` is not defined in this module -- presumably
    provided by an importing script; confirm before use.
    """
    ids, data = load_data(False)
    # preds = clf.predict_proba(data)[:,1]
    preds = get_score(clf, data)
    with open("submissions/" + get_name(clf)+".csv",'w') as f:
        w = csv.writer(f)
        w.writerow(["ID","Probability"])
        for item in izip(ids, preds):
            w.writerow(item)
def get_score(clf, data):
    """Return a 1-D array of scores for *data* from any classifier-like object.

    Tries, in order: ``decision_function`` (margin-based models such as
    SVMs and logistic regression), ``predict_proba`` (probabilistic models
    such as naive Bayes; column 1 is the positive-class probability), and
    finally plain ``predict`` -- so the different classifier families are
    interchangeable.
    """
    try:
        scores = clf.decision_function(data).ravel()
    except AttributeError:
        try:
            # Column 1 holds the probability of the positive class.
            scores = clf.predict_proba(data)[:, 1]
        except AttributeError:
            scores = clf.predict(data)
    return scores
|
DailyActie/Surrogate-Model | refs/heads/master | 01-codes/scipy-master/scipy/sparse/__init__.py | 1 | """
=====================================
Sparse matrices (:mod:`scipy.sparse`)
=====================================
.. currentmodule:: scipy.sparse
SciPy 2-D sparse matrix package for numeric data.
Contents
========
Sparse matrix classes
---------------------
.. autosummary::
:toctree: generated/
bsr_matrix - Block Sparse Row matrix
coo_matrix - A sparse matrix in COOrdinate format
csc_matrix - Compressed Sparse Column matrix
csr_matrix - Compressed Sparse Row matrix
dia_matrix - Sparse matrix with DIAgonal storage
dok_matrix - Dictionary Of Keys based sparse matrix
lil_matrix - Row-based linked list sparse matrix
spmatrix - Sparse matrix base class
Functions
---------
Building sparse matrices:
.. autosummary::
:toctree: generated/
eye - Sparse MxN matrix whose k-th diagonal is all ones
identity - Identity matrix in sparse format
kron - kronecker product of two sparse matrices
kronsum - kronecker sum of sparse matrices
diags - Return a sparse matrix from diagonals
spdiags - Return a sparse matrix from diagonals
block_diag - Build a block diagonal sparse matrix
tril - Lower triangular portion of a matrix in sparse format
triu - Upper triangular portion of a matrix in sparse format
bmat - Build a sparse matrix from sparse sub-blocks
hstack - Stack sparse matrices horizontally (column wise)
vstack - Stack sparse matrices vertically (row wise)
rand - Random values in a given shape
random - Random values in a given shape
Save and load sparse matrices:
.. autosummary::
:toctree: generated/
save_npz - Save a sparse matrix to a file using ``.npz`` format.
load_npz - Load a sparse matrix from a file using ``.npz`` format.
Sparse matrix tools:
.. autosummary::
:toctree: generated/
find
Identifying sparse matrices:
.. autosummary::
:toctree: generated/
issparse
isspmatrix
isspmatrix_csc
isspmatrix_csr
isspmatrix_bsr
isspmatrix_lil
isspmatrix_dok
isspmatrix_coo
isspmatrix_dia
Submodules
----------
.. autosummary::
:toctree: generated/
csgraph - Compressed sparse graph routines
linalg - sparse linear algebra routines
Exceptions
----------
.. autosummary::
:toctree: generated/
SparseEfficiencyWarning
SparseWarning
Usage information
=================
There are seven available sparse matrix types:
1. csc_matrix: Compressed Sparse Column format
2. csr_matrix: Compressed Sparse Row format
3. bsr_matrix: Block Sparse Row format
4. lil_matrix: List of Lists format
5. dok_matrix: Dictionary of Keys format
6. coo_matrix: COOrdinate format (aka IJV, triplet format)
7. dia_matrix: DIAgonal format
To construct a matrix efficiently, use either dok_matrix or lil_matrix.
The lil_matrix class supports basic slicing and fancy indexing with a
similar syntax to NumPy arrays. As illustrated below, the COO format
may also be used to efficiently construct matrices. Despite their
similarity to NumPy arrays, it is **strongly discouraged** to use NumPy
functions directly on these matrices because NumPy may not properly convert
them for computations, leading to unexpected (and incorrect) results. If you
do want to apply a NumPy function to these matrices, first check if SciPy has
its own implementation for the given sparse matrix class, or **convert the
sparse matrix to a NumPy array** (e.g. using the `toarray()` method of the
class) first before applying the method.
To perform manipulations such as multiplication or inversion, first
convert the matrix to either CSC or CSR format. The lil_matrix format is
row-based, so conversion to CSR is efficient, whereas conversion to CSC
is less so.
All conversions among the CSR, CSC, and COO formats are efficient,
linear-time operations.
Matrix vector product
---------------------
To do a vector product between a sparse matrix and a vector simply use
the matrix `dot` method, as described in its docstring:
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
.. warning:: As of NumPy 1.7, `np.dot` is not aware of sparse matrices,
  therefore using it will result in unexpected results or errors.
The corresponding dense array should be obtained first instead:
>>> np.dot(A.toarray(), v)
array([ 1, -3, -1], dtype=int64)
but then all the performance advantages would be lost.
The CSR format is specially suitable for fast matrix vector products.
Example 1
---------
Construct a 1000x1000 lil_matrix and add some values to it:
>>> from scipy.sparse import lil_matrix
>>> from scipy.sparse.linalg import spsolve
>>> from numpy.linalg import solve, norm
>>> from numpy.random import rand
>>> A = lil_matrix((1000, 1000))
>>> A[0, :100] = rand(100)
>>> A[1, 100:200] = A[0, :100]
>>> A.setdiag(rand(1000))
Now convert it to CSR format and solve A x = b for x:
>>> A = A.tocsr()
>>> b = rand(1000)
>>> x = spsolve(A, b)
Convert it to a dense matrix and solve, and check that the result
is the same:
>>> x_ = solve(A.toarray(), b)
Now we can compute norm of the error with:
>>> err = norm(x-x_)
>>> err < 1e-10
True
It should be small :)
Example 2
---------
Construct a matrix in COO format:
>>> from scipy import sparse
>>> from numpy import array
>>> I = array([0,3,1,0])
>>> J = array([0,3,1,2])
>>> V = array([4,5,7,9])
>>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4))
Notice that the indices do not need to be sorted.
Duplicate (i,j) entries are summed when converting to CSR or CSC.
>>> I = array([0,0,1,3,1,0,0])
>>> J = array([0,2,1,3,1,0,0])
>>> V = array([1,1,1,1,1,1,1])
>>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr()
This is useful for constructing finite-element stiffness and mass matrices.
Further Details
---------------
CSR column indices are not necessarily sorted. Likewise for CSC row
indices. Use the .sorted_indices() and .sort_indices() methods when
sorted indices are required (e.g. when passing data to other libraries).
"""
from __future__ import division, print_function, absolute_import
# Original code by Travis Oliphant.
# Modified and extended by Ed Schofield, Robert Cimrman,
# Nathan Bell, and Jake Vanderplas.
# for backward compatibility with v0.10. This function is marked as deprecated
# from spfuncs import *
# Re-export every public name bound in this module's namespace so far.
# NOTE(review): in this excerpt only the __future__ import precedes this
# line; presumably the submodule star-imports were elided -- confirm.
__all__ = [s for s in dir() if not s.startswith('_')]
# Hook up numpy's test runner as scipy.sparse.test (pre-pytest era API).
from numpy.testing import Tester
test = Tester().test
|
donspaulding/adspygoogle | refs/heads/master | examples/adspygoogle/dfp/v201204/get_creatives_by_statement.py | 2 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all image creatives. The statement retrieves up to the
maximum page size limit of 500. To create an image creative,
run create_creatives.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
creative_service = client.GetService('CreativeService', version='v201204')
# Create statement object to only select image creatives.
values = [{
'key': 'creativeType',
'value': {
'xsi_type': 'TextValue',
'value': 'ImageCreative'
}
}]
filter_statement = {'query': 'WHERE creativeType = :creativeType LIMIT 500',
'values': values}
# Get creatives by statement.
response = creative_service.GetCreativesByStatement(filter_statement)[0]
creatives = []
if 'results' in response:
creatives = response['results']
# Display results.
for creative in creatives:
print ('Creative with id \'%s\', name \'%s\', and type \'%s\' was found.'
% (creative['id'], creative['name'], creative['Creative_Type']))
print
print 'Number of results found: %s' % len(creatives)
|
alsrgv/tensorflow | refs/heads/master | tensorflow/python/autograph/pyct/testing/decorators.py | 24 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with test decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
def wrapping_decorator(f):
  """Decorator fixture that wraps `f` and preserves metadata via functools.wraps.

  NOTE: these decorators are test fixtures for AutoGraph's decorator
  handling; their exact syntactic shape is intentional -- do not refactor.
  """
  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    return f(*args, **kwargs)
  return wrapper
def standalone_decorator(f):
  """Decorator fixture that wraps `f` WITHOUT functools.wraps.

  The wrapped function's __name__ becomes 'standalone_wrapper'; the
  missing-wraps shape is the point of this fixture.
  """
  def standalone_wrapper(*args, **kwargs):
    return f(*args, **kwargs)
  return standalone_wrapper
def functional_decorator():
  """Decorator-factory fixture: returns a fresh decorator when called.

  Usage: `@functional_decorator()`. The factory shape (call producing a
  decorator) is what this fixture exists to exercise.
  """
  def decorator(f):
    def functional_wrapper(*args, **kwargs):
      return f(*args, **kwargs)
    return functional_wrapper
  return decorator
|
javo100/plugin.video.PAQUETEDIVIERTAS2 | refs/heads/master | servers/downupload.py | 36 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para videos externos de downupload
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
from core import unpackerjs
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve a downupload.com page into playable video URLs.

    Returns a list of ``[label, url]`` pairs. Fetches the page through
    scrapertools.cache_page and tries several scraping strategies in
    order: direct .mp4 sources, player-code download links (hostname and
    raw-IP variants), and finally packed-JavaScript unpacking.
    """
    logger.info("[downupload.py] get_video_url(page_url='%s')" % page_url)
    page_url = page_url.replace("amp;","")
    data = scrapertools.cache_page(page_url)
    video_urls = []
    # Example embedded-player sources this pattern matches:
    # s1.addVariable('file','http://78.140.181.136:182/d/kka3sx52abiuphevyzfirfaqtihgyq5xlvblnetok2mj4llocdeturoy/video.mp4');
    # http://downupload.com:182/d/k2a3kxf2abiuphevyzfirgajremkk3if57xcpelwboz4hbzjnfsvbit6/video.mp4
    patron = "(http://[\S]+\.mp4)"
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        scrapertools.printMatches(matches)
        for match in matches:
            videourl = match
            videourl = videourl.replace('%5C','')
            videourl = urllib.unquote(videourl)
            video_urls.append( [ ".mp4 [Downupload]" , videourl ] )
    else:
        # If it is a download link, look for the file inside the player code
        patron = '<div id="player_code">.*?value[\W]name[\W]param[\W]com[\W]http[\W]false[\W](.*?)[\W]divx[\W]previewImage[\W].*?[\W]custommode[\W](.*?)[\W](.*?)[\W](.*?)[\W]src'
        matches = re.compile(patron,re.DOTALL).findall(data)
        scrapertools.printMatches(matches)
        for match in matches:
            videourl = "http://"+match[0]+".com:"+match[3]+"/d/"+match[2]+"/video."+match[1]
            videourl = videourl.replace('|','.')
            videourl = urllib.unquote(videourl)
            video_urls.append( [ "."+match[1]+" [Downupload]" , videourl ] )
        # Find links that use a raw IP address instead of a hostname
        if len(matches)==0:
            patron = '<div id="player_code">.*?value[\W]name[\W]param[\W]http[\W]false[\W](.*?)[\W](.*?)[\W](.*?)[\W](.*?)[\W]divx[\W]previewImage[\W].*?[\W]custommode[\W](.*?)[\W](.*?)[\W](.*?)[\W]src'
            matches = re.compile(patron,re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            for match in matches:
                videourl = "http://"+match[3]+"."+match[2]+"."+match[1]+"."+match[0]+":"+match[6]+"/d/"+match[5]+"/video."+match[4]
                videourl = videourl.replace('|','')
                videourl = urllib.unquote(videourl)
                video_urls.append( [ "."+match[4]+" [Downupload]" , videourl ] )
        # Fallback: unpack the obfuscated (packed) JavaScript and search there
        if len(matches)==0:
            url = unpackerjs.unpackjs(data)
            logger.info("[unpackerjs.py] "+url)
            patron = 'src"value="([^"]+)"'
            matches = re.compile(patron,re.DOTALL).findall(url)
            for match in matches:
                videourl = match
                videourl = videourl.replace('|','')
                videourl = urllib.unquote(videourl)
                video_urls.append( [ "."+videourl.rsplit('.',1)[1]+" [Downupload]" , videourl ] )
    for video_url in video_urls:
        logger.info("[downupload.py] %s - %s" % (video_url[0],video_url[1]))
    return video_urls
# Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Locate downupload.com video links inside the text *data*.

    Returns a de-duplicated list of ``[title, url, server]`` triples.
    Embedded-player links are preferred; plain download links are only
    scanned when no embed links were found.
    """
    seen = set()
    results = []
    # Embedded player links, e.g. http://www.downupload.com/embed-p9oenzlz6xhu.html
    patronvideos = '(downupload.com/embed-.*?\.html)'
    logger.info("[downupload.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for match in matches:
        url = "http://www." + match
        if url in seen:
            logger.info(" url duplicada="+url)
        else:
            logger.info(" url="+url)
            results.append(["[Downupload]", url, 'downupload'])
            seen.add(url)
    # Plain download links, rewritten into embed form.
    if not matches:
        patronvideos = '(downupload.com/[\w]+)'
        logger.info("[downupload.py] find_videos #"+patronvideos+"#")
        matches = re.compile(patronvideos,re.DOTALL).findall(data)
        for match in matches:
            url = match.replace("downupload.com/","http://www.downupload.com/embed-") + ".html"
            if url in seen:
                logger.info(" url duplicada="+url)
            else:
                logger.info(" url="+url)
                results.append(["[Downupload]", url, 'downupload'])
                seen.add(url)
    return results
|
lneoe/python-social-auth | refs/heads/master | social/tests/backends/test_coursera.py | 76 | import json
from social.tests.backends.oauth import OAuth2Test
class CourseraOAuth2Test(OAuth2Test):
    """Exercises the Coursera OAuth2 backend against canned HTTP bodies."""
    backend_path = 'social.backends.coursera.CourseraOAuth2'
    user_data_url = \
        'https://api.coursera.org/api/externalBasicProfiles.v1?q=me'
    expected_username = '560e7ed2076e0d589e88bd74b6aad4b7'
    # Canned token-endpoint response.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'Bearer',
        'expires_in': 1795
    })
    # Canned authorization-code exchange payload.
    request_token_body = json.dumps({
        'code': 'foobar-code',
        'client_id': 'foobar-client-id',
        'client_secret': 'foobar-client-secret',
        'redirect_uri': 'http://localhost:8000/accounts/coursera/',
        'grant_type': 'authorization_code'
    })
    # Canned user-data response; elements[0]['id'] becomes the username.
    user_data_body = json.dumps({
        'token_type': 'Bearer',
        'paging': None,
        'elements': [{
            'id': '560e7ed2076e0d589e88bd74b6aad4b7'
        }],
        'access_token': 'foobar',
        'expires_in': 1800,
        'linked': None
    })
    def test_login(self):
        self.do_login()
    def test_partial_pipeline(self):
        self.do_partial_pipeline()
|
rsivapr/scikit-learn | refs/heads/master | sklearn/covariance/tests/test_graph_lasso.py | 5 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
def test_graph_lasso(random_state=0):
    """graph_lasso: 'cd' and 'lars' modes agree and costs always decrease.

    Fixes a bug where the loop variables were never used: every iteration
    called graph_lasso(alpha=.1) with the default mode, so the cd/lars
    comparison compared two identical calls.
    """
    # Sample data from a sparse multivariate normal
    dim = 20
    n_samples = 100
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.95,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    emp_cov = empirical_covariance(X)
    for alpha in (.1, .01):
        covs = dict()
        for method in ('cd', 'lars'):
            # Actually pass the loop's alpha and solver mode through.
            cov_, _, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
                                         return_costs=True)
            covs[method] = cov_
            costs, dual_gap = np.array(costs).T
            # Check that the costs always decrease
            assert_array_less(np.diff(costs), 0)
        # Check that the 2 approaches give similar results; the solvers
        # follow different numerical paths, so use a loose tolerance.
        assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
    # Smoke test the estimator; it should match a direct graph_lasso call
    # with the same alpha and the default ('cd') mode.
    model = GraphLasso(alpha=.1).fit(X)
    cov_, _ = graph_lasso(emp_cov, alpha=.1)
    assert_array_almost_equal(model.covariance_, cov_)
def test_graph_lasso_cv(random_state=1):
    """Smoke-test GraphLassoCV in verbose mode and with explicit alphas."""
    # Draw a tiny sample from a sparse multivariate normal.
    dim = 5
    n_samples = 6
    rng = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.96,
                                  random_state=rng)
    cov = linalg.inv(prec)
    X = rng.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    # Redirect stdout while smoke-testing verbose mode, restoring it even
    # if the fit raises.
    saved_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        # We need verbose very high so that Parallel prints on stdout
        GraphLassoCV(verbose=100, alphas=3, tol=1e-1).fit(X)
    finally:
        sys.stdout = saved_stdout
    # Smoke test with an explicit list of alphas.
    GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
tudorvio/nova | refs/heads/master | nova/tests/unit/scheduler/filters/test_utils.py | 53 | # Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import objects
from nova.scheduler.filters import utils
from nova import test
from nova.tests.unit.scheduler import fakes
# Aggregates shared by the tests below: two aggregates with scalar metadata
# values and one whose values are comma-separated lists, all containing the
# same host.
_AGGREGATE_FIXTURES = [
    objects.Aggregate(
        id=1,
        name='foo',
        hosts=['fake-host'],
        metadata={'k1': '1', 'k2': '2'},
    ),
    objects.Aggregate(
        id=2,
        name='bar',
        hosts=['fake-host'],
        metadata={'k1': '3', 'k2': '4'},
    ),
    objects.Aggregate(
        id=3,
        name='bar',
        hosts=['fake-host'],
        metadata={'k1': '6,7', 'k2': '8, 9'},
    ),
]
class TestUtils(test.NoDBTestCase):
    """Tests for the scheduler filter helpers in nova.scheduler.filters.utils."""
    def setUp(self):
        super(TestUtils, self).setUp()
    def test_aggregate_values_from_key(self):
        # Values for 'k1' are collected verbatim, one entry per aggregate.
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
        values = utils.aggregate_values_from_key(host_state, key_name='k1')
        self.assertEqual(set(['1', '3', '6,7']), values)
    def test_aggregate_values_from_key_with_wrong_key(self):
        # An unknown key yields an empty set rather than raising.
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
        values = utils.aggregate_values_from_key(host_state, key_name='k3')
        self.assertEqual(set(), values)
    def test_aggregate_metadata_get_by_host_no_key(self):
        # Without a key filter all keys are returned; comma-separated
        # values are split and whitespace-stripped.
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
        metadata = utils.aggregate_metadata_get_by_host(host_state)
        self.assertIn('k1', metadata)
        self.assertEqual(set(['1', '3', '7', '6']), metadata['k1'])
        self.assertIn('k2', metadata)
        self.assertEqual(set(['9', '8', '2', '4']), metadata['k2'])
    def test_aggregate_metadata_get_by_host_with_key(self):
        # Filtering by key returns only that key's (split) values.
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
        metadata = utils.aggregate_metadata_get_by_host(host_state, 'k1')
        self.assertIn('k1', metadata)
        self.assertEqual(set(['1', '3', '7', '6']), metadata['k1'])
    def test_aggregate_metadata_get_by_host_empty_result(self):
        host_state = fakes.FakeHostState(
            'fake', 'node', {'aggregates': []})
        metadata = utils.aggregate_metadata_get_by_host(host_state, 'k3')
        self.assertEqual({}, metadata)
    def test_validate_num_values(self):
        f = utils.validate_num_values
        # Empty set falls back to the default; cast_to converts the value.
        self.assertEqual("x", f(set(), default="x"))
        self.assertEqual(1, f(set(["1"]), cast_to=int))
        self.assertEqual(1.0, f(set(["1"]), cast_to=float))
        # based_on selects which of several values wins.
        self.assertEqual(1, f(set([1, 2]), based_on=min))
        self.assertEqual(2, f(set([1, 2]), based_on=max))
        # NOTE(review): '10' vs '9' yielding 9 implies values are cast
        # numerically before comparison -- confirm in utils.
        self.assertEqual(9, f(set(['10', '9']), based_on=min))
    def test_instance_uuids_overlap(self):
        inst1 = objects.Instance(uuid='aa')
        inst2 = objects.Instance(uuid='bb')
        instances = [inst1, inst2]
        host_state = fakes.FakeHostState('host1', 'node1', {})
        host_state.instances = {instance.uuid: instance
                                for instance in instances}
        self.assertTrue(utils.instance_uuids_overlap(host_state, ['aa']))
        self.assertFalse(utils.instance_uuids_overlap(host_state, ['zz']))
    def test_other_types_on_host(self):
        # Flavor id 1 is the only one on the host, so only other ids count.
        inst1 = objects.Instance(uuid='aa', instance_type_id=1)
        host_state = fakes.FakeHostState('host1', 'node1', {})
        host_state.instances = {inst1.uuid: inst1}
        self.assertFalse(utils.other_types_on_host(host_state, 1))
        self.assertTrue(utils.other_types_on_host(host_state, 2))
|
skearnes/pylearn2 | refs/heads/master | pylearn2/datasets/norb.py | 4 | """
An interface to the small NORB dataset. Unlike `./norb_small.py`, this reads
the original NORB file format, not the LISA lab's `.npy` version.
Currently only supports the Small NORB Dataset.
Download the dataset from
`here <http://www.cs.nyu.edu/~ylclab/data/norb-v1.0-small/>`_.
NORB dataset(s) by Fu Jie Huang and Yann LeCun.
"""
__authors__ = "Guillaume Desjardins and Matthew Koichi Grimes"
__copyright__ = "Copyright 2010-2014, Universite de Montreal"
__credits__ = __authors__.split(" and ")
__license__ = "3-clause BSD"
__maintainer__ = "Matthew Koichi Grimes"
__email__ = "mkg alum mit edu (@..)"
import bz2
import gzip
import logging
import os
import warnings
import numpy
import theano
from pylearn2.datasets import dense_design_matrix
from pylearn2.datasets.cache import datasetCache
from pylearn2.space import VectorSpace, Conv2DSpace, CompositeSpace
logger = logging.getLogger(__name__)
class SmallNORB(dense_design_matrix.DenseDesignMatrix):
    """
    An interface to the small NORB dataset.
    If instantiated with default arguments, target labels are integers
    representing categories, which can be looked up using
    category_name = SmallNORB.get_category(label).
    If instantiated with multi_target=True, labels are vectors of indices
    representing:
    [ category, instance, elevation, azimuth, lighting ]
    Like with category, there are class methods that map these ints to their
    actual values, e.g:
    category = SmallNORB.get_category(label[0])
    elevation = SmallNORB.get_elevation_degrees(label[2])
    Parameters
    ----------
    which_set: str
        Must be 'train' or 'test'.
    multi_target: bool, optional
        If False, each label is an integer labeling the image category. If
        True, each label is a vector: [category, instance, lighting, elevation,
        azimuth]. All labels are given as integers. Use the categories,
        elevation_degrees, and azimuth_degrees arrays to map from these
        integers to actual values.
    """
    # Actual image shape may change, e.g. after being preprocessed by
    # datasets.preprocessing.Downsample
    original_image_shape = (96, 96)
    _categories = ['animal',  # four-legged animal
                   'human',  # human figure
                   'airplane',
                   'truck',
                   'car']
    @classmethod
    def get_category(cls, scalar_label):
        """
        Returns the category string corresponding to an integer category label.
        """
        return cls._categories[int(scalar_label)]
    @classmethod
    def get_elevation_degrees(cls, scalar_label):
        """
        Returns the elevation, in degrees, corresponding to an integer
        elevation label.
        """
        scalar_label = int(scalar_label)
        assert scalar_label >= 0
        assert scalar_label < 9
        return 30 + 5 * scalar_label
    @classmethod
    def get_azimuth_degrees(cls, scalar_label):
        """
        Returns the azimuth, in degrees, corresponding to an integer
        label.
        """
        scalar_label = int(scalar_label)
        assert scalar_label >= 0
        assert scalar_label <= 34
        assert (scalar_label % 2) == 0
        return scalar_label * 10
    # Maps azimuth labels (ints) to their actual values, in degrees.
    azimuth_degrees = numpy.arange(0, 341, 20)
    # Maps a label type to its index within a label vector.
    label_type_to_index = {'category': 0,
                           'instance': 1,
                           'elevation': 2,
                           'azimuth': 3,
                           'lighting': 4}
    # Number of labels, for each label type.
    num_labels_by_type = (len(_categories),
                          10,  # instances
                          9,  # elevations
                          18,  # azimuths
                          6)  # lighting
    # [mkg] Dropped support for the 'center' argument for now. In Pylearn 1, it
    # shifted the pixel values from [0:255] by subtracting 127.5. Seems like a
    # form of preprocessing, which might be better implemented separately using
    # the Preprocess class.
    def __init__(self, which_set, multi_target=False, stop=None):
        assert which_set in ['train', 'test']
        self.which_set = which_set
        subtensor = None
        if stop:
            subtensor = slice(0, stop)
        X = SmallNORB.load(which_set, 'dat', subtensor=subtensor)
        # Casts to the GPU-supported float type, using theano._asarray(), a
        # safer alternative to numpy.asarray().
        #
        # TODO: move the dtype-casting to the view_converter's output space,
        # once dtypes-for-spaces is merged into master.
        X = theano._asarray(X, theano.config.floatX)
        # Formats data as rows in a matrix, for DenseDesignMatrix
        X = X.reshape(-1, 2*numpy.prod(self.original_image_shape))
        # This is uint8
        y = SmallNORB.load(which_set, 'cat', subtensor=subtensor)
        if multi_target:
            # Pass subtensor through so the extra label rows line up with X
            # (previously subtensor was omitted, which raised a TypeError
            # because load() had no default for it).
            y_extra = SmallNORB.load(which_set, 'info', subtensor=subtensor)
            y = numpy.hstack((y[:, numpy.newaxis], y_extra))
        datum_shape = ((2, ) +  # two stereo images
                       self.original_image_shape +
                       (1, ))  # one color channel
        # 's' is the stereo channel: 0 (left) or 1 (right)
        axes = ('b', 's', 0, 1, 'c')
        view_converter = StereoViewConverter(datum_shape, axes)
        super(SmallNORB, self).__init__(X=X,
                                        y=y,
                                        view_converter=view_converter)
    @classmethod
    def load(cls, which_set, filetype, subtensor=None):
        """Reads and returns a single file as a numpy array."""
        assert which_set in ['train', 'test']
        assert filetype in ['dat', 'cat', 'info']
        def getPath(which_set):
            # Builds the canonical small-NORB filename for the given set.
            dirname = os.path.join(os.getenv('PYLEARN2_DATA_PATH'),
                                   'norb_small/original')
            if which_set == 'train':
                instance_list = '46789'
            elif which_set == 'test':
                instance_list = '01235'
            filename = 'smallnorb-5x%sx9x18x6x2x96x96-%s-%s.mat' % \
                (instance_list, which_set + 'ing', filetype)
            return os.path.join(dirname, filename)
        def parseNORBFile(file_handle, subtensor=None, debug=False):
            """
            Load all or part of file 'file_handle' into a numpy ndarray
            .. todo::
                WRITEME properly
            :param file_handle: file from which to read. The file can be opened
                with open(), gzip.open() and bz2.BZ2File()
            @type file_handle: file-like object. Can be a gzip open file.
            :param subtensor: If subtensor is not None, it should be like the
                argument to numpy.ndarray.__getitem__. The following two
                expressions should return equivalent ndarray objects, but the one
                on the left may be faster and more memory efficient if the
                underlying file f is big.
                read(file_handle, subtensor) <===> read(file_handle)[*subtensor]
                Support for subtensors is currently spotty, so check the code to
                see if your particular type of subtensor is supported.
            """
            def readNums(file_handle, num_type, count):
                """
                Reads <count> elements of type <num_type> from the file and
                returns them as a numpy array.
                """
                num_bytes = count * numpy.dtype(num_type).itemsize
                string = file_handle.read(num_bytes)
                return numpy.fromstring(string, dtype=num_type)
            def readHeader(file_handle, debug=False, from_gzip=None):
                """
                Parses the binary NORB header.
                :param file_handle: an open file handle.
                :type file_handle: a file or gzip.GzipFile object
                :param from_gzip: bool or None
                :type from_gzip: if None determine the type of file handle.
                :returns: data type, element size, shape
                """
                if from_gzip is None:
                    from_gzip = isinstance(file_handle,
                                           (gzip.GzipFile, bz2.BZ2File))
                # Magic numbers from the NORB file format spec.
                key_to_type = {0x1E3D4C51: ('float32', 4),
                               # what is a packed matrix?
                               # 0x1E3D4C52: ('packed matrix', 0),
                               0x1E3D4C53: ('float64', 8),
                               0x1E3D4C54: ('int32', 4),
                               0x1E3D4C55: ('uint8', 1),
                               0x1E3D4C56: ('int16', 2)}
                type_key = readNums(file_handle, 'int32', 1)[0]
                elem_type, elem_size = key_to_type[type_key]
                if debug:
                    logger.debug("header's type key, type, type size: "
                                 "{0} {1} {2}".format(type_key, elem_type,
                                                      elem_size))
                if elem_type == 'packed matrix':
                    raise NotImplementedError('packed matrix not supported')
                num_dims = readNums(file_handle, 'int32', 1)[0]
                if debug:
                    logger.debug('# of dimensions, according to header: '
                                 '{0}'.format(num_dims))
                # The format always stores at least 3 shape entries, padding
                # with 1s when the tensor has fewer dimensions.
                if from_gzip:
                    shape = readNums(file_handle,
                                     'int32',
                                     max(num_dims, 3))[:num_dims]
                else:
                    shape = numpy.fromfile(file_handle,
                                           dtype='int32',
                                           count=max(num_dims, 3))[:num_dims]
                if debug:
                    logger.debug('Tensor shape, as listed in header: '
                                 '{0}'.format(shape))
                return elem_type, elem_size, shape
            elem_type, elem_size, shape = readHeader(file_handle, debug)
            beginning = file_handle.tell()
            num_elems = numpy.prod(shape)
            result = None
            if isinstance(file_handle, (gzip.GzipFile, bz2.BZ2File)):
                assert subtensor is None, \
                    "Subtensors on gzip files are not implemented."
                # readNums takes an element count, not a byte count;
                # previously num_elems * elem_size was passed here, which
                # over-read by a factor of elem_size and broke the reshape.
                result = readNums(file_handle,
                                  elem_type,
                                  num_elems).reshape(shape)
            elif subtensor is None:
                result = numpy.fromfile(file_handle,
                                        dtype=elem_type,
                                        count=num_elems).reshape(shape)
            elif isinstance(subtensor, slice):
                if subtensor.step not in (None, 1):
                    raise NotImplementedError('slice with step',
                                              subtensor.step)
                # Treat a missing start as 0 (previously a None start crashed
                # the arithmetic below).
                start = 0 if subtensor.start is None else subtensor.start
                if start != 0:
                    bytes_per_row = numpy.prod(shape[1:]) * elem_size
                    file_handle.seek(beginning + start * bytes_per_row)
                shape[0] = min(shape[0], subtensor.stop) - start
                num_elems = numpy.prod(shape)
                result = numpy.fromfile(file_handle,
                                        dtype=elem_type,
                                        count=num_elems).reshape(shape)
            else:
                raise NotImplementedError('subtensor access not written yet:',
                                          subtensor)
            return result
        fname = getPath(which_set)
        fname = datasetCache.cache_file(fname)
        # Open in binary mode: the .mat files are raw binary, and text mode
        # corrupts them on platforms that translate line endings.
        file_handle = open(fname, 'rb')
        return parseNORBFile(file_handle, subtensor)
    def get_topological_view(self, mat=None, single_tensor=True):
        """
        Returns a topological view of the data; when single_tensor is True
        the two stereo images are concatenated along the 's' axis into one
        tensor, otherwise a tuple of per-eye tensors is returned.
        """
        result = super(SmallNORB, self).get_topological_view(mat)
        if single_tensor:
            warnings.warn("The single_tensor argument is True by default to "
                          "maintain backwards compatibility. This argument "
                          "will be removed, and the behavior will become that "
                          "of single_tensor=False, as of August 2014.")
            axes = list(self.view_converter.axes)
            s_index = axes.index('s')
            assert axes.index('b') == 0
            num_image_pairs = result[0].shape[0]
            shape = (num_image_pairs, ) + self.view_converter.shape
            # inserts a singleton dimension where the 's' dimension will be
            mono_shape = shape[:s_index] + (1, ) + shape[(s_index+1):]
            for i, res in enumerate(result):
                logger.info("result {0} shape: {1}".format(i, str(res.shape)))
            result = tuple(t.reshape(mono_shape) for t in result)
            result = numpy.concatenate(result, axis=s_index)
        else:
            warnings.warn("The single_tensor argument will be removed on "
                          "August 2014. The behavior will be the same as "
                          "single_tensor=False.")
        return result
class StereoViewConverter(object):
    """
    Converts stereo image data between two formats:

    #. A dense design matrix, one stereo pair per row (`VectorSpace`)
    #. An image pair (`CompositeSpace` of two `Conv2DSpace`)

    The arguments describe how the data is laid out in the design matrix.

    Parameters
    ----------
    shape: tuple
        A tuple of 4 ints, describing the shape of each datum. This is the size
        of each axis in `<axes>`, excluding the `b` axis.
    axes : tuple, optional
        Tuple of the following elements in any order:

        * 'b' : batch axis
        * 's' : stereo axis
        * 0 : image axis 0 (row)
        * 1 : image axis 1 (column)
        * 'c' : channel axis

        Defaults to ('b', 's', 0, 1, 'c').
    """

    def __init__(self, shape, axes=None):
        if axes is None:
            # Bug fix: `axes` is documented as optional, but `list(axes)`
            # below raised TypeError when it was omitted. Fall back to the
            # conventional axis ordering.
            axes = ('b', 's', 0, 1, 'c')
        shape = tuple(shape)
        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")
        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)
        # `shape` excludes the batch axis, so drop 'b' before indexing.
        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             " axes: %s\n"
                             " shape: %s" %
                             (shape[datum_axes.index('s')],
                              axes,
                              shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            # Build the Conv2DSpace describing a single (mono) image:
            # same rows/columns/channels, stereo axis removed.
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes)

        conv2d_space = make_conv2d_space(shape, axes)
        # Topological view: a pair of identical mono-image spaces.
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        # Storage view: one flat vector per stereo pair.
        self.storage_space = VectorSpace(dim=numpy.prod(shape))

    def get_formatted_batch(self, batch, space):
        """
        Format `batch` (stored in this converter's storage space) as `space`.
        """
        return self.storage_space.np_format_as(batch, space)

    def design_mat_to_topo_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_formatted_view(), get_batch_topo()
        """
        return self.storage_space.np_format_as(design_mat, self.topo_space)

    def design_mat_to_weights_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_weights_view()
        """
        return self.design_mat_to_topo_view(design_mat)

    def topo_view_to_design_mat(self, topo_batch):
        """
        Used by `DenseDesignMatrix.set_topological_view()` and
        `DenseDesignMatrix.get_design_mat()`.
        """
        return self.topo_space.np_format_as(topo_batch, self.storage_space)

    def view_shape(self):
        """
        Return the per-datum shape (sizes of the non-batch axes).
        """
        return self.shape

    def weights_view_shape(self):
        """
        Return the shape used for weight visualization; same as view_shape().
        """
        return self.view_shape()

    def set_axes(self, axes):
        """
        Set (or change) the axis ordering; `self.shape` is permuted to match
        when an ordering was already set.

        Parameters
        ----------
        axes : tuple
            Permutation of ('b', 's', 0, 1, 'c') with 'b' first.
        """
        axes = tuple(axes)
        if len(axes) != 5:
            raise ValueError("Axes must have 5 elements; got %s" % str(axes))
        for required_axis in ('b', 's', 0, 1, 'c'):
            if required_axis not in axes:
                raise ValueError("Axes must contain 'b', 's', 0, 1, and 'c'. "
                                 "Got %s." % str(axes))
        if axes.index('b') != 0:
            raise ValueError("The 'b' axis must come first (axes = %s)." %
                             str(axes))

        def get_batchless_axes(axes):
            # Drop 'b', since self.shape only covers the datum axes.
            axes = list(axes)
            axes.remove('b')
            return tuple(axes)

        if hasattr(self, 'axes'):
            # Reorders the shape vector to match the new axis ordering.
            assert hasattr(self, 'shape')
            old_axes = get_batchless_axes(self.axes)
            new_axes = get_batchless_axes(axes)
            new_shape = tuple(self.shape[old_axes.index(a)] for a in new_axes)
            self.shape = new_shape
        self.axes = axes
|
pocketpimps/pocketpimps-app | refs/heads/master | node_modules/node-gyp/gyp/gyp_main.py | 1452 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Make sure we're using the version of pylib in this repo, not one installed
# elsewhere on the system.
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
import gyp

if __name__ == '__main__':
    # Delegate to gyp's command-line entry point and propagate its exit code.
    sys.exit(gyp.script_main())
|
schumi2004/NOT_UPDATED_Sick-Beard-Dutch | refs/heads/dutch-tpb | lib/subliminal/core.py | 2 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .exceptions import DownloadFailedError
from .services import ServiceConfig
from .tasks import DownloadTask, ListTask
from .utils import get_keywords
from .videos import Episode, Movie, scan
from .language import Language
from collections import defaultdict
from itertools import groupby
import bs4
import guessit
import logging
__all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE', 'MATCHING_CONFIDENCE',
           'create_list_tasks', 'create_download_tasks', 'consume_task', 'matching_confidence',
           'key_subtitles', 'group_by_video']
logger = logging.getLogger("subliminal")
# Names of all known subtitle services; used as the default service set.
SERVICES = ['opensubtitles', 'bierdopje', 'subswiki', 'subtitulos', 'thesubdb', 'addic7ed', 'tvsubtitles', 'itasa']
# Sort-criterion identifiers accepted in the `order` argument of key_subtitles().
LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE = range(4)
def create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter):
    """Create a list of :class:`~subliminal.tasks.ListTask` from one or more paths using the given criteria

    :param paths: path(s) to video file or folder
    :type paths: string or list
    :param set languages: languages to search for
    :param list services: services to use for the search
    :param bool force: force searching for subtitles even if some are detected
    :param bool multi: search multiple languages for the same video
    :param string cache_dir: path to the cache directory to use
    :param int max_depth: maximum depth for scanning entries
    :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
    :return: the created tasks
    :rtype: list of :class:`~subliminal.tasks.ListTask`

    """
    # Scan every given path for videos and the subtitles already present.
    scan_result = []
    for p in paths:
        scan_result.extend(scan(p, max_depth, scan_filter))
    logger.debug(u'Found %d videos in %r with maximum depth %d' % (len(scan_result), paths, max_depth))
    tasks = []
    config = ServiceConfig(multi, cache_dir)
    # Drop services whose required parser features are unavailable.
    services = filter_services(services)
    for video, detected_subtitles in scan_result:
        detected_languages = set(s.language for s in detected_subtitles)
        wanted_languages = languages.copy()
        if not force and multi:
            # Multi mode: only search for languages not already on disk.
            wanted_languages -= detected_languages
            if not wanted_languages:
                logger.debug(u'No need to list multi subtitles %r for %r because %r detected' % (languages, video, detected_languages))
                continue
        if not force and not multi and Language('Undetermined') in detected_languages:
            # Single mode: any existing subtitle (language unknown) is enough.
            logger.debug(u'No need to list single subtitles %r for %r because one detected' % (languages, video))
            continue
        logger.debug(u'Listing subtitles %r for %r with services %r' % (wanted_languages, video, services))
        for service_name in services:
            # Python 2 implicit-relative import of the service plugin module.
            mod = __import__('services.' + service_name, globals=globals(), locals=locals(), fromlist=['Service'], level=-1)
            service = mod.Service
            if not service.check_validity(video, wanted_languages):
                continue
            # One task per (video, service), restricted to the languages the
            # service actually supports.
            task = ListTask(video, wanted_languages & service.languages, service_name, config)
            logger.debug(u'Created task %r' % task)
            tasks.append(task)
    return tasks
def create_download_tasks(subtitles_by_video, languages, multi):
    """Create :class:`~subliminal.tasks.DownloadTask` objects from listed subtitles.

    In single mode one task is created per video with all of its candidate
    subtitles. In multi mode one task is created per (video, language) run so
    that a subtitle can be downloaded for each wanted language.

    :param subtitles_by_video: :class:`~subliminal.tasks.ListTask` results with ordered subtitles
    :type subtitles_by_video: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.Subtitle`]
    :param languages: languages in preferred order
    :type languages: :class:`~subliminal.language.language_list`
    :param bool multi: download multiple languages for the same video
    :return: the created tasks
    :rtype: list of :class:`~subliminal.tasks.DownloadTask`

    """
    download_tasks = []

    def add_task(video, subtitle_group):
        # Shared helper: build, log and collect one download task.
        task = DownloadTask(video, subtitle_group)
        logger.debug(u'Created task %r' % task)
        download_tasks.append(task)

    for video, ordered_subtitles in subtitles_by_video.iteritems():
        if not ordered_subtitles:
            continue
        if multi:
            # One task per language run, preserving the preference order.
            for _, same_language in groupby(ordered_subtitles, lambda s: languages.index(s.language)):
                add_task(video, list(same_language))
        else:
            add_task(video, list(ordered_subtitles))
    return download_tasks
def consume_task(task, services=None):
    """Consume a task. If the ``services`` parameter is given, the function will attempt
    to get the service from it. In case the service is not in ``services``, it will be initialized
    and put in ``services``

    :param task: task to consume
    :type task: :class:`~subliminal.tasks.ListTask` or :class:`~subliminal.tasks.DownloadTask`
    :param dict services: mapping between the service name and an instance of this service
    :return: the result of the task
    :rtype: list of :class:`~subliminal.subtitles.ResultSubtitle`

    """
    if services is None:
        services = {}
    logger.info(u'Consuming %r' % task)
    result = None
    if isinstance(task, ListTask):
        # Listing: ask a single service which subtitles are available.
        service = get_service(services, task.service, config=task.config)
        result = service.list(task.video, task.languages)
    elif isinstance(task, DownloadTask):
        # Downloading: try each candidate subtitle in preference order and
        # stop at the first successful download.
        for subtitle in task.subtitles:
            service = get_service(services, subtitle.service)
            try:
                service.download(subtitle)
                result = [subtitle]
                break
            except DownloadFailedError:
                logger.warning(u'Could not download subtitle %r, trying next' % subtitle)
                continue
        if result is None:
            logger.error(u'No subtitles could be downloaded for video %r' % task.video)
    return result
def matching_confidence(video, subtitle):
    """Compute the probability (confidence) that the subtitle matches the video

    The confidence is built as a binary number: one high-order bit per matched
    video property (series/season/episode or title/year) and three low-order
    bits counting matched keywords, then normalized by the best possible score.

    :param video: video to match
    :type video: :class:`~subliminal.videos.Video`
    :param subtitle: subtitle to match
    :type subtitle: :class:`~subliminal.subtitles.Subtitle`
    :return: the matching probability
    :rtype: float

    """
    # Guess the subtitle's release metadata from its release name.
    guess = guessit.guess_file_info(subtitle.release, 'autodetect')
    video_keywords = get_keywords(video.guess)
    subtitle_keywords = get_keywords(guess) | subtitle.keywords
    logger.debug(u'Video keywords %r - Subtitle keywords %r' % (video_keywords, subtitle_keywords))
    replacement = {'keywords': len(video_keywords & subtitle_keywords)}
    if isinstance(video, Episode):
        replacement.update({'series': 0, 'season': 0, 'episode': 0})
        matching_format = '{series:b}{season:b}{episode:b}{keywords:03b}'
        # `best` is the score when everything matches.
        best = matching_format.format(series=1, season=1, episode=1, keywords=len(video_keywords))
        if guess['type'] in ['episode', 'episodesubtitle']:
            if 'series' in guess and guess['series'].lower() == video.series.lower():
                replacement['series'] = 1
            if 'season' in guess and guess['season'] == video.season:
                replacement['season'] = 1
            if 'episodeNumber' in guess and guess['episodeNumber'] == video.episode:
                replacement['episode'] = 1
    elif isinstance(video, Movie):
        replacement.update({'title': 0, 'year': 0})
        matching_format = '{title:b}{year:b}{keywords:03b}'
        best = matching_format.format(title=1, year=1, keywords=len(video_keywords))
        if guess['type'] in ['movie', 'moviesubtitle']:
            if 'title' in guess and guess['title'].lower() == video.title.lower():
                replacement['title'] = 1
            if 'year' in guess and guess['year'] == video.year:
                replacement['year'] = 1
    else:
        # Unknown video type: no way to score the match.
        logger.debug(u'Not able to compute confidence for %r' % video)
        return 0.0
    logger.debug(u'Found %r' % replacement)
    # Interpret the formatted bit string as an integer and normalize.
    confidence = float(int(matching_format.format(**replacement), 2)) / float(int(best, 2))
    logger.info(u'Computed confidence %.4f for %r and %r' % (confidence, video, subtitle))
    return confidence
def get_service(services, service_name, config=None):
    """Get a service from its name in the service dict with the specified config.

    If the service does not exist in the service dict, it is created and added to the dict.

    :param dict services: dict where to get existing services or put created ones
    :param string service_name: name of the service to get
    :param config: config to use for the service
    :type config: :class:`~subliminal.services.ServiceConfig` or None
    :return: the corresponding service
    :rtype: :class:`~subliminal.services.ServiceBase`

    """
    try:
        return services[service_name]
    except KeyError:
        # Lazily import the plugin module, then create, initialize and
        # configure a fresh service instance before caching it.
        module = __import__('services.' + service_name, globals=globals(), locals=locals(), fromlist=['Service'], level=-1)
        service = module.Service()
        service.init()
        service.config = config
        services[service_name] = service
        return service
def key_subtitles(subtitle, video, languages, services, order):
    """Create a key to sort subtitle using the given order

    :param subtitle: subtitle to sort
    :type subtitle: :class:`~subliminal.subtitles.ResultSubtitle`
    :param video: video to match
    :type video: :class:`~subliminal.videos.Video`
    :param list languages: languages in preferred order
    :param list services: services in preferred order
    :param order: preferred order for subtitles sorting
    :type list: list of :data:`LANGUAGE_INDEX`, :data:`SERVICE_INDEX`, :data:`SERVICE_CONFIDENCE`, :data:`MATCHING_CONFIDENCE`
    :return: a key ready to use for subtitles sorting
    :rtype: int

    """
    # Each criterion appends a fixed-width decimal segment; the concatenated
    # segments are read back as one integer, so earlier criteria dominate.
    segments = []
    for criterion in order:
        if criterion == LANGUAGE_INDEX:
            segments.append('{0:03d}'.format(len(languages) - languages.index(subtitle.language) - 1))
            # NOTE(review): list.index uses the same equality as this
            # comparison, so this segment looks always-true — confirm whether
            # Language equality is asymmetric before simplifying.
            segments.append('{0:01d}'.format(subtitle.language == languages[languages.index(subtitle.language)]))
        elif criterion == SERVICE_INDEX:
            segments.append('{0:02d}'.format(len(services) - services.index(subtitle.service) - 1))
        elif criterion == SERVICE_CONFIDENCE:
            segments.append('{0:04d}'.format(int(subtitle.confidence * 1000)))
        elif criterion == MATCHING_CONFIDENCE:
            confidence = matching_confidence(video, subtitle) if subtitle.release else 0
            segments.append('{0:04d}'.format(int(confidence * 1000)))
    return int(''.join(segments))
def group_by_video(list_results):
    """Group the results of :class:`ListTasks <subliminal.tasks.ListTask>` into a
    dictionary of :class:`~subliminal.videos.Video` => :class:`~subliminal.subtitles.Subtitle`

    :param list_results:
    :type list_results: list of result of :class:`~subliminal.tasks.ListTask`
    :return: subtitles grouped by videos
    :rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.Subtitle`]

    """
    grouped = defaultdict(list)
    # A video may appear several times (once per service); merge the lists.
    # A None/empty subtitle list still registers the video with no subtitles.
    for video, subtitles in list_results:
        grouped[video].extend(subtitles or [])
    return grouped
def filter_services(services):
    """Filter out services that are not available because of a missing feature

    :param list services: service names to filter
    :return: a copy of the initial list of service names without unavailable ones
    :rtype: list

    """
    available = list(services)
    for service_name in services:
        # Import each plugin and check that bs4 has at least one of the
        # parser features it requires.
        mod = __import__('services.' + service_name, globals=globals(), locals=locals(), fromlist=['Service'], level=-1)
        service = mod.Service
        required = service.required_features
        if required is None:
            continue
        if bs4.builder_registry.lookup(*required) is not None:
            continue
        logger.warning(u'Service %s not available: none of available features could be used. One of %r required' % (service_name, required))
        available.remove(service_name)
    return available
|
gooddata/openstack-nova | refs/heads/master | nova/ipv6/__init__.py | 125 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.ipv6.api import * # noqa
|
flyher/pymo | refs/heads/master | symbian/PythonForS60/module-repo/standard-modules/weakref.py | 12 | """Weak reference support for Python.
This module is an implementation of PEP 205:
http://python.sourceforge.net/peps/pep-0205.html
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
import UserDict
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from exceptions import ReferenceError
# Both proxy flavours (plain and callable), for isinstance() checks.
ProxyTypes = (ProxyType, CallableProxyType)

__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
           "WeakKeyDictionary", "ReferenceType", "ProxyType",
           "CallableProxyType", "ProxyTypes", "WeakValueDictionary"]
class WeakValueDictionary(UserDict.UserDict):
    """Mapping class that references values weakly.

    Entries in the dictionary will be discarded when no strong
    reference to the value exists anymore
    """
    # We inherit the constructor without worrying about the input
    # dictionary; since it uses our .update() method, we get the right
    # checks (if the other dictionary is a WeakValueDictionary,
    # objects are unwrapped on the way out, and we always wrap on the
    # way in).

    def __init__(self, *args, **kw):
        # Callback fired when a stored value is garbage-collected: remove
        # the corresponding entry. A weak reference to self is used so the
        # callback does not keep this dictionary alive.
        def remove(wr, selfref=ref(self)):
            self = selfref()
            if self is not None:
                del self.data[wr.key]
        self._remove = remove
        UserDict.UserDict.__init__(self, *args, **kw)

    def __getitem__(self, key):
        # Dereference the stored weak reference; a dead reference is
        # reported as a missing key.
        o = self.data[key]()
        if o is None:
            raise KeyError, key
        else:
            return o

    def __contains__(self, key):
        try:
            o = self.data[key]()
        except KeyError:
            return False
        # A dead reference counts as absent.
        return o is not None

    def has_key(self, key):
        # Python 2 spelling of __contains__.
        try:
            o = self.data[key]()
        except KeyError:
            return False
        return o is not None

    def __repr__(self):
        return "<WeakValueDictionary at %s>" % id(self)

    def __setitem__(self, key, value):
        # Wrap the value in a KeyedRef so the removal callback knows which
        # key to delete when the value dies.
        self.data[key] = KeyedRef(value, self._remove, key)

    def copy(self):
        # Shallow copy; entries whose value already died are skipped.
        new = WeakValueDictionary()
        for key, wr in self.data.items():
            o = wr()
            if o is not None:
                new[key] = o
        return new

    def get(self, key, default=None):
        try:
            wr = self.data[key]
        except KeyError:
            return default
        else:
            o = wr()
            if o is None:
                # The referent died between insertion and this lookup;
                # treat the entry as missing.
                return default
            else:
                return o

    def items(self):
        # Only live entries are returned.
        L = []
        for key, wr in self.data.items():
            o = wr()
            if o is not None:
                L.append((key, o))
        return L

    def iteritems(self):
        # Lazily yields only entries whose value is still alive.
        for wr in self.data.itervalues():
            value = wr()
            if value is not None:
                yield wr.key, value

    def iterkeys(self):
        return self.data.iterkeys()

    def __iter__(self):
        return self.data.iterkeys()

    def itervaluerefs(self):
        """Return an iterator that yields the weak references to the values.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the values around longer than needed.

        """
        return self.data.itervalues()

    def itervalues(self):
        for wr in self.data.itervalues():
            obj = wr()
            if obj is not None:
                yield obj

    def popitem(self):
        # Keep popping until a live entry is found; raises KeyError when
        # self.data is empty (propagated from dict.popitem).
        while 1:
            key, wr = self.data.popitem()
            o = wr()
            if o is not None:
                return key, o

    def pop(self, key, *args):
        try:
            o = self.data.pop(key)()
        except KeyError:
            if args:
                return args[0]
            raise
        if o is None:
            # Entry existed but the value is dead: same as a missing key.
            raise KeyError, key
        else:
            return o

    def setdefault(self, key, default=None):
        try:
            wr = self.data[key]
        except KeyError:
            self.data[key] = KeyedRef(default, self._remove, key)
            return default
        else:
            # NOTE(review): if the existing reference is dead this returns
            # None instead of storing the default — confirm against the
            # stdlib version before relying on it.
            return wr()

    def update(self, dict=None, **kwargs):
        d = self.data
        if dict is not None:
            if not hasattr(dict, "items"):
                # Accept any iterable of key/value pairs.
                dict = type({})(dict)
            for key, o in dict.items():
                d[key] = KeyedRef(o, self._remove, key)
        if len(kwargs):
            self.update(kwargs)

    def valuerefs(self):
        """Return a list of weak references to the values.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the values around longer than needed.

        """
        return self.data.values()

    def values(self):
        L = []
        for wr in self.data.values():
            o = wr()
            if o is not None:
                L.append(o)
        return L
class KeyedRef(ref):
    """Specialized reference that includes a key corresponding to the value.

    This is used in the WeakValueDictionary to avoid having to create
    a function object for each key stored in the mapping.  A shared
    callback object can use the 'key' attribute of a KeyedRef instead
    of getting a reference to the key from an enclosing scope.

    """

    # One extra slot only; avoids a per-instance __dict__.
    __slots__ = "key",

    def __new__(type, ob, callback, key):
        # The underlying weak reference is set up by ref.__new__; the key
        # is attached here so it is available as soon as the object exists.
        self = ref.__new__(type, ob, callback)
        self.key = key
        return self

    def __init__(self, ob, callback, key):
        # Forward only (ob, callback); the key was handled in __new__.
        super(KeyedRef, self).__init__(ob, callback)
class WeakKeyDictionary(UserDict.UserDict):
    """ Mapping class that references keys weakly.

    Entries in the dictionary will be discarded when there is no
    longer a strong reference to the key. This can be used to
    associate additional data with an object owned by other parts of
    an application without adding attributes to those objects. This
    can be especially useful with objects that override attribute
    accesses.
    """

    def __init__(self, dict=None):
        self.data = {}
        # Callback fired when a key is garbage-collected: drop its entry.
        # A weak reference to self keeps the callback from pinning the dict.
        def remove(k, selfref=ref(self)):
            self = selfref()
            if self is not None:
                del self.data[k]
        self._remove = remove
        if dict is not None: self.update(dict)

    def __delitem__(self, key):
        # Internally keys are stored as weak references.
        del self.data[ref(key)]

    def __getitem__(self, key):
        return self.data[ref(key)]

    def __repr__(self):
        return "<WeakKeyDictionary at %s>" % id(self)

    def __setitem__(self, key, value):
        self.data[ref(key, self._remove)] = value

    def copy(self):
        # Shallow copy; entries whose key already died are skipped.
        new = WeakKeyDictionary()
        for key, value in self.data.items():
            o = key()
            if o is not None:
                new[o] = value
        return new

    def get(self, key, default=None):
        return self.data.get(ref(key),default)

    def has_key(self, key):
        # Unweakrefable objects simply cannot be present.
        try:
            wr = ref(key)
        except TypeError:
            return 0
        return wr in self.data

    def __contains__(self, key):
        try:
            wr = ref(key)
        except TypeError:
            return 0
        return wr in self.data

    def items(self):
        # Only entries with live keys are returned.
        L = []
        for key, value in self.data.items():
            o = key()
            if o is not None:
                L.append((o, value))
        return L

    def iteritems(self):
        for wr, value in self.data.iteritems():
            key = wr()
            if key is not None:
                yield key, value

    def iterkeyrefs(self):
        """Return an iterator that yields the weak references to the keys.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the keys around longer than needed.

        """
        return self.data.iterkeys()

    def iterkeys(self):
        for wr in self.data.iterkeys():
            obj = wr()
            if obj is not None:
                yield obj

    def __iter__(self):
        return self.iterkeys()

    def itervalues(self):
        return self.data.itervalues()

    def keyrefs(self):
        """Return a list of weak references to the keys.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the keys around longer than needed.

        """
        return self.data.keys()

    def keys(self):
        L = []
        for wr in self.data.keys():
            o = wr()
            if o is not None:
                L.append(o)
        return L

    def popitem(self):
        # Keep popping until an entry with a live key is found; raises
        # KeyError when self.data is empty (from dict.popitem).
        while 1:
            key, value = self.data.popitem()
            o = key()
            if o is not None:
                return o, value

    def pop(self, key, *args):
        return self.data.pop(ref(key), *args)

    def setdefault(self, key, default=None):
        return self.data.setdefault(ref(key, self._remove),default)

    def update(self, dict=None, **kwargs):
        d = self.data
        if dict is not None:
            if not hasattr(dict, "items"):
                # Accept any iterable of key/value pairs.
                dict = type({})(dict)
            for key, value in dict.items():
                d[ref(key, self._remove)] = value
        if len(kwargs):
            self.update(kwargs)
mushtaqak/edx-platform | refs/heads/master | lms/djangoapps/mobile_api/users/views.py | 19 | """
Views for user API
"""
from django.shortcuts import redirect
from django.utils import dateparse
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from opaque_keys.edx.keys import UsageKey
from opaque_keys import InvalidKeyError
from courseware.access import is_mobile_available_for_user
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views import get_current_child, save_positions_recursively_up
from student.models import CourseEnrollment, User
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .serializers import CourseEnrollmentSerializer, UserSerializer
from .. import errors
from ..utils import mobile_view, mobile_course_access
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get information about the specified user and
        access other resources the user has permissions for.

        Users are redirected to this endpoint after logging in.

        You can use the **course_enrollments** value in
        the response to get a list of courses the user is enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}

    **Response Values**

        * id: The ID of the user.

        * username: The username of the currently logged in user.

        * email: The email address of the currently logged in user.

        * name: The full name of the currently logged in user.

        * course_enrollments: The URI to list the courses the currently logged
          in user is enrolled in.
    """
    # Prefetch related rows needed by the serializer to avoid extra queries.
    queryset = (
        User.objects.all()
        .select_related('profile', 'course_enrollments')
    )
    serializer_class = UserSerializer
    # Look the user up by username (URL kwarg) rather than primary key.
    lookup_field = 'username'
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
    """
    **Use Case**

        Get or update the ID of the module that the specified user last visited in the specified course.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

        PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

        body:
            last_visited_module_id={module_id}
            modification_date={date}

            The modification_date is optional. If it is present, the update will only take effect
            if the modification_date is later than the modification_date saved on the server.

    **Response Values**

        * last_visited_module_id: The ID of the last module visited by the user in the course.

        * last_visited_module_path: The ID of the modules in the path from the
          last visited module to the course module.
    """

    http_method_names = ["get", "patch"]

    def _last_visited_module_path(self, request, course):
        """
        Returns the path from the last module visited by the current user in the given course up to
        the course module. If there is no such visit, the first item deep enough down the course
        tree is used.
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)

        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course.id, course=course
        )

        # Walk down course -> chapter -> section following the user's saved
        # position (depth limited to 2 above).
        path = [course_module]
        chapter = get_current_child(course_module, min_depth=2)
        if chapter is not None:
            path.append(chapter)
            section = get_current_child(chapter, min_depth=1)
            if section is not None:
                path.append(section)
        # Deepest module first, course last.
        path.reverse()
        return path

    def _get_course_info(self, request, course):
        """
        Returns the course status
        """
        path = self._last_visited_module_path(request, course)
        path_ids = [unicode(module.location) for module in path]
        return Response({
            "last_visited_module_id": path_ids[0],
            "last_visited_module_path": path_ids,
        })

    def _update_last_visited_module_id(self, request, course, module_key, modification_date):
        """
        Saves the module id if the found modification_date is less recent than the passed modification date
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        try:
            module_descriptor = modulestore().get_item(module_key)
        except ItemNotFoundError:
            return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
        module = get_module_for_descriptor(
            request.user, request, module_descriptor, field_data_cache, course.id, course=course
        )

        if modification_date:
            # Compare against the stored 'position' timestamp; a stale client
            # update is ignored and the current status is returned instead.
            key = KeyValueStore.Key(
                scope=Scope.user_state,
                user_id=request.user.id,
                block_scope_id=course.location,
                field_name='position'
            )
            original_store_date = field_data_cache.last_modified(key)
            if original_store_date is not None and modification_date < original_store_date:
                # old modification date so skip update
                return self._get_course_info(request, course)

        save_positions_recursively_up(request.user, request, field_data_cache, module, course=course)
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def get(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Get the ID of the module that the specified user last visited in the specified course.
        """
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def patch(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Update the ID of the module that the specified user last visited in the specified course.
        """
        module_id = request.DATA.get("last_visited_module_id")
        modification_date_string = request.DATA.get("modification_date")

        modification_date = None
        if modification_date_string:
            modification_date = dateparse.parse_datetime(modification_date_string)
            # Reject unparsable or naive (timezone-less) datetimes.
            if not modification_date or not modification_date.tzinfo:
                return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)

        if module_id:
            try:
                module_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
            return self._update_last_visited_module_id(request, course, module_key, modification_date)
        else:
            # The arguments are optional, so if there's no argument just succeed
            return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**

        Get information about the courses the currently logged in user is
        enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_enrollments/

    **Response Values**

        * created: The date the course was created.
        * mode: The type of certificate registration for this course:  honor or
          certified.
        * is_active: Whether the course is currently active; true or false.
        * certificate: Information about the user's earned certificate in the course.
          * url: URL to the downloadable version of the certificate, if exists.
        * course: A collection of data about the course:

          * course_updates: The URI to get data for course updates.
          * number: The course number.
          * org: The organization that created the course.
          * video_outline: The URI to get the list of all vides the user can
            access in the course.
          * id: The unique ID of the course.
          * subscription_id: A unique "clean" (alphanumeric with '_') ID of the course.
          * latest_updates:  Reserved for future use.
          * end: The end date of the course.
          * name: The name of the course.
          * course_handouts: The URI to get data for course handouts.
          * start: The data and time the course starts.
          * course_image: The path to the course image.
    """
    queryset = CourseEnrollment.objects.all()
    serializer_class = CourseEnrollmentSerializer
    lookup_field = 'username'

    def get_queryset(self):
        # Active enrollments for the requested user, most recent first.
        enrollments = self.queryset.filter(
            user__username=self.kwargs['username'],
            is_active=True
        ).order_by('created').reverse()
        # Keep only enrollments whose course still exists and is visible to
        # this user on mobile.
        return [
            enrollment for enrollment in enrollments
            if enrollment.course_overview and
            is_mobile_available_for_user(self.request.user, enrollment.course_overview)
        ]
@api_view(["GET"])
@mobile_view()
def my_user_info(request):
    """
    Redirect the caller to the user-detail endpoint of the currently
    logged-in user.
    """
    current_username = request.user.username
    return redirect("user-detail", username=current_username)
|
sinpantuflas/aubio | refs/heads/master | waflib/Tools/python.py | 89 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys
from waflib import Utils,Options,Errors,Logs
from waflib.TaskGen import extension,before_method,after_method,feature
from waflib.Configure import conf
# Minimal C program compiled by check_python_headers() to prove that a
# Python interpreter can be embedded (headers found, libpython linkable).
FRAG='''
#include <Python.h>
#ifdef __cplusplus
extern "C" {
#endif
void Py_Initialize(void);
void Py_Finalize(void);
#ifdef __cplusplus
}
#endif
int main(int argc, char **argv)
{
(void)argc; (void)argv;
Py_Initialize();
Py_Finalize();
return 0;
}
'''
# Helper script run as `python -c INST src dst cfile` by install_pyfile()
# to byte-compile an installed .py file into .pyc/.pyo.
INST='''
import sys, py_compile
py_compile.compile(sys.argv[1], sys.argv[2], sys.argv[3])
'''
# Default import preamble for get_python_variables() queries against
# distutils.sysconfig.
DISTUTILS_IMP=['from distutils.sysconfig import get_config_var, get_python_lib']
@extension('.py')
def process_py(self, node):
    """Extension hook for .py sources: queue installation (and later
    byte-compilation) of the file after the build phase completes.
    """
    # Only relevant during install/uninstall; outside of that, bld may
    # not even expose an is_install attribute.
    if not getattr(getattr(self, 'bld', None), 'is_install', None):
        return
    # Default destination; an explicitly falsy install_path means "do not
    # install this file at all".
    if not hasattr(self, 'install_path'):
        self.install_path = '${PYTHONDIR}'
    elif not self.install_path:
        return

    def schedule_install(ctx):
        # Optional install_from re-roots the relative path of the file.
        source_root = getattr(self, 'install_from', None)
        if source_root:
            source_root = self.path.find_dir(source_root)
        install_pyfile(self, node, source_root)
    self.bld.add_post_fun(schedule_install)
def install_pyfile(self, node, install_from=None):
    """Install one .py file and manage its .pyc/.pyo byte-compiled twins.

    On uninstall (is_install < 0) the compiled files are removed; on
    install (is_install > 0) they are regenerated with the configured
    interpreter unless they are already newer than the source.
    """
    from_node = install_from or node.parent
    tsk = self.bld.install_as(self.install_path + '/' + node.path_from(from_node), node, postpone=False)
    path = tsk.get_install_path()
    if self.bld.is_install < 0:
        Logs.info("+ removing byte compiled python files")
        # 'c' and 'o' suffixes -> path+'c' (.pyc) and path+'o' (.pyo)
        for x in 'co':
            try:
                os.remove(path + x)
            except OSError:
                pass
    if self.bld.is_install > 0:
        try:
            st1 = os.stat(path)
        except OSError:
            Logs.error('The python file is missing, this should not happen')
        for x in ['c', 'o']:
            # PYC / PYO env flags control whether each variant is built.
            do_inst = self.env['PY' + x.upper()]
            try:
                st2 = os.stat(path + x)
            except OSError:
                pass
            else:
                # Skip recompilation when the compiled file is up to date.
                if st1.st_mtime <= st2.st_mtime:
                    do_inst = False
            if do_inst:
                # .pyo needs the optimize flag (normally '-O').
                lst = (x == 'o') and [self.env['PYFLAGS_OPT']] or []
                # a: source, b: target, c: path recorded inside the
                # compiled file (without the destdir prefix).
                (a, b, c) = (path, path + x, tsk.get_install_path(destdir=False) + x)
                argv = self.env['PYTHON'] + lst + ['-c', INST, a, b, c]
                Logs.info('+ byte compiling %r' % (path + x))
                env = self.env.env or None
                ret = Utils.subprocess.Popen(argv, env=env).wait()
                if ret:
                    raise Errors.WafError('py%s compilation failed %r' % (x, path))
@feature('py')
def feature_py(self):
    # Intentionally empty: the 'py' feature exists only so that .py
    # sources are routed to the process_py extension hook.
    pass
@feature('pyext')
@before_method('propagate_uselib_vars', 'apply_link')
@after_method('apply_bundle')
def init_pyext(self):
    """Prepare a task generator that builds a Python extension module."""
    # Ensure the PYEXT uselib variable (flags detected at configure time)
    # is applied to this target.
    self.uselib = self.to_list(getattr(self, 'uselib', []))
    if not 'PYEXT' in self.uselib:
        self.uselib.append('PYEXT')
    # Extension modules use the python-specific naming pattern (e.g.
    # foo.so / foo.pyd) for every supported shared-lib kind.
    self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN = self.env.pyext_PATTERN
    self.env.fcshlib_PATTERN = self.env.dshlib_PATTERN = self.env.pyext_PATTERN
    # Default install location; a falsy install_path disables installation.
    try:
        if not self.install_path:
            return
    except AttributeError:
        self.install_path = '${PYTHONARCHDIR}'
@feature('pyext')
@before_method('apply_link', 'apply_bundle')
def set_bundle(self):
    """On macOS, build Python extensions as mac bundles."""
    building_on_darwin = Utils.unversioned_sys_platform() == 'darwin'
    if building_on_darwin:
        self.mac_bundle = True
@before_method('propagate_uselib_vars')
@feature('pyembed')
def init_pyembed(self):
    """Apply the PYEMBED flags to targets embedding a Python interpreter."""
    uselib_vars = self.to_list(getattr(self, 'uselib', []))
    self.uselib = uselib_vars
    if 'PYEMBED' not in uselib_vars:
        uselib_vars.append('PYEMBED')
@conf
def get_python_variables(self, variables, imports=None):
    """Run the configured interpreter to evaluate *variables* and return
    their values.

    Each expression in *variables* is printed with repr() by a small
    generated script; the output is parsed back into None, strings or
    integers (one value per line, in order).
    """
    if not imports:
        try:
            imports = self.python_imports
        except AttributeError:
            imports = DISTUTILS_IMP
    # Build the probe script: imports, a blank line, then one print per
    # requested expression.
    program = list(imports)
    program.append('')
    for v in variables:
        program.append("print(repr(%s))" % v)
    os_env = dict(os.environ)
    try:
        # This variable can make distutils error out on macOS.
        del os_env['MACOSX_DEPLOYMENT_TARGET']
    except KeyError:
        pass
    try:
        out = self.cmd_and_log(self.env.PYTHON + ['-c', '\n'.join(program)], env=os_env)
    except Errors.WafError:
        self.fatal('The distutils module is unusable: install "python-devel"?')
    self.to_log(out)
    return_values = []
    for s in out.split('\n'):
        s = s.strip()
        if not s:
            continue
        if s == 'None':
            return_values.append(None)
        elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'):
            # Quoted repr of a string: eval is safe-ish here because the
            # text comes from repr() in our own probe script.
            return_values.append(eval(s))
        elif s[0].isdigit():
            return_values.append(int(s))
        else:
            # Unrecognized line: stop parsing (e.g. interpreter warnings).
            break
    return return_values
@conf
def check_python_headers(conf):
    """Detect everything needed to build Python extensions (PYEXT) and to
    embed a Python interpreter (PYEMBED): headers, libpython, and the
    compiler/linker flags, queried via distutils and python-config.
    """
    env = conf.env
    if not env['CC_NAME'] and not env['CXX_NAME']:
        conf.fatal('load a compiler first (gcc, g++, ..)')
    if not env['PYTHON_VERSION']:
        conf.check_python_version()
    pybin = conf.env.PYTHON
    if not pybin:
        conf.fatal('Could not find the python executable')
    # Query the distutils configuration variables in one interpreter run.
    v = 'prefix SO LDFLAGS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDSHARED CFLAGS'.split()
    try:
        lst = conf.get_python_variables(["get_config_var('%s') or ''" % x for x in v])
    except RuntimeError:
        conf.fatal("Python development headers not found (-v for details).")
    vals = ['%s = %r' % (x, y) for (x, y) in zip(v, lst)]
    conf.to_log("Configuration returned from %r:\n%r\n" % (pybin, '\n'.join(vals)))
    dct = dict(zip(v, lst))
    x = 'MACOSX_DEPLOYMENT_TARGET'
    if dct[x]:
        conf.env[x] = conf.environ[x] = dct[x]
    # SO is the extension-module suffix (e.g. '.so', '.pyd').
    env['pyext_PATTERN'] = '%s' + dct['SO']
    all_flags = dct['LDFLAGS'] + ' ' + dct['CFLAGS']
    conf.parse_flags(all_flags, 'PYEMBED')
    all_flags = dct['LDFLAGS'] + ' ' + dct['LDSHARED'] + ' ' + dct['CFLAGS']
    conf.parse_flags(all_flags, 'PYEXT')
    # Locate libpython: try several library names (plain, 'm' ABI flavour,
    # dot-less win32 name) in several candidate directories, in order.
    result = None
    for name in ('python' + env['PYTHON_VERSION'], 'python' + env['PYTHON_VERSION'] + 'm', 'python' + env['PYTHON_VERSION'].replace('.', '')):
        if not result and env['LIBPATH_PYEMBED']:
            path = env['LIBPATH_PYEMBED']
            conf.to_log("\n\n# Trying default LIBPATH_PYEMBED: %r\n" % path)
            result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBPATH_PYEMBED' % name)
        if not result and dct['LIBDIR']:
            path = [dct['LIBDIR']]
            conf.to_log("\n\n# try again with -L$python_LIBDIR: %r\n" % path)
            result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBDIR' % name)
        if not result and dct['LIBPL']:
            path = [dct['LIBPL']]
            conf.to_log("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n")
            result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in python_LIBPL' % name)
        if not result:
            path = [os.path.join(dct['prefix'], "libs")]
            conf.to_log("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
            result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in $prefix/libs' % name)
        if result:
            break
    if result:
        # 'path' and 'name' still hold the successful combination here.
        env['LIBPATH_PYEMBED'] = path
        env.append_value('LIB_PYEMBED', [name])
    else:
        conf.to_log("\n\n### LIB NOT FOUND\n")
    # Extensions link against libpython only on win32/os2 or when python
    # itself was built with a shared libpython.
    if (Utils.is_win32 or sys.platform.startswith('os2') or dct['Py_ENABLE_SHARED']):
        env['LIBPATH_PYEXT'] = env['LIBPATH_PYEMBED']
        env['LIB_PYEXT'] = env['LIB_PYEMBED']
    # Prefer include paths reported by python-config when available.
    num = '.'.join(env['PYTHON_VERSION'].split('.')[:2])
    conf.find_program([''.join(pybin) + '-config', 'python%s-config' % num, 'python-config-%s' % num, 'python%sm-config' % num], var='PYTHON_CONFIG', mandatory=False)
    includes = []
    if conf.env.PYTHON_CONFIG:
        for incstr in conf.cmd_and_log([conf.env.PYTHON_CONFIG, '--includes']).strip().split():
            # Strip the -I or /I prefix from each include flag.
            if (incstr.startswith('-I') or incstr.startswith('/I')):
                incstr = incstr[2:]
            if incstr not in includes:
                includes.append(incstr)
        conf.to_log("Include path for Python extensions (found via python-config --includes): %r\n" % (includes,))
        env['INCLUDES_PYEXT'] = includes
        env['INCLUDES_PYEMBED'] = includes
    else:
        conf.to_log("Include path for Python extensions ""(found via distutils module): %r\n" % (dct['INCLUDEPY'],))
        env['INCLUDES_PYEXT'] = [dct['INCLUDEPY']]
        env['INCLUDES_PYEMBED'] = [dct['INCLUDEPY']]
    # CPython's C API relies on aliasing patterns gcc would otherwise
    # optimize away.
    if env['CC_NAME'] == 'gcc':
        env.append_value('CFLAGS_PYEMBED', ['-fno-strict-aliasing'])
        env.append_value('CFLAGS_PYEXT', ['-fno-strict-aliasing'])
    if env['CXX_NAME'] == 'gcc':
        env.append_value('CXXFLAGS_PYEMBED', ['-fno-strict-aliasing'])
        env.append_value('CXXFLAGS_PYEXT', ['-fno-strict-aliasing'])
    if env.CC_NAME == "msvc":
        # Reuse the flags distutils would pass to MSVC.
        from distutils.msvccompiler import MSVCCompiler
        dist_compiler = MSVCCompiler()
        dist_compiler.initialize()
        env.append_value('CFLAGS_PYEXT', dist_compiler.compile_options)
        env.append_value('CXXFLAGS_PYEXT', dist_compiler.compile_options)
        env.append_value('LINKFLAGS_PYEXT', dist_compiler.ldflags_shared)
    # Final sanity check: compile/link the FRAG test program. If it fails,
    # retry with flags obtained directly from python-config.
    try:
        conf.check(header_name='Python.h', define_name='HAVE_PYTHON_H', uselib='PYEMBED', fragment=FRAG, errmsg=':-(')
    except conf.errors.ConfigurationError:
        xx = conf.env.CXX_NAME and 'cxx' or 'c'
        flags = ['--cflags', '--libs', '--ldflags']
        for f in flags:
            conf.check_cfg(msg='Asking python-config for pyembed %s flags' % f, path=conf.env.PYTHON_CONFIG, package='', uselib_store='PYEMBED', args=[f])
        conf.check(header_name='Python.h', define_name='HAVE_PYTHON_H', msg='Getting pyembed flags from python-config', fragment=FRAG, errmsg='Could not build a python embedded interpreter', features='%s %sprogram pyembed' % (xx, xx))
        for f in flags:
            conf.check_cfg(msg='Asking python-config for pyext %s flags' % f, path=conf.env.PYTHON_CONFIG, package='', uselib_store='PYEXT', args=[f])
        conf.check(header_name='Python.h', define_name='HAVE_PYTHON_H', msg='Getting pyext flags from python-config', features='%s %sshlib pyext' % (xx, xx), fragment=FRAG, errmsg='Could not build python extensions')
@conf
def check_python_version(conf, minver=None):
    """Check the interpreter's version against *minver* (a tuple, or None
    for "any version") and record PYTHON_VERSION, PYTHONDIR and
    PYTHONARCHDIR in the configuration. Fails fatally if too old.
    """
    assert minver is None or isinstance(minver, tuple)
    pybin = conf.env['PYTHON']
    if not pybin:
        conf.fatal('could not find the python executable')
    # Print the five sys.version_info fields, one per line.
    cmd = pybin + ['-c', 'import sys\nfor x in sys.version_info: print(str(x))']
    Logs.debug('python: Running python command %r' % cmd)
    lines = conf.cmd_and_log(cmd).split()
    assert len(lines) == 5, "found %i lines, expected 5: %r" % (len(lines), lines)
    pyver_tuple = (int(lines[0]), int(lines[1]), int(lines[2]), lines[3], int(lines[4]))
    result = (minver is None) or (pyver_tuple >= minver)
    if result:
        # Version is acceptable: compute the installation directories.
        pyver = '.'.join([str(x) for x in pyver_tuple[:2]])
        conf.env['PYTHON_VERSION'] = pyver
        if 'PYTHONDIR' in conf.environ:
            pydir = conf.environ['PYTHONDIR']
        else:
            if Utils.is_win32:
                (python_LIBDEST, pydir) = conf.get_python_variables(["get_config_var('LIBDEST') or ''", "get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']])
            else:
                python_LIBDEST = None
                (pydir,) = conf.get_python_variables(["get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']])
            if python_LIBDEST is None:
                if conf.env['LIBDIR']:
                    python_LIBDEST = os.path.join(conf.env['LIBDIR'], "python" + pyver)
                else:
                    python_LIBDEST = os.path.join(conf.env['PREFIX'], "lib", "python" + pyver)
        if 'PYTHONARCHDIR' in conf.environ:
            pyarchdir = conf.environ['PYTHONARCHDIR']
        else:
            (pyarchdir,) = conf.get_python_variables(["get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']])
            if not pyarchdir:
                pyarchdir = pydir
        # conf.define only exists when the c_config tool is loaded.
        if hasattr(conf, 'define'):
            conf.define('PYTHONDIR', pydir)
            conf.define('PYTHONARCHDIR', pyarchdir)
        conf.env['PYTHONDIR'] = pydir
        conf.env['PYTHONARCHDIR'] = pyarchdir
    pyver_full = '.'.join(map(str, pyver_tuple[:3]))
    if minver is None:
        conf.msg('Checking for python version', pyver_full)
    else:
        minver_str = '.'.join(map(str, minver))
        # NOTE(review): the color expression below always evaluates to
        # 'GREEN' because the non-empty ">= %s" string is truthy; the
        # failing case is reported via conf.fatal below anyway.
        conf.msg('Checking for python version', pyver_tuple, ">= %s" % (minver_str,) and 'GREEN' or 'YELLOW')
    if not result:
        conf.fatal('The python version is too old, expecting %r' % (minver,))
PYTHON_MODULE_TEMPLATE='''
import %s as current_module
version = getattr(current_module, '__version__', None)
if version is not None:
print(str(version))
else:
print('unknown version')
'''
@conf
def check_python_module(conf, module_name, condition=''):
    """Check that a python module can be imported, optionally validating
    its version.

    *condition* is a python expression evaluated with the detected version
    bound to ``ver`` and a ``num()`` helper, e.g. ``"ver >= num(1, 5)"``.
    Fails fatally if the module is missing or the condition is false.
    """
    msg = 'Python module %s' % module_name
    if condition:
        msg = '%s (%s)' % (msg, condition)
    conf.start_msg(msg)
    try:
        ret = conf.cmd_and_log(conf.env['PYTHON'] + ['-c', PYTHON_MODULE_TEMPLATE % module_name])
    except Exception:
        conf.end_msg(False)
        conf.fatal('Could not find the python module %r' % module_name)
    ret = ret.strip()
    if condition:
        conf.end_msg(ret)
        if ret == 'unknown version':
            conf.fatal('Could not check the %s version' % module_name)
        from distutils.version import LooseVersion
        def num(*k):
            # num(1, 5) -> LooseVersion('1.5'); num('1.5') also accepted.
            if isinstance(k[0], int):
                return LooseVersion('.'.join([str(x) for x in k]))
            else:
                return LooseVersion(k[0])
        # The condition comes from the wscript author, so eval is by
        # design here; only 'num' and 'ver' are exposed to it.
        d = {'num': num, 'ver': LooseVersion(ret)}
        ev = eval(condition, {}, d)
        if not ev:
            conf.fatal('The %s version does not satisfy the requirements' % module_name)
    else:
        if ret == 'unknown version':
            conf.end_msg(True)
        else:
            conf.end_msg(ret)
def configure(conf):
    """Locate the python interpreter and set default byte-compile settings."""
    try:
        conf.find_program('python', var='PYTHON')
    except conf.errors.ConfigurationError:
        Logs.warn("could not find a python executable, setting to sys.executable '%s'" % sys.executable)
        conf.env.PYTHON = sys.executable
    if conf.env.PYTHON != sys.executable:
        # The interpreter chosen for the build differs from the one
        # currently running waf.
        Logs.warn("python executable %r differs from system %r" % (conf.env.PYTHON, sys.executable))
    conf.env.PYTHON = conf.cmd_to_list(conf.env.PYTHON)
    defaults = {
        'PYCMD': '"import sys, py_compile;py_compile.compile(sys.argv[1], sys.argv[2])"',
        'PYFLAGS': '',
        'PYFLAGS_OPT': '-O',
        'PYC': getattr(Options.options, 'pyc', 1),
        'PYO': getattr(Options.options, 'pyo', 1),
    }
    for key, value in defaults.items():
        conf.env[key] = value
def options(opt):
    """Register the --nopyc/--nopyo command-line flags."""
    flag_specs = (
        ('pyc', 'Do not install bytecode compiled .pyc files (configuration) [Default:install]'),
        ('pyo', 'Do not install optimised compiled .pyo files (configuration) [Default:install]'),
    )
    for dest, help_text in flag_specs:
        opt.add_option('--no' + dest, action='store_false', default=1, help=help_text, dest=dest)
|
ldjebran/robottelo | refs/heads/master | tests/foreman/ui/test_product.py | 3 | """Test class for Products UI
:Requirement: Product
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: ContentManagement
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from datetime import timedelta
from fauxfactory import gen_choice
from nailgun import entities
from robottelo.constants import FAKE_1_YUM_REPO, REPO_TYPE, VALID_GPG_KEY_FILE, SYNC_INTERVAL
from robottelo.datafactory import gen_string, valid_data_list, valid_cron_expressions
from robottelo.decorators import fixture, parametrize, tier2
from robottelo.helpers import read_data_file
@fixture(scope='module')
def module_org():
    """Create a single Organization shared by every test in this module."""
    organization = entities.Organization()
    return organization.create()
@tier2
def test_positive_end_to_end(session, module_org):
    """Perform end to end testing for product component

    :id: d0e1f0d1-2380-4508-b270-62c1d8b3e2ff

    :expectedresults: All expected CRUD actions finished successfully

    :CaseLevel: Integration

    :CaseImportance: Critical
    """
    # Fixture data: random names plus a GPG key and sync plan to attach.
    product_name = gen_string('alpha')
    new_product_name = gen_string('alpha')
    product_label = gen_string('alpha')
    product_description = gen_string('alpha')
    gpg_key = entities.GPGKey(
        content=read_data_file(VALID_GPG_KEY_FILE),
        organization=module_org
    ).create()
    sync_plan = entities.SyncPlan(organization=module_org).create()
    with session:
        # Create new product using different parameters
        session.product.create({
            'name': product_name,
            'label': product_label,
            'gpg_key': gpg_key.name,
            'sync_plan': sync_plan.name,
            'description': product_description,
        })
        assert session.product.search(product_name)[0]['Name'] == product_name
        # Verify that created entity has expected parameters
        product_values = session.product.read(product_name)
        assert product_values['details']['name'] == product_name
        assert product_values['details']['label'] == product_label
        assert product_values['details']['gpg_key'] == gpg_key.name
        assert product_values['details']['description'] == product_description
        assert product_values['details']['sync_plan'] == sync_plan.name
        # Update a product with a different name
        session.product.update(
            product_name, {'details.name': new_product_name}
        )
        assert session.product.search(product_name)[0]['Name'] != product_name
        assert session.product.search(new_product_name)[0]['Name'] == new_product_name
        # Add a repo to product (needed so the sync below has content)
        session.repository.create(
            new_product_name,
            {
                'name': gen_string('alpha'),
                'repo_type': REPO_TYPE['yum'],
                'repo_content.upstream_url': FAKE_1_YUM_REPO,
            }
        )
        # Synchronize the product
        result = session.product.synchronize(new_product_name)
        assert result['result'] == 'success'
        product_values = session.product.read(new_product_name)
        assert product_values['details']['repos_count'] == '1'
        assert product_values['details']['sync_state'] == 'Syncing Complete.'
        # Delete product
        session.product.delete(new_product_name)
        assert session.product.search(new_product_name)[0]['Name'] != new_product_name
@parametrize('product_name', **valid_data_list('ui'))
@tier2
def test_positive_create_in_different_orgs(session, product_name):
    """Create Product with same name but in different organizations

    :id: 469fc036-a48a-4c0a-9da9-33e73f903479

    :expectedresults: Product is created successfully in both
        organizations.

    :CaseLevel: Integration
    """
    organizations = [entities.Organization().create(), entities.Organization().create()]
    with session:
        for organization in organizations:
            session.organization.select(org_name=organization.name)
            # Use the org name as description so each product is unique.
            session.product.create(
                {'name': product_name, 'description': organization.name})
            assert session.product.search(
                product_name)[0]['Name'] == product_name
            values = session.product.read(product_name)
            assert values['details']['description'] == organization.name
@tier2
def test_positive_product_create_with_create_sync_plan(session, module_org):
    """Perform Sync Plan Create from Product Create Page

    :id: 4a87b533-12b6-4d4e-8a99-4bb95efc4321

    :expectedresults: Ensure sync get created and assigned to Product.

    :CaseLevel: Integration

    :CaseImportance: medium
    """
    # Random identifiers plus a GPG key to attach to the new product.
    product_name = gen_string('alpha')
    product_description = gen_string('alpha')
    gpg_key = entities.GPGKey(
        content=read_data_file(VALID_GPG_KEY_FILE),
        organization=module_org
    ).create()
    plan_name = gen_string('alpha')
    description = gen_string('alpha')
    cron_expression = gen_choice(valid_cron_expressions())
    with session:
        # Start 10 minutes in the future (browser clock) so the plan's
        # start date is always valid when the form is submitted.
        startdate = (
            session.browser.get_client_datetime() + timedelta(minutes=10))
        sync_plan_values = {
            'name': plan_name,
            'interval': SYNC_INTERVAL['custom'],
            'description': description,
            'cron_expression': cron_expression,
            'date_time.start_date': startdate.strftime("%Y-%m-%d"),
            'date_time.hours': startdate.strftime('%H'),
            'date_time.minutes': startdate.strftime('%M'),
        }
        # The sync plan is created inline from the product create page.
        session.product.create({
            'name': product_name,
            'gpg_key': gpg_key.name,
            'description': product_description,
        }, sync_plan_values=sync_plan_values)
        assert session.product.search(product_name)[0]['Name'] == product_name
        product_values = session.product.read(product_name, widget_names='details')
        assert product_values['details']['name'] == product_name
        assert product_values['details']['sync_plan'] == plan_name
        # Delete product
        session.product.delete(product_name)
        assert session.product.search(product_name)[0]['Name'] != product_name
|
rs2/pandas | refs/heads/master | pandas/tests/generic/methods/test_reorder_levels.py | 2 | import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
class TestReorderLevels:
    """Tests for DataFrame/Series.reorder_levels on MultiIndex objects."""

    @pytest.mark.parametrize("klass", [Series, DataFrame])
    def test_reorder_levels(self, klass):
        # Three-level index: one 'bar', three middle labels, two codes.
        index = MultiIndex(
            levels=[["bar"], ["one", "two", "three"], [0, 1]],
            codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
            names=["L0", "L1", "L2"],
        )
        df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
        obj = df if klass is DataFrame else df["A"]

        # no change, position
        result = obj.reorder_levels([0, 1, 2])
        tm.assert_equal(obj, result)

        # no change, labels
        result = obj.reorder_levels(["L0", "L1", "L2"])
        tm.assert_equal(obj, result)

        # rotate, position
        result = obj.reorder_levels([1, 2, 0])
        e_idx = MultiIndex(
            levels=[["one", "two", "three"], [0, 1], ["bar"]],
            codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
            names=["L1", "L2", "L0"],
        )
        expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
        expected = expected if klass is DataFrame else expected["A"]
        tm.assert_equal(result, expected)

        # Repeating a level is allowed: positions may appear several times.
        result = obj.reorder_levels([0, 0, 0])
        e_idx = MultiIndex(
            levels=[["bar"], ["bar"], ["bar"]],
            codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
            names=["L0", "L0", "L0"],
        )
        expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
        expected = expected if klass is DataFrame else expected["A"]
        tm.assert_equal(result, expected)

        # Same repetition by label.
        result = obj.reorder_levels(["L0", "L0", "L0"])
        tm.assert_equal(result, expected)

    def test_reorder_levels_swaplevel_equivalence(
        self, multiindex_year_month_day_dataframe_random_data
    ):
        # reorder_levels should match the equivalent chain of swaplevel calls.
        ymd = multiindex_year_month_day_dataframe_random_data

        result = ymd.reorder_levels(["month", "day", "year"])
        expected = ymd.swaplevel(0, 1).swaplevel(1, 2)
        tm.assert_frame_equal(result, expected)

        result = ymd["A"].reorder_levels(["month", "day", "year"])
        expected = ymd["A"].swaplevel(0, 1).swaplevel(1, 2)
        tm.assert_series_equal(result, expected)

        result = ymd.T.reorder_levels(["month", "day", "year"], axis=1)
        expected = ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
        tm.assert_frame_equal(result, expected)

        # Error cases: non-hierarchical axis and too many levels requested.
        with pytest.raises(TypeError, match="hierarchical axis"):
            ymd.reorder_levels([1, 2], axis=1)

        with pytest.raises(IndexError, match="Too many levels"):
            ymd.index.reorder_levels([1, 2, 3])
|
neumerance/cloudloon2 | refs/heads/master | .venv/lib/python2.7/site-packages/novaclient/v1_1/security_group_rules.py | 7 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Security group rules interface (1.1 extension).
"""
from novaclient import base
from novaclient import exceptions
class SecurityGroupRule(base.Resource):
    """A security group rule returned by the os-security-group-rules API."""

    def __str__(self):
        return str(self.id)

    def delete(self):
        """Delete this rule on the server."""
        self.manager.delete(self)
class SecurityGroupRuleManager(base.Manager):
    """Manage :class:`SecurityGroupRule` resources."""
    resource_class = SecurityGroupRule

    def create(self, parent_group_id, ip_protocol=None, from_port=None,
               to_port=None, cidr=None, group_id=None):
        """
        Create a security group rule

        :param ip_protocol: IP protocol, one of 'tcp', 'udp' or 'icmp'
        :param from_port: Source port
        :param to_port: Destination port
        :param cidr: Destination IP address(es) in CIDR notation
        :param group_id: Security group id (int)
        :param parent_group_id: Parent security group id (int)
        :raises exceptions.CommandError: if the ports are not integers or
            the protocol is not one of tcp/udp/icmp
        """
        try:
            from_port = int(from_port)
        except (TypeError, ValueError):
            raise exceptions.CommandError("From port must be an integer.")
        try:
            to_port = int(to_port)
        except (TypeError, ValueError):
            raise exceptions.CommandError("To port must be an integer.")
        # Guard against a missing protocol before calling .upper(): the
        # parameter defaults to None, and the previous code raised an
        # opaque AttributeError instead of a usable error message.
        if not ip_protocol or ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
            raise exceptions.CommandError("Ip protocol must be 'tcp', 'udp', "
                                          "or 'icmp'.")

        body = {"security_group_rule": {
            "ip_protocol": ip_protocol,
            "from_port": from_port,
            "to_port": to_port,
            "cidr": cidr,
            "group_id": group_id,
            "parent_group_id": parent_group_id}}

        return self._create('/os-security-group-rules', body,
                            'security_group_rule')

    def delete(self, rule):
        """
        Delete a security group rule

        :param rule: The security group rule to delete (ID or Class)
        """
        self._delete('/os-security-group-rules/%s' % base.getid(rule))
|
mancoast/CPythonPyc_test | refs/heads/master | cpython/241_test_shelve.py | 15 | import os
import unittest
import shelve
import glob
from test import test_support
class TestCase(unittest.TestCase):
    """Direct tests of shelve.open()/shelve.Shelf using the Python 2.4-era
    API (legacy `binary` keyword and pickle `protocol`)."""

    # Temporary database name; the underlying dbm may create several files
    # with this prefix, hence the glob-based cleanup.
    fn = "shelftemp" + os.extsep + "db"

    def test_ascii_file_shelf(self):
        # File-backed shelf using ascii (protocol 0) pickles.
        try:
            s = shelve.open(self.fn, binary=False)
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
            s.close()
        finally:
            for f in glob.glob(self.fn+"*"):
                os.unlink(f)

    def test_binary_file_shelf(self):
        # File-backed shelf using binary (protocol 1) pickles.
        try:
            s = shelve.open(self.fn, binary=True)
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
            s.close()
        finally:
            for f in glob.glob(self.fn+"*"):
                os.unlink(f)

    def test_proto2_file_shelf(self):
        # File-backed shelf using pickle protocol 2.
        try:
            s = shelve.open(self.fn, protocol=2)
            s['key1'] = (1,2,3,4)
            self.assertEqual(s['key1'], (1,2,3,4))
            s.close()
        finally:
            for f in glob.glob(self.fn+"*"):
                os.unlink(f)

    def test_in_memory_shelf(self):
        # Shelf wrapped around a plain dict: ascii vs binary pickles must
        # store different (incomparable) raw bytes.
        d1 = {}
        s = shelve.Shelf(d1, binary=False)
        s['key1'] = (1,2,3,4)
        self.assertEqual(s['key1'], (1,2,3,4))
        s.close()
        d2 = {}
        s = shelve.Shelf(d2, binary=True)
        s['key1'] = (1,2,3,4)
        self.assertEqual(s['key1'], (1,2,3,4))
        s.close()

        self.assertEqual(len(d1), 1)
        self.assertNotEqual(d1, d2)

    def test_mutable_entry(self):
        # Without writeback, in-place mutation of a fetched value is lost;
        # with writeback=True it is flushed back on close.
        d1 = {}
        s = shelve.Shelf(d1, protocol=2, writeback=False)
        s['key1'] = [1,2,3,4]
        self.assertEqual(s['key1'], [1,2,3,4])
        s['key1'].append(5)
        self.assertEqual(s['key1'], [1,2,3,4])
        s.close()
        d2 = {}
        s = shelve.Shelf(d2, protocol=2, writeback=True)
        s['key1'] = [1,2,3,4]
        self.assertEqual(s['key1'], [1,2,3,4])
        s['key1'].append(5)
        self.assertEqual(s['key1'], [1,2,3,4,5])
        s.close()

        self.assertEqual(len(d1), 1)
        self.assertEqual(len(d2), 1)
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
    """Base class running the generic mapping-protocol tests against a
    shelf; subclasses set _args (shelve kwargs) and _in_mem (dict-backed
    vs file-backed)."""
    fn = "shelftemp.db"
    # Class-level counter; `self.counter += 1` below creates a per-instance
    # copy, giving each test instance unique database filenames.
    counter = 0
    def __init__(self, *args, **kw):
        # Track every shelf opened so tearDown can close them all.
        self._db = []
        mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)
    type2test = shelve.Shelf
    def _reference(self):
        # Reference mapping contents required by the base protocol tests.
        return {"key1":"value1", "key2":2, "key3":(1,2,3)}
    def _empty_mapping(self):
        # Produce a fresh, empty shelf of the flavour under test.
        if self._in_mem:
            x = shelve.Shelf({}, **self._args)
        else:
            self.counter += 1
            x = shelve.open(self.fn+str(self.counter), **self._args)
        self._db.append(x)
        return x
    def tearDown(self):
        for db in self._db:
            db.close()
        self._db = []
        # Remove any database files created by file-backed shelves.
        if not self._in_mem:
            for f in glob.glob(self.fn+"*"):
                os.unlink(f)
# Concrete parametrizations of TestShelveBase: each class pins one pickle
# flavour (ascii, binary, or protocol 2) combined with either a real
# database file (_in_mem=False) or a plain in-memory dict (_in_mem=True).
class TestAsciiFileShelve(TestShelveBase):
    _args = {'binary':False}
    _in_mem = False
class TestBinaryFileShelve(TestShelveBase):
    _args = {'binary':True}
    _in_mem = False
class TestProto2FileShelve(TestShelveBase):
    _args = {'protocol':2}
    _in_mem = False
class TestAsciiMemShelve(TestShelveBase):
    _args = {'binary':False}
    _in_mem = True
class TestBinaryMemShelve(TestShelveBase):
    _args = {'binary':True}
    _in_mem = True
class TestProto2MemShelve(TestShelveBase):
    _args = {'protocol':2}
    _in_mem = True
def test_main():
    """Run every shelve test case defined in this module."""
    all_cases = (
        TestAsciiFileShelve,
        TestBinaryFileShelve,
        TestProto2FileShelve,
        TestAsciiMemShelve,
        TestBinaryMemShelve,
        TestProto2MemShelve,
        TestCase,
    )
    test_support.run_unittest(*all_cases)

if __name__ == "__main__":
    test_main()
|
def main(request, response):
    """WPT handler: echo the raw request headers back as text/plain."""
    writer = response.writer
    writer.write_status(200)
    writer.write_header("Content-Type", "text/plain")
    writer.end_headers()
    writer.write(str(request.raw_headers))
    # Close the socket after responding; no keep-alive for this handler.
    response.close_connection = True
|
krieger-od/nwjs_chromium.src | refs/heads/master | tools/telemetry/telemetry/core/memory_cache_http_server_unittest.py | 21 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.unittest_util import tab_test_case
class MemoryCacheHTTPServerTest(tab_test_case.TabTestCase):
    """Exercises the telemetry in-memory HTTP server: plain file hosting
    and HTTP Range request handling, driven through a browser tab."""

    def setUp(self):
        super(MemoryCacheHTTPServerTest, self).setUp()
        # A known binary fixture; its on-disk size anchors the Range
        # request expectations below.
        self._test_filename = 'bear.webm'
        _test_file = os.path.join(util.GetUnittestDataDir(), 'bear.webm')
        self._test_file_size = os.stat(_test_file).st_size

    def testBasicHostingAndRangeRequests(self):
        self.Navigate('blank.html')
        x = self._tab.EvaluateJavaScript('document.body.innerHTML')
        x = x.strip()

        # Test basic html hosting.
        self.assertEquals(x, 'Hello world')

        file_size = self._test_file_size
        last_byte = file_size - 1
        # Test byte range request: no end byte.
        self.CheckContentHeaders('0-', '0-%d' % last_byte, file_size)
        # Test byte range request: greater than zero start byte.
        self.CheckContentHeaders('100-', '100-%d' % last_byte,
                                 file_size - 100)
        # Test byte range request: explicit byte range.
        self.CheckContentHeaders('2-500', '2-500', '499')
        # Test byte range request: no start byte (suffix range = last N bytes).
        self.CheckContentHeaders('-228',
                                 '%d-%d' % (file_size - 228, last_byte),
                                 '228')
        # Test byte range request: end byte less than start byte (the server
        # is expected to serve from the start byte to the end of the file).
        self.CheckContentHeaders('100-5', '100-%d' % last_byte,
                                 file_size - 100)

    def CheckContentHeaders(self, content_range_request, content_range_response,
                            content_length_response):
        """Issue an XHR with a Range header and assert the Content-Range
        and Content-Length response headers match expectations."""
        self._tab.ExecuteJavaScript("""
            var loaded = false;
            var xmlhttp = new XMLHttpRequest();
            xmlhttp.onload = function(e) {
              loaded = true;
            };
            // Avoid cached content by appending unique URL param.
            xmlhttp.open('GET', "%s?t=" + Date.now(), true);
            xmlhttp.setRequestHeader('Range', 'bytes=%s');
            xmlhttp.send();
        """ % (self.UrlOfUnittestFile(self._test_filename),
               content_range_request))
        self._tab.WaitForJavaScriptExpression('loaded', 5)
        content_range = self._tab.EvaluateJavaScript(
            'xmlhttp.getResponseHeader("Content-Range");')
        content_range_response = 'bytes %s/%d' % (
            content_range_response, self._test_file_size)
        self.assertEquals(content_range, content_range_response)
        content_length = self._tab.EvaluateJavaScript(
            'xmlhttp.getResponseHeader("Content-Length");')
        self.assertEquals(content_length, str(content_length_response))
|
petewarden/tensorflow | refs/heads/master | tensorflow/python/data/util/sparse.py | 14 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python dataset sparse tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import sparse_ops
def any_sparse(classes):
    """Checks for sparse tensor.

    Args:
      classes: a structure of objects that identify the dataset item classes

    Returns:
      `True` if `classes` contains a sparse tensor type and `False` otherwise.
    """
    for item_class in nest.flatten(classes):
        if item_class is sparse_tensor.SparseTensor:
            return True
    return False
def as_dense_shapes(shapes, classes):
    """Converts sparse tensor shapes to their physical shapes.

    Args:
      shapes: a structure of shapes to convert.
      classes: a structure of objects that identify the dataset item classes

    Returns:
      a structure matching the nested structure of `shapes`, containing
      `tensor_shape.unknown_shape()` at positions where `classes` contains
      `tf.sparse.SparseTensor` and matching contents of `shapes` otherwise
    """
    flat_result = []
    for shape, item_class in zip(nest.flatten(shapes), nest.flatten(classes)):
        if item_class is sparse_tensor.SparseTensor:
            flat_result.append(tensor_shape.unknown_shape())
        else:
            flat_result.append(shape)
    return nest.pack_sequence_as(shapes, flat_result)
def as_dense_types(types, classes):
    """Converts sparse tensor types to `dtypes.variant`.

    Args:
      types: a structure of types to convert.
      classes: a structure of objects that identify the dataset item classes

    Returns:
      a structure matching the nested structure of `types`, containing
      `dtypes.variant` at positions where `classes` contains
      `tf.sparse.SparseTensor` and matching contents of `types` otherwise
    """
    flat_result = []
    for item_type, item_class in zip(nest.flatten(types), nest.flatten(classes)):
        if item_class is sparse_tensor.SparseTensor:
            flat_result.append(dtypes.variant)
        else:
            flat_result.append(item_type)
    return nest.pack_sequence_as(types, flat_result)
def deserialize_sparse_tensors(tensors, types, shapes, classes):
    """Deserializes sparse tensors.

    Args:
      tensors: a structure of tensors to deserialize.
      types: a structure that holds information about types of `tensors`
      shapes: a structure that holds information about shapes of `tensors`
      classes: a structure of objects that identify the dataset item classes

    Returns:
      `tensors` with any serialized sparse tensors replaced by their
      deserialized version.
    """
    # Walk the four structures in lockstep; only positions marked as
    # SparseTensor in `classes` are deserialized (from the variant
    # representation produced by the serialize_* helpers below), the rest
    # pass through untouched.
    ret = nest.pack_sequence_as(types, [
        sparse_ops.deserialize_sparse(tensor, dtype=ty, rank=shape.ndims)
        if c is sparse_tensor.SparseTensor else tensor
        for (tensor, ty, shape, c) in zip(
            nest.flatten(tensors), nest.flatten(types), nest.flatten(shapes),
            nest.flatten(classes))
    ])
    return ret
def get_classes(tensors):
    """Gets classes for a structure of tensors.

    Args:
      tensors: the tensor structure to get classes for.

    Returns:
      a structure matching the nested structure of `tensors`, containing
      `tf.sparse.SparseTensor` at positions where `tensors` contains a sparse
      tensor and `tf.Tensor` otherwise.
    """
    flat_classes = []
    for tensor in nest.flatten(tensors):
        if isinstance(tensor, sparse_tensor.SparseTensor):
            flat_classes.append(sparse_tensor.SparseTensor)
        else:
            flat_classes.append(ops.Tensor)
    return nest.pack_sequence_as(tensors, flat_classes)
def serialize_many_sparse_tensors(tensors):
  """Serializes many sparse tensors into a batch.

  Args:
    tensors: a tensor structure to serialize.

  Returns:
    `tensors` with any sparse tensors replaced by the serialized batch.
  """
  serialized = [
      sparse_ops.serialize_many_sparse(component, out_type=dtypes.variant)
      if sparse_tensor.is_sparse(component) else component
      for component in nest.flatten(tensors)
  ]
  return nest.pack_sequence_as(tensors, serialized)
def serialize_sparse_tensors(tensors):
  """Serializes sparse tensors.

  Args:
    tensors: a tensor structure to serialize.

  Returns:
    `tensors` with any sparse tensors replaced by their serialized version.
  """
  serialized = [
      sparse_ops.serialize_sparse(component, out_type=dtypes.variant)
      if isinstance(component, sparse_tensor.SparseTensor) else component
      for component in nest.flatten(tensors)
  ]
  return nest.pack_sequence_as(tensors, serialized)
|
DayGitH/Python-Challenges | refs/heads/master | DailyProgrammer/20120502C.py | 1 | """
If you were to generate all permutations of the first three letters of the alphabet ("a", "b" and "c") and then sort
them, you would get the following list of 6 permutations:
abc
acb
bac
bca
cab
cba
As you can see, the fourth permutation in a sorted list of all the permutations of "a", "b" and "c" is "bca".
Similarly, if we wanted the 30th permutation in a sorted list of all permutations of the first five letters of the
alphabet (i.e. "abcde"), you get "baedc".
Define a function f(n,p) that generates the permutation number p in a sorted list of all permutations of the n first
letters of the alphabet. So, for instance:
f(3, 4) = "bca"
f(5, 30) = "baedc"
f(7, 1000) = "bdcfega"
f(8, 20000) = "dhfebagc"
Find f(11, 20000000)
Bonus:
Find f(20, 10^18 )
"""
import itertools
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def factorial(n):
    """Return n! for a non-negative integer n.

    The base case covers n <= 1 so that factorial(0) == 1 (the empty
    product); a bare `n == 1` base case would recurse forever for n == 0.
    """
    if n <= 1:
        return 1
    return n * factorial(n - 1)
def f(a, b):
    """Return permutation number `b` (1-based, lexicographic order) of the
    first `a` letters of the alphabet, as a string.

    Uses the factorial number system: at each step the remaining index is
    divided by the factorial of the remaining length to pick the next
    unused position from `total_pos`.
    """
    answer = []    # chosen positions (as strings), in output order
    C = a          # number of symbols being permuted
    index = b      # 1-based rank of the desired permutation
    fact = factorial(C)
    print(index)      # debug output left in by the author
    total_pos = [i for i in range(C)]   # positions not yet consumed
    print(total_pos)  # debug output
    for n in range(C, 1, -1):
        if fact != 2:
            fact = int(fact/n)
            div = int((index-1)/fact)
        else:
            # Final step: `fact` is not reduced further and the raw index
            # (not index-1) is used. NOTE(review): this matches the
            # examples in the module docstring, but the asymmetry is worth
            # confirming at rank boundaries (e.g. first/last permutation).
            div = int(index/fact)
        answer.append(str(total_pos[div]))
        total_pos.pop(div)
        index -= (div * fact)
    # Exactly one position remains after the loop; it is the last letter.
    answer.append(str(total_pos[0]))
    conv = lambda x: alphabet[int(x)]   # position -> letter
    return ''.join([conv(i) for i in answer])
def main():
    """ Solution based on previous solution of Euler Problem 24 """
    # Bonus part of the challenge: the 10^18-th permutation of 20 letters.
    result = f(20, 10**18)
    print(result)
# Entry point when executed as a script.
if __name__ == '__main__':
    main()
|
pjsip/pjproject | refs/heads/master | tests/pjsua/scripts-recvfrom/230_reg_bad_fail_stale_true.py | 42 | # $Id$
import inc_sip as sip
import inc_sdp as sdp
# In this test we simulate broken server, where it always sends
# stale=true with all 401 responses. We should expect pjsip to
# retry the authentication until PJSIP_MAX_STALE_COUNT is
# exceeded. When pjsip retries the authentication, it should
# use the new nonce from server
# Account config: register against the scripted server below using digest
# credentials for realm "python".
pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \
        "--realm=python --user=username --password=password"
# 1) Initial REGISTER carries no credentials; challenge with nonce "1".
req1 = sip.RecvfromTransaction("Initial request", 401,
                               include=["REGISTER sip"],
                               exclude=["Authorization"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1\""]
                               )
# 2) First retry must answer nonce "1"; reply stale=true with new nonce "2".
req2 = sip.RecvfromTransaction("First retry", 401,
                               include=["REGISTER sip", "Authorization", "nonce=\"1\""],
                               exclude=["Authorization:[\\s\\S]+Authorization:"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"2\", stale=true"]
                               )
# 3) Second retry must use the fresh nonce "2"; challenge again with "3".
req3 = sip.RecvfromTransaction("Second retry retry", 401,
                               include=["REGISTER sip", "Authorization", "nonce=\"2\""],
                               exclude=["Authorization:[\\s\\S]+Authorization:"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"3\", stale=true"]
                               )
# 4) After repeated stale=true challenges pjsip must give up with
#    PJSIP_EAUTHSTALECOUNT instead of retrying forever.
req4 = sip.RecvfromTransaction("Third retry", 401,
                               include=["REGISTER sip", "Authorization", "nonce=\"3\""],
                               exclude=["Authorization:[\\s\\S]+Authorization:"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"4\", stale=true"],
                               expect="PJSIP_EAUTHSTALECOUNT"
                               )
recvfrom_cfg = sip.RecvfromCfg("Failed registration retry (server rejects with stale=true) ",
                               pjsua, [req1, req2, req3, req4])
|
flexiant/qemu | refs/heads/master | scripts/tracetool/backend/ftrace.py | 92 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Ftrace built-in backend.
"""
__author__ = "Eiichi Tsukata <eiichi.tsukata.xh@hitachi.com>"
__copyright__ = "Copyright (C) 2013 Hitachi, Ltd."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
PUBLIC = True
def c(events):
    # No C source is generated for this backend: the trace calls are the
    # static inline helpers emitted into the header by h() below.
    pass
def h(events):
    # Emit the header: one static inline trace_<name>() helper per event
    # that formats the event into a buffer and writes it to the ftrace
    # trace_marker fd when the event's runtime state is enabled.
    # The quoted strings below are C code and must be emitted verbatim.
    out('#include "trace/ftrace.h"',
        '#include "trace/control.h"',
        '',
        )
    for e in events:
        # Build ", arg1, arg2, ..." (leading comma only when args exist) so
        # it can be appended directly after the snprintf format arguments.
        argnames = ", ".join(e.args.names())
        if len(e.args) > 0:
            argnames = ", " + argnames
        out('static inline void trace_%(name)s(%(args)s)',
            '{',
            '    char ftrace_buf[MAX_TRACE_STRLEN];',
            '    int unused __attribute__ ((unused));',
            '    int trlen;',
            '    bool _state = trace_event_get_state(%(event_id)s);',
            '    if (_state) {',
            '        trlen = snprintf(ftrace_buf, MAX_TRACE_STRLEN,',
            '                         "%(name)s " %(fmt)s "\\n" %(argnames)s);',
            '        trlen = MIN(trlen, MAX_TRACE_STRLEN - 1);',
            '        unused = write(trace_marker_fd, ftrace_buf, trlen);',
            '    }',
            '}',
            name = e.name,
            args = e.args,
            event_id = "TRACE_" + e.name.upper(),
            # Strip the trailing newline from the event format; the
            # generated code appends its own "\n".
            fmt = e.fmt.rstrip("\n"),
            argnames = argnames,
            )
|
mdanielwork/intellij-community | refs/heads/master | python/testData/resolve/multiFile/reimportStar/django/db/models/fields/__init__.py | 83 | class CharField:
pass
|
simbs/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/tests/test_textannotation.py | 83 | # -*- coding: utf-8 -*-
"Test for Annotation Xmodule functional logic."
import unittest
from mock import Mock
from lxml import etree
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.textannotation_module import TextAnnotationModule
from . import get_test_system
class TextAnnotationModuleTestCase(unittest.TestCase):
    ''' text Annotation Module Test Case '''
    # Minimal annotatable XML document the module under test is built from;
    # contains an <instructions> element plus annotatable body content.
    sample_xml = '''
        <annotatable>
            <instructions><p>Test Instructions.</p></instructions>
            <p>
                One Fish. Two Fish.
                Red Fish. Blue Fish.
                Oh the places you'll go!
            </p>
        </annotatable>
    '''
    def setUp(self):
        """
        Makes sure that the Module is declared and mocked with the sample xml above.
        """
        super(TextAnnotationModuleTestCase, self).setUp()
        # return anything except None to test LMS
        def test_real_user(useless):
            useless_user = Mock(email='fake@fake.com', id=useless)
            return useless_user
        # test to make sure that role is checked in LMS
        def test_user_role():
            return 'staff'
        # Wire the stubs into the test runtime system before constructing
        # the module under test.
        self.system = get_test_system()
        self.system.get_real_user = test_real_user
        self.system.get_user_role = test_user_role
        self.system.anonymous_student_id = None
        self.mod = TextAnnotationModule(
            Mock(),
            self.system,
            DictFieldData({'data': self.sample_xml}),
            ScopeIds(None, None, None, None)
        )
    def test_extract_instructions(self):
        """
        Tests to make sure that the instructions are correctly pulled from the sample xml above.
        It also makes sure that if no instructions exist, that it does in fact return nothing.
        """
        xmltree = etree.fromstring(self.sample_xml)
        expected_xml = u"<div><p>Test Instructions.</p></div>"
        actual_xml = self.mod._extract_instructions(xmltree)  # pylint: disable=protected-access
        self.assertIsNotNone(actual_xml)
        self.assertEqual(expected_xml.strip(), actual_xml.strip())
        # Without an <instructions> element the extractor returns None.
        xmltree = etree.fromstring('<annotatable>foo</annotatable>')
        actual = self.mod._extract_instructions(xmltree)  # pylint: disable=protected-access
        self.assertIsNone(actual)
    def test_student_view(self):
        """
        Tests the function that passes in all the information in the context
        that will be used in templates/textannotation.html
        """
        context = self.mod.student_view({}).content
        # Every key the template relies on must be present in the rendered
        # context.
        for key in ['display_name',
                    'tag',
                    'source',
                    'instructions_html',
                    'content_html',
                    'annotation_storage',
                    'token',
                    'diacritic_marks',
                    'default_tab',
                    'annotation_mode',
                    'is_course_staff']:
            self.assertIn(key, context)
|
samsu/neutron | refs/heads/master | tests/unit/cisco/cfg_agent/test_csr1kv_routing_driver.py | 17 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
import netaddr
from neutron.common import constants as l3_constants
from neutron.openstack.common import uuidutils
from neutron.tests import base
from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
cisco_csr1kv_snippets as snippets)
# Stub out optional third-party modules BEFORE importing the driver module,
# so the imports below succeed without ncclient/ciscoconfparse installed.
sys.modules['ncclient'] = mock.MagicMock()
sys.modules['ciscoconfparse'] = mock.MagicMock()
from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
    csr1kv_routing_driver as csr_driver)
from neutron.plugins.cisco.cfg_agent.service_helpers import routing_svc_helper
# Shared identifiers reused by the fixtures in the test case below.
_uuid = uuidutils.generate_uuid
FAKE_ID = _uuid()
PORT_ID = _uuid()
class TestCSR1kvRouting(base.BaseTestCase):
    """Unit tests for CSR1kvRoutingDriver.

    The netconf connection is replaced with a MagicMock (ncclient itself is
    stubbed at import time), so these tests verify which config snippets the
    driver would push to the device, not actual device behaviour.
    """
    def setUp(self):
        super(TestCSR1kvRouting, self).setUp()
        # Fake device credentials; no real device is contacted.
        device_params = {'management_ip_address': 'fake_ip',
                         'protocol_port': 22,
                         'credentials': {"username": "stack",
                                         "password": "cisco"},
                         }
        self.driver = csr_driver.CSR1kvRoutingDriver(
            **device_params)
        # Replace the netconf connection and response check with mocks.
        self.mock_conn = mock.MagicMock()
        self.driver._csr_conn = self.mock_conn
        self.driver._check_response = mock.MagicMock(return_value=True)
        # VRF name is derived from the router id, truncated to the device's
        # name-length limit.
        self.vrf = ('nrouter-' + FAKE_ID)[:csr_driver.CSR1kvRoutingDriver.
                                          DEV_NAME_LEN]
        self.driver._get_vrfs = mock.Mock(return_value=[self.vrf])
        # External gateway port fixture (VLAN 1000 on hosting port t2_p:0).
        self.ex_gw_ip = '20.0.0.30'
        self.ex_gw_cidr = '20.0.0.30/24'
        self.ex_gw_vlan = 1000
        self.ex_gw_gateway_ip = '20.0.0.1'
        self.ex_gw_port = {'id': _uuid(),
                           'network_id': _uuid(),
                           'fixed_ips': [{'ip_address': self.ex_gw_ip,
                                          'subnet_id': _uuid()}],
                           'subnet': {'cidr': self.ex_gw_cidr,
                                      'gateway_ip': self.ex_gw_gateway_ip},
                           'ip_cidr': self.ex_gw_cidr,
                           'mac_address': 'ca:fe:de:ad:be:ef',
                           'hosting_info': {'segmentation_id': self.ex_gw_vlan,
                                            'hosting_port_name': 't2_p:0'}}
        # Internal port fixture (VLAN 500 on hosting port t1_p:0).
        self.vlan_no = 500
        self.gw_ip_cidr = '10.0.0.1/16'
        self.gw_ip = '10.0.0.1'
        self.hosting_port = 't1_p:0'
        self.port = {'id': PORT_ID,
                     'ip_cidr': self.gw_ip_cidr,
                     'fixed_ips': [{'ip_address': self.gw_ip}],
                     'hosting_info': {'segmentation_id': self.vlan_no,
                                      'hosting_port_name': self.hosting_port}}
        int_ports = [self.port]
        # Router fixture wiring the two ports together.
        self.router = {
            'id': FAKE_ID,
            l3_constants.INTERFACE_KEY: int_ports,
            'enable_snat': True,
            'routes': [],
            'gw_port': self.ex_gw_port}
        self.ri = routing_svc_helper.RouterInfo(FAKE_ID, self.router)
        self.ri.internal_ports = int_ports
    def test_csr_get_vrf_name(self):
        self.assertEqual(self.driver._csr_get_vrf_name(self.ri), self.vrf)
    def test_create_vrf(self):
        confstr = snippets.CREATE_VRF % self.vrf
        self.driver._create_vrf(self.vrf)
        self.assertTrue(self.driver._csr_conn.edit_config.called)
        self.driver._csr_conn.edit_config.assert_called_with(target='running',
                                                            config=confstr)
    def test_remove_vrf(self):
        confstr = snippets.REMOVE_VRF % self.vrf
        self.driver._remove_vrf(self.vrf)
        self.assertTrue(self.driver._csr_conn.edit_config.called)
        self.driver._csr_conn.edit_config.assert_called_with(target='running',
                                                            config=confstr)
    def test_router_added(self):
        # Adding a router should create the corresponding VRF.
        confstr = snippets.CREATE_VRF % self.vrf
        self.driver.router_added(self.ri)
        self.assertTrue(self.driver._csr_conn.edit_config.called)
        self.driver._csr_conn.edit_config.assert_called_with(target='running',
                                                            config=confstr)
    def test_router_removed(self):
        confstr = snippets.REMOVE_VRF % self.vrf
        self.driver._remove_vrf(self.vrf)
        self.assertTrue(self.driver._csr_conn.edit_config.called)
        self.driver._csr_conn.edit_config.assert_called_once_with(
            target='running', config=confstr)
    def test_internal_network_added(self):
        # An internal network maps to a subinterface on GigabitEthernet0
        # tagged with the port's segmentation id.
        self.driver._create_subinterface = mock.MagicMock()
        interface = 'GigabitEthernet0' + '.' + str(self.vlan_no)
        self.driver.internal_network_added(self.ri, self.port)
        args = (interface, self.vlan_no, self.vrf, self.gw_ip,
                netaddr.IPAddress('255.255.0.0'))
        self.driver._create_subinterface.assert_called_once_with(*args)
    def test_internal_network_removed(self):
        self.driver._remove_subinterface = mock.MagicMock()
        interface = 'GigabitEthernet0' + '.' + str(self.vlan_no)
        self.driver.internal_network_removed(self.ri, self.port)
        self.driver._remove_subinterface.assert_called_once_with(interface)
    def test_routes_updated(self):
        dest_net = '20.0.0.0/16'
        next_hop = '10.0.0.255'
        route = {'destination': dest_net,
                 'nexthop': next_hop}
        dest = netaddr.IPAddress('20.0.0.0')
        destmask = netaddr.IPNetwork(dest_net).netmask
        self.driver._add_static_route = mock.MagicMock()
        self.driver._remove_static_route = mock.MagicMock()
        # 'replace' adds the static route; 'delete' removes it.
        self.driver.routes_updated(self.ri, 'replace', route)
        self.driver._add_static_route.assert_called_once_with(
            dest, destmask, next_hop, self.vrf)
        self.driver.routes_updated(self.ri, 'delete', route)
        self.driver._remove_static_route.assert_called_once_with(
            dest, destmask, next_hop, self.vrf)
    def test_floatingip(self):
        # Removing a floating IP toggles NAT off/on around clearing the
        # dynamic translations on the external subinterface.
        floating_ip = '15.1.2.3'
        fixed_ip = '10.0.0.3'
        self.driver._add_floating_ip = mock.MagicMock()
        self.driver._remove_floating_ip = mock.MagicMock()
        self.driver._add_interface_nat = mock.MagicMock()
        self.driver._remove_dyn_nat_translations = mock.MagicMock()
        self.driver._remove_interface_nat = mock.MagicMock()
        self.driver.floating_ip_added(self.ri, self.ex_gw_port,
                                      floating_ip, fixed_ip)
        self.driver._add_floating_ip.assert_called_once_with(
            floating_ip, fixed_ip, self.vrf)
        self.driver.floating_ip_removed(self.ri, self.ex_gw_port,
                                        floating_ip, fixed_ip)
        self.driver._remove_interface_nat.assert_called_once_with(
            'GigabitEthernet1.1000', 'outside')
        self.driver._remove_dyn_nat_translations.assert_called_once_with()
        self.driver._remove_floating_ip.assert_called_once_with(
            floating_ip, fixed_ip, self.vrf)
        self.driver._add_interface_nat.assert_called_once_with(
            'GigabitEthernet1.1000', 'outside')
    def test_external_gateway_added(self):
        self.driver._create_subinterface = mock.MagicMock()
        self.driver._add_default_static_route = mock.MagicMock()
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        args = (ext_interface, self.ex_gw_vlan, self.vrf, self.ex_gw_ip,
                netaddr.IPAddress('255.255.255.0'))
        self.driver.external_gateway_added(self.ri, self.ex_gw_port)
        self.driver._create_subinterface.assert_called_once_with(*args)
        self.driver._add_default_static_route.assert_called_once_with(
            self.ex_gw_gateway_ip, self.vrf)
    def test_enable_internal_network_NAT(self):
        self.driver._nat_rules_for_internet_access = mock.MagicMock()
        int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        args = (('acl_' + str(self.vlan_no)),
                netaddr.IPNetwork(self.gw_ip_cidr).network,
                netaddr.IPNetwork(self.gw_ip_cidr).hostmask,
                int_interface,
                ext_interface,
                self.vrf)
        self.driver.enable_internal_network_NAT(self.ri, self.port,
                                                self.ex_gw_port)
        self.driver._nat_rules_for_internet_access.assert_called_once_with(
            *args)
    def test_enable_internal_network_NAT_with_confstring(self):
        # Same as above, but verifies the actual netconf snippets pushed.
        self.driver._csr_conn.reset_mock()
        self.driver._check_acl = mock.Mock(return_value=False)
        int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        acl_no = ('acl_' + str(self.vlan_no))
        int_network = netaddr.IPNetwork(self.gw_ip_cidr).network
        int_net_mask = netaddr.IPNetwork(self.gw_ip_cidr).hostmask
        self.driver.enable_internal_network_NAT(self.ri, self.port,
                                                self.ex_gw_port)
        self.assert_edit_running_config(
            snippets.CREATE_ACL, (acl_no, int_network, int_net_mask))
        self.assert_edit_running_config(
            snippets.SET_DYN_SRC_TRL_INTFC, (acl_no, ext_interface, self.vrf))
        self.assert_edit_running_config(
            snippets.SET_NAT, (int_interface, 'inside'))
        self.assert_edit_running_config(
            snippets.SET_NAT, (ext_interface, 'outside'))
    def test_disable_internal_network_NAT(self):
        self.driver._remove_interface_nat = mock.MagicMock()
        self.driver._remove_dyn_nat_translations = mock.MagicMock()
        self.driver._remove_dyn_nat_rule = mock.MagicMock()
        int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        self.driver.disable_internal_network_NAT(self.ri, self.port,
                                                 self.ex_gw_port)
        args = (('acl_' + str(self.vlan_no)), ext_interface, self.vrf)
        self.driver._remove_interface_nat.assert_called_once_with(
            int_interface, 'inside')
        self.driver._remove_dyn_nat_translations.assert_called_once_with()
        self.driver._remove_dyn_nat_rule.assert_called_once_with(*args)
    def assert_edit_running_config(self, snippet_name, args):
        # Helper: assert the given snippet (optionally %-formatted with
        # `args`) was pushed to the running config at some point.
        if args:
            confstr = snippet_name % args
        else:
            confstr = snippet_name
        self.driver._csr_conn.edit_config.assert_any_call(
            target='running', config=confstr)
    def test_disable_internal_network_NAT_with_confstring(self):
        self.driver._cfg_exists = mock.Mock(return_value=True)
        int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        acl_no = 'acl_' + str(self.vlan_no)
        self.driver.disable_internal_network_NAT(self.ri, self.port,
                                                 self.ex_gw_port)
        self.assert_edit_running_config(
            snippets.REMOVE_NAT, (int_interface, 'inside'))
        self.assert_edit_running_config(snippets.CLEAR_DYN_NAT_TRANS, None)
        self.assert_edit_running_config(
            snippets.REMOVE_DYN_SRC_TRL_INTFC, (acl_no, ext_interface,
                                                self.vrf))
        self.assert_edit_running_config(snippets.REMOVE_ACL, acl_no)
|
andhit-r/account-financial-tools | refs/heads/8.0 | account_netting/tests/test_account_netting.py | 14 | # -*- coding: utf-8 -*-
# (c) 2015 Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import openerp.tests.common as common
from openerp import workflow
class TestAccountNetting(common.TransactionCase):
    """Tests for the AR/AP netting (compensation) wizard.

    A partner that is both customer and supplier gets a 100.0 customer
    invoice and a 1200.0 supplier invoice; compensating them should fully
    reconcile the (smaller) receivable and partially reconcile the payable.
    """
    def setUp(self):
        super(TestAccountNetting, self).setUp()
        # Partner acting as both supplier and customer.
        self.partner = self.env['res.partner'].create(
            {'supplier': True,
             'customer': True,
             'name': "Supplier/Customer"
             })
        self.invoice_model = self.env['account.invoice']
        self.account_receivable = self.env.ref('account.a_recv')
        # Customer invoice for 100.0, validated via the workflow engine.
        self.customer_invoice = self.invoice_model.create(
            {'journal_id': self.env.ref('account.sales_journal').id,
             'type': 'out_invoice',
             'partner_id': self.partner.id,
             'account_id': self.account_receivable.id,
             'invoice_line': [(0, 0, {'name': 'Test',
                                      'price_unit': 100.0})],
             })
        workflow.trg_validate(
            self.uid, 'account.invoice', self.customer_invoice.id,
            'invoice_open', self.cr)
        customer_move = self.customer_invoice.move_id
        # Receivable move line of the customer invoice.
        self.move_line_1 = customer_move.line_id.filtered(
            lambda x: x.account_id == self.account_receivable)
        self.account_payable = self.env.ref('account.a_pay')
        # Supplier invoice for 1200.0, validated via the workflow engine.
        self.supplier_invoice = self.invoice_model.create(
            {'journal_id': self.env.ref('account.expenses_journal').id,
             'type': 'in_invoice',
             'partner_id': self.partner.id,
             'account_id': self.account_payable.id,
             'invoice_line': [(0, 0, {'name': 'Test',
                                      'price_unit': 1200.0})],
             })
        workflow.trg_validate(
            self.uid, 'account.invoice', self.supplier_invoice.id,
            'invoice_open', self.cr)
        supplier_move = self.supplier_invoice.move_id
        # Payable move line of the supplier invoice.
        self.move_line_2 = supplier_move.line_id.filtered(
            lambda x: x.account_id == self.account_payable)
    def test_compensation(self):
        obj = self.env['account.move.make.netting'].with_context(
            active_ids=[self.move_line_1.id, self.move_line_2.id])
        wizard = obj.create(
            {'move_lines': [(6, 0, [self.move_line_1.id,
                                    self.move_line_2.id])],
             'journal': self.env.ref('account.miscellaneous_journal').id})
        res = wizard.button_compensate()
        move = self.env['account.move'].browse(res['res_id'])
        # The netting move compensates 100.0 on each side.
        self.assertEqual(
            len(move.line_id), 2,
            'AR/AP netting move has an incorrect line number')
        move_line_receivable = move.line_id.filtered(
            lambda x: x.account_id == self.account_receivable)
        self.assertEqual(
            move_line_receivable.credit, 100,
            'Incorrect credit amount for receivable move line')
        self.assertTrue(
            move_line_receivable.reconcile_id,
            'Receivable move line should be totally reconciled')
        move_line_payable = move.line_id.filtered(
            lambda x: x.account_id == self.account_payable)
        self.assertEqual(
            move_line_payable.debit, 100,
            'Incorrect debit amount for payable move line')
        self.assertTrue(
            move_line_payable.reconcile_partial_id,
            'Receivable move line should be partially reconciled')
|
AndrewSallans/osf.io | refs/heads/develop | website/addons/osfstorage/errors.py | 4 | #!/usr/bin/env python
# encoding: utf-8
class OsfStorageError(Exception):
    """Base class for all errors raised by the osfstorage add-on."""
    pass
class PathLockedError(OsfStorageError):
    """Raised when the target path is locked."""
    pass
class SignatureConsumedError(OsfStorageError):
    """Raised when a signature has already been consumed."""
    pass
class VersionNotFoundError(OsfStorageError):
    """Raised when a requested version cannot be found."""
    pass
class SignatureMismatchError(OsfStorageError):
    """Raised when a signature does not match the expected value."""
    pass
class VersionStatusError(OsfStorageError):
    """Raised when a version is in an unexpected status."""
    pass
class DeleteError(OsfStorageError):
    """Raised when a delete operation fails."""
    pass
class UndeleteError(OsfStorageError):
    """Raised when an undelete (restore) operation fails."""
    pass
class InvalidVersionError(OsfStorageError):
    """Raised when a version identifier is invalid."""
    pass
class MissingFieldError(OsfStorageError):
    """Raised when a required field is missing."""
    pass
|
allanlei/django-apipy | refs/heads/master | api/soap/service.py | 1 | from api.service import ApiService
from api.soap.message import WebServiceDocument
from api.errors import ValidationError
from api.utils import dict_search
from lxml import etree
from lxml.builder import ElementMaker
from django.http import HttpResponse
from namespaces import DEFAULT_NAMESPACES, SOAP_ENV, XSI
from containers import WebServiceFunction
class WebService(ApiService):
    """SOAP web-service endpoint: parses SOAP envelopes, dispatches the
    requested function, packages the result in a SOAP response, and serves
    the generated WSDL document.
    """
    # NOTE(review): Meta subclasses ApiService here rather than object --
    # looks unusual; confirm it is intentional.
    class Meta(ApiService):
        wrapper = WebServiceFunction
    def __init__(self, target_ns=None, namespaces=DEFAULT_NAMESPACES, ns_name=None, *args, **kwargs):
        # NOTE(review): DEFAULT_NAMESPACES is a shared (mutable) default
        # argument and add_tns_entry() below calls self.namespaces.update();
        # constructing a service without passing `namespaces` therefore
        # mutates the module-level default -- likely unintended.
        super(WebService, self).__init__(*args, **kwargs)
        self.target_ns = target_ns
        self.namespaces = namespaces
        self.add_tns_entry(ns_name, self.target_ns)
        self.wsdl = WebServiceDocument(
            namespaces=self.namespaces,
            target_ns=self.target_ns,
            service_name=self.service_name,
            service_url=self.service_url
        )
    def add_tns_entry(self, tns_name, tns_namespace):
        # Pick a unique prefix ("ns", "ns1", ...) for the target namespace;
        # the loop stops when the candidate prefix is unused or already
        # mapped to the same namespace URI.
        counter = None
        tns_name = '%s%s' % (tns_name or 'ns', counter or '')
        while self.namespaces.search(tns_name) not in [tns_namespace, None]:
            counter = (counter or 0) + 1
            tns_name = '%s%s' % (tns_name or 'ns', counter or '')
        self.namespaces.update({tns_name: tns_namespace})
    def add_method_hook(self, fn):
        # Keep the WSDL's function list in sync when methods are registered.
        self.wsdl.functions = [fn for fn in self.functions.values()]
    def generate_wsdl(self, request):
        # Serve the generated WSDL document as text/xml.
        return HttpResponse(str(self.wsdl), content_type='text/xml')
    def get_function(self, function):
        # `function` is the lxml element of the requested operation: strip
        # the namespace to recover its local name, and collect its child
        # elements (in the target namespace) as keyword arguments.
        name = function.tag.replace('{%s}' % function.nsmap[function.prefix], '')
        arg_elements = function.xpath('%s:*' % self.namespaces.search(value=self.target_ns), namespaces=self.namespaces)
        args = dict([(arg.tag.replace('{%s}' % arg.nsmap[arg.prefix], ''), arg.text) for arg in arg_elements])
        return super(WebService, self).get_function(name, **args)
    def validate_request(self, request, accepted=['POST']):
        # SOAP envelopes are only accepted via POST by default.
        return request.method in accepted
    def parse_request(self, request):
        # Parse the raw envelope and return the single requested function
        # element; raises ValidationError for malformed envelopes.
        message = etree.fromstring(request.raw_post_data)
        header = message.xpath('%s:Header' % SOAP_ENV, namespaces=self.namespaces)[0]
        body = message.xpath('%s:Body' % SOAP_ENV, namespaces=self.namespaces)[0]
        if header is None or body is None:
            raise ValidationError('Not a SOAP request')
        if len(header) == 0 and len(body) == 0:
            raise ValidationError('Empty SOAP envelope')
        if len(body) > 1:
            raise ValidationError('Too many requested functions')
        functions = body.xpath('%s:*' % self.namespaces.search(value=self.target_ns), namespaces=self.namespaces)
        return functions[0]
    def process(self, request, parsed_data):
        # Dispatch the parsed function element to its registered handler;
        # returns both the result and the handler for use by package().
        function = parsed_data
        wsf, args = self.get_function(function)
        result = wsf.dispatch(request, **args)
        return result, wsf
    def package(self, request, response, function=None):
        # Wrap the handler's result in a SOAP envelope of the shape:
        # Envelope > Header + Body > <X>Response > <X>Result (xsi:typed).
        E = ElementMaker(namespace=self.namespaces.search(SOAP_ENV), nsmap=self.namespaces)
        wsf = function
        envelope = E.Envelope(
            E.Header(),
            E.Body(
                E('{%s}%sResponse' % (self.target_ns, wsf.function_name),
                    E('{%s}%sResult' % (self.target_ns, wsf.function_name),
                        response,
                        **{'{%s}type' % self.namespaces.search(XSI): '%s:%s' % (self.namespaces.search(value=wsf.outtype.Meta.namespace), wsf.outtype.Meta.name)}
                    )
                )
            )
        )
        return HttpResponse(etree.tostring(envelope), content_type='text/xml')
|
beni55/sentry | refs/heads/master | src/sentry/__init__.py | 4 | """
sentry
~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import os.path
try:
    # Resolve the installed distribution's version via pkg_resources.
    VERSION = __import__('pkg_resources') \
        .get_distribution('sentry').version
except Exception:
    # Not installed / pkg_resources unavailable: fall back gracefully.
    # (`except Exception, e` was Python-2-only syntax and `e` was unused.)
    VERSION = 'unknown'
def _get_git_revision(path):
revision_file = os.path.join(path, 'refs', 'heads', 'master')
if not os.path.exists(revision_file):
return None
fh = open(revision_file, 'r')
try:
return fh.read().strip()[:7]
finally:
fh.close()
def get_revision():
    """
    :returns: Revision number of this branch/checkout, if available. None if
        no revision number can be determined.
    """
    package_dir = os.path.dirname(__file__)
    # Walk two levels up from the package to the checkout root.
    checkout_dir = os.path.normpath(
        os.path.join(package_dir, os.pardir, os.pardir))
    git_dir = os.path.join(checkout_dir, '.git')
    if not os.path.exists(git_dir):
        return None
    return _get_git_revision(git_dir)
def get_version():
    """Return VERSION, annotated with the git build suffix when known."""
    if __build__:
        return '%s (%s)' % (VERSION, __build__)
    return VERSION
# Resolved once at import time; None when not running from a git checkout.
__build__ = get_revision()
__docformat__ = 'restructuredtext en'
|
home-assistant/home-assistant | refs/heads/dev | tests/util/test_network.py | 5 | """Test Home Assistant volume utility functions."""
from ipaddress import ip_address
import homeassistant.util.network as network_util
def test_is_loopback():
    """Test loopback addresses."""
    loopbacks = (
        "127.0.0.2",
        "127.0.0.1",
        "::1",
        "::ffff:127.0.0.0",
        "0:0:0:0:0:0:0:1",
        "0:0:0:0:0:ffff:7f00:1",
    )
    for addr in loopbacks:
        assert network_util.is_loopback(ip_address(addr))
    # Public IPv4/IPv6 addresses are not loopback.
    for addr in ("104.26.5.238", "2600:1404:400:1a4::356e"):
        assert not network_util.is_loopback(ip_address(addr))
def test_is_private():
    """Test private addresses."""
    privates = ("192.168.0.1", "172.16.12.0", "10.5.43.3", "fd12:3456:789a:1::1")
    for addr in privates:
        assert network_util.is_private(ip_address(addr))
    # Loopback addresses are not classified as private.
    for addr in ("127.0.0.1", "::1"):
        assert not network_util.is_private(ip_address(addr))
def test_is_link_local():
    """Test link local addresses."""
    link_local = ip_address("169.254.12.3")
    loopback = ip_address("127.0.0.1")
    assert network_util.is_link_local(link_local)
    assert not network_util.is_link_local(loopback)
def test_is_invalid():
    """Test invalid address."""
    unspecified = ip_address("0.0.0.0")
    loopback = ip_address("127.0.0.1")
    assert network_util.is_invalid(unspecified)
    assert not network_util.is_invalid(loopback)
def test_is_local():
    """Test local addresses."""
    for addr in ("192.168.0.1", "127.0.0.1"):
        assert network_util.is_local(ip_address(addr))
    # A public address is not local.
    assert not network_util.is_local(ip_address("208.5.4.2"))
def test_is_ip_address():
    """Test if strings are IP addresses."""
    for value in ("192.168.0.1", "8.8.8.8", "::ffff:127.0.0.0"):
        assert network_util.is_ip_address(value)
    # Out-of-range octets, CIDR networks, and hostnames are not addresses.
    for value in ("192.168.0.999", "192.168.0.0/24", "example.com"):
        assert not network_util.is_ip_address(value)
def test_normalize_url():
    """Test the normalizing of URLs."""
    cases = (
        # Already-normal URLs pass through unchanged.
        ("http://example.com", "http://example.com"),
        ("https://example.com", "https://example.com"),
        # Trailing slash is stripped.
        ("https://example.com/", "https://example.com"),
        # Default ports are dropped for the matching scheme only.
        ("https://example.com:443", "https://example.com"),
        ("http://example.com:80", "http://example.com"),
        ("https://example.com:80", "https://example.com:80"),
        ("http://example.com:443", "http://example.com:443"),
        # Default port and trailing slash are both removed.
        ("https://example.com:443/test/", "https://example.com/test"),
    )
    for url, expected in cases:
        assert network_util.normalize_url(url) == expected
|
alexandrevicenzi/editor | refs/heads/master | hunspell-0.1/setup.py | 1 | #!/usr/bin/python
from distutils.core import setup, Extension
# C extension module linking against the system hunspell-1.3 library.
# NOTE(review): distutils is deprecated in current Python; consider
# migrating to setuptools when this build script is next revised.
main = Extension( 'hunspell',
        define_macros = [('_LINUX',None)],
        libraries = ['hunspell-1.3'],
        include_dirs = ['/usr/include/hunspell'],
        sources = ['hunspell.c'],
        extra_compile_args = ['-Wall'])
setup( name = "hunspell",
    version = "0.1",
    description = "Module for the Hunspell spellchecker engine",
    author="Sayamindu Dasgupta",
    author_email="sayamindu@gmail.com",
    url="http://code.google.com/p/pyhunspell",
    ext_modules = [main])
|
dnlm92/chokoretto | refs/heads/master | temp/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/__init__.py | 1777 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.3.0"
from sys import version_info
def detect(aBuf):
    """Run a UniversalDetector over *aBuf* (bytes) and return its result."""
    # Reject text input: the detector works on raw bytes only. The
    # `unicode` name is only evaluated on Python 2 thanks to short-circuit.
    if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or
            (version_info >= (3, 0) and not isinstance(aBuf, bytes))):
        raise ValueError('Expected a bytes object, not a unicode object')

    from . import universaldetector
    detector = universaldetector.UniversalDetector()
    detector.reset()
    detector.feed(aBuf)
    detector.close()
    return detector.result
|
phil-lopreiato/the-blue-alliance | refs/heads/master | models/fcm/platform_config.py | 3 | from consts.fcm.platform_priority import PlatformPriority
class PlatformConfig(object):
    """
    Represents platform-specific push notification configuration options.
    https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages
    Args:
        collapse_key (string): Collapse key for push notification - may be None.
        priority (int): Priority for push notification - may be None.
    """
    # TODO: Add ttl
    def __init__(self, collapse_key=None, priority=None):
        """
        Args:
            collapse_key (string): Collapse key for push notification - may be None.
            priority (int): Priority for push notification - may be None.
        """
        self.collapse_key = collapse_key
        # Check that our priority looks right
        # NOTE(review): a falsy priority (e.g. 0) skips validation here but
        # is still stored below -- confirm 0 is not a legal priority value.
        if priority:
            PlatformPriority.validate(priority)
        self.priority = priority
    def __str__(self):
        return 'PlatformConfig(collapse_key="{}" priority={})'.format(self.collapse_key, self.priority)
    def platform_config(self, platform_type):
        """ Return a platform-specific configuration object for a platform_type, given the platform payload.
        Args:
            platform_type (PlatformType): Type for the platform config.
        Returns:
            object: Either a AndroidConfig, ApnsConfig, or WebpushConfig depending on the platform_type.
        """
        from consts.fcm.platform_type import PlatformType
        # Validate that platform_type is supported
        PlatformType.validate(platform_type)
        from firebase_admin import messaging
        if platform_type == PlatformType.ANDROID:
            # Android takes collapse key / priority as first-class fields.
            priority = PlatformPriority.platform_priority(platform_type, self.priority) \
                if self.priority is not None else None
            return messaging.AndroidConfig(
                collapse_key=self.collapse_key,
                priority=priority
            )
        else:
            # Other platforms carry collapse key / priority as raw headers.
            headers = {}
            if self.collapse_key:
                headers[PlatformType.collapse_key_key(platform_type)] = self.collapse_key
            if self.priority is not None:
                priority = PlatformPriority.platform_priority(platform_type, self.priority)
                headers[PlatformType.priority_key(platform_type)] = priority
            # Null out headers if they're empty
            headers = headers if headers else None
            if platform_type == PlatformType.APNS:
                # Create an empty `payload` as a workaround for an FCM bug
                # https://github.com/the-blue-alliance/the-blue-alliance/pull/2557#discussion_r310365295
                payload = messaging.APNSPayload(aps=messaging.Aps())
                return messaging.APNSConfig(headers=headers, payload=payload)
            elif platform_type == PlatformType.WEBPUSH:
                return messaging.WebpushConfig(headers=headers)
            else:
                raise TypeError("Unsupported PlatformPayload platform_type: {}".format(platform_type))
|
archf/ansible | refs/heads/devel | lib/ansible/plugins/lookup/items.py | 46 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    """The 'items' lookup: returns the supplied terms as one flat list."""

    def run(self, terms, **kwargs):
        # Delegate to the inherited LookupBase._flatten helper; extra
        # keyword arguments from the caller are accepted but unused.
        return self._flatten(terms)
|
SyrakuShaikh/python | refs/heads/master | hackerrank/project_euler/7.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2017-03-23 Thu 19:14:51 Shaikh>
"""
Project Euler #7: 10001st Prime
By listing the first six prime numbers: 2, 3, 5, 7, 11 and 13, we can see that
the 6th prime is 13. What is the Nth prime number?
"""
from math import sqrt, ceil
# Primes found so far; grows across calls so repeated queries are memoized.
WHEELS = [2, 3, 5, 7, 11, 13, 17, 19]


def sieve(total: int):
    """Return the *total*-th prime number (1-indexed).

    Extends the module-level WHEELS list by trial division until it holds
    at least *total* primes, then returns ``WHEELS[total - 1]``.

    Note: uses ``w * w > candidate`` as the "no divisor up to sqrt" test
    instead of ``ceil(sqrt(candidate)) < w`` — exact integer arithmetic,
    immune to the float rounding errors sqrt() can introduce for large
    candidates.
    """
    candidate = WHEELS[-1] + 2
    while len(WHEELS) < total:
        for w in WHEELS:
            if w * w > candidate:
                # No prime factor <= sqrt(candidate) found: it is prime.
                WHEELS.append(candidate)
                break
            if candidate % w == 0:
                # Composite; move on to the next odd candidate.
                break
        candidate += 2
    return WHEELS[total - 1]
# Read the number of test cases, then print the n-th prime for each query.
T = int(input().strip())
for i in range(T):
    n = int(input().strip())
    print(sieve(n))
|
LinusU/fbthrift | refs/heads/master | thrift/compiler/test/fixtures/namespace/gen-py/my/namespacing/__init__.py | 146 | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
|
JianyuWang/neutron | refs/heads/master | neutron/tests/tempest/services/identity/v3/json/endpoints_client.py | 24 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from neutron.tests.tempest.common import service_client
class EndPointClientJSON(service_client.ServiceClient):
    """REST client for the Keystone v3 ``endpoints`` resource."""

    api_version = "v3"

    def list_endpoints(self):
        """GET endpoints."""
        resp, body = self.get('endpoints')
        self.expected_success(200, resp.status)
        parsed = json.loads(body)
        return service_client.ResponseBodyList(resp, parsed['endpoints'])

    def create_endpoint(self, service_id, interface, url, **kwargs):
        """Create endpoint.

        Normally this function wouldn't allow setting values that are not
        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.
        """
        # `force_enabled` deliberately bypasses the boolean expectation.
        if 'force_enabled' in kwargs:
            enabled = kwargs.get('force_enabled', None)
        else:
            enabled = kwargs.get('enabled', None)
        endpoint = {
            'service_id': service_id,
            'interface': interface,
            'url': url,
            'region': kwargs.get('region', None),
            'enabled': enabled,
        }
        resp, body = self.post('endpoints', json.dumps({'endpoint': endpoint}))
        self.expected_success(201, resp.status)
        parsed = json.loads(body)
        return service_client.ResponseBody(resp, parsed['endpoint'])

    def update_endpoint(self, endpoint_id, service_id=None, interface=None,
                        url=None, region=None, enabled=None, **kwargs):
        """Updates an endpoint with given parameters.

        Normally this function wouldn't allow setting values that are not
        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.
        """
        # Only fields explicitly provided by the caller go into the PATCH.
        updates = {}
        for field, value in (('service_id', service_id),
                             ('interface', interface),
                             ('url', url),
                             ('region', region)):
            if value is not None:
                updates[field] = value
        if 'force_enabled' in kwargs:
            updates['enabled'] = kwargs['force_enabled']
        elif enabled is not None:
            updates['enabled'] = enabled
        resp, body = self.patch('endpoints/%s' % endpoint_id,
                                json.dumps({'endpoint': updates}))
        self.expected_success(200, resp.status)
        parsed = json.loads(body)
        return service_client.ResponseBody(resp, parsed['endpoint'])

    def delete_endpoint(self, endpoint_id):
        """Delete endpoint."""
        resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
        self.expected_success(204, resp_header.status)
        return service_client.ResponseBody(resp_header, resp_body)
|
josh-willis/pycbc | refs/heads/master | pycbc/vetoes/autochisq.py | 12 | # Copyright (C) 2013 Stas Babak
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from pycbc.filter import make_frequency_series
from pycbc.filter import matched_filter_core
from pycbc.types import Array
import numpy as np
import logging
BACKEND_PREFIX="pycbc.vetoes.autochisq_"
def autochisq_from_precomputed(sn, corr_sn, hautocorr, indices,
                               stride=1, num_points=None, oneside=None,
                               twophase=True, maxvalued=False):
    """
    Compute correlation (two sided) between template and data
    and compares with autocorrelation of the template: C(t) = IFFT(A*A/S(f))

    Parameters
    ----------
    sn: Array[complex]
        normalized (!) array of complex snr for the template that produced the
        trigger(s) being tested
    corr_sn : Array[complex]
        normalized (!) array of complex snr for the template that you want to
        produce a correlation chisq test for. In the [common] case that sn and
        corr_sn are the same, you are computing auto-correlation chisq.
    hautocorr: Array[complex]
        time domain autocorrelation for the template
    indices: Array[int]
        compute correlation chisquare at the points specified in this array,
    num_points: [int, optional; default=None]
        Number of points used for autochisq on each side, if None all points
        are used.
    stride: [int, optional; default = 1]
        stride for points selection for autochisq
        total length <= 2*num_points*stride
    oneside: [str, optional; default=None]
        whether to use one or two sided autochisquare. If None (or not
        provided) twosided chi-squared will be used. If given, options are
        'left' or 'right', to do one-sided chi-squared on the left or right.
    twophase: Boolean, optional; default=True
        If True calculate the auto-chisq using both phases of the filter.
        If False only use the phase of the obtained trigger(s).
    maxvalued: Boolean, optional; default=False
        Return the largest auto-chisq at any of the points tested if True.
        If False, return the sum of auto-chisq at all points tested.

    Returns
    -------
    autochisq: [tuple]
        returns autochisq values and snr corresponding to the instances
        of time defined by indices
    """
    Nsnr = len(sn)
    achisq = np.zeros(len(indices))
    # Cap num_points so the sampled offsets never exceed the SNR length.
    num_points_all = int(Nsnr/stride)
    if num_points is None:
        num_points = num_points_all
    if (num_points > num_points_all):
        num_points = num_points_all

    # Cos/sin of the SNR phase at each trigger index.
    snrabs = np.abs(sn[indices])
    cphi_array = (sn[indices]).real / snrabs
    sphi_array = (sn[indices]).imag / snrabs

    start_point = - stride*num_points
    end_point = stride*num_points+1

    # Offsets (in samples, relative to each trigger) at which chisq is
    # evaluated; zero offset (the trigger itself) is always excluded.
    if oneside == 'left':
        achisq_idx_list = np.arange(start_point, 0, stride)
    elif oneside == 'right':
        achisq_idx_list = np.arange(stride, end_point, stride)
    else:
        achisq_idx_list_pt1 = np.arange(start_point, 0, stride)
        achisq_idx_list_pt2 = np.arange(stride, end_point, stride)
        achisq_idx_list = np.append(achisq_idx_list_pt1,
                                    achisq_idx_list_pt2)

    # Template autocorrelation sampled at those offsets, and the
    # per-offset chisq normalization 1 - |C|^2.
    hauto_corr_vec = hautocorr[achisq_idx_list]
    hauto_norm = hauto_corr_vec.real*hauto_corr_vec.real
    # REMOVE THIS LINE TO REPRODUCE OLD RESULTS
    hauto_norm += hauto_corr_vec.imag*hauto_corr_vec.imag
    chisq_norm = 1.0 - hauto_norm

    for ip,ind in enumerate(indices):
        curr_achisq_idx_list = achisq_idx_list + ind
        cphi = cphi_array[ip]
        sphi = sphi_array[ip]
        # By construction, the other "phase" of the SNR is 0
        snr_ind = sn[ind].real*cphi + sn[ind].imag*sphi

        # Wrap index if needed (maybe should fail in this case?)
        if curr_achisq_idx_list[0] < 0:
            curr_achisq_idx_list[curr_achisq_idx_list < 0] += Nsnr
        if curr_achisq_idx_list[-1] > (Nsnr - 1):
            curr_achisq_idx_list[curr_achisq_idx_list > (Nsnr-1)] -= Nsnr

        # In-phase residual: observed correlation minus what the template
        # autocorrelation predicts for a real signal of this SNR.
        z = corr_sn[curr_achisq_idx_list].real*cphi + \
            corr_sn[curr_achisq_idx_list].imag*sphi
        dz = z - hauto_corr_vec.real*snr_ind
        curr_achisq_list = dz*dz/chisq_norm

        if twophase:
            # Quadrature-phase residual added with the same normalization.
            chisq_norm = 1.0 - hauto_norm
            z = -corr_sn[curr_achisq_idx_list].real*sphi + \
                corr_sn[curr_achisq_idx_list].imag*cphi
            dz = z - hauto_corr_vec.imag*snr_ind
            curr_achisq_list += dz*dz/chisq_norm

        if maxvalued:
            achisq[ip] = curr_achisq_list.max()
        else:
            achisq[ip] = curr_achisq_list.sum()

    # Degrees of freedom: one per tested point, doubled for two-sided
    # sampling and doubled again when both phases are used.
    dof = num_points
    if oneside is None:
        dof = dof * 2
    if twophase:
        dof = dof * 2

    return dof, achisq, indices
class SingleDetAutoChisq(object):
    """Class that handles precomputation and memory management for efficiently
    running the auto chisq in a single detector inspiral analysis.
    """
    def __init__(self, stride, num_points, onesided=None, twophase=False,
                 reverse_template=False, take_maximum_value=False,
                 maximal_value_dof=None):
        """
        Initialize autochisq calculation instance

        Parameters
        -----------
        stride : int
            Number of sample points between points at which auto-chisq is
            calculated.
        num_points : int
            Number of sample points at which to calculate auto-chisq in each
            direction from the trigger
        onesided : optional, default=None, choices=['left','right']
            If None (default), calculate auto-chisq in both directions from the
            trigger. If left (backwards in time) or right (forwards in time)
            calculate auto-chisq only in that direction.
        twophase : optional, default=False
            If False calculate auto-chisq using only the phase of the trigger.
            If True, compare also against the orthogonal phase.
        reverse_template : optional, default=False
            If true, time-reverse the template before calculating auto-chisq.
            In this case this is more of a cross-correlation chisq than auto.
        take_maximum_value : optional, default=False
            If provided, instead of adding the auto-chisq value at each sample
            point tested, return only the maximum value.
        maximal_value_dof : int, required if using take_maximum_value
            If using take_maximum_value the expected value is not known. This
            value specifies what to store in the cont_chisq_dof output.
        """
        # A non-positive stride disables the auto-chisq test entirely.
        if stride > 0:
            self.do = True
            self.column_name = "cont_chisq"
            self.table_dof_name = "cont_chisq_dof"
            # Track the expected degrees of freedom, mirroring the dof
            # logic in autochisq_from_precomputed.
            self.dof = num_points
            self.num_points = num_points
            self.stride = stride
            self.one_sided = onesided
            if (onesided is not None):
                self.dof = self.dof * 2
            self.two_phase = twophase
            if self.two_phase:
                self.dof = self.dof * 2
            self.reverse_template = reverse_template
            self.take_maximum_value=take_maximum_value
            if self.take_maximum_value:
                if maximal_value_dof is None:
                    err_msg = "Must provide the maximal_value_dof keyword "
                    err_msg += "argument if using the take_maximum_value "
                    err_msg += "option."
                    raise ValueError(err_msg)
                self.dof = maximal_value_dof

            # Cached template autocorrelation and the (template, psd)
            # identity pair it was computed for.
            self._autocor = None
            self._autocor_id = None
        else:
            self.do = False

    def values(self, sn, indices, template, psd, norm, stilde=None,
               low_frequency_cutoff=None, high_frequency_cutoff=None):
        """
        Calculate the auto-chisq at the specified indices.

        Parameters
        -----------
        sn : Array[complex]
            SNR time series of the template for which auto-chisq is being
            computed. Provided unnormalized.
        indices : Array[int]
            List of points at which to calculate auto-chisq
        template : Pycbc template object
            The template for which we are calculating auto-chisq
        psd : Pycbc psd object
            The PSD of the data being analysed
        norm : float
            The normalization factor to apply to sn
        stilde : Pycbc data object, needed if using reverse-template
            The data being analysed. Only needed if using reverse-template,
            otherwise ignored
        low_frequency_cutoff : float
            The lower frequency to consider in matched-filters
        high_frequency_cutoff : float
            The upper frequency to consider in matched-filters
        """
        if self.do and (len(indices) > 0):
            htilde = make_frequency_series(template)

            # Check if we need to recompute the autocorrelation
            key = (id(template), id(psd))
            if key != self._autocor_id:
                logging.info("Calculating autocorrelation")

                if not self.reverse_template:
                    # Autocorrelation: template filtered against itself,
                    # normalized so the zero-lag value is 1.
                    Pt, _, P_norm = matched_filter_core(htilde,
                                     htilde, psd=psd,
                                     low_frequency_cutoff=low_frequency_cutoff,
                                     high_frequency_cutoff=high_frequency_cutoff)
                    Pt = Pt * (1./ Pt[0])
                    self._autocor = Array(Pt, copy=True)
                else:
                    Pt, _, P_norm = matched_filter_core(htilde.conj(),
                                     htilde, psd=psd,
                                     low_frequency_cutoff=low_frequency_cutoff,
                                     high_frequency_cutoff=high_frequency_cutoff)

                    # T-reversed template has same norm as forward template
                    # so we can normalize using that
                    # FIXME: Here sigmasq has to be cast to a float or the
                    #        code is really slow ... why??
                    norm_fac = P_norm / float(((template.sigmasq(psd))**0.5))
                    Pt *= norm_fac
                    self._autocor = Array(Pt, copy=True)
                self._autocor_id = key

            logging.info("...Calculating autochisquare")
            sn = sn*norm
            if self.reverse_template:
                # Cross-correlation chisq: filter the time-reversed
                # template against the data as well.
                assert(stilde is not None)
                asn, _, ahnrm = matched_filter_core(htilde.conj(), stilde,
                                 low_frequency_cutoff=low_frequency_cutoff,
                                 high_frequency_cutoff=high_frequency_cutoff,
                                 h_norm=template.sigmasq(psd))
                correlation_snr = asn * ahnrm
            else:
                correlation_snr = sn

            achi_list = np.array([])
            index_list = np.array(indices)
            dof, achi_list, _ = autochisq_from_precomputed(sn, correlation_snr,
                                   self._autocor, index_list, stride=self.stride,
                                   num_points=self.num_points,
                                   oneside=self.one_sided, twophase=self.two_phase,
                                   maxvalued=self.take_maximum_value)
            self.dof = dof
            return achi_list
class SingleDetSkyMaxAutoChisq(SingleDetAutoChisq):
    """Stub for precessing auto chisq if anyone ever wants to code it up.
    """
    def __init__(self, *args, **kwds):
        super(SingleDetSkyMaxAutoChisq, self).__init__(*args, **kwds)

    def values(self, *args, **kwargs):
        # Disabled instances (stride <= 0) behave like the parent: no-op.
        if not self.do:
            return None
        err_msg = ("Precessing single detector sky-max auto chisq has not "
                   "been written. If you want to use it, why not help "
                   "write it?")
        raise NotImplementedError(err_msg)
|
jefffohl/nupic | refs/heads/master | examples/opf/experiments/multistep/hotgym_best_sp_5step/description.py | 32 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
# the sub-experiment configuration
# Overrides applied on top of the base hotgym experiment description:
# nontemporal multi-step inference, an adaptive scalar encoder for the
# consumption field, and day-of-week/time-of-day date encoders (the
# weekend encoder is explicitly disabled with None).
config = \
{ 'modelParams': { 'clParams': { 'clVerbosity': 0},
                   'inferenceType': 'NontemporalMultiStep',
                   'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
                                                                    'fieldname': u'consumption',
                                                                    'n': 28,
                                                                    'name': u'consumption',
                                                                    'type': 'AdaptiveScalarEncoder',
                                                                    'w': 21},
                                                   'timestamp_dayOfWeek': { 'dayOfWeek': ( 21,
                                                                                           3),
                                                                            'fieldname': u'timestamp',
                                                                            'name': u'timestamp_dayOfWeek',
                                                                            'type': 'DateEncoder'},
                                                   'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                                                            'name': u'timestamp_timeOfDay',
                                                                            'timeOfDay': ( 21,
                                                                                           1),
                                                                            'type': 'DateEncoder'},
                                                   'timestamp_weekend': None},
                                     'verbosity': 0},
                   'spParams': { },
                   'tpParams': { 'activationThreshold': 13,
                                 'minThreshold': 9,
                                 'verbosity': 0}}}

# Merge these overrides into the base description and re-export all of
# its names (model description, control, etc.) from this module.
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
|
weiting-chen/manila | refs/heads/master | manila/tests/conf_fixture.py | 4 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_policy import opts
from manila.common import config
CONF = config.CONF
def set_defaults(conf):
    """Override config defaults on *conf* with values for the test suite.

    Uses _safe_set_of_opts throughout, so options that are not registered
    (because their defining module was never imported) are silently
    skipped rather than raising.
    """
    _safe_set_of_opts(conf, 'verbose', True)
    # Repository root, derived from this file's location.
    _safe_set_of_opts(conf, 'state_path', os.path.abspath(
        os.path.join(os.path.dirname(__file__),
                     '..',
                     '..')))
    # In-memory sqlite keeps the unit tests self-contained.
    _safe_set_of_opts(conf, 'connection', "sqlite://", group='database')
    _safe_set_of_opts(conf, 'sqlite_synchronous', False)
    _POLICY_PATH = os.path.abspath(os.path.join(CONF.state_path,
                                                'manila/tests/policy.json'))
    opts.set_defaults(conf, policy_file=_POLICY_PATH)
    _safe_set_of_opts(conf, 'service_instance_user', 'fake_user')
    _API_PASTE_PATH = os.path.abspath(os.path.join(CONF.state_path,
                                                   'etc/manila/api-paste.ini'))
    _safe_set_of_opts(conf, 'api_paste_config', _API_PASTE_PATH)
    _safe_set_of_opts(conf, 'share_driver',
                      'manila.tests.fake_driver.FakeShareDriver')
    _safe_set_of_opts(conf, 'auth_strategy', 'noauth')
def _safe_set_of_opts(conf, *args, **kwargs):
try:
conf.set_default(*args, **kwargs)
except config.cfg.NoSuchOptError:
# Assumed that opt is not imported and not used
pass
|
agend/elliptics | refs/heads/master | tests/pytests/test_session_rwr.py | 1 | #!/usr/bin/env python
# =============================================================================
# 2013+ Copyright (c) Kirill Smorodinnikov <shaitkir@gmail.com>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# =============================================================================
import sys
sys.path.insert(0, "") # for running from cmake
import pytest
from conftest import simple_node, make_session
from server import server
import elliptics
def write_cas_converter(x):
    """Wrap *x* in double underscores; used as a write_cas transformer."""
    return '__{0}__'.format(x)
def check_write_results(results, number, data, session):
    """Assert that *results* holds *number* well-formed lookup entries."""
    assert len(results) == number
    expected_size = 48 + len(data)  # 48 is the size of data header
    for entry in results:
        assert type(entry) == elliptics.core.LookupResultEntry
        assert entry.size == expected_size
        assert entry.error.code == 0
        assert entry.error.message == ''
        assert entry.group_id in session.routes.get_address_groups(entry.address)
def checked_write(session, key, data):
    """Write *data* under *key* and validate one lookup entry per group."""
    write_results = session.write_data(key, data).get()
    check_write_results(write_results, len(session.groups), data, session)
def checked_bulk_write(session, datas, data):
    """Bulk-write *datas* and validate one entry per key per group."""
    write_results = session.bulk_write(datas).get()
    check_write_results(write_results,
                        len(session.groups) * len(datas), data, session)
def check_read_results(results, number, data, session):
    """Assert *results* has *number* entries, the first holding *data*."""
    assert len(results) == number
    first = results[0]
    assert type(first) == elliptics.core.ReadResultEntry
    assert first.data == data
    assert first.group_id in session.routes.get_address_groups(first.address)
    return results
def checked_read(session, key, data):
    """Read *key* and validate a single result entry holding *data*."""
    read_results = session.read_data(key).get()
    check_read_results(read_results, 1, data, session)
def checked_bulk_read(session, keys, data):
    """Bulk-read *keys* and validate every entry holds *data*."""
    read_results = session.bulk_read(keys).get()
    check_read_results(read_results, len(keys), data, session)
class TestSession:
    """Write/read integration tests for elliptics.Session.

    All tests run against the live test server provided by the ``server``
    and ``simple_node`` fixtures.
    """

    @pytest.mark.parametrize('key, data', [
        ('', ''),
        ('', 'data'),
        ('without_group_key_1', ''),
        ('without_group_key_2', 'data'),
        ("without_group_key_3", '309u8ryeygwvfgadd0u9g8y0ahbg8')])
    def test_write_without_groups(self, server, simple_node, key, data):
        """A write on a session with no groups set must fail with -6."""
        session = make_session(node=simple_node,
                               test_name='TestSession.test_write_without_groups')
        result = session.write_data(key, data)
        try:
            result.get()
        except elliptics.Error as e:
            assert e.message.message == 'insufficient results count due to'\
                ' checker: 0 of 1 (1): No such device or address: -6'
        else:
            pytest.fail('Failed: DID NOT RAISE')

    @pytest.mark.parametrize('key, data, exception', [
        #('', '', elliptics.core.NotFoundError),
        #('all_group_key_1', '', elliptics.core.NotFoundError),
        ('', 'data', None),
        ('all_group_key_2', 'data', None),
        ("all_group_key_3", '309u8ryeygwvfgadd0u9g8y0ahbg8',
         None)])
    def test_write_to_all_groups(self, server, simple_node,
                                 key, data, exception):
        """Write to every known group, expecting success or *exception*."""
        session = make_session(node=simple_node,
                               test_name='TestSession.test_write_to_all_groups')
        groups = session.routes.groups()
        session.groups = groups
        if exception:
            with pytest.raises(exception):
                checked_write(session, key, data)
        else:
            checked_write(session, key, data)

    def test_write_to_one_group(self, server, simple_node):
        """Data written to one group must be unreadable from the others."""
        data = 'some data'
        session = make_session(node=simple_node,
                               test_name='TestSession.test_write_to_one_group')
        for group in session.routes.groups():
            tmp_key = 'one_groups_key_' + str(group)
            session.groups = [group]
            checked_write(session, tmp_key, data)

            # Reading the same key from all *other* groups must fail.
            other_groups = list(session.routes.groups())
            other_groups.remove(group)
            session.groups = other_groups
            with pytest.raises(elliptics.NotFoundError):
                results = session.read_data(tmp_key).get()
                assert results == []

    def test_write_namespace(self, server, simple_node):
        """The same key in two namespaces holds independent data."""
        key = 'namespaced_key'
        ns1 = 'namesapce 1'
        ns2 = 'namespace 2'
        data1 = 'some data 1'
        data2 = 'unique data 2'
        session = make_session(node=simple_node,
                               test_name='TestSession.test_write_namespace')
        groups = session.routes.groups()
        session.groups = groups

        session.set_namespace(ns1)
        checked_write(session, key, data1)

        session.set_namespace(ns2)
        checked_write(session, key, data2)

        session.set_namespace(ns1)
        checked_read(session, key, data1)

        session.set_namespace(ns2)
        checked_read(session, key, data2)

    @pytest.mark.parametrize('key, data, offset, size', [
        ('diff key 1', 'init data', 0, 4),
        ('diff key 1', 'rewrite data', 2, 0)
        ])
    def test_different_writes(self, server, simple_node,
                              key, data, offset, size):
        # Placeholder: offset/size write variations are not implemented yet.
        pass

    def test_write_append(self, server, simple_node):
        """io_flags.append must concatenate with previously written data."""
        key1 = 'append_key_1'
        key2 = 'append_key_2'
        data1 = 'some data 1'
        data2 = 'some data 2'
        session = make_session(node=simple_node,
                               test_name='TestSession.test_write_append')
        groups = session.routes.groups()
        session.groups = groups

        # Plain write then read back.
        session.ioflags = elliptics.io_flags.default
        checked_write(session, key1, data1)
        checked_read(session, key1, data1)

        # With append set, a second write extends the record.
        session.ioflags |= elliptics.io_flags.append
        checked_write(session, key1, data2)
        checked_read(session, key1, data1 + data2)

        # Append to a key that did not exist behaves like a plain write.
        checked_write(session, key2, data1)
        checked_read(session, key2, data1)

        checked_write(session, key2, data2)
        checked_read(session, key2, data1 + data2)

    def test_bulk_write_read(self, server, simple_node):
        """bulk_write/bulk_read round-trip, with both tuple and dict input."""
        session = make_session(node=simple_node,
                               test_name='TestSession.test_bulk_write_read')
        groups = session.routes.groups()
        session.groups = groups

        data = 'data'

        keys = ['bulk key ' + str(i) for i in xrange(100)]
        checked_bulk_write(session, [(key, data) for key in keys], data)
        checked_bulk_read(session, keys, data)

        session.set_namespace('bulk additional namespace')
        checked_bulk_write(session, dict.fromkeys(keys, 'data'), data)
        checked_bulk_read(session, keys, data)

    def test_write_cas(self, server, simple_node):
        """write_cas with an explicit old checksum and with a converter."""
        session = make_session(node=simple_node,
                               test_name='TestSession.test_write_cas')
        groups = session.routes.groups()
        session.groups = groups

        key = 'cas key'
        data1 = 'data 1'
        data2 = 'data 2'

        checked_write(session, key, data1)
        checked_read(session, key, data1)

        # CAS against the checksum of the current content.
        results = session.write_cas(key, data2, session.transform(data1)).get()
        check_write_results(results, len(session.groups), data2, session)
        checked_read(session, key, data2)

        # CAS with a converter callable transforming the current content.
        ndata = write_cas_converter(data2)
        results = session.write_cas(key, write_cas_converter).get()
        check_write_results(results, len(session.groups), ndata, session)
        checked_read(session, key, ndata)

    def test_prepare_write_commit(self, server, simple_node):
        """write_prepare/write_plain/write_commit assemble one record."""
        session = make_session(node=simple_node,
                               test_name='TestSession.test_prepare_write_commit')
        session.groups = [session.routes.groups()[0]]
        routes = session.routes.filter_by_groups(session.groups)
        pos, records, addr, back = (0, 0, None, 0)
        # Pick a backend whose id range can address more records than fit
        # in a single blob (records_in_blob * 2).
        for id, address, backend in routes.get_unique_routes():
            ranges = routes.get_address_backend_ranges(address, backend)
            statistics = session.monitor_stat(address, elliptics.monitor_stat_categories.backend).get()[0].statistics
            session._node._logger.log(elliptics.log_level.debug, "monitor: stat: {0}".format(statistics))
            records_in_blob = statistics['backends']['{0}'.format(backend)]['backend']['config']['records_in_blob']
            for i, (begin, end) in enumerate(ranges):
                if int(str(end), 16) - int(str(begin), 16) > records_in_blob * 2:
                    pos = int(str(begin), 16)
                    records = records_in_blob * 2
                    addr, back = address, backend
        assert pos
        assert records

        # Write enough records to that backend to fill multiple blobs.
        for i in range(pos, pos + records):
            r = session.write_data(elliptics.Id(format(i, 'x')), 'data').get()
            assert len(r) == 1
            assert r[0].address == addr

        # Now build one record in three steps: prepare reserves the space,
        # plain fills the middle, commit writes the tail and finalizes.
        pos_id = elliptics.Id(format(i, 'x'))
        prepare_size = 1<<10
        data = 'a' + 'b' * (prepare_size - 2) + 'c'

        session.write_prepare(pos_id, data[0], 0, 1<<10).get()
        session.write_plain(pos_id, data[1:-1], 1).get()
        session.write_commit(pos_id, data[-1], prepare_size - 1, prepare_size).get()

        assert session.read_data(pos_id).get()[0].data == data
|
longde123/MultiversePlatform | refs/heads/master | lib/IPCE/Lib/email/MIMEMessage.py | 10 | # Copyright (C) 2001-2004 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Class representing message/* MIME documents."""
from email import Message
from email.MIMENonMultipart import MIMENonMultipart
class MIMEMessage(MIMENonMultipart):
    """A MIME document whose payload is itself a message (message/*)."""

    def __init__(self, _msg, _subtype='rfc822'):
        """Create a message/* type MIME document.

        _msg must be an instance of Message (or a subclass of it); any
        other type raises TypeError.

        _subtype names the subtype of the contained message and defaults
        to "rfc822" -- the name the MIME standard uses, even though
        RFC 2822 has since superseded that RFC.
        """
        MIMENonMultipart.__init__(self, 'message', _subtype)
        if not isinstance(_msg, Message.Message):
            raise TypeError('Argument is not an instance of Message')
        # Attach via the base Message class directly: going through our
        # own attach() would raise an exception.
        Message.Message.attach(self, _msg)
        # message/* parts default to message/rfc822 content.
        self.set_default_type('message/rfc822')
|
mintuhouse/shotfactory | refs/heads/master | setup.py | 1 | # browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <johann@browsershots.org>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Installation script for use with distutils.
"""
__revision__ = "$Rev: 3047 $"
__date__ = "$Date: 2008-09-03 03:11:17 +0530 (Wed, 03 Sep 2008) $"
__author__ = "$Author: johann $"
from distutils.core import setup
import sys
# Arguments passed to distutils setup(); extended below when building
# a Windows executable with py2exe.
kwargs = {
    'name': 'ShotFactory',
    'version': '0.4.0',
    'description': 'Screenshot factory for browsershots.org',
    'author': 'Johann C. Rocholl',
    'author_email': 'johann@browsershots.org',
    'url': 'http://v04.browsershots.org/',
    'packages': [
        'shotfactory04',
        'shotfactory04.gui',
        'shotfactory04.gui.darwin',
        'shotfactory04.gui.linux',
        'shotfactory04.gui.windows',
        'shotfactory04.image',
        'shotfactory04.servers',
    ],
    'scripts': [
        'shotfactory.py',
        'browsershot.py',
        'ppmoffset.py',
    ],
}

if 'py2exe' in sys.argv:
    import py2exe

    # modulefinder can't handle runtime changes to __path__,
    # but win32com uses them
    import modulefinder
    import win32com
    for path in win32com.__path__[1:]:
        modulefinder.AddPackagePath("win32com", path)
    __import__("win32com.shell")
    m = sys.modules["win32com.shell"]
    for path in m.__path__[1:]:
        modulefinder.AddPackagePath("win32com.shell", path)

    # py2exe configuration
    kwargs['console'] = [{
        'script': 'shotfactory.py',
        'icon_resources': [(1, 'favicon.ico')],
    }]
    kwargs['options'] = {
        'py2exe': {
            # Browser GUI modules are loaded dynamically at runtime, so
            # py2exe cannot discover them; list them explicitly.
            'includes': ','.join([
                'shotfactory04.gui.windows.firefox',
                'shotfactory04.gui.windows.shiretoko',
                'shotfactory04.gui.windows.chrome',
                'shotfactory04.gui.windows.flock',
                'shotfactory04.gui.windows.k_meleon',
                'shotfactory04.gui.windows.msie',
                'shotfactory04.gui.windows.minefield',
                'shotfactory04.gui.windows.navigator',
                'shotfactory04.gui.windows.opera',
                'shotfactory04.gui.windows.safari',
                'shotfactory04.gui.windows.seamonkey',
            ]),
            'dist_dir': 'bin',
        }
    }

setup(**kwargs)
|
kdwink/intellij-community | refs/heads/master | plugins/hg4idea/testData/bin/mercurial/graphmod.py | 93 | # Revision graph generator for Mercurial
#
# Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
# Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""supports walking the history as DAGs suitable for graphical output
The most basic format we use is that of::
(id, type, data, [parentids])
The node and parent ids are arbitrary integers which identify a node in the
context of the graph returned. Type is a constant specifying the node type.
Data depends on type.
"""
from mercurial.node import nullrev
import util
# Node type constant emitted by the DAG walkers below.
CHANGESET = 'C'
def dagwalker(repo, revs):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks through revisions (which should be ordered
    from bigger to lower). It returns a tuple for each node. The node and parent
    ids are arbitrary integers which identify a node in the context of the graph
    returned.
    """
    if not revs:
        return
    cl = repo.changelog
    lowestrev = min(revs)
    gpcache = {}  # filtered parent rev -> grandparent() result, memoized
    knownrevs = set(revs)
    for rev in revs:
        ctx = repo[rev]
        # parents that are themselves in the displayed set: link directly
        parents = sorted(set([p.rev() for p in ctx.parents()
                              if p.rev() in knownrevs]))
        # "missing" parents: real parents that were filtered out of the view
        mpars = [p.rev() for p in ctx.parents() if
                 p.rev() != nullrev and p.rev() not in parents]
        for mpar in mpars:
            gp = gpcache.get(mpar)
            if gp is None:
                gp = gpcache[mpar] = grandparent(cl, lowestrev, revs, mpar)
            if not gp:
                # no displayed ancestor found: keep the edge to the
                # filtered parent itself
                parents.append(mpar)
            else:
                # link to the nearest displayed ancestors instead
                parents.extend(g for g in gp if g not in parents)
        yield (ctx.rev(), CHANGESET, ctx, parents)
def nodes(repo, nodes):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    Walks exactly the given nodes; only parents that are themselves part
    of *nodes* are reported.
    """
    wanted = set(nodes)
    for node in nodes:
        ctx = repo[node]
        parentrevs = sorted(p.rev() for p in ctx.parents()
                            if p.node() in wanted)
        yield (ctx.rev(), CHANGESET, ctx, parentrevs)
def colored(dag, repo):
    """annotates a DAG with colored edge information

    For each DAG node this function emits tuples::

      (id, type, data, (col, color), [(col, nextcol, color)])

    with the following new elements:

      - Tuple (col, color) with column and color index for the current node
      - A list of tuples indicating the edges between the current node and its
        parents.
    """
    seen = []       # revs with an open edge, ordered by display column
    colors = {}     # rev -> color index of its pending edge
    newcolor = 1
    config = {}     # branch name -> {'width': int, 'color': str}

    # Read per-branch edge styling from the [graph] config section.
    for key, val in repo.ui.configitems('graph'):
        if '.' in key:
            branch, setting = key.rsplit('.', 1)
            # Validation
            if setting == "width" and val.isdigit():
                config.setdefault(branch, {})[setting] = int(val)
            elif setting == "color" and val.isalnum():
                config.setdefault(branch, {})[setting] = val

    if config:
        getconf = util.lrucachefunc(
            lambda rev: config.get(repo[rev].branch(), {}))
    else:
        getconf = lambda rev: {}

    for (cur, type, data, parents) in dag:

        # Compute seen and next
        if cur not in seen:
            seen.append(cur) # new head
            colors[cur] = newcolor
            newcolor += 1

        col = seen.index(cur)
        color = colors.pop(cur)
        next = seen[:]

        # Add parents to next
        addparents = [p for p in parents if p not in next]
        next[col:col + 1] = addparents

        # Set colors for the parents: the first parent inherits the current
        # color, further parents get fresh colors.
        for i, p in enumerate(addparents):
            if not i:
                colors[p] = color
            else:
                colors[p] = newcolor
                newcolor += 1

        # Add edges to the graph
        edges = []
        for ecol, eid in enumerate(seen):
            if eid in next:
                bconf = getconf(eid)
                edges.append((
                    ecol, next.index(eid), colors[eid],
                    bconf.get('width', -1),
                    bconf.get('color', '')))
            elif eid == cur:
                for p in parents:
                    bconf = getconf(p)
                    edges.append((
                        ecol, next.index(p), color,
                        bconf.get('width', -1),
                        bconf.get('color', '')))

        # Yield and move on
        yield (cur, type, data, (col, color), edges)
        seen = next
def grandparent(cl, lowestrev, roots, head):
    """Return all ancestors of head in roots which revision is
    greater or equal to lowestrev.

    cl is the changelog (only its parentrevs() method is used).  The walk
    stops at revisions below max(nullrev, lowestrev) and does not descend
    past revisions that are themselves in roots.
    """
    pending = set([head])
    seen = set()
    kept = set()
    llowestrev = max(nullrev, lowestrev)
    while pending:
        r = pending.pop()
        if r >= llowestrev and r not in seen:
            if r in roots:
                kept.add(r)
            else:
                # not a root: keep walking towards its parents
                # (update() takes the tuple directly; no need to copy it
                # into an intermediate list first)
                pending.update(cl.parentrevs(r))
            seen.add(r)
    return sorted(kept)
def asciiedges(type, char, lines, seen, rev, parents):
    """adds edge info to changelog DAG walk suitable for ascii()"""
    if rev not in seen:
        seen.append(rev)
    nodeidx = seen.index(rev)

    # split parents into ones already tracked in *seen* and brand new ones
    knownparents = []
    newparents = []
    for parent in parents:
        if parent in seen:
            knownparents.append(parent)
        else:
            newparents.append(parent)

    ncols = len(seen)
    nextseen = seen[:]
    nextseen[nodeidx:nodeidx + 1] = newparents
    edges = [(nodeidx, nextseen.index(p)) for p in knownparents if p != nullrev]

    while len(newparents) > 2:
        # ascii() only knows how to add or remove a single column between two
        # calls. Nodes with more than two parents break this constraint so we
        # introduce intermediate expansion lines to grow the active node list
        # slowly.
        edges.append((nodeidx, nodeidx))
        edges.append((nodeidx, nodeidx + 1))
        nmorecols = 1
        yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
        char = '\\'
        lines = []
        nodeidx += 1
        ncols += 1
        edges = []
        del newparents[0]

    if len(newparents) > 0:
        edges.append((nodeidx, nodeidx))
    if len(newparents) > 1:
        edges.append((nodeidx, nodeidx + 1))
    nmorecols = len(nextseen) - ncols
    # mutate the caller's *seen* in place so state carries to the next call
    seen[:] = nextseen
    yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
def _fixlongrightedges(edges):
for (i, (start, end)) in enumerate(edges):
if end > start:
edges[i] = (start, end + 1)
def _getnodelineedgestail(
node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
# Still going in the same non-vertical direction.
if n_columns_diff == -1:
start = max(node_index + 1, p_node_index)
tail = ["|", " "] * (start - node_index - 1)
tail.extend(["/", " "] * (n_columns - start))
return tail
else:
return ["\\", " "] * (n_columns - node_index - 1)
else:
return ["|", " "] * (n_columns - node_index - 1)
def _drawedges(edges, nodeline, interline):
    # Mutates *nodeline* and *interline* in place.  Each cell pair occupies
    # two slots, so column c maps to list index 2*c.  Adjacent-column edges
    # are drawn as "/" or "\" in the interline; same-column edges as "|";
    # edges jumping more than one column get a "+" junction joined by "-"
    # dashes on the node line itself.
    for (start, end) in edges:
        if start == end + 1:
            # parent one column to the left
            interline[2 * end + 1] = "/"
        elif start == end - 1:
            # parent one column to the right
            interline[2 * start + 1] = "\\"
        elif start == end:
            interline[2 * start] = "|"
        else:
            # long horizontal edge; skip if the target is off the line
            if 2 * end >= len(nodeline):
                continue
            nodeline[2 * end] = "+"
            if start > end:
                (start, end) = (end, start)
            for i in range(2 * start + 1, 2 * end):
                if nodeline[i] != "+":
                    nodeline[i] = "-"
def _getpaddingline(ni, n_columns, edges):
line = []
line.extend(["|", " "] * ni)
if (ni, ni - 1) in edges or (ni, ni) in edges:
# (ni, ni - 1) (ni, ni)
# | | | | | | | |
# +---o | | o---+
# | | c | | c | |
# | |/ / | |/ /
# | | | | | |
c = "|"
else:
c = " "
line.extend([c, " "])
line.extend(["|", " "] * (n_columns - ni - 1))
return line
def asciistate():
    """returns the initial value for the "state" argument to ascii()"""
    # [column diff of the previous node, column index of the previous node]
    return [0] * 2
def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data, usually 'C' for changesets.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.
    """
    idx, edges, ncols, coldiff = coldata
    assert -2 < coldiff < 2
    if coldiff == -1:
        # Transform
        #
        #     | | |        | | |
        #     o | |  into  o---+
        #     |X /         |/ /
        #     | |          | |
        _fixlongrightedges(edges)
    # add_padding_line says whether to rewrite
    #
    #     | | | |        | | | |
    #     | o---+  into  | o---+
    #     |  / /         |   | |  # <--- padding line
    #     o | |          |  / /
    #                    o | |
    add_padding_line = (len(text) > 2 and coldiff == -1 and
                        [x for (x, y) in edges if x + 1 < y])
    # fix_nodeline_tail says whether to rewrite
    #
    #     | | o | |        | | o | |
    #     | | |/ /         | | |/ /
    #     | o | |    into  | o / /   # <--- fixed nodeline tail
    #     | |/ /           | |/ /
    #     o | |            o | |
    fix_nodeline_tail = len(text) <= 2 and not add_padding_line
    # nodeline is the line containing the node character (typically o)
    nodeline = ["|", " "] * idx
    nodeline.extend([char, " "])
    nodeline.extend(
        _getnodelineedgestail(idx, state[1], ncols, coldiff,
                              state[0], fix_nodeline_tail))
    # shift_interline is the line containing the non-vertical
    # edges between this entry and the next
    shift_interline = ["|", " "] * idx
    if coldiff == -1:
        n_spaces = 1
        edge_ch = "/"
    elif coldiff == 0:
        n_spaces = 2
        edge_ch = "|"
    else:
        n_spaces = 3
        edge_ch = "\\"
    shift_interline.extend(n_spaces * [" "])
    shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))
    # draw edges from the current node to its parents
    _drawedges(edges, nodeline, shift_interline)
    # lines is the list of all graph lines to print
    lines = [nodeline]
    if add_padding_line:
        lines.append(_getpaddingline(idx, ncols, edges))
    lines.append(shift_interline)
    # make sure that there are as many graph lines as there are
    # log strings
    while len(text) < len(lines):
        text.append("")
    if len(lines) < len(text):
        extra_interline = ["|", " "] * (ncols + coldiff)
        while len(lines) < len(text):
            lines.append(extra_interline)
    # print lines
    indentation_level = max(ncols, ncols + coldiff)
    for (line, logstr) in zip(lines, text):
        # left-justify the graph cells, append the log text, strip padding
        ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
        ui.write(ln.rstrip() + '\n')
    # ... and start over: remember this node's diff and column for next call
    state[0] = coldiff
    state[1] = idx
|
xlhtc007/blaze | refs/heads/master | blaze/tests/test_partition.py | 17 | from blaze.partition import *
from blaze.expr import shape
import numpy as np
x = np.arange(24).reshape(4, 6)
def eq(a, b):
    """Equality that copes with numpy's elementwise comparison results.

    Plain values compare with ``==``; when either side is an ndarray the
    elementwise comparison is collapsed with ``.all()``.
    """
    comparison = a == b
    if isinstance(comparison, bool):
        return comparison
    if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
        return (a == b).all()
    return a == b
def test_partition_get():
    # Chunk-aligned reads through partition_get must match plain numpy
    # slicing of the module-level fixture `x`.
    assert eq(partition_get(x, (0, slice(0, None)), chunksize=(1, 6)),
              x[0, :])
    assert eq(partition_get(x, (slice(0, None), 0), chunksize=(4, 1)),
              x[:, 0])
    assert eq(partition_get(x, (slice(2, 4), slice(0, 2)), chunksize=(2, 2)),
              x[2:4, 0:2])
def test_partition_set():
    # Writing a 2x2 block of ones must mutate exactly that region in place.
    x = np.arange(24).reshape(4, 6)
    partition_set(x,
                  (slice(0, 2), slice(0, 2)), np.array([[1, 1], [1, 1]]),
                  chunksize=(2, 2))
    assert (x[:2, :2] == 1).all()
def test_partition_set_1d():
    # keepdims=False lets a column vector be written into a 1-d column slice.
    x = np.arange(24).reshape(4, 6)
    partition_set(x,
                  (slice(0, 4), 0), np.array([[1], [1], [1], [1]]),
                  chunksize=(4, 1),
                  keepdims=False)
    assert (x[:4, 0] == 1).all()
def test_partitions():
    # partitions() tiles the fixture `x` (4x6): by rows, by columns, and by
    # 2x3 blocks in row-major order.
    assert list(partitions(x, chunksize=(1, 6))) == \
        [(i, slice(0, 6)) for i in range(4)]
    assert list(partitions(x, chunksize=(4, 1))) == \
        [(slice(0, 4), i) for i in range(6)]
    assert list(partitions(x, chunksize=(2, 3))) == [
        (slice(0, 2), slice(0, 3)), (slice(0, 2), slice(3, 6)),
        (slice(2, 4), slice(0, 3)), (slice(2, 4), slice(3, 6))]
def dont_test_partitions_flat():
    # Deliberately disabled: the name does not start with "test", so the
    # runner skips it.  Currently identical to the block case above.
    assert list(partitions(x, chunksize=(2, 3))) == [
        (slice(0, 2), slice(0, 3)), (slice(0, 2), slice(3, 6)),
        (slice(2, 4), slice(0, 3)), (slice(2, 4), slice(3, 6))]
def test_uneven_partitions():
    # Chunk sizes that do not divide the shape evenly produce truncated
    # edge chunks rather than overflowing the array.
    x = np.arange(10*12).reshape(10, 12)
    parts = list(partitions(x, chunksize=(7, 7)))
    assert len(parts) == 2 * 2
    assert parts == [(slice(0, 7), slice(0, 7)), (slice(0, 7), slice(7, 12)),
                     (slice(7, 10), slice(0, 7)), (slice(7, 10), slice(7, 12))]
    x = np.arange(20*24).reshape(20, 24)
    parts = list(partitions(x, chunksize=(7, 7)))
    # NOTE(review): the 20x24 result above is never asserted on -- looks
    # like assertions are missing here; confirm the intended expectations.
def test_3d_partitions():
    # Halving each of the three axes (4/2, 4/2, 6/3) yields 2*2*2 chunks.
    x = np.arange(4*4*6).reshape(4, 4, 6)
    parts = list(partitions(x, chunksize=(2, 2, 3)))
    assert len(parts) == 2 * 2 * 2
|
flashinnl/vimrc | refs/heads/master | plugin/editorconfig-core-py/editorconfig/fnmatch.py | 1 | """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
Based on code from fnmatch.py file distributed with Python 2.6.
Licensed under PSF License (see LICENSE.txt file).
Changes to original fnmatch module:
- translate function supports ``*`` and ``**`` similarly to fnmatch C library
"""
import os
import re
__all__ = ["fnmatch", "fnmatchcase", "translate"]
_cache = {}
def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Pattern syntax (Unix shell style):

    - ``*``          matches everything except the path separator
    - ``**``         matches everything
    - ``?``          matches any single character
    - ``[seq]``      matches any character in seq
    - ``[!seq]``     matches any character not in seq
    - ``{s1,s2,s3}`` matches any of the comma-separated strings

    An initial period in FILENAME is not special.  The filename is
    normalized and its OS path separators rewritten to ``/`` before
    matching; call fnmatchcase() directly to skip that normalization.
    """
    normalized = os.path.normpath(name).replace(os.sep, "/")
    return fnmatchcase(normalized, pat)
def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.  Compiled patterns are memoized in the module-level
    ``_cache`` dict, keyed by the raw pattern string.
    """
    # idiomatic membership test (was the awkward `if not pat in _cache`)
    if pat not in _cache:
        res = translate(pat)
        _cache[pat] = re.compile(res)
    return _cache[pat].match(name) is not None
def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.

    The returned expression is anchored with ``\\Z`` and carries the
    MULTILINE and DOTALL flags.  The inline flag group is emitted at the
    *front* of the expression: a trailing ``(?ms)`` has been a
    DeprecationWarning since Python 3.6 and raises ``re.error`` on
    Python 3.11+, so the old ``... + '\\Z(?ms)'`` suffix was a latent
    crash on modern interpreters.  The compiled behavior is unchanged.
    """
    i, n = 0, len(pat)
    res = ''
    escaped = False  # tracks a pending backslash inside [..] / {..} scans
    while i < n:
        c = pat[i]
        i = i + 1
        if c == '*':
            j = i
            if j < n and pat[j] == '*':
                # '**' crosses path separators; '*' does not
                res = res + '.*'
            else:
                res = res + '[^/]*'
        elif c == '?':
            res = res + '.'
        elif c == '[':
            # scan for the closing ']', honoring '[!...]' and a literal
            # ']' in first position
            j = i
            if j < n and pat[j] == '!':
                j = j + 1
            if j < n and pat[j] == ']':
                j = j + 1
            while j < n and (pat[j] != ']' or escaped):
                escaped = pat[j] == '\\' and not escaped
                j = j + 1
            if j >= n:
                # unterminated class: treat '[' literally
                res = res + '\\['
            else:
                stuff = pat[i:j]
                i = j + 1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            # scan comma-separated alternatives up to the closing '}'
            j = i
            groups = []
            while j < n and pat[j] != '}':
                k = j
                while k < n and (pat[k] not in (',', '}') or escaped):
                    escaped = pat[k] == '\\' and not escaped
                    k = k + 1
                group = pat[j:k]
                # unescape backslash-escaped separators inside the group
                for char in (',', '}', '\\'):
                    group = group.replace('\\' + char, char)
                groups.append(group)
                j = k
                if j < n and pat[j] == ',':
                    j = j + 1
                    if j < n and pat[j] == '}':
                        # trailing comma: allow the empty alternative
                        groups.append('')
            if j >= n or len(groups) < 2:
                # unterminated or single-item braces: treat '{' literally
                res = res + '\\{'
            else:
                res = '%s(%s)' % (res, '|'.join(map(re.escape, groups)))
                i = j + 1
        else:
            res = res + re.escape(c)
    return r'(?ms)' + res + r'\Z'
|
idea4bsd/idea4bsd | refs/heads/idea4bsd-master | python/testData/refactoring/unwrap/elseInIfUnwrap_after.py | 83 | z = 3
|
azide0x37/modocDB | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhebrewmodel.py | 2762 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
# Language-model descriptor for Hebrew text in the windows-1255 encoding,
# tying together the tables and coverage statistics defined above.
Win1255HebrewModel = {
    'charToOrderMap': win1255_CharToOrderMap,  # byte value -> frequency-order index
    'precedenceMatrix': HebrewLangModel,  # bigram sequence likelihood classes
    'mTypicalPositiveRatio': 0.984004,  # share covered by the first 512 sequences (see stats above)
    'keepEnglishLetter': False,
    'charsetName': "windows-1255"
}
# flake8: noqa
|
bentilly/heroes | refs/heads/development | lib/flask/testsuite/test_apps/config_package_app/__init__.py | 1257 | import os
import flask
# Absolute directory containing this package -- presumably used to resolve
# config file paths relative to the package; confirm against the test suite.
here = os.path.abspath(os.path.dirname(__file__))
# Minimal Flask application instance exposed by this test-fixture package.
app = flask.Flask(__name__)
|
alon/servo | refs/heads/master | tests/wpt/css-tests/tools/py/py/_log/log.py | 218 | """
basic logging functionality based on a producer/consumer scheme.
XXX implement this API: (maybe put it into slogger.py?)
log = Logger(
info=py.log.STDOUT,
debug=py.log.STDOUT,
command=None)
log.info("hello", "world")
log.command("hello", "world")
log = Logger(info=Logger(something=...),
debug=py.log.STDOUT,
command=None)
"""
import py, sys
class Message(object):
    """A single log record: the producer's keyword tuple plus its payload."""

    def __init__(self, keywords, args):
        self.keywords = keywords
        self.args = args

    def content(self):
        """Payload rendered as a space-separated string."""
        return " ".join(str(arg) for arg in self.args)

    def prefix(self):
        """Bracketed keyword prefix, e.g. ``"[x:y] "``."""
        return "[%s] " % (":".join(self.keywords))

    def __str__(self):
        return self.prefix() + self.content()
class Producer(object):
    """ (deprecated) Log producer API which sends messages to be logged
    to a 'consumer' object, which then prints them to stdout,
    stderr, files, etc. Used extensively by PyPy-1.1.
    """
    Message = Message # to allow later customization
    keywords2consumer = {}

    def __init__(self, keywords, keywordmapper=None, **kw):
        # keywords may be given as a whitespace-separated string
        if hasattr(keywords, 'split'):
            keywords = tuple(keywords.split())
        self._keywords = keywords
        if keywordmapper is None:
            # fall back to the module-wide shared mapper
            keywordmapper = default_keywordmapper
        self._keywordmapper = keywordmapper

    def __repr__(self):
        return "<py.log.Producer %s>" % ":".join(self._keywords)

    def __getattr__(self, name):
        # Attribute access spawns (and caches) a sub-producer whose keyword
        # tuple is extended by the attribute name, e.g. log.debug.
        # Underscored names are rejected so dunder lookups fail normally.
        if '_' in name:
            raise AttributeError(name)
        producer = self.__class__(self._keywords + (name,))
        setattr(self, name, producer)
        return producer

    def __call__(self, *args):
        """ write a message to the appropriate consumer(s) """
        func = self._keywordmapper.getconsumer(self._keywords)
        if func is not None:
            func(self.Message(self._keywords, args))
class KeywordMapper:
    """Maps keyword tuples to consumers using longest-prefix matching."""

    def __init__(self):
        self.keywords2consumer = {}

    def getstate(self):
        # shallow copy so callers can snapshot and later restore the mapping
        return self.keywords2consumer.copy()

    def setstate(self, state):
        self.keywords2consumer.clear()
        self.keywords2consumer.update(state)

    def getconsumer(self, keywords):
        """ return a consumer matching the given keywords.
        tries to find the most suitable consumer by walking, starting from
        the back, the list of keywords, the first consumer matching a
        keyword is returned (falling back to py.log.default)
        """
        for i in range(len(keywords), 0, -1):
            try:
                return self.keywords2consumer[keywords[:i]]
            except KeyError:
                continue
        return self.keywords2consumer.get('default', default_consumer)

    def setconsumer(self, keywords, consumer):
        """ set a consumer for a set of keywords. """
        # normalize to tuples
        if isinstance(keywords, str):
            keywords = tuple(filter(None, keywords.split()))
        elif hasattr(keywords, '_keywords'):
            # a Producer was passed: take its keyword tuple
            keywords = keywords._keywords
        elif not isinstance(keywords, tuple):
            raise TypeError("key %r is not a string or tuple" % (keywords,))
        # non-callable file-like objects are wrapped in a File consumer
        if consumer is not None and not py.builtin.callable(consumer):
            if not hasattr(consumer, 'write'):
                raise TypeError(
                    "%r should be None, callable or file-like" % (consumer,))
            consumer = File(consumer)
        self.keywords2consumer[keywords] = consumer
def default_consumer(msg):
    """the default consumer: writes the message as one line to sys.stderr"""
    sys.stderr.write("%s\n" % (msg,))
# Shared module-level mapper used by Producer when none is supplied.
default_keywordmapper = KeywordMapper()

def setconsumer(keywords, consumer):
    # module-level convenience wrapper around the shared KeywordMapper
    default_keywordmapper.setconsumer(keywords, consumer)

def setstate(state):
    # replace the shared keyword->consumer mapping wholesale
    default_keywordmapper.setstate(state)

def getstate():
    # snapshot of the shared keyword->consumer mapping
    return default_keywordmapper.getstate()
#
# Consumers
#
class File(object):
    """log consumer wrapping a file(-like) object"""

    def __init__(self, f):
        assert hasattr(f, 'write')
        #assert isinstance(f, file) or not hasattr(f, 'open')
        self._file = f

    def __call__(self, msg):
        """append one line for *msg*, flushing when the file supports it"""
        self._file.write("%s\n" % (msg,))
        flush = getattr(self._file, 'flush', None)
        if flush is not None:
            flush()
class Path(object):
    """log consumer that opens and writes to a Path"""

    def __init__(self, filename, append=False,
                 delayed_create=False, buffering=False):
        self._append = append
        self._filename = str(filename)
        self._buffering = buffering
        # delayed_create postpones file creation until the first message
        if not delayed_create:
            self._openfile()

    def _openfile(self):
        self._file = open(self._filename, 'a' if self._append else 'w')

    def __call__(self, msg):
        """write a message to the log"""
        if not hasattr(self, "_file"):
            self._openfile()
        self._file.write("%s\n" % (msg,))
        if not self._buffering:
            self._file.flush()
def STDOUT(msg):
    """ consumer that writes to sys.stdout """
    sys.stdout.write("%s\n" % (msg,))
def STDERR(msg):
    """ consumer that writes to sys.stderr """
    sys.stderr.write("%s\n" % (msg,))
class Syslog:
    """ consumer that writes to the syslog daemon """

    def __init__(self, priority = None):
        # default to informational priority when none is given
        if priority is None:
            priority = self.LOG_INFO
        self.priority = priority

    def __call__(self, msg):
        """ write a message to the log """
        py.std.syslog.syslog(self.priority, str(msg))

# Copy the LOG_* priority constants from the syslog module onto Syslog so
# they can be referenced as e.g. Syslog.LOG_ERR; constants missing on the
# current platform are skipped silently.
for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
    _prio = "LOG_" + _prio
    try:
        setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
    except AttributeError:
        pass
|
thekovinc/ds-for-wall-street | refs/heads/master | tsanalysis/tsutil.py | 4 | import numpy as np
from sparkts.timeseriesrdd import time_series_rdd_from_pandas_series_rdd
def count_nans(vec):
    """Return the number of NaN entries in *vec* (any numpy array-like)."""
    nan_mask = np.isnan(vec)
    return np.count_nonzero(nan_mask)
def lead_and_lag(lead, lag, series):
    """Given a series, return a 2D array with lead and lag terms.

    The returned array with have (lead + lag + 1) columns and
    (len(series) - lead - lag) rows.

    Parameters
    ----------
    lead - The number of lead terms to include in the result.
    lag - The number of lag terms to include in the result.
    series - The series to lead and lag.
    """
    # assumes lead >= 1 and lag >= 0 -- series[lag:-lead] would be empty
    # for lead == 0; confirm against callers if that case is ever needed.
    series = np.transpose(series)
    mat = np.zeros([len(series) - lead - lag, lead + lag + 1])
    # Column layout: col 0 is the current value, cols 1..lead hold the lead
    # terms (col `lead` being the furthest ahead), cols lead+1..lead+lag
    # hold the lag terms.
    mat[:, 0] = series[lag:-lead]
    mat[:, lead] = series[lag + lead:]
    # `range` instead of the Python-2-only `xrange`, which made this module
    # crash with NameError on Python 3; `range` behaves identically here on
    # both interpreters.
    for i in range(1, lead):
        mat[:, i] = series[lag + i:-lead + i]
    for i in range(1, lag + 1):
        mat[:, i + lead] = series[lag - i:-lead - i]
    return mat
def sample_daily(tsrdd, how):
    """Accepts a TimeSeriesRDD with granularity finer than daily and rolls it up.

    Parameters
    ----------
    tsrdd - A TimeSeriesRDD.
    how - The sampling method.

    Returns
    -------
    A condensed TimeSeriesRDD.
    """
    # Round-trips through pandas: each series is resampled to daily ('D')
    # granularity with the given aggregation, then the result is rebuilt
    # into a TimeSeriesRDD on the same SparkContext.
    return time_series_rdd_from_pandas_series_rdd( \
        tsrdd.to_pandas_series_rdd() \
        .mapValues(lambda x: x.resample('D', how=how)), tsrdd.ctx)
|
jgriff5/yelp-api | refs/heads/master | v2/python/sample.py | 35 | # -*- coding: utf-8 -*-
"""
Yelp API v2.0 code sample.
This program demonstrates the capability of the Yelp API version 2.0
by using the Search API to query for businesses by a search term and location,
and the Business API to query additional information about the top result
from the search query.
Please refer to http://www.yelp.com/developers/documentation for the API documentation.
This program requires the Python oauth2 library, which you can install via:
`pip install -r requirements.txt`.
Sample usage of the program:
`python sample.py --term="bars" --location="San Francisco, CA"`
"""
import argparse
import json
import pprint
import sys
import urllib
import urllib2
import oauth2
# Yelp API v2 endpoint configuration.
API_HOST = 'api.yelp.com'
# Defaults used when the corresponding command-line flags are omitted.
DEFAULT_TERM = 'dinner'
DEFAULT_LOCATION = 'San Francisco, CA'
# Maximum number of businesses requested from the Search API.
SEARCH_LIMIT = 3
SEARCH_PATH = '/v2/search/'
BUSINESS_PATH = '/v2/business/'

# OAuth credential placeholders that must be filled in by users.
CONSUMER_KEY = None
CONSUMER_SECRET = None
TOKEN = None
TOKEN_SECRET = None
def request(host, path, url_params=None):
    """Prepares OAuth authentication and sends the request to the API.

    Args:
        host (str): The domain host of the API.
        path (str): The path of the API after the domain.
        url_params (dict): An optional set of query parameters in the request.

    Returns:
        dict: The JSON response from the request.

    Raises:
        urllib2.HTTPError: An error occurs from the HTTP request.
    """
    url_params = url_params or {}
    # Percent-encode the path; query parameters are attached by oauth2 below.
    url = 'http://{0}{1}?'.format(host, urllib.quote(path.encode('utf8')))

    # OAuth 1.0a signing with the module-level credentials (CONSUMER_KEY /
    # CONSUMER_SECRET and TOKEN / TOKEN_SECRET must be filled in by the user).
    consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
    oauth_request = oauth2.Request(method="GET", url=url, parameters=url_params)

    oauth_request.update(
        {
            'oauth_nonce': oauth2.generate_nonce(),
            'oauth_timestamp': oauth2.generate_timestamp(),
            'oauth_token': TOKEN,
            'oauth_consumer_key': CONSUMER_KEY
        }
    )
    token = oauth2.Token(TOKEN, TOKEN_SECRET)
    oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)
    # The signature and OAuth parameters are carried in the query string.
    signed_url = oauth_request.to_url()

    print u'Querying {0} ...'.format(url)

    conn = urllib2.urlopen(signed_url, None)
    try:
        response = json.loads(conn.read())
    finally:
        # Always release the connection, even if the body is not valid JSON.
        conn.close()

    return response
def search(term, location):
    """Query the Search API by a search term and location.

    Args:
        term (str): The search term passed to the API.
        location (str): The search location passed to the API.

    Returns:
        dict: The JSON response from the request.
    """
    # Spaces are '+'-encoded, following the Yelp v2 API examples.
    params = {
        'term': term.replace(' ', '+'),
        'location': location.replace(' ', '+'),
        'limit': SEARCH_LIMIT,
    }
    return request(API_HOST, SEARCH_PATH, url_params=params)
def get_business(business_id):
    """Query the Business API by a business ID.

    Args:
        business_id (str): The ID of the business to query.

    Returns:
        dict: The JSON response from the request.
    """
    # The Business API takes the ID as part of the path, not a query param.
    return request(API_HOST, BUSINESS_PATH + business_id)
def query_api(term, location):
    """Queries the API by the input values from the user.

    Runs a search, then fetches full details for the top hit via the
    Business API and pretty-prints the response.

    Args:
        term (str): The search term to query.
        location (str): The location of the business to query.
    """
    response = search(term, location)

    businesses = response.get('businesses')

    if not businesses:
        print u'No businesses for {0} in {1} found.'.format(term, location)
        return

    # Drill into the first (top-ranked) search hit.
    business_id = businesses[0]['id']

    print u'{0} businesses found, querying business info for the top result "{1}" ...'.format(
        len(businesses),
        business_id
    )

    response = get_business(business_id)

    print u'Result for business "{0}" found:'.format(business_id)
    pprint.pprint(response, indent=2)
def main():
    """Parse command-line flags and run the search/business queries."""
    parser = argparse.ArgumentParser()

    parser.add_argument('-q', '--term', dest='term', default=DEFAULT_TERM, type=str, help='Search term (default: %(default)s)')
    parser.add_argument('-l', '--location', dest='location', default=DEFAULT_LOCATION, type=str, help='Search location (default: %(default)s)')

    input_values = parser.parse_args()

    try:
        query_api(input_values.term, input_values.location)
    except urllib2.HTTPError as error:
        # Surface HTTP failures as a non-zero exit with the status code.
        sys.exit('Encountered HTTP error {0}. Abort program.'.format(error.code))
# Standard script entry point.
if __name__ == '__main__':
    main()
|
Kriechi/mitmproxy | refs/heads/main | mitmproxy/addons/cut.py | 2 | import io
import csv
import typing
import os.path
from mitmproxy import command
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import ctx
from mitmproxy import certs
from mitmproxy.utils import strutils
import mitmproxy.types
import pyperclip
def headername(spec: str):
    """Extract the header name from a cut spec of the form ``header[name]``."""
    prefix, suffix = "header[", "]"
    if not (spec.startswith(prefix) and spec.endswith(suffix)):
        raise exceptions.CommandError("Invalid header spec: %s" % spec)
    # Surrounding whitespace inside the brackets is ignored.
    return spec[len(prefix):-len(suffix)].strip()
def is_addr(v):
    """Return True if ``v`` looks like an address tuple (host, port, ...)."""
    if not isinstance(v, tuple):
        return False
    return len(v) > 1
def extract(cut: str, f: flow.Flow) -> typing.Union[str, bytes]:
    """Resolve a dotted cut spec against a flow and render it as str/bytes.

    Walks attribute by attribute from the flow; the final path component is
    special-cased for address tuples ("host"/"port"), "header[...]" lookups,
    bytes, bools, and TLS certificates. Anything else is stringified.
    """
    path = cut.split(".")
    current: typing.Any = f
    for i, spec in enumerate(path):
        # Underscore-prefixed attributes are considered private.
        if spec.startswith("_"):
            raise exceptions.CommandError("Can't access internal attribute %s" % spec)

        part = getattr(current, spec, None)
        # Special handling applies only to the last path component.
        if i == len(path) - 1:
            if spec == "port" and is_addr(current):
                return str(current[1])
            if spec == "host" and is_addr(current):
                return str(current[0])
            elif spec.startswith("header["):
                if not current:
                    # No message (e.g. missing response) -> empty value.
                    return ""
                return current.headers.get(headername(spec), "")
            elif isinstance(part, bytes):
                return part
            elif isinstance(part, bool):
                return "true" if part else "false"
            elif isinstance(part, certs.Cert):  # pragma: no cover
                return part.to_pem().decode("ascii")
            elif isinstance(part, list) and len(part) > 0 and isinstance(part[0], certs.Cert):
                # TODO: currently this extracts only the very first cert as PEM-encoded string.
                return part[0].to_pem().decode("ascii")
        current = part
    # Fallback: stringify whatever we ended on; None becomes "".
    return str(current or "")
class Cut:
    """Addon exposing the ``cut``, ``cut.save`` and ``cut.clip`` commands."""

    @command.command("cut")
    def cut(
        self,
        flows: typing.Sequence[flow.Flow],
        cuts: mitmproxy.types.CutSpec,
    ) -> mitmproxy.types.Data:
        """
        Cut data from a set of flows. Cut specifications are attribute paths
        from the base of the flow object, with a few conveniences - "port"
        and "host" retrieve parts of an address tuple, ".header[key]"
        retrieves a header value. Return values converted to strings or
        bytes: SSL certificates are converted to PEM format, bools are "true"
        or "false", "bytes" are preserved, and all other values are
        converted to strings.
        """
        # One row per flow, one column per cut spec.
        ret: typing.List[typing.List[typing.Union[str, bytes]]] = []
        for f in flows:
            ret.append([extract(c, f) for c in cuts])
        return ret  # type: ignore

    @command.command("cut.save")
    def save(
        self,
        flows: typing.Sequence[flow.Flow],
        cuts: mitmproxy.types.CutSpec,
        path: mitmproxy.types.Path
    ) -> None:
        """
        Save cuts to file. If there are multiple flows or cuts, the format
        is UTF-8 encoded CSV. If there is exactly one row and one column,
        the data is written to file as-is, with raw bytes preserved. If the
        path is prefixed with a "+", values are appended if there is an
        existing file.
        """
        append = False
        if path.startswith("+"):
            append = True
            epath = os.path.expanduser(path[1:])
            path = mitmproxy.types.Path(epath)
        try:
            if len(cuts) == 1 and len(flows) == 1:
                # Single value: write raw, preserving bytes exactly.
                with open(path, "ab" if append else "wb") as fp:
                    if fp.tell() > 0:
                        # We're appending to a file that already exists and has content
                        fp.write(b"\n")
                    v = extract(cuts[0], flows[0])
                    if isinstance(v, bytes):
                        fp.write(v)
                    else:
                        fp.write(v.encode("utf8"))
                ctx.log.alert("Saved single cut.")
            else:
                # Multiple values: emit UTF-8 CSV, one row per flow.
                with open(path, "a" if append else "w", newline='', encoding="utf8") as tfp:
                    writer = csv.writer(tfp)
                    for f in flows:
                        vals = [extract(c, f) for c in cuts]
                        writer.writerow(
                            [strutils.always_str(x) or "" for x in vals]  # type: ignore
                        )
                ctx.log.alert("Saved %s cuts over %d flows as CSV." % (len(cuts), len(flows)))
        except OSError as e:
            ctx.log.error(str(e))

    @command.command("cut.clip")
    def clip(
        self,
        flows: typing.Sequence[flow.Flow],
        cuts: mitmproxy.types.CutSpec,
    ) -> None:
        """
        Send cuts to the clipboard. If there are multiple flows or cuts, the
        format is UTF-8 encoded CSV. If there is exactly one row and one
        column, the data is written to file as-is, with raw bytes preserved.
        """
        v: typing.Union[str, bytes]
        fp = io.StringIO(newline="")
        if len(cuts) == 1 and len(flows) == 1:
            # Single value goes to the clipboard verbatim.
            v = extract(cuts[0], flows[0])
            fp.write(strutils.always_str(v))  # type: ignore
            ctx.log.alert("Clipped single cut.")
        else:
            writer = csv.writer(fp)
            for f in flows:
                vals = [extract(c, f) for c in cuts]
                writer.writerow(
                    [strutils.always_str(v) for v in vals]
                )
            ctx.log.alert("Clipped %s cuts as CSV." % len(cuts))
        try:
            pyperclip.copy(fp.getvalue())
        except pyperclip.PyperclipException as e:
            # Clipboard access can fail (e.g. headless environments).
            ctx.log.error(str(e))
|
dcwatson/django-pgcrypto | refs/heads/master | testapp/tests.py | 1 | import datetime
import decimal
import json
import os
import unittest
from cryptography.hazmat.primitives.ciphers.algorithms import AES, Blowfish
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import connections, transaction
from django.db.utils import IntegrityError
from django.test import TestCase
from pgcrypto import __version__, armor, dearmor, pad, unpad
from pgcrypto.fields import BaseEncryptedField
from .models import Employee
class CryptoTests(unittest.TestCase):
    """Checks the low-level pad/unpad/armor helpers against ciphertexts
    produced by PostgreSQL's pgcrypto extension."""

    def setUp(self):
        # This is the expected Blowfish-encrypted value, according to the following pgcrypto call:
        #   select encrypt('sensitive information', 'pass', 'bf');
        self.encrypt_bf = b"x\364r\225\356WH\347\240\205\211a\223I{~\233\034\347\217/f\035\005"
        # The basic "encrypt" call assumes an all-NUL IV of the appropriate block size.
        self.iv_blowfish = b"\0" * Blowfish.block_size
        # This is the expected AES-encrypted value, according to the following pgcrypto call:
        #   select encrypt('sensitive information', 'pass', 'aes');
        self.encrypt_aes = b"\263r\011\033]Q1\220\340\247\317Y,\321q\224KmuHf>Z\011M\032\316\376&z\330\344"
        # The basic "encrypt" call assumes an all-NUL IV of the appropriate block size.
        self.iv_aes = b"\0" * AES.block_size
        # When encrypting a string whose length is a multiple of the block size, pgcrypto
        # tacks on an extra block of padding, so it can reliably unpad afterwards. This
        # data was generated from the following query (string length = 16):
        #   select encrypt('xxxxxxxxxxxxxxxx', 'secret', 'aes');
        self.encrypt_aes_padded = (
            b"5M\304\316\240B$Z\351\021PD\317\213\213\234f\225L \342\004SIX\030\331S\376\371\220\\"
        )

    def test_encrypt(self):
        # Padding + Blowfish encryption must match pgcrypto's output exactly.
        f = BaseEncryptedField(cipher="bf", key=b"pass")
        self.assertEqual(f.encrypt(pad(b"sensitive information", f.block_size)), self.encrypt_bf)

    def test_decrypt(self):
        f = BaseEncryptedField(cipher="bf", key=b"pass")
        self.assertEqual(unpad(f.decrypt(self.encrypt_bf), f.block_size), b"sensitive information")

    def test_armor_dearmor(self):
        # ASCII-armoring must round-trip arbitrary binary data.
        a = armor(self.encrypt_bf)
        self.assertEqual(dearmor(a), self.encrypt_bf)

    def test_aes(self):
        f = BaseEncryptedField(cipher="aes", key=b"pass")
        self.assertEqual(f.encrypt(pad(b"sensitive information", f.block_size)), self.encrypt_aes)

    def test_aes_pad(self):
        # Block-aligned plaintext: pgcrypto appends a full padding block.
        f = BaseEncryptedField(cipher="aes", key=b"secret")
        self.assertEqual(unpad(f.decrypt(self.encrypt_aes_padded), f.block_size), b"xxxxxxxxxxxxxxxx")
class FieldTests(TestCase):
    """End-to-end tests of the encrypted model fields against a real
    PostgreSQL database with the pgcrypto extension."""

    fixtures = ("employees",)

    def setUp(self):
        # Normally, you would use django.contrib.postgres.operations.CryptoExtension in migrations.
        c = connections["default"].cursor()
        c.execute("CREATE EXTENSION IF NOT EXISTS pgcrypto")

    def test_query(self):
        # Every fixture row must round-trip through the encrypted columns.
        fixture_path = os.path.join(os.path.dirname(__file__), "fixtures", "employees.json")
        for obj in json.load(open(fixture_path, "r")):
            if obj["model"] == "core.employee":
                e = Employee.objects.get(ssn=obj["fields"]["ssn"])
                self.assertEqual(e.pk, int(obj["pk"]))
                self.assertEqual(e.age, 42)
                self.assertEqual(e.salary, decimal.Decimal(obj["fields"]["salary"]))
                self.assertEqual(e.date_hired.isoformat(), obj["fields"]["date_hired"])

    def test_decimal_lookups(self):
        # Comparison lookups must work despite the values being encrypted.
        self.assertEqual(Employee.objects.filter(salary=decimal.Decimal("75248.77")).count(), 1)
        self.assertEqual(Employee.objects.filter(salary__gte=decimal.Decimal("75248.77")).count(), 1)
        self.assertEqual(Employee.objects.filter(salary__gt=decimal.Decimal("75248.77")).count(), 0)
        self.assertEqual(Employee.objects.filter(salary__gte=decimal.Decimal("70000.00")).count(), 1)
        self.assertEqual(Employee.objects.filter(salary__lte=decimal.Decimal("70000.00")).count(), 1)
        self.assertEqual(Employee.objects.filter(salary__lt=decimal.Decimal("52000")).count(), 0)

    def test_date_lookups(self):
        self.assertEqual(Employee.objects.filter(date_hired="1999-01-23").count(), 1)
        self.assertEqual(Employee.objects.filter(date_hired__gte="1999-01-01").count(), 1)
        self.assertEqual(Employee.objects.filter(date_hired__gt="1981-01-01").count(), 2)

    def test_multi_lookups(self):
        # Combining lookups across two encrypted fields in one filter.
        self.assertEqual(Employee.objects.filter(date_hired__gt="1981-01-01", salary__lt=60000).count(), 1)

    def test_model_validation(self):
        obj = Employee(name="Invalid User", date_hired="2000-01-01", email="invalid")
        try:
            obj.full_clean()
            self.fail("Invalid employee object passed validation")
        except ValidationError as e:
            # Both the missing salary and the malformed email must be flagged.
            for f in ("salary", "email"):
                self.assertIn(f, e.error_dict)

    def test_blank(self):
        # Blank (empty-string) values must survive a save/reload cycle.
        obj = Employee.objects.create(name="Test User", date_hired=datetime.date.today(), email="test@example.com")
        self.assertEqual(obj.ssn, "")
        obj.refresh_from_db()
        self.assertEqual(obj.ssn, "")
        self.assertEqual(Employee.objects.filter(ssn="").count(), 1)

    def test_unique(self):
        with transaction.atomic():
            try:
                Employee.objects.create(name="Duplicate", date_hired="2000-01-01", email="johnson.sally@example.com")
                self.fail("Created duplicate email (should be unique).")
            except IntegrityError:
                pass
        # Make sure we can create another record with a NULL value for a unique field.
        e = Employee.objects.create(name="NULL Email", date_hired="2000-01-01", email=None)
        e = Employee.objects.get(pk=e.pk)
        self.assertIs(e.email, None)
        self.assertEqual(Employee.objects.filter(email__isnull=True).count(), 2)

    def test_auto_now(self):
        # auto_now/auto_now_add must still populate encrypted date fields.
        e = Employee.objects.create(name="Joe User", ssn="12345", salary=42000)
        self.assertEqual(e.date_hired, datetime.date.today())
        self.assertEqual(e.date_modified, Employee.objects.get(pk=e.pk).date_modified)

    def test_formfields(self):
        # Each encrypted field must map to the natural Django form field.
        expected = {
            "name": forms.CharField,
            "age": forms.IntegerField,
            "ssn": forms.CharField,
            "salary": forms.DecimalField,
            "date_hired": forms.DateField,
            "email": forms.EmailField,
            "date_modified": forms.DateTimeField,
        }
        actual = {f.name: type(f.formfield()) for f in Employee._meta.fields if not f.primary_key}
        self.assertEqual(actual, expected)

    def test_raw_versioned(self):
        e = Employee.objects.get(ssn="666-27-9811")
        version_check = "Version: django-pgcrypto %s" % __version__
        raw_ssn = e.raw.ssn
        # Check that the correct version was stored.
        self.assertIn(version_check, raw_ssn)
        # Check that SECRET_KEY was used by default.
        f = BaseEncryptedField(key=settings.SECRET_KEY)
        self.assertEqual(f.to_python(raw_ssn), e.ssn)
        # Check that trying to decrypt with a bad key is (probably) gibberish.
        with self.assertRaises(UnicodeDecodeError):
            f = BaseEncryptedField(key="badkeyisaverybadkey")
            f.to_python(raw_ssn)
|
ruschelp/cortex-vfx | refs/heads/master | test/IECore/RadixSortTest.py | 12 | ##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import random
import unittest
from IECore import *
class RadixSortTest( unittest.TestCase ) :
	"""Checks that RadixSort returns a permutation of indices that orders
	random float/int/uint data non-decreasingly."""

	def testFloat( self ) :

		# Fixed seed keeps the random fixture reproducible.
		random.seed( 12 )

		s = RadixSort()

		d = FloatVectorData()
		for i in range( 0, 10000 ):
			d.append( random.uniform( FloatData().minValue, FloatData().maxValue ) )

		idx = s.sort( d )
		self.assertEqual( len(idx), 10000 )

		# The returned indices must visit the data in sorted order.
		for i in range( 1, 10000 ):
			self.assert_( d[ idx[ i ] ] >= d[ idx[ i - 1 ] ] )

	def testInt( self ) :

		random.seed( 13 )

		s = RadixSort()

		d = IntVectorData()
		for i in range( 0, 10000 ):
			d.append( int( random.uniform( IntData().minValue, IntData().maxValue ) ) )

		idx = s.sort( d )
		self.assertEqual( len(idx), 10000 )

		for i in range( 1, 10000 ):
			self.assert_( d[ idx[ i ] ] >= d[ idx[ i - 1 ] ] )

	def testUInt( self ) :

		random.seed( 14 )

		s = RadixSort()

		d = UIntVectorData()
		for i in range( 0, 10000 ):
			d.append( int( random.uniform( UIntData().minValue, UIntData().maxValue ) ) )

		idx = s.sort( d )
		self.assertEqual( len(idx), 10000 )

		for i in range( 1, 10000 ):
			self.assert_( d[ idx[ i ] ] >= d[ idx[ i - 1 ] ] )
# Standard unittest entry point.
if __name__ == "__main__":
    unittest.main()
|
brandond/ansible | refs/heads/devel | test/units/modules/storage/netapp/test_na_ontap_unix_user.py | 43 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_unix_user \
import NetAppOntapUnixUser as user_module # module under test
# Skip every test in this module when the required NetApp SDK (netapp_lib)
# is not installed in the environment.
if not netapp_utils.has_netapp_lib():
    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """prepare arguments so that they will be picked up during module creation"""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    # AnsibleModule reads its input from this private buffer during tests.
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised in place of module.exit_json so tests can catch successful exits."""
class AnsibleFailJson(Exception):
    """Raised in place of module.fail_json so tests can catch failures."""
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over exit_json; package return data into an exception"""
    # Mirror Ansible's behaviour of always reporting a 'changed' flag.
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over fail_json; package return data into an exception"""
    payload = dict(kwargs, failed=True)
    raise AnsibleFailJson(payload)
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''

    def __init__(self, kind=None, data=None):
        ''' save arguments '''
        # kind: None (no records), 'user' (return a user record) or
        # 'user-fail' (raise a ZAPI error); data: the user attributes.
        self.kind = kind
        self.params = data
        self.xml_in = None
        self.xml_out = None

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        # Record the request so tests can assert on the generated ZAPI XML.
        self.xml_in = xml
        if self.kind == 'user':
            xml = self.build_user_info(self.params)
        elif self.kind == 'user-fail':
            raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
        self.xml_out = xml
        return xml

    @staticmethod
    def build_user_info(data):
        ''' build xml data for unix-user-info '''
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = \
            {'attributes-list': {'unix-user-info': {'user-id': data['id'],
                                                    'group-id': data['group_id'], 'full-name': data['full_name']}},
             'num-records': 1}
        xml.translate_struct(attributes)
        return xml
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        # Route Ansible's exit_json/fail_json to exceptions for each test.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.server = MockONTAPConnection()
        # Canonical UNIX user attributes shared by the tests below.
        self.mock_user = {
            'name': 'test',
            'id': '11',
            'group_id': '12',
            'vserver': 'something',
            'full_name': 'Test User'
        }

    def mock_args(self):
        # Full argument set accepted by the module under test.
        return {
            'name': self.mock_user['name'],
            'group_id': self.mock_user['group_id'],
            'id': self.mock_user['id'],
            'vserver': self.mock_user['vserver'],
            'full_name': self.mock_user['full_name'],
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!'
        }

    def get_user_mock_object(self, kind=None, data=None):
        """
        Helper method to return an na_ontap_unix_user object
        :param kind: passes this param to MockONTAPConnection()
        :return: na_ontap_unix_user object
        """
        obj = user_module()
        obj.autosupport_log = Mock(return_value=None)
        if data is None:
            data = self.mock_user
        obj.server = MockONTAPConnection(kind=kind, data=data)
        return obj

    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            user_module()

    def test_get_nonexistent_user(self):
        ''' Test if get_unix_user returns None for non-existent user '''
        set_module_args(self.mock_args())
        result = self.get_user_mock_object().get_unix_user()
        assert result is None

    def test_get_existing_user(self):
        ''' Test if get_unix_user returns details for existing user '''
        set_module_args(self.mock_args())
        result = self.get_user_mock_object('user').get_unix_user()
        assert result['full_name'] == self.mock_user['full_name']

    def test_get_xml(self):
        # The lookup query must filter on user name and vserver.
        set_module_args(self.mock_args())
        obj = self.get_user_mock_object('user')
        result = obj.get_unix_user()
        assert obj.server.xml_in['query']
        assert obj.server.xml_in['query']['unix-user-info']
        user_info = obj.server.xml_in['query']['unix-user-info']
        assert user_info['user-name'] == self.mock_user['name']
        assert user_info['vserver'] == self.mock_user['vserver']

    def test_create_error_missing_params(self):
        data = self.mock_args()
        del data['group_id']
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_user_mock_object('user').create_unix_user()
        assert 'Error: Missing one or more required parameters for create: (group_id, id)' == exc.value.args[0]['msg']

    @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.create_unix_user')
    def test_create_called(self, create_user):
        # A non-existent user triggers create and reports changed.
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_user_mock_object().apply()
        assert exc.value.args[0]['changed']
        create_user.assert_called_with()

    def test_create_xml(self):
        '''Test create ZAPI element'''
        set_module_args(self.mock_args())
        create = self.get_user_mock_object()
        with pytest.raises(AnsibleExitJson) as exc:
            create.apply()
        # Map ZAPI element names to the corresponding mock_user keys.
        mock_key = {
            'user-name': 'name',
            'group-id': 'group_id',
            'user-id': 'id',
            'full-name': 'full_name'
        }
        for key in ['user-name', 'user-id', 'group-id', 'full-name']:
            assert create.server.xml_in[key] == self.mock_user[mock_key[key]]

    def test_create_wihtout_full_name(self):
        '''Test create ZAPI element'''
        data = self.mock_args()
        del data['full_name']
        set_module_args(data)
        create = self.get_user_mock_object()
        with pytest.raises(AnsibleExitJson) as exc:
            create.apply()
        # Omitted optional parameter must not appear in the request XML.
        with pytest.raises(KeyError):
            create.server.xml_in['full-name']

    @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user')
    @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.delete_unix_user')
    def test_delete_called(self, delete_user, modify_user):
        ''' Test delete existing user '''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_user_mock_object('user').apply()
        assert exc.value.args[0]['changed']
        delete_user.assert_called_with()
        # Deleting must never go through the modify path.
        assert modify_user.call_count == 0

    @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.get_unix_user')
    @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user')
    def test_modify_called(self, modify_user, get_user):
        ''' Test modify user group_id '''
        data = self.mock_args()
        data['group_id'] = 20
        set_module_args(data)
        get_user.return_value = {'group_id': 10}
        obj = self.get_user_mock_object('user')
        with pytest.raises(AnsibleExitJson) as exc:
            obj.apply()
        get_user.assert_called_with()
        # Only the changed attribute is passed to modify.
        modify_user.assert_called_with({'group_id': 20})

    def test_modify_only_id(self):
        ''' Test modify user id '''
        set_module_args(self.mock_args())
        modify = self.get_user_mock_object('user')
        modify.modify_unix_user({'id': 123})
        assert modify.server.xml_in['user-id'] == '123'
        # Unchanged attributes must not be present in the modify request.
        with pytest.raises(KeyError):
            modify.server.xml_in['group-id']
        with pytest.raises(KeyError):
            modify.server.xml_in['full-name']

    def test_modify_xml(self):
        ''' Test modify user full_name '''
        set_module_args(self.mock_args())
        modify = self.get_user_mock_object('user')
        modify.modify_unix_user({'full_name': 'New Name',
                                 'group_id': '25'})
        assert modify.server.xml_in['user-name'] == self.mock_user['name']
        assert modify.server.xml_in['full-name'] == 'New Name'
        assert modify.server.xml_in['group-id'] == '25'

    @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.create_unix_user')
    @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.delete_unix_user')
    @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user')
    def test_do_nothing(self, modify, delete, create):
        ''' changed is False and none of the opetaion methods are called'''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        obj = self.get_user_mock_object()
        with pytest.raises(AnsibleExitJson) as exc:
            obj.apply()
        create.assert_not_called()
        delete.assert_not_called()
        modify.assert_not_called()

    def test_get_exception(self):
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_user_mock_object('user-fail').get_unix_user()
        assert 'Error getting UNIX user' in exc.value.args[0]['msg']

    def test_create_exception(self):
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_user_mock_object('user-fail').create_unix_user()
        assert 'Error creating UNIX user' in exc.value.args[0]['msg']

    def test_modify_exception(self):
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_user_mock_object('user-fail').modify_unix_user({'id': '123'})
        assert 'Error modifying UNIX user' in exc.value.args[0]['msg']

    def test_delete_exception(self):
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_user_mock_object('user-fail').delete_unix_user()
        assert 'Error removing UNIX user' in exc.value.args[0]['msg']
|
hone5t/pyquick | refs/heads/master | basic/solution/string2.py | 208 | #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
    """Append 'ing' to strings of length >= 3, or 'ly' if the string already
    ends in 'ing'; strings shorter than 3 characters are returned unchanged."""
    if len(s) < 3:
        return s
    return s + ('ly' if s.endswith('ing') else 'ing')
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    """Replace the first 'not'...'bad' span with 'good' when 'bad' occurs
    after 'not'; otherwise return the string unchanged."""
    not_pos = s.find('not')
    bad_pos = s.find('bad')
    # Either word missing, or 'bad' before 'not' -> leave the string alone.
    if not_pos == -1 or bad_pos == -1 or bad_pos <= not_pos:
        return s
    return s[:not_pos] + 'good' + s[bad_pos + len('bad'):]
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    """Split each string into front/back halves (an odd length puts the
    extra character in the front half) and return
    a-front + b-front + a-back + b-back.
    """
    # (len + 1) // 2 gives the front half the extra character for odd
    # lengths. Floor division '//' keeps integer semantics on both
    # Python 2 and 3; the original 'len(a) / 2' yields a float under
    # Python 3's true division and breaks slicing.
    a_mid = (len(a) + 1) // 2
    b_mid = (len(b) + 1) // 2
    return a[:a_mid] + b[:b_mid] + a[a_mid:] + b[b_mid:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    # Print a single OK/X line comparing an actual result to the expected one.
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
    # Exercise verbing, not_bad and front_back with known input/output pairs,
    # printing OK/X for each via test() above.
    print 'verbing'
    test(verbing('hail'), 'hailing')
    test(verbing('swiming'), 'swimingly')
    test(verbing('do'), 'do')
    print

    print 'not_bad'
    test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
    test(not_bad('This tea is not hot'), 'This tea is not hot')
    test(not_bad("It's bad yet not"), "It's bad yet not")
    print

    print 'front_back'
    test(front_back('abcd', 'xy'), 'abxcdy')
    test(front_back('abcde', 'xyz'), 'abcxydez')
    test(front_back('Kitten', 'Donut'), 'KitDontenut')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
    main()
|
PublicHealthEngland/pygom | refs/heads/master | tests/test_ode_decomposition.py | 2 | from unittest import main, TestCase
import numpy
import sympy
from pygom import SimulateOde, Transition, TransitionType
from pygom.model import common_models
class TestOdeDecomposition(TestCase):
def test_simple(self):
ode1 = Transition('S', '-beta*S*I', 'ode')
ode2 = Transition('I', 'beta*S*I - gamma * I', 'ode')
ode3 = Transition('R', 'gamma*I', 'ode')
state_list = ['S', 'I', 'R']
param_list = ['beta', 'gamma']
ode = SimulateOde(state_list, param_list, ode=[ode1, ode2, ode3])
ode2 = ode.get_unrolled_obj()
diffEqZero = map(lambda x: x==0, sympy.simplify(ode.get_ode_eqn() - ode2.get_ode_eqn()))
self.assertTrue(numpy.all(numpy.array(list(diffEqZero))))
# if numpy.any(numpy.array(list(diffEqZero)) is False):
# raise Exception("Simple: SIR Decomposition failed")
def test_hard(self):
# the SLIARD model is considered to be hard because a state can
# go to multiple state. This is not as hard as the SEIHFR model
# below.
state_list = ['S', 'L', 'I', 'A', 'R', 'D']
param_list = ['beta', 'p', 'kappa', 'alpha', 'f', 'delta', 'epsilon', 'N']
ode_list = [
Transition('S', '- beta * S/N * ( I + delta * A)', 'ODE'),
Transition('L', 'beta * S/N * (I + delta * A) - kappa * L', 'ODE'),
Transition('I', 'p * kappa * L - alpha * I', 'ODE'),
Transition('A', '(1-p) * kappa * L - epsilon * A', 'ODE'),
Transition('R', 'f * alpha * I + epsilon * A', 'ODE'),
Transition('D', '(1-f) * alpha * I', 'ODE')
]
ode = SimulateOde(state_list, param_list, ode=ode_list)
ode2 = ode.get_unrolled_obj()
diffEqZero = map(lambda x: x==0, sympy.simplify(ode.get_ode_eqn() - ode2.get_ode_eqn()))
self.assertTrue(numpy.all(numpy.array(list(diffEqZero))))
def test_bd(self):
state_list = ['S', 'I', 'R']
param_list = ['beta', 'gamma', 'B', 'mu']
ode_list = [
Transition(origin='S',
equation='-beta * S * I + B - mu * S',
transition_type=TransitionType.ODE),
Transition(origin='I',
equation='beta * S * I - gamma * I - mu * I',
transition_type=TransitionType.ODE),
Transition(origin='R',
destination='R',
equation='gamma * I',
transition_type=TransitionType.ODE)
]
ode = SimulateOde(state_list, param_list, ode=ode_list)
ode2 = ode.get_unrolled_obj()
diffEqZero = map(lambda x: x==0, sympy.simplify(ode.get_ode_eqn() - ode2.get_ode_eqn()))
self.assertTrue(numpy.all(numpy.array(list(diffEqZero))))
    def test_derived_param(self):
        """Unrolling a model that uses derived parameters (Legrand SEIHFR).

        Derived parameters are handled separately from the normal
        parameters and the odes, so verify they survive a round trip
        through get_unrolled_obj().
        """
        # the derived parameters are treated separately when compared to the
        # normal parameters and the odes
        ode = common_models.Legrand_Ebola_SEIHFR()
        ode_list = [
            Transition('S', '-(beta_I*S*I + beta_H_Time*S*H + beta_F_Time*S*F)'),
            Transition('E', '(beta_I*S*I + beta_H_Time*S*H + beta_F_Time*S*F) - alpha*E'),
            Transition('I', '-gamma_I*(1 - theta_1)*(1 - delta_1)*I - gamma_D*(1 - theta_1)*delta_1*I - gamma_H*theta_1*I + alpha*E'),
            Transition('H', 'gamma_H*theta_1*I - gamma_DH*delta_2*H - gamma_IH*(1 - delta_2)*H'),
            Transition('F', '- gamma_F*F + gamma_DH*delta_2*H + gamma_D*(1 - theta_1)*delta_1*I'),
            Transition('R', 'gamma_I*(1 - theta_1)*(1 - delta_1)*I + gamma_F*F + gamma_IH*(1 - delta_2)*H'),
            Transition('tau', '1')
        ]
        # Rebuild the model from the explicit ODE list, reusing the states,
        # parameters and derived-parameter equations of the packaged model.
        ode1 = SimulateOde(ode.state_list, ode.param_list, ode._derivedParamEqn, ode=ode_list)
        ode2 = ode1.get_unrolled_obj()
        # Compare the unrolled system against the ORIGINAL packaged model.
        diffEqZero = map(lambda x: x==0, sympy.simplify(ode.get_ode_eqn() - ode2.get_ode_eqn()))
        self.assertTrue(numpy.all(numpy.array(list(diffEqZero))))
# Run this module's test entry point when executed as a script.
if __name__ == '__main__':
    main()
|
christoph-buente/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/update_unittest.py | 120 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.config.ports import MacPort, MacWK2Port
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.update import Update
class UpdateTest(unittest.TestCase):
    """Checks that the Update step selects the right update command
    regardless of the interactivity flag."""

    def _assert_update_commands(self, non_interactive):
        # The mock tool's default port reports a mock command; the real
        # Mac ports (WK1 and WK2) should both use the update-webkit script.
        tool = MockTool()
        options = MockOptions(non_interactive=non_interactive)
        step = Update(tool, options)
        self.assertEqual(["mock-update-webkit"], step._update_command())
        tool._deprecated_port = MacPort()
        self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
        tool._deprecated_port = MacWK2Port()
        self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())

    def test_update_command_non_interactive(self):
        self._assert_update_commands(non_interactive=True)

    def test_update_command_interactive(self):
        self._assert_update_commands(non_interactive=False)
|
dishad/CSCI2963 | refs/heads/master | lab6/markdown.py | 4 | """
Markdown.py
0. just print whatever is passed in to stdin
0. if filename passed in as a command line parameter,
then print file instead of stdin
1. wrap input in paragraph tags
2. convert single asterisk or underscore pairs to em tags
3. convert double asterisk or underscore pairs to strong tags
"""
import fileinput
import re
def convertStrong(line):
    """Convert **text** and __text__ pairs to <strong> tags.

    BUG FIX: the quantifier is now non-greedy (``.*?``).  The previous
    greedy ``.*`` matched from the first marker to the LAST one, so
    '**a** and **b**' became '<strong>a** and **b</strong>' instead of
    two separate <strong> spans.
    """
    line = re.sub(r'\*\*(.*?)\*\*', r'<strong>\1</strong>', line)
    line = re.sub(r'__(.*?)__', r'<strong>\1</strong>', line)
    return line
def convertEm(line):
    """Convert *text* and _text_ pairs to <em> tags.

    BUG FIX: the quantifier is now non-greedy (``.*?``).  The previous
    greedy ``.*`` matched from the first marker to the LAST one, so
    '*a* and *b*' became '<em>a* and *b</em>' instead of two separate
    <em> spans.
    """
    line = re.sub(r'\*(.*?)\*', r'<em>\1</em>', line)
    line = re.sub(r'_(.*?)_', r'<em>\1</em>', line)
    return line
def convertH1(line):
    """Wrap a line starting with '#' in <h1> tags, dropping the marker."""
    if not line.startswith('#'):
        return line
    return '<h1>' + line[1:] + '</h1>'
def convertH2(line):
    """Wrap a line starting with '##' in <h2> tags, dropping the markers."""
    if not line.startswith('##'):
        return line
    return '<h2>' + line[2:] + '</h2>'
def convertH3(line):
    """Wrap a line starting with '###' in <h3> tags, dropping the markers."""
    if not line.startswith('###'):
        return line
    return '<h3>' + line[3:] + '</h3>'
def block(line, bq):
    """Handle blockquote ('>') lines, emitting open/close tags on state change.

    ``bq`` is True while we are already inside a blockquote.  Returns the
    line with the leading '>' stripped, prefixed with '<blockquote>' when a
    quote starts, or prefixed with '</blockquote>' when a quote ends.

    BUG FIX: uses startswith() instead of ``line[0]`` so an empty input
    line no longer raises IndexError (the heading helpers already used the
    safe slicing form).
    """
    if line.startswith('>'):
        # Strip the marker; open the blockquote only on the first line.
        if not bq:
            return '<blockquote>' + line[1:]
        return line[1:]
    # Not a quote line: close the blockquote if one was open.
    if bq:
        return '</blockquote>' + line
    return line
# --- Script body: convert stdin (or files named on the command line) ---
# Tracks whether we are currently inside a <blockquote> across lines.
bqmode = False
for line in fileinput.input():
    line = line.rstrip()
    # Inline styles: strong (** / __) must run before em (* / _) so a
    # double marker is not consumed as two single markers.
    line = convertStrong(line)
    line = convertEm(line)
    # test headings in reverse order
    line = convertH3(line)
    line = convertH2(line)
    line = convertH1(line)
    line = block(line, bqmode)
    # Update the blockquote state from the tags block() just emitted.
    if '<blockquote>' in line:
        bqmode = True
    if '</blockquote>' in line:
        bqmode = False
    # Python 2 print statement; trailing comma suppresses the newline.
    print '<p>' + line + '</p>',
jerome-jacob/selenium | refs/heads/master | py/test/selenium/webdriver/common/typing_tests.py | 60 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class TypingTests(unittest.TestCase):
    """End-to-end tests for synthesizing keyboard input via send_keys().

    NOTE(review): assumes ``self.driver`` (a WebDriver) and
    ``self.webserver`` are attached to the instance by the surrounding
    test runner -- they are not created in this class.
    """

    def testShouldFireKeyPressEvents(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys("a")
        result = self.driver.find_element(by=By.ID, value="result")
        self.assertTrue("press:" in result.text)

    def testShouldFireKeyDownEvents(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys("I")
        result = self.driver.find_element(by=By.ID, value="result")
        self.assertTrue("down" in result.text)

    def testShouldFireKeyUpEvents(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys("a")
        result = self.driver.find_element(by=By.ID, value="result")
        self.assertTrue("up:" in result.text)

    def testShouldTypeLowerCaseLetters(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys("abc def")
        self.assertEqual(keyReporter.get_attribute("value"), "abc def")

    def testShouldBeAbleToTypeCapitalLetters(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys("ABC DEF")
        self.assertEqual(keyReporter.get_attribute("value"), "ABC DEF")

    def testShouldBeAbleToTypeQuoteMarks(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys("\"")
        self.assertEqual(keyReporter.get_attribute("value"), "\"")

    def testShouldBeAbleToTypeTheAtCharacter(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys("@")
        self.assertEqual(keyReporter.get_attribute("value"), "@")

    def testShouldBeAbleToMixUpperAndLowerCaseLetters(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys("me@eXample.com")
        self.assertEqual(keyReporter.get_attribute("value"), "me@eXample.com")

    def testArrowKeysShouldNotBePrintable(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys(Keys.ARROW_LEFT)
        self.assertEqual(keyReporter.get_attribute("value"), "")

    def testShouldBeAbleToUseArrowKeys(self):
        self._loadPage("javascriptPage")
        keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
        keyReporter.send_keys("Tet", Keys.ARROW_LEFT, "s")
        self.assertEqual(keyReporter.get_attribute("value"), "Test")

    def testWillSimulateAKeyUpWhenEnteringTextIntoInputElements(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyUp")
        element.send_keys("I like cheese")
        result = self.driver.find_element(by=By.ID, value="result")
        self.assertEqual(result.text, "I like cheese")

    def testWillSimulateAKeyDownWhenEnteringTextIntoInputElements(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyDown")
        element.send_keys("I like cheese")
        result = self.driver.find_element(by=By.ID, value="result")
        # Because the key down gets the result before the input element is
        # filled, we're a letter short here
        self.assertEqual(result.text, "I like chees")

    def testWillSimulateAKeyPressWhenEnteringTextIntoInputElements(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyPress")
        element.send_keys("I like cheese")
        result = self.driver.find_element(by=By.ID, value="result")
        # Because the key down gets the result before the input element is
        # filled, we're a letter short here
        self.assertEqual(result.text, "I like chees")

    def testWillSimulateAKeyUpWhenEnteringTextIntoTextAreas(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyUpArea")
        element.send_keys("I like cheese")
        result = self.driver.find_element(by=By.ID, value="result")
        self.assertEqual(result.text, "I like cheese")

    def testWillSimulateAKeyDownWhenEnteringTextIntoTextAreas(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyDownArea")
        element.send_keys("I like cheese")
        result = self.driver.find_element(by=By.ID, value="result")
        # Because the key down gets the result before the input element is
        # filled, we're a letter short here
        self.assertEqual(result.text, "I like chees")

    def testWillSimulateAKeyPressWhenEnteringTextIntoTextAreas(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyPressArea")
        element.send_keys("I like cheese")
        result = self.driver.find_element(by=By.ID, value="result")
        # Because the key down gets the result before the input element is
        # filled, we're a letter short here
        self.assertEqual(result.text, "I like chees")

    #@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE, ANDROID},
    #        reason = "untested user agents")
    def testShouldReportKeyCodeOfArrowKeysUpDownEvents(self):
        self._loadPage("javascriptPage")
        result = self.driver.find_element(by=By.ID, value="result")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        element.send_keys(Keys.ARROW_DOWN)
        self.assertTrue("down: 40" in result.text.strip())
        self.assertTrue("up: 40" in result.text.strip())
        element.send_keys(Keys.ARROW_UP)
        self.assertTrue("down: 38" in result.text.strip())
        self.assertTrue("up: 38" in result.text.strip())
        element.send_keys(Keys.ARROW_LEFT)
        self.assertTrue("down: 37" in result.text.strip())
        self.assertTrue("up: 37" in result.text.strip())
        element.send_keys(Keys.ARROW_RIGHT)
        self.assertTrue("down: 39" in result.text.strip())
        self.assertTrue("up: 39" in result.text.strip())
        # And leave no rubbish/printable keys in the "keyReporter"
        self.assertEqual(element.get_attribute("value"), "")

    def testNumericNonShiftKeys(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        numericLineCharsNonShifted = "`1234567890-=[]\\,.'/42"
        element.send_keys(numericLineCharsNonShifted)
        self.assertEqual(element.get_attribute("value"), numericLineCharsNonShifted)

    #@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE, ANDROID},
    #reason = "untested user agent")
    def testNumericShiftKeys(self):
        self._loadPage("javascriptPage")
        result = self.driver.find_element(by=By.ID, value="result")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        numericShiftsEtc = "~!@#$%^&*()_+{}:i\"<>?|END~"
        element.send_keys(numericShiftsEtc)
        self.assertEqual(element.get_attribute("value"), numericShiftsEtc)
        self.assertTrue(" up: 16" in result.text.strip())

    def testLowerCaseAlphaKeys(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        lowerAlphas = "abcdefghijklmnopqrstuvwxyz"
        element.send_keys(lowerAlphas)
        self.assertEqual(element.get_attribute("value"), lowerAlphas)

    def testUppercaseAlphaKeys(self):
        self._loadPage("javascriptPage")
        result = self.driver.find_element(by=By.ID, value="result")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        upperAlphas = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        element.send_keys(upperAlphas)
        self.assertEqual(element.get_attribute("value"), upperAlphas)
        self.assertTrue(" up: 16" in result.text.strip())

    def testAllPrintableKeys(self):
        self._loadPage("javascriptPage")
        result = self.driver.find_element(by=By.ID, value="result")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        allPrintable = "!\"#$%&'()*+,-./0123456789:<=>?@ ABCDEFGHIJKLMNOPQRSTUVWXYZ [\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
        element.send_keys(allPrintable)
        # BUG FIX: this was assertTrue(value, allPrintable).  assertTrue's
        # second argument is only the failure message, so the assertion
        # passed for ANY non-empty value.  assertEqual performs the
        # comparison used by every other test in this class.
        self.assertEqual(element.get_attribute("value"), allPrintable)
        self.assertTrue(" up: 16" in result.text.strip())

    def testArrowKeysAndPageUpAndDown(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        element.send_keys("a" + Keys.LEFT + "b" + Keys.RIGHT +
                          Keys.UP + Keys.DOWN + Keys.PAGE_UP + Keys.PAGE_DOWN + "1")
        self.assertEqual(element.get_attribute("value"), "ba1")

    #def testHomeAndEndAndPageUpAndPageDownKeys(self):
    #    // FIXME: macs don't have HOME keys, would PGUP work?
    #    if (Platform.getCurrent().is(Platform.MAC)) {
    #        return
    #    }
    #    self._loadPage("javascriptPage")
    #    element = self.driver.find_element(by=By.ID, value="keyReporter")
    #    element.send_keys("abc" + Keys.HOME + "0" + Keys.LEFT + Keys.RIGHT +
    #                      Keys.PAGE_UP + Keys.PAGE_DOWN + Keys.END + "1" + Keys.HOME +
    #                      "0" + Keys.PAGE_UP + Keys.END + "111" + Keys.HOME + "00")
    #    self.assertThat(element.get_attribute("value"), is("0000abc1111"))

    #@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE, ANDROID},
    #        reason = "untested user agents")
    def testDeleteAndBackspaceKeys(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        element.send_keys("abcdefghi")
        self.assertEqual(element.get_attribute("value"), "abcdefghi")
        element.send_keys(Keys.LEFT, Keys.LEFT, Keys.DELETE)
        self.assertEqual(element.get_attribute("value"), "abcdefgi")
        element.send_keys(Keys.LEFT, Keys.LEFT, Keys.BACK_SPACE)
        self.assertEqual(element.get_attribute("value"), "abcdfgi")

    #@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE}, reason = "untested user agents")
    def testSpecialSpaceKeys(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        element.send_keys("abcd" + Keys.SPACE + "fgh" + Keys.SPACE + "ij")
        self.assertEqual(element.get_attribute("value"), "abcd fgh ij")

    def testNumberpadAndFunctionKeys(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        element.send_keys("abcd" + Keys.MULTIPLY + Keys.SUBTRACT + Keys.ADD +
                          Keys.DECIMAL + Keys.SEPARATOR + Keys.NUMPAD0 + Keys.NUMPAD9 +
                          Keys.ADD + Keys.SEMICOLON + Keys.EQUALS + Keys.DIVIDE +
                          Keys.NUMPAD3 + "abcd")
        self.assertEqual(element.get_attribute("value"), "abcd*-+.,09+;=/3abcd")
        element.clear()
        element.send_keys("FUNCTION" + Keys.F2 + "-KEYS" + Keys.F2)
        element.send_keys("" + Keys.F2 + "-TOO" + Keys.F2)
        self.assertEqual(element.get_attribute("value"), "FUNCTION-KEYS-TOO")

    def testShiftSelectionDeletes(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        element.send_keys("abcd efgh")
        self.assertEqual(element.get_attribute("value"), "abcd efgh")
        element.send_keys(Keys.SHIFT, Keys.LEFT, Keys.LEFT, Keys.LEFT)
        element.send_keys(Keys.DELETE)
        self.assertEqual(element.get_attribute("value"), "abcd e")

    def testShouldTypeIntoInputElementsThatHaveNoTypeAttribute(self):
        self._loadPage("formPage")
        element = self.driver.find_element(by=By.ID, value="no-type")
        element.send_keys("Should Say Cheese")
        self.assertEqual(element.get_attribute("value"), "Should Say Cheese")

    def testShouldTypeAnInteger(self):
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="keyReporter")
        element.send_keys(1234)
        self.assertEqual(element.get_attribute("value"), "1234")

    # --- helpers ------------------------------------------------------

    def _pageURL(self, name):
        # self.webserver is provided by the enclosing test runner.
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
|
infoxchange/lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/tests/regressiontests/defaultfilters/tests.py | 38 | # -*- coding: utf-8 -*-
import datetime
import unittest
from django.template.defaultfilters import *
class DefaultFiltersTests(unittest.TestCase):
def test_floatformat(self):
self.assertEqual(floatformat(7.7), u'7.7')
self.assertEqual(floatformat(7.0), u'7')
self.assertEqual(floatformat(0.7), u'0.7')
self.assertEqual(floatformat(0.07), u'0.1')
self.assertEqual(floatformat(0.007), u'0.0')
self.assertEqual(floatformat(0.0), u'0')
self.assertEqual(floatformat(7.7, 3), u'7.700')
self.assertEqual(floatformat(6.000000, 3), u'6.000')
self.assertEqual(floatformat(6.200000, 3), u'6.200')
self.assertEqual(floatformat(6.200000, -3), u'6.200')
self.assertEqual(floatformat(13.1031, -3), u'13.103')
self.assertEqual(floatformat(11.1197, -2), u'11.12')
self.assertEqual(floatformat(11.0000, -2), u'11')
self.assertEqual(floatformat(11.000001, -2), u'11.00')
self.assertEqual(floatformat(8.2798, 3), u'8.280')
self.assertEqual(floatformat(u'foo'), u'')
self.assertEqual(floatformat(13.1031, u'bar'), u'13.1031')
self.assertEqual(floatformat(18.125, 2), u'18.13')
self.assertEqual(floatformat(u'foo', u'bar'), u'')
self.assertEqual(floatformat(u'¿Cómo esta usted?'), u'')
self.assertEqual(floatformat(None), u'')
pos_inf = float(1e30000)
self.assertEqual(floatformat(pos_inf), unicode(pos_inf))
neg_inf = float(-1e30000)
self.assertEqual(floatformat(neg_inf), unicode(neg_inf))
nan = pos_inf / pos_inf
self.assertEqual(floatformat(nan), unicode(nan))
class FloatWrapper(object):
def __init__(self, value):
self.value = value
def __float__(self):
return self.value
self.assertEqual(floatformat(FloatWrapper(11.000001), -2), u'11.00')
def test_addslashes(self):
self.assertEqual(addslashes(u'"double quotes" and \'single quotes\''),
u'\\"double quotes\\" and \\\'single quotes\\\'')
self.assertEqual(addslashes(ur'\ : backslashes, too'),
u'\\\\ : backslashes, too')
def test_capfirst(self):
self.assertEqual(capfirst(u'hello world'), u'Hello world')
def test_escapejs(self):
self.assertEqual(escapejs(u'"double quotes" and \'single quotes\''),
u'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027')
self.assertEqual(escapejs(ur'\ : backslashes, too'),
u'\\u005C : backslashes, too')
self.assertEqual(escapejs(u'and lots of whitespace: \r\n\t\v\f\b'),
u'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008')
self.assertEqual(escapejs(ur'<script>and this</script>'),
u'\\u003Cscript\\u003Eand this\\u003C/script\\u003E')
self.assertEqual(
escapejs(u'paragraph separator:\u2029and line separator:\u2028'),
u'paragraph separator:\\u2029and line separator:\\u2028')
def test_fix_ampersands(self):
self.assertEqual(fix_ampersands(u'Jack & Jill & Jeroboam'),
u'Jack & Jill & Jeroboam')
def test_linenumbers(self):
self.assertEqual(linenumbers(u'line 1\nline 2'),
u'1. line 1\n2. line 2')
self.assertEqual(linenumbers(u'\n'.join([u'x'] * 10)),
u'01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. '\
u'x\n08. x\n09. x\n10. x')
def test_lower(self):
self.assertEqual(lower('TEST'), u'test')
# uppercase E umlaut
self.assertEqual(lower(u'\xcb'), u'\xeb')
def test_make_list(self):
self.assertEqual(make_list('abc'), [u'a', u'b', u'c'])
self.assertEqual(make_list(1234), [u'1', u'2', u'3', u'4'])
def test_slugify(self):
self.assertEqual(slugify(' Jack & Jill like numbers 1,2,3 and 4 and'\
' silly characters ?%.$!/'),
u'jack-jill-like-numbers-123-and-4-and-silly-characters')
self.assertEqual(slugify(u"Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"),
u'un-elephant-a-loree-du-bois')
def test_stringformat(self):
self.assertEqual(stringformat(1, u'03d'), u'001')
self.assertEqual(stringformat(1, u'z'), u'')
def test_title(self):
self.assertEqual(title('a nice title, isn\'t it?'),
u"A Nice Title, Isn't It?")
self.assertEqual(title(u'discoth\xe8que'), u'Discoth\xe8que')
def test_truncatewords(self):
self.assertEqual(
truncatewords(u'A sentence with a few words in it', 1), u'A ...')
self.assertEqual(
truncatewords(u'A sentence with a few words in it', 5),
u'A sentence with a few ...')
self.assertEqual(
truncatewords(u'A sentence with a few words in it', 100),
u'A sentence with a few words in it')
self.assertEqual(
truncatewords(u'A sentence with a few words in it',
'not a number'), u'A sentence with a few words in it')
def test_truncatewords_html(self):
self.assertEqual(truncatewords_html(
u'<p>one <a href="#">two - three <br>four</a> five</p>', 0), u'')
self.assertEqual(truncatewords_html(u'<p>one <a href="#">two - '\
u'three <br>four</a> five</p>', 2),
u'<p>one <a href="#">two ...</a></p>')
self.assertEqual(truncatewords_html(
u'<p>one <a href="#">two - three <br>four</a> five</p>', 4),
u'<p>one <a href="#">two - three <br>four ...</a></p>')
self.assertEqual(truncatewords_html(
u'<p>one <a href="#">two - three <br>four</a> five</p>', 5),
u'<p>one <a href="#">two - three <br>four</a> five</p>')
self.assertEqual(truncatewords_html(
u'<p>one <a href="#">two - three <br>four</a> five</p>', 100),
u'<p>one <a href="#">two - three <br>four</a> five</p>')
self.assertEqual(truncatewords_html(
u'\xc5ngstr\xf6m was here', 1), u'\xc5ngstr\xf6m ...')
def test_upper(self):
self.assertEqual(upper(u'Mixed case input'), u'MIXED CASE INPUT')
# lowercase e umlaut
self.assertEqual(upper(u'\xeb'), u'\xcb')
def test_urlencode(self):
self.assertEqual(urlencode(u'fran\xe7ois & jill'),
u'fran%C3%A7ois%20%26%20jill')
self.assertEqual(urlencode(1), u'1')
def test_iriencode(self):
self.assertEqual(iriencode(u'S\xf8r-Tr\xf8ndelag'),
u'S%C3%B8r-Tr%C3%B8ndelag')
self.assertEqual(iriencode(urlencode(u'fran\xe7ois & jill')),
u'fran%C3%A7ois%20%26%20jill')
def test_urlizetrunc(self):
self.assertEqual(urlizetrunc(u'http://short.com/', 20), u'<a href='\
u'"http://short.com/" rel="nofollow">http://short.com/</a>')
self.assertEqual(urlizetrunc(u'http://www.google.co.uk/search?hl=en'\
u'&q=some+long+url&btnG=Search&meta=', 20), u'<a href="http://'\
u'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'\
u'meta=" rel="nofollow">http://www.google...</a>')
self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'\
u'&q=some+long+url&btnG=Search&meta=', 20), u'<a href="http://'\
u'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search'\
u'&meta=" rel="nofollow">http://www.google...</a>')
# Check truncating of URIs which are the exact length
uri = 'http://31characteruri.com/test/'
self.assertEqual(len(uri), 31)
self.assertEqual(urlizetrunc(uri, 31),
u'<a href="http://31characteruri.com/test/" rel="nofollow">'\
u'http://31characteruri.com/test/</a>')
self.assertEqual(urlizetrunc(uri, 30),
u'<a href="http://31characteruri.com/test/" rel="nofollow">'\
u'http://31characteruri.com/t...</a>')
self.assertEqual(urlizetrunc(uri, 2),
u'<a href="http://31characteruri.com/test/"'\
u' rel="nofollow">...</a>')
def test_urlize(self):
# Check normal urlize
self.assertEqual(urlize('http://google.com'),
u'<a href="http://google.com" rel="nofollow">http://google.com</a>')
self.assertEqual(urlize('http://google.com/'),
u'<a href="http://google.com/" rel="nofollow">http://google.com/</a>')
self.assertEqual(urlize('www.google.com'),
u'<a href="http://www.google.com" rel="nofollow">www.google.com</a>')
self.assertEqual(urlize('djangoproject.org'),
u'<a href="http://djangoproject.org" rel="nofollow">djangoproject.org</a>')
self.assertEqual(urlize('info@djangoproject.org'),
u'<a href="mailto:info@djangoproject.org">info@djangoproject.org</a>')
# Check urlize with https addresses
self.assertEqual(urlize('https://google.com'),
u'<a href="https://google.com" rel="nofollow">https://google.com</a>')
def test_wordcount(self):
self.assertEqual(wordcount(''), 0)
self.assertEqual(wordcount(u'oneword'), 1)
self.assertEqual(wordcount(u'lots of words'), 3)
self.assertEqual(wordwrap(u'this is a long paragraph of text that '\
u'really needs to be wrapped I\'m afraid', 14),
u"this is a long\nparagraph of\ntext that\nreally needs\nto be "\
u"wrapped\nI'm afraid")
self.assertEqual(wordwrap(u'this is a short paragraph of text.\n '\
u'But this line should be indented', 14),
u'this is a\nshort\nparagraph of\ntext.\n But this\nline '\
u'should be\nindented')
self.assertEqual(wordwrap(u'this is a short paragraph of text.\n '\
u'But this line should be indented',15), u'this is a short\n'\
u'paragraph of\ntext.\n But this line\nshould be\nindented')
def test_rjust(self):
self.assertEqual(ljust(u'test', 10), u'test ')
self.assertEqual(ljust(u'test', 3), u'test')
self.assertEqual(rjust(u'test', 10), u' test')
self.assertEqual(rjust(u'test', 3), u'test')
def test_center(self):
self.assertEqual(center(u'test', 6), u' test ')
def test_cut(self):
self.assertEqual(cut(u'a string to be mangled', 'a'),
u' string to be mngled')
self.assertEqual(cut(u'a string to be mangled', 'ng'),
u'a stri to be maled')
self.assertEqual(cut(u'a string to be mangled', 'strings'),
u'a string to be mangled')
def test_force_escape(self):
self.assertEqual(
force_escape(u'<some html & special characters > here'),
u'<some html & special characters > here')
self.assertEqual(
force_escape(u'<some html & special characters > here ĐÅ€£'),
u'<some html & special characters > here'\
u' \u0110\xc5\u20ac\xa3')
def test_linebreaks(self):
self.assertEqual(linebreaks(u'line 1'), u'<p>line 1</p>')
self.assertEqual(linebreaks(u'line 1\nline 2'),
u'<p>line 1<br />line 2</p>')
def test_removetags(self):
self.assertEqual(removetags(u'some <b>html</b> with <script>alert'\
u'("You smell")</script> disallowed <img /> tags', 'script img'),
u'some <b>html</b> with alert("You smell") disallowed tags')
self.assertEqual(striptags(u'some <b>html</b> with <script>alert'\
u'("You smell")</script> disallowed <img /> tags'),
u'some html with alert("You smell") disallowed tags')
def test_dictsort(self):
sorted_dicts = dictsort([{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}], 'age')
self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
[[('age', 18), ('name', 'Jonny B Goode')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 63), ('name', 'Ra Ra Rasputin')]])
def test_dictsortreversed(self):
sorted_dicts = dictsortreversed([{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}],
'age')
self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
[[('age', 63), ('name', 'Ra Ra Rasputin')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 18), ('name', 'Jonny B Goode')]])
def test_first(self):
self.assertEqual(first([0,1,2]), 0)
self.assertEqual(first(u''), u'')
self.assertEqual(first(u'test'), u't')
def test_join(self):
self.assertEqual(join([0,1,2], u'glue'), u'0glue1glue2')
def test_length(self):
self.assertEqual(length(u'1234'), 4)
self.assertEqual(length([1,2,3,4]), 4)
self.assertEqual(length_is([], 0), True)
self.assertEqual(length_is([], 1), False)
self.assertEqual(length_is('a', 1), True)
self.assertEqual(length_is(u'a', 10), False)
def test_slice(self):
self.assertEqual(slice_(u'abcdefg', u'0'), u'')
self.assertEqual(slice_(u'abcdefg', u'1'), u'a')
self.assertEqual(slice_(u'abcdefg', u'-1'), u'abcdef')
self.assertEqual(slice_(u'abcdefg', u'1:2'), u'b')
self.assertEqual(slice_(u'abcdefg', u'1:3'), u'bc')
self.assertEqual(slice_(u'abcdefg', u'0::2'), u'aceg')
def test_unordered_list(self):
self.assertEqual(unordered_list([u'item 1', u'item 2']),
u'\t<li>item 1</li>\n\t<li>item 2</li>')
self.assertEqual(unordered_list([u'item 1', [u'item 1.1']]),
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
self.assertEqual(
unordered_list([u'item 1', [u'item 1.1', u'item1.2'], u'item 2']),
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'\
u'</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>')
self.assertEqual(
unordered_list([u'item 1', [u'item 1.1', [u'item 1.1.1',
[u'item 1.1.1.1']]]]),
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'\
u'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'\
u'</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>')
self.assertEqual(unordered_list(
['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),
u'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'\
u'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'\
u'\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>')
class ULItem(object):
def __init__(self, title):
self.title = title
def __unicode__(self):
return u'ulitem-%s' % str(self.title)
a = ULItem('a')
b = ULItem('b')
self.assertEqual(unordered_list([a,b]),
u'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>')
# Old format for unordered lists should still work
self.assertEqual(unordered_list([u'item 1', []]), u'\t<li>item 1</li>')
self.assertEqual(unordered_list([u'item 1', [[u'item 1.1', []]]]),
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
self.assertEqual(unordered_list([u'item 1', [[u'item 1.1', []],
[u'item 1.2', []]]]), u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1'\
u'</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>')
self.assertEqual(unordered_list(['States', [['Kansas', [['Lawrence',
[]], ['Topeka', []]]], ['Illinois', []]]]), u'\t<li>States\n\t'\
u'<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>'\
u'\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>'\
u'Illinois</li>\n\t</ul>\n\t</li>')
def test_add(self):
self.assertEqual(add(u'1', u'2'), 3)
def test_get_digit(self):
self.assertEqual(get_digit(123, 1), 3)
self.assertEqual(get_digit(123, 2), 2)
self.assertEqual(get_digit(123, 3), 1)
self.assertEqual(get_digit(123, 4), 0)
self.assertEqual(get_digit(123, 0), 123)
self.assertEqual(get_digit(u'xyz', 0), u'xyz')
def test_date(self):
# real testing of date() is in dateformat.py
self.assertEqual(date(datetime.datetime(2005, 12, 29), u"d F Y"),
u'29 December 2005')
self.assertEqual(date(datetime.datetime(2005, 12, 29), ur'jS o\f F'),
u'29th of December')
def test_time(self):
# real testing of time() is done in dateformat.py
self.assertEqual(time(datetime.time(13), u"h"), u'01')
self.assertEqual(time(datetime.time(0), u"h"), u'12')
def test_timesince(self):
# real testing is done in timesince.py, where we can provide our own 'now'
self.assertEqual(
timesince(datetime.datetime.now() - datetime.timedelta(1)),
u'1 day')
self.assertEqual(
timesince(datetime.datetime(2005, 12, 29),
datetime.datetime(2005, 12, 30)),
u'1 day')
def test_timeuntil(self):
self.assertEqual(
timeuntil(datetime.datetime.now() + datetime.timedelta(1)),
u'1 day')
self.assertEqual(timeuntil(datetime.datetime(2005, 12, 30),
datetime.datetime(2005, 12, 29)),
u'1 day')
    def test_default(self):
        # default replaces any falsy value (None and '' included).
        self.assertEqual(default(u"val", u"default"), u'val')
        self.assertEqual(default(None, u"default"), u'default')
        self.assertEqual(default(u'', u"default"), u'default')
    def test_if_none(self):
        # Unlike default, default_if_none only replaces None — the empty
        # string passes through unchanged.
        self.assertEqual(default_if_none(u"val", u"default"), u'val')
        self.assertEqual(default_if_none(None, u"default"), u'default')
        self.assertEqual(default_if_none(u'', u"default"), u'')
    def test_divisibleby(self):
        # divisibleby returns a bool: value % arg == 0.
        self.assertEqual(divisibleby(4, 2), True)
        self.assertEqual(divisibleby(4, 3), False)
    def test_yesno(self):
        # Default mapping is yes/no/maybe for True/False/None. A custom
        # mapping is a comma-separated "yes,no[,maybe]" string; when the
        # third value is omitted, None falls back to the "no" string.
        self.assertEqual(yesno(True), u'yes')
        self.assertEqual(yesno(False), u'no')
        self.assertEqual(yesno(None), u'maybe')
        self.assertEqual(yesno(True, u'certainly,get out of town,perhaps'),
            u'certainly')
        self.assertEqual(yesno(False, u'certainly,get out of town,perhaps'),
            u'get out of town')
        self.assertEqual(yesno(None, u'certainly,get out of town,perhaps'),
            u'perhaps')
        self.assertEqual(yesno(None, u'certainly,get out of town'),
            u'get out of town')
    def test_filesizeformat(self):
        # Sizes use 1024-based units; non-numeric input degrades to
        # '0 bytes' rather than raising.
        self.assertEqual(filesizeformat(1023), u'1023 bytes')
        self.assertEqual(filesizeformat(1024), u'1.0 KB')
        self.assertEqual(filesizeformat(10*1024), u'10.0 KB')
        self.assertEqual(filesizeformat(1024*1024-1), u'1024.0 KB')
        self.assertEqual(filesizeformat(1024*1024), u'1.0 MB')
        self.assertEqual(filesizeformat(1024*1024*50), u'50.0 MB')
        self.assertEqual(filesizeformat(1024*1024*1024-1), u'1024.0 MB')
        self.assertEqual(filesizeformat(1024*1024*1024), u'1.0 GB')
        self.assertEqual(filesizeformat(complex(1,-1)), u'0 bytes')
        self.assertEqual(filesizeformat(""), u'0 bytes')
        self.assertEqual(filesizeformat(u"\N{GREEK SMALL LETTER ALPHA}"),
            u'0 bytes')
    def test_localized_filesizeformat(self):
        # With the German locale active and USE_L10N on, the decimal
        # separator becomes a comma and 'Bytes' is capitalized.
        from django.utils.translation import activate, deactivate
        old_localize = settings.USE_L10N
        try:
            activate('de')
            settings.USE_L10N = True
            self.assertEqual(filesizeformat(1023), u'1023 Bytes')
            self.assertEqual(filesizeformat(1024), u'1,0 KB')
            self.assertEqual(filesizeformat(10*1024), u'10,0 KB')
            self.assertEqual(filesizeformat(1024*1024-1), u'1024,0 KB')
            self.assertEqual(filesizeformat(1024*1024), u'1,0 MB')
            self.assertEqual(filesizeformat(1024*1024*50), u'50,0 MB')
            self.assertEqual(filesizeformat(1024*1024*1024-1), u'1024,0 MB')
            self.assertEqual(filesizeformat(1024*1024*1024), u'1,0 GB')
            self.assertEqual(filesizeformat(complex(1,-1)), u'0 Bytes')
            self.assertEqual(filesizeformat(""), u'0 Bytes')
            self.assertEqual(filesizeformat(u"\N{GREEK SMALL LETTER ALPHA}"),
                u'0 Bytes')
        finally:
            # Restore locale and L10N setting even if an assertion fails, so
            # later tests are unaffected.
            deactivate()
            settings.USE_L10N = old_localize
    def test_pluralize(self):
        # Accepts numbers or sequences (length decides plurality); the
        # optional argument is "plural" or "singular,plural". A malformed
        # argument with more than two parts yields the empty string.
        self.assertEqual(pluralize(1), u'')
        self.assertEqual(pluralize(0), u's')
        self.assertEqual(pluralize(2), u's')
        self.assertEqual(pluralize([1]), u'')
        self.assertEqual(pluralize([]), u's')
        self.assertEqual(pluralize([1,2,3]), u's')
        self.assertEqual(pluralize(1,u'es'), u'')
        self.assertEqual(pluralize(0,u'es'), u'es')
        self.assertEqual(pluralize(2,u'es'), u'es')
        self.assertEqual(pluralize(1,u'y,ies'), u'y')
        self.assertEqual(pluralize(0,u'y,ies'), u'ies')
        self.assertEqual(pluralize(2,u'y,ies'), u'ies')
        self.assertEqual(pluralize(0,u'y,ies,error'), u'')
    def test_phone2numeric(self):
        # Letters map to their phone-keypad digits; non-letters pass through.
        self.assertEqual(phone2numeric(u'0800 flowers'), u'0800 3569377')
    def test_non_string_input(self):
        # Filters shouldn't break if passed non-strings
        # The integer 123 exercises the common non-string case for every
        # string-oriented filter.
        self.assertEqual(addslashes(123), u'123')
        self.assertEqual(linenumbers(123), u'1. 123')
        self.assertEqual(lower(123), u'123')
        self.assertEqual(make_list(123), [u'1', u'2', u'3'])
        self.assertEqual(slugify(123), u'123')
        self.assertEqual(title(123), u'123')
        self.assertEqual(truncatewords(123, 2), u'123')
        self.assertEqual(upper(123), u'123')
        self.assertEqual(urlencode(123), u'123')
        self.assertEqual(urlize(123), u'123')
        self.assertEqual(urlizetrunc(123, 1), u'123')
        self.assertEqual(wordcount(123), 1)
        self.assertEqual(wordwrap(123, 2), u'123')
        self.assertEqual(ljust('123', 4), u'123 ')
        self.assertEqual(rjust('123', 4), u' 123')
        self.assertEqual(center('123', 5), u' 123 ')
        self.assertEqual(center('123', 6), u' 123  ')
        self.assertEqual(cut(123, '2'), u'13')
        self.assertEqual(escape(123), u'123')
        self.assertEqual(linebreaks(123), u'<p>123</p>')
        self.assertEqual(linebreaksbr(123), u'123')
        self.assertEqual(removetags(123, 'a'), u'123')
        self.assertEqual(striptags(123), u'123')
|
Edraak/edx-platform | refs/heads/master | lms/djangoapps/bulk_email/admin.py | 44 | """
Django admin page for bulk email models
"""
from django.contrib import admin
from bulk_email.models import CourseEmail, Optout, CourseEmailTemplate, CourseAuthorization
from bulk_email.forms import CourseEmailTemplateForm, CourseAuthorizationAdminForm
class CourseEmailAdmin(admin.ModelAdmin):
    """Admin for course email."""
    # Show 'sender' but keep it non-editable in the admin form.
    readonly_fields = ('sender',)
class OptoutAdmin(admin.ModelAdmin):
    """Admin for optouts."""
    # Changelist columns: who opted out, and from which course.
    list_display = ('user', 'course_id')
class CourseEmailTemplateAdmin(admin.ModelAdmin):
    """Admin for course email templates.

    Templates can be added but never deleted, to avoid dangling references
    from emails already sent.
    """
    # Custom form performs template validation on save.
    form = CourseEmailTemplateForm
    fieldsets = (
        (None, {
            # make the HTML template display above the plain template:
            'fields': ('html_template', 'plain_template', 'name'),
            'description': '''
Enter template to be used by course staff when sending emails to enrolled students.
The HTML template is for HTML email, and may contain HTML markup.  The plain template is
for plaintext email.  Both templates should contain the string '{{message_body}}' (with
two curly braces on each side), to indicate where the email text is to be inserted.
Other tags that may be used (surrounded by one curly brace on each side):
{platform_name} : the name of the platform
{course_title} : the name of the course
{course_url} : the course's full URL
{email} : the user's email address
{account_settings_url} : URL at which users can change account preferences
{email_settings_url} : URL at which users can change course email preferences
{course_image_url} : URL for the course's course image.
    Will return a broken link if course doesn't have a course image set.
Note that there is currently NO validation on tags, so be careful. Typos or use of
unsupported tags will cause email sending to fail.
'''
        }),
    )
    # Turn off the action bar (we have no bulk actions)
    actions = None
    def has_add_permission(self, request):
        """Enable the ability to add new templates, as we want to be able to define multiple templates."""
        return True
    def has_delete_permission(self, request, obj=None):
        """
        Disables the ability to remove existing templates, as we'd like to make sure we don't have dangling references.
        """
        return False
class CourseAuthorizationAdmin(admin.ModelAdmin):
    """Admin for enabling email on a course-by-course basis."""
    # Custom form validates the entered course id before saving.
    form = CourseAuthorizationAdminForm
    fieldsets = (
        (None, {
            'fields': ('course_id', 'email_enabled'),
            'description': '''
Enter a course id in the following form: Org/Course/CourseRun, eg MITx/6.002x/2012_Fall
Do not enter leading or trailing slashes. There is no need to surround the course ID with quotes.
Validation will be performed on the course name, and if it is invalid, an error message will display.
To enable email for the course, check the "Email enabled" box, then click "Save".
'''
        }),
    )
# Register the bulk-email models with the default admin site.
admin.site.register(CourseEmail, CourseEmailAdmin)
admin.site.register(Optout, OptoutAdmin)
admin.site.register(CourseEmailTemplate, CourseEmailTemplateAdmin)
admin.site.register(CourseAuthorization, CourseAuthorizationAdmin)
|
byterom/android_external_chromium_org | refs/heads/12.1 | build/gn_helpers.py | 117 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions useful when writing scripts that are run from GN's
exec_script function."""
class GNException(Exception):
  """Raised when a value cannot be represented in GN syntax."""
  pass
def ToGNString(value, allow_dicts = True):
  """Returns a GN-syntax string representation of the given value.

  allow_dicts indicates if this function will allow converting dictionaries
  to GN scopes. This is only possible at the top level, you can't nest a
  GN scope in a list, so this should be set to False for recursive calls.

  Raises:
    GNException: if |value| (or any nested element/key) cannot be
        represented in GN.
  """
  if isinstance(value, str):
    if value.find('\n') >= 0:
      raise GNException("Trying to print a string with a newline in it.")
    return '"' + value.replace('"', '\\"') + '"'

  # bool must be checked before int: bool is a subclass of int, and GN
  # spells booleans as lowercase 'true'/'false' (str(True) would produce
  # the invalid GN identifier 'True').
  if isinstance(value, bool):
    return 'true' if value else 'false'

  if isinstance(value, list):
    return '[ %s ]' % ', '.join(ToGNString(v) for v in value)

  if isinstance(value, dict):
    if not allow_dicts:
      raise GNException("Attempting to recursively print a dictionary.")
    result = ""
    for key in value:
      if not isinstance(key, str):
        raise GNException("Dictionary key is not a string.")
      result += "%s = %s\n" % (key, ToGNString(value[key], False))
    return result

  if isinstance(value, int):
    return str(value)

  raise GNException("Unsupported type when printing to GN.")
|
OpenPOWER-BigData/HDP-kafka | refs/heads/master | tests/kafkatest/__init__.py | 2 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
# This determines the version of kafkatest that can be published to PyPi and installed with pip
#
# Note that in development, this version name can't follow Kafka's convention of having a trailing "-SNAPSHOT"
# due to python version naming restrictions, which are enforced by python packaging tools
# (see https://www.python.org/dev/peps/pep-0440/)
#
# Instead, in trunk, the version should have a suffix of the form ".devN"
#
# For example, when Kafka is at version 0.9.0.0-SNAPSHOT, this should be something like "0.9.0.0.dev0"
__version__ = '0.9.0.1.dev0'  # PEP 440 development-release form; see comments above
|
LowieHuyghe/edmunds | refs/heads/master | edmunds/encoding/encoding.py | 1 |
import sys
class Encoding(object):
    """
    Helpers for converting values between byte strings and unicode text.

    All converters recurse through dicts and lists, converting keys and
    values alike, and work on both Python 2 (str/unicode) and
    Python 3 (bytes/str).
    """

    @staticmethod
    def normalize(value):
        """
        Normalize value to the interpreter's native text representation
        (ascii byte strings on Python 2, unicode strings on Python 3)
        :param value: The value
        :return: The processed value
        """
        # Python 2 vs Python 3
        if sys.version_info < (3, 0):
            return Encoding.to_ascii(value)
        else:
            return Encoding.to_unicode(value)

    @staticmethod
    def to_ascii(value):
        """
        Recursively encode unicode text to ascii byte strings
        (raises UnicodeEncodeError for text outside the ascii range)
        :param value: The value
        :return: The processed value
        """
        # Dict: convert both keys and values
        if isinstance(value, dict):
            processed_value = {}
            for key in value:
                if Encoding._is_unicode(key):
                    processed_key = key.encode('ascii')
                else:
                    processed_key = key
                processed_value[processed_key] = Encoding.to_ascii(value[key])
        # List: convert each item. The original code shadowed the `value`
        # parameter with its own loop variable; use a comprehension with a
        # distinct name instead.
        elif isinstance(value, list):
            processed_value = [Encoding.to_ascii(item) for item in value]
        # Unicode text
        elif Encoding._is_unicode(value):
            processed_value = value.encode('ascii')
        # Anything else passes through untouched
        else:
            processed_value = value
        return processed_value

    @staticmethod
    def to_unicode(value):
        """
        Recursively decode byte strings to unicode text (as utf-8)
        :param value: The value
        :return: The processed value
        """
        # Dict: convert both keys and values
        if isinstance(value, dict):
            processed_value = {}
            for key in value:
                if Encoding._is_ascii(key):
                    processed_key = key.decode('utf-8')
                else:
                    processed_key = key
                processed_value[processed_key] = Encoding.to_unicode(value[key])
        # List: convert each item (see note in to_ascii about shadowing)
        elif isinstance(value, list):
            processed_value = [Encoding.to_unicode(item) for item in value]
        # Byte string
        elif Encoding._is_ascii(value):
            processed_value = value.decode('utf-8')
        # Anything else passes through untouched
        else:
            processed_value = value
        return processed_value

    @staticmethod
    def get_text_type():
        """
        Get the interpreter's text type
        :return: class (unicode on Python 2, str on Python 3)
        """
        if sys.version_info < (3, 0):
            return unicode
        else:
            return str

    @staticmethod
    def _is_ascii(value):
        """
        Check if value is a byte string
        :param value: The value
        :return: Byte string or not
        """
        # Python 2 vs Python 3
        if sys.version_info < (3, 0):
            return isinstance(value, str)
        else:
            return isinstance(value, bytes)

    @staticmethod
    def _is_unicode(value):
        """
        Check if value is unicode text
        :param value: The value
        :return: Unicode text or not
        """
        # Python 2 vs Python 3
        if sys.version_info < (3, 0):
            return isinstance(value, unicode)
        else:
            return isinstance(value, str)
|
mbiciunas/nix | refs/heads/master | src/cli_config/cli.py | 1 | # NixConfig
# Copyright (c) 2017 Mark Biciunas.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
import sys
from cli_config.script import script
from cli_config.script_tag import script_tag
from cli_config.tag import tag
LOG = logging.getLogger(__name__)
def make_parser(sys_argv):
    """
    Command line argument parser for the Nixconfig program.

    Dispatches to the script, script-tag or tag sub-command handler based on
    the first positional argument; prints usage and exits with status 1 for
    an unknown command.

    :param sys_argv: Full argument vector (sys_argv[0] is the program path).
    """
    LOG.debug("Call parser")
    parser = argparse.ArgumentParser(
        description='Configure Nix',
        usage='''nixconfig {script,script-tag,tag} [<args>]
Commands:
   script       create, delete, update... scripts
   script-tag   add, remove, list... tags from scripts
   tag          create, delete, list... tags
''')
    parser.add_argument('command', nargs="?", default="", help='Subcommand to run')
    # Only parse the command itself; sub-command handlers parse the rest.
    args = parser.parse_args(sys_argv[1:2])
    # Derive the program name from the argv we were given, not the global
    # sys.argv, so callers can pass a synthetic argument vector (the
    # original read sys.argv here, which broke that).
    _prog = sys_argv[0].rsplit("/", 1)[-1]
    if args.command == "script":
        script.script(_prog, sys_argv[2:])
    elif args.command == "script-tag":
        script_tag.script_tag(_prog, sys_argv[2:])
    elif args.command == "tag":
        tag.tag(_prog, sys_argv[2:])
    else:
        print('Unrecognized command')
        parser.print_help()
        # sys.exit is the supported API; bare exit() comes from the site
        # module and may be absent (e.g. under python -S or frozen apps).
        sys.exit(1)
|
Eric89GXL/scikit-learn | refs/heads/master | examples/ensemble/plot_gradient_boosting_regression.py | 8 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
# Fixed random_state keeps the shuffle (and thus the 90/10 split) reproducible.
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
# NOTE(review): min_samples_split=1 is only accepted by older scikit-learn;
# modern versions require >= 2 — confirm against the pinned version.
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
          'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
# staged_decision_function yields the model's predictions after each
# boosting stage, letting us track deviance as trees are added.
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
    test_score[i] = clf.loss_(y_test, y_pred)
pl.figure(figsize=(12, 6))
pl.subplot(1, 2, 1)
pl.title('Deviance')
pl.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
        label='Training Set Deviance')
pl.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
        label='Test Set Deviance')
pl.legend(loc='upper right')
pl.xlabel('Boosting Iterations')
pl.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
pl.subplot(1, 2, 2)
pl.barh(pos, feature_importance[sorted_idx], align='center')
pl.yticks(pos, boston.feature_names[sorted_idx])
pl.xlabel('Relative Importance')
pl.title('Variable Importance')
pl.show()
|
slightlymadphoenix/activityPointsApp | refs/heads/master | activitypoints/lib/python3.5/site-packages/django/conf/locale/th/formats.py | 44 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax, not strftime).
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j F Y, G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
SHORT_DATETIME_FORMAT = 'j M Y, G:i'
FIRST_DAY_OF_WEEK = 0  # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d/%m/%Y',  # 25/10/2006
    '%d %b %Y',  # 25 ต.ค. 2006
    '%d %B %Y',  # 25 ตุลาคม 2006
]
TIME_INPUT_FORMATS = [
    '%H:%M:%S.%f',  # 14:30:59.000200
    '%H:%M:%S',  # 14:30:59
    '%H:%M',  # 14:30
]
DATETIME_INPUT_FORMATS = [
    '%d/%m/%Y %H:%M:%S.%f',  # 25/10/2006 14:30:59.000200
    '%d/%m/%Y %H:%M:%S',  # 25/10/2006 14:30:59
    '%d/%m/%Y %H:%M',  # 25/10/2006 14:30
]
# Number formatting: e.g. 1,234,567.89
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
larsbadde/NUI_4.0 | refs/heads/master | Server/Main/project/target/node-modules/webjars/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 1407 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
  """Quote a command line argument so that it appears as one argument when
  processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
  Windows programs).

  Args:
    arg: A single command-line argument string.
  Returns:
    The argument with quotes and %'s escaped, wrapped in double quotes.
  """
  # See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
  # threads. This is actually the quoting rules for CommandLineToArgvW, not
  # for the shell, because the shell doesn't do anything in Windows. This
  # works more or less because most programs (including the compiler, etc.)
  # use that function to handle command line arguments.

  # For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
  # preceding it, and results in n backslashes + the quote. So we substitute
  # in 2* what we match, +1 more, plus the quote.
  arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)

  # %'s also need to be doubled otherwise they're interpreted as batch
  # positional arguments. Also make sure to escape the % so that they're
  # passed literally through escaping so they can be singled to just the
  # original %. Otherwise, trying to pass the literal representation that
  # looks like an environment variable to the shell (e.g. %PATH%) would fail.
  arg = arg.replace('%', '%%')

  # These commands are used in rsp files, so no escaping for the shell (via ^)
  # is necessary.

  # Finally, wrap the whole thing in quotes so that the above quote rule
  # applies and whitespace isn't a word break.
  return '"' + arg + '"'
def EncodeRspFileList(args):
  """Process a list of arguments using QuoteForRspFile.

  Returns a single command-line string. args[0] is treated as the program
  (or 'call <program>'); it is path-normalized but deliberately not quoted.
  """
  # Note that the first argument is assumed to be the command. Don't add
  # quotes around it because then built-ins like 'echo', etc. won't work.
  # Take care to normpath only the path in the case of 'call ../x.bat' because
  # otherwise the whole thing is incorrectly interpreted as a path and not
  # normalized correctly.
  if not args: return ''
  if args[0].startswith('call '):
    call, program = args[0].split(' ', 1)
    program = call + ' ' + os.path.normpath(program)
  else:
    program = os.path.normpath(args[0])
  return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
  """If |element| then remap it through |map|. If |element| is iterable then
  each item will be remapped. Any elements not found will be removed.

  |map| may be a dict (looked up via .get, so missing keys yield None and
  are filtered out) or any callable.
  """
  if map is not None and element is not None:
    if not callable(map):
      map = map.get # Assume it's a dict, otherwise a callable to do the remap.
    if isinstance(element, list) or isinstance(element, tuple):
      # NOTE: gyp targets Python 2, where filter() returns a list here.
      element = filter(None, [map(elem) for elem in element])
    else:
      element = map(element)
  return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
  """Try to find an installation location for the DirectX SDK. Check for the
  standard environment variable, and if that doesn't exist, try to find
  via the registry. May return None if not found in either location."""
  # Return previously calculated value, if there is one
  # (memoized as a function attribute so the registry is queried at most once).
  if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
    return _FindDirectXInstallation.dxsdk_dir

  dxsdk_dir = os.environ.get('DXSDK_DIR')
  if not dxsdk_dir:
    # Setup params to pass to and attempt to launch reg.exe.
    cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    for line in p.communicate()[0].splitlines():
      if 'InstallPath' in line:
        # NOTE(review): assumes the path is the fourth 4-space-separated
        # column of reg.exe output — fragile; verify against reg.exe format.
        dxsdk_dir = line.split('    ')[3] + "\\"

  # Cache return value
  _FindDirectXInstallation.dxsdk_dir = dxsdk_dir
  return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
  """Get a dict of variables mapping internal VS macro names to their gyp
  equivalents. Returns all variables that are independent of the target.

  Args:
    vs_version: A Visual Studio version object exposing Path()
        (see gyp.MSVSVersion).
  """
  env = {}
  # '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
  # Visual Studio is actually installed.
  if vs_version.Path():
    env['$(VSInstallDir)'] = vs_version.Path()
    env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
  # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
  # set. This happens when the SDK is sync'd via src-internal, rather than
  # by typical end-user installation of the SDK. If it's not set, we don't
  # want to leave the unexpanded variable in the path, so simply strip it.
  dxsdk_dir = _FindDirectXInstallation()
  env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
  # Try to find an installation location for the Windows DDK by checking
  # the WDK_DIR environment variable, may be None.
  env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
  return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
  """Finds msvs_system_include_dirs that are common to all targets, removes
  them from all targets, and returns an OrderedSet containing them.

  Returns None if no includes are shared or if any shared include still
  contains an unexpanded target-specific macro.
  """
  # Intersect across configs; only includes present in every config qualify.
  all_system_includes = OrderedSet(
      configs[0].get('msvs_system_include_dirs', []))
  for config in configs[1:]:
    system_includes = config.get('msvs_system_include_dirs', [])
    all_system_includes = all_system_includes & OrderedSet(system_includes)
  if not all_system_includes:
    return None
  # Expand macros in all_system_includes.
  env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
  expanded_system_includes = OrderedSet([ExpandMacros(include, env)
                                         for include in all_system_includes])
  if any(['$' in include for include in expanded_system_includes]):
    # Some path relies on target-specific variables, bail.
    return None

  # Remove system includes shared by all targets from the targets.
  for config in configs:
    includes = config.get('msvs_system_include_dirs', [])
    if includes:  # Don't insert a msvs_system_include_dirs key if not needed.
      # This must check the unexpanded includes list:
      new_includes = [i for i in includes if i not in all_system_includes]
      config['msvs_system_include_dirs'] = new_includes

  return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
msvs_settings field). They largely correpond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
  def __init__(self, spec, generator_flags):
    """Extracts the MSVS-specific fields from a gyp target |spec|.

    Args:
      spec: The gyp target dictionary.
      generator_flags: Generator flags dict, used to select the VS version.
    Raises:
      Exception: if the spec uses msvs_prebuild/msvs_postbuild, which this
          emulation layer does not support.
    """
    self.spec = spec
    self.vs_version = GetVSVersion(generator_flags)

    supported_fields = [
        ('msvs_configuration_attributes', dict),
        ('msvs_settings', dict),
        ('msvs_system_include_dirs', list),
        ('msvs_disabled_warnings', list),
        ('msvs_precompiled_header', str),
        ('msvs_precompiled_source', str),
        ('msvs_configuration_platform', str),
        ('msvs_target_platform', str),
        ]
    configs = spec['configurations']
    # For each supported field, build a per-configuration dict attribute,
    # e.g. self.msvs_settings[configname] -> that config's settings dict.
    for field, default in supported_fields:
      setattr(self, field, {})
      for configname, config in configs.iteritems():
        getattr(self, field)[configname] = config.get(field, default())

    self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])

    unsupported_fields = [
        'msvs_prebuild',
        'msvs_postbuild',
    ]
    unsupported = []
    for field in unsupported_fields:
      for config in configs.values():
        if field in config:
          unsupported += ["%s not supported (target %s)." %
                          (field, spec['target_name'])]
    if unsupported:
      raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
  def GetVSMacroEnv(self, base_to_build=None, config=None):
    """Get a dict of variables mapping internal VS macro names to their gyp
    equivalents.

    Args:
      base_to_build: Relative path used to anchor $(OutDir)/$(TargetDir);
          when None those macros expand to the empty string.
      config: Configuration name, used to pick the target architecture.
    """
    target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
    target_name = self.spec.get('product_prefix', '') + \
        self.spec.get('product_name', self.spec['target_name'])
    target_dir = base_to_build + '\\' if base_to_build else ''
    target_ext = '.' + self.GetExtension()
    target_file_name = target_name + target_ext

    replacements = {
        '$(InputName)': '${root}',
        '$(InputPath)': '${source}',
        '$(IntDir)': '$!INTERMEDIATE_DIR',
        '$(OutDir)\\': target_dir,
        '$(PlatformName)': target_platform,
        '$(ProjectDir)\\': '',
        '$(ProjectName)': self.spec['target_name'],
        '$(TargetDir)\\': target_dir,
        '$(TargetExt)': target_ext,
        '$(TargetFileName)': target_file_name,
        '$(TargetName)': target_name,
        '$(TargetPath)': os.path.join(target_dir, target_file_name),
    }
    # Target-independent macros (VS/VC install dirs, SDK locations).
    replacements.update(GetGlobalVSMacroEnv(self.vs_version))
    return replacements
  def ConvertVSMacros(self, s, base_to_build=None, config=None):
    """Convert from VS macro names to something equivalent.

    Args:
      s: String possibly containing $(Macro) references.
      base_to_build, config: Passed through to GetVSMacroEnv.
    """
    env = self.GetVSMacroEnv(base_to_build, config=config)
    return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
  def _GetAndMunge(self, field, path, default, prefix, append, map):
    """Retrieve a value from |field| at |path| or return |default|. If
    |append| is specified, and the item is found, it will be appended to that
    object instead of returned. If |map| is specified, results will be
    remapped through |map| before being returned or appended."""
    # Pipeline: lookup -> remap -> prefix -> append-or-return.
    result = _GenericRetrieve(field, default, path)
    result = _DoRemapping(result, map)
    result = _AddPrefix(result, prefix)
    return _AppendOrReturn(append, result)
  class _GetWrapper(object):
    """Partial-application helper: binds a settings |field| and |base_path|
    so callers can look up sub-keys with optional map/prefix/default,
    funneling through parent._GetAndMunge."""
    def __init__(self, parent, field, base_path, append=None):
      self.parent = parent
      self.field = field
      self.base_path = [base_path]
      self.append = append
    def __call__(self, name, map=None, prefix='', default=None):
      return self.parent._GetAndMunge(self.field, self.base_path + [name],
          default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
  def _TargetConfig(self, config):
    """Returns the target-specific configuration.

    Normalizes the '_x64' suffix on |config| to agree with the effective
    architecture from GetArch().
    """
    # There's two levels of architecture/platform specification in VS. The
    # first level is globally for the configuration (this is what we consider
    # "the" config at the gyp level, which will be something like 'Debug' or
    # 'Release_x64'), and a second target-specific configuration, which is an
    # override for the global one. |config| is remapped here to take into
    # account the local target-specific overrides to the global configuration.
    arch = self.GetArch(config)
    if arch == 'x64' and not config.endswith('_x64'):
      config += '_x64'
    if arch == 'x86' and config.endswith('_x64'):
      config = config.rsplit('_', 1)[0]
    return config
  def _Setting(self, path, config,
               default=None, prefix='', append=None, map=None):
    """_GetAndMunge for msvs_settings (e.g. path=('VCLinkerTool', 'OutputFile'))."""
    return self._GetAndMunge(
        self.msvs_settings[config], path, default, prefix, append, map)
  def _ConfigAttrib(self, path, config,
                    default=None, prefix='', append=None, map=None):
    """_GetAndMunge for msvs_configuration_attributes (e.g. path=['CharacterSet'])."""
    return self._GetAndMunge(
        self.msvs_configuration_attributes[config],
        path, default, prefix, append, map)
  def AdjustIncludeDirs(self, include_dirs, config):
    """Updates include_dirs to expand VS specific paths, and adds the system
    include dirs used for platform SDK and similar.

    Returns a new list; |include_dirs| is not modified in place.
    """
    config = self._TargetConfig(config)
    includes = include_dirs + self.msvs_system_include_dirs[config]
    includes.extend(self._Setting(
        ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
    return [self.ConvertVSMacros(p, config=config) for p in includes]
  def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
    """Updates midl_include_dirs to expand VS specific paths, and adds the
    system include dirs used for platform SDK and similar.

    Mirrors AdjustIncludeDirs but reads the VCMIDLTool settings instead.
    """
    config = self._TargetConfig(config)
    includes = midl_include_dirs + self.msvs_system_include_dirs[config]
    includes.extend(self._Setting(
        ('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
    return [self.ConvertVSMacros(p, config=config) for p in includes]
  def GetComputedDefines(self, config):
    """Returns the set of defines that are injected to the defines list based
    on other VS settings."""
    config = self._TargetConfig(config)
    defines = []
    # CharacterSet '1' selects Unicode, '2' selects MBCS in the VS model.
    if self._ConfigAttrib(['CharacterSet'], config) == '1':
      defines.extend(('_UNICODE', 'UNICODE'))
    if self._ConfigAttrib(['CharacterSet'], config) == '2':
      defines.append('_MBCS')
    defines.extend(self._Setting(
        ('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
    return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
  def GetCflags(self, config):
    """Returns the flags that need to be added to .c and .cc compilations.

    Translates each relevant VCCLCompilerTool setting into its cl.exe flag;
    flag order mirrors the msvs generator so behavior matches across
    generators."""
    config = self._TargetConfig(config)
    cflags = []
    cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
    # cl(name, ...) reads a VCCLCompilerTool setting and appends the mapped
    # flag(s) to cflags.
    cl = self._GetWrapper(self, self.msvs_settings[config],
                          'VCCLCompilerTool', append=cflags)
    cl('Optimization',
       map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
    cl('InlineFunctionExpansion', prefix='/Ob')
    cl('DisableSpecificWarnings', prefix='/wd')
    cl('StringPooling', map={'true': '/GF'})
    cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
    cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
    cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
    cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
    cl('FloatingPointModel',
       map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
       default='0')
    cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
    cl('WholeProgramOptimization', map={'true': '/GL'})
    cl('WarningLevel', prefix='/W')
    cl('WarnAsError', map={'true': '/WX'})
    cl('CallingConvention',
       map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
    cl('DebugInformationFormat',
       map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
    cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
    cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
    cl('MinimalRebuild', map={'true': '/Gm'})
    cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
    cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
    cl('RuntimeLibrary',
       map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
    cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
    cl('DefaultCharIsUnsigned', map={'true': '/J'})
    cl('TreatWChar_tAsBuiltInType',
       map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
    cl('EnablePREfast', map={'true': '/analyze'})
    cl('AdditionalOptions', prefix='')
    cl('EnableEnhancedInstructionSet',
       map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
       prefix='/arch:')
    cflags.extend(['/FI' + f for f in self._Setting(
        ('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
    if self.vs_version.short_name in ('2013', '2013e', '2015'):
      # New flag required in 2013 to maintain previous PDB behavior.
      cflags.append('/FS')
    # ninja handles parallelism by itself, don't have the compiler do it too.
    # NOTE(review): filter() returns a list under Python 2 (this module's
    # target); under Python 3 it would return an iterator — confirm before
    # porting.
    cflags = filter(lambda x: not x.startswith('/MP'), cflags)
    return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
  def GetLdflags(self, config, gyp_to_build_path, expand_special,
                 manifest_base_name, output_name, is_executable, build_dir):
    """Returns the flags that need to be added to link commands, and the
    manifest files.

    Returns a 3-tuple (ldflags, intermediate_manifest, manifest_files); the
    manifest parts are produced by _GetLdManifestFlags.  Flag order mirrors
    the msvs generator so link behavior matches across generators."""
    config = self._TargetConfig(config)
    ldflags = []
    # ld(name, ...) reads a VCLinkerTool setting and appends the mapped
    # flag(s) to ldflags.
    ld = self._GetWrapper(self, self.msvs_settings[config],
                          'VCLinkerTool', append=ldflags)
    self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
    ld('GenerateDebugInformation', map={'true': '/DEBUG'})
    ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
       prefix='/MACHINE:')
    ldflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLinkerTool', config, gyp_to_build_path))
    ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
    ld('TreatLinkerWarningAsErrors', prefix='/WX',
       map={'true': '', 'false': ':NO'})
    out = self.GetOutputName(config, expand_special)
    if out:
      ldflags.append('/OUT:' + out)
    pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
    if pdb:
      ldflags.append('/PDB:' + pdb)
    pgd = self.GetPGDName(config, expand_special)
    if pgd:
      ldflags.append('/PGD:' + pgd)
    map_file = self.GetMapFileName(config, expand_special)
    ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
                               else '/MAP'})
    ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
    ld('AdditionalOptions', prefix='')

    # /SUBSYSTEM may carry a ",major.minor" minimum OS version suffix.
    minimum_required_version = self._Setting(
        ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
    if minimum_required_version:
      minimum_required_version = ',' + minimum_required_version
    ld('SubSystem',
       map={'1': 'CONSOLE%s' % minimum_required_version,
            '2': 'WINDOWS%s' % minimum_required_version},
       prefix='/SUBSYSTEM:')

    stack_reserve_size = self._Setting(
        ('VCLinkerTool', 'StackReserveSize'), config, default='')
    if stack_reserve_size:
      stack_commit_size = self._Setting(
          ('VCLinkerTool', 'StackCommitSize'), config, default='')
      if stack_commit_size:
        stack_commit_size = ',' + stack_commit_size
      ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))

    ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
    ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
    ld('BaseAddress', prefix='/BASE:')
    ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
    ld('RandomizedBaseAddress',
       map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
    ld('DataExecutionPrevention',
       map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
    ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
    ld('ForceSymbolReferences', prefix='/INCLUDE:')
    ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
    ld('LinkTimeCodeGeneration',
       map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
            '4': ':PGUPDATE'},
       prefix='/LTCG')
    ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
    ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
    ld('EntryPointSymbol', prefix='/ENTRY:')
    ld('Profile', map={'true': '/PROFILE'})
    ld('LargeAddressAware',
       map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
    # TODO(scottmg): This should sort of be somewhere else (not really a flag).
    ld('AdditionalDependencies', prefix='')

    # /SAFESEH is only meaningful for x86 links, so it defaults on there only.
    if self.GetArch(config) == 'x86':
      safeseh_default = 'true'
    else:
      safeseh_default = None
    ld('ImageHasSafeExceptionHandlers',
       map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
       default=safeseh_default)

    # If the base address is not specifically controlled, DYNAMICBASE should
    # be on by default.
    # NOTE(review): these filter() calls rely on Python-2 semantics (list
    # result); under Python 3 the truthiness tests would misbehave.
    base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
                        ldflags)
    if not base_flags:
      ldflags.append('/DYNAMICBASE')

    # If the NXCOMPAT flag has not been specified, default to on. Despite the
    # documentation that says this only defaults to on when the subsystem is
    # Vista or greater (which applies to the linker), the IDE defaults it on
    # unless it's explicitly off.
    if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
      ldflags.append('/NXCOMPAT')

    have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
    manifest_flags, intermediate_manifest, manifest_files = \
        self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
                                 is_executable and not have_def_file, build_dir)
    ldflags.extend(manifest_flags)
    return ldflags, intermediate_manifest, manifest_files
  def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
                          allow_isolation, build_dir):
    """Returns a 3-tuple:
    - the set of flags that need to be added to the link to generate
      a default manifest
    - the intermediate manifest that the linker will generate that should be
      used to assert it doesn't add anything to the merged one.
    - the list of all the manifest files to be merged by the manifest tool and
      included into the link."""
    generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
                                      config,
                                      default='true')
    if generate_manifest != 'true':
      # This means not only that the linker should not generate the intermediate
      # manifest but also that the manifest tool should do nothing even when
      # additional manifests are specified.
      return ['/MANIFEST:NO'], [], []

    output_name = name + '.intermediate.manifest'
    flags = [
      '/MANIFEST',
      '/ManifestFile:' + output_name,
    ]

    # Instead of using the MANIFESTUAC flags, we generate a .manifest to
    # include into the list of manifests. This allows us to avoid the need to
    # do two passes during linking. The /MANIFEST flag and /ManifestFile are
    # still used, and the intermediate manifest is used to assert that the
    # final manifest we get from merging all the additional manifest files
    # (plus the one we generate here) isn't modified by merging the
    # intermediate into it.

    # Always NO, because we generate a manifest file that has what we want.
    flags.append('/MANIFESTUAC:NO')

    config = self._TargetConfig(config)
    enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
                               default='true')
    manifest_files = []
    generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
    if enable_uac == 'true':
      execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
                                      config, default='0')
      # Maps the VS UACExecutionLevel setting onto the manifest level name.
      execution_level_map = {
        '0': 'asInvoker',
        '1': 'highestAvailable',
        '2': 'requireAdministrator'
      }

      ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
                                default='false')

      inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
  <security>
    <requestedPrivileges>
      <requestedExecutionLevel level='%s' uiAccess='%s' />
    </requestedPrivileges>
  </security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
    else:
      inner = ''

    generated_manifest_contents = generated_manifest_outer % inner
    generated_name = name + '.generated.manifest'
    # Need to join with the build_dir here as we're writing it during
    # generation time, but we return the un-joined version because the build
    # will occur in that directory. We only write the file if the contents
    # have changed so that simply regenerating the project files doesn't
    # cause a relink.
    build_dir_generated_name = os.path.join(build_dir, generated_name)
    gyp.common.EnsureDirExists(build_dir_generated_name)
    f = gyp.common.WriteOnDiff(build_dir_generated_name)
    f.write(generated_manifest_contents)
    f.close()
    manifest_files = [generated_name]

    if allow_isolation:
      flags.append('/ALLOWISOLATION')

    manifest_files += self._GetAdditionalManifestFiles(config,
                                                       gyp_to_build_path)
    return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
  def BuildCygwinBashCommandLine(self, args, path_to_base):
    """Build a command line that runs args via cygwin bash. We assume that all
    incoming paths are in Windows normpath'd form, so they need to be
    converted to posix style for the part of the command line that's passed to
    bash. We also have to do some Visual Studio macro emulation here because
    various rules use magic VS names for things. Also note that rules that
    contain ninja variables cannot be fixed here (for example ${source}), so
    the outer generator needs to make sure that the paths that are written out
    are in posix style, if the command line will be used here."""
    cygwin_dir = os.path.normpath(
        os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
    cd = ('cd %s' % path_to_base).replace('\\', '/')
    # Posix-ify path separators and escape embedded double quotes.
    args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
    # Single-quote each argument for bash, escaping embedded single quotes.
    args = ["'%s'" % a.replace("'", "'\\''") for a in args]
    bash_cmd = ' '.join(args)
    cmd = (
        'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
        'bash -c "%s ; %s"' % (cd, bash_cmd))
    return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
  def GetIdlBuildData(self, source, config):
    """Determine the implicit outputs for an idl file. Returns output
    directory, outputs, and variables and flags that are required."""
    config = self._TargetConfig(config)
    midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
    def midl(name, default=None):
      # Read a VCMIDLTool setting and expand any VS macros in it.
      return self.ConvertVSMacros(midl_get(name, default=default),
                                  config=config)
    tlb = midl('TypeLibraryName', default='${root}.tlb')
    header = midl('HeaderFileName', default='${root}.h')
    dlldata = midl('DLLDataFileName', default='dlldata.c')
    iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
    proxy = midl('ProxyFileName', default='${root}_p.c')
    # Note that .tlb is not included in the outputs as it is not always
    # generated depending on the content of the input idl file.
    outdir = midl('OutputDirectory', default='')
    output = [header, dlldata, iid, proxy]
    variables = [('tlb', tlb),
                 ('h', header),
                 ('dlldata', dlldata),
                 ('iid', iid),
                 ('proxy', proxy)]
    # TODO(scottmg): Are there configuration settings to set these flags?
    target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
    flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
    return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
  """Helper to generate dependencies and build rules to handle generation of
  precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
  """
  def __init__(
      self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
    # settings: the MsvsSettings for the target; config: configuration name.
    self.settings = settings
    self.config = config
    pch_source = self.settings.msvs_precompiled_source[self.config]
    self.pch_source = gyp_to_build_path(pch_source)
    filename, _ = os.path.splitext(pch_source)
    # Lower-cased so the obj path matches Windows' case-insensitive outputs.
    self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()

  def _PchHeader(self):
    """Get the header that will appear in an #include line for all source
    files."""
    return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]

  def GetObjDependencies(self, sources, objs, arch):
    """Given a list of sources files and the corresponding object files,
    returns a list of the pch files that should be depended upon. The
    additional wrapping in the return value is for interface compatibility
    with make.py on Mac, and xcode_emulation.py."""
    assert arch is None
    if not self._PchHeader():
      return []
    pch_ext = os.path.splitext(self.pch_source)[1]
    for source in sources:
      # Only sources of the PCH's language depend on the PCH object.
      if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
        return [(None, None, self.output_obj)]
    return []

  def GetPchBuildCommands(self, arch):
    """Not used on Windows as there are no additional build steps required
    (instead, existing steps are modified in GetFlagsModifications below)."""
    return []

  def GetFlagsModifications(self, input, output, implicit, command,
                            cflags_c, cflags_cc, expand_special):
    """Get the modified cflags and implicit dependencies that should be used
    for the pch compilation step."""
    if input == self.pch_source:
      # /Yc makes this compilation create the pch itself.
      pch_output = ['/Yc' + self._PchHeader()]
      if command == 'cxx':
        # NOTE(review): map() here must yield a list (Python-2 semantics).
        return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
                self.output_obj, [])
      elif command == 'cc':
        return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
                self.output_obj, [])
    return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
  """Selects (once) and returns the Visual Studio version to target, honoring
  the 'msvs_version' generator flag ('auto' when unset).  The result is
  memoized in the module-level vs_version."""
  global vs_version
  if not vs_version:
    requested = generator_flags.get('msvs_version', 'auto')
    vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
        requested, allow_fallback=False)
  return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
  """Returns the environment-setup script invocation for the selected VS
  version.  NOTE(review): *arch* is accepted but not forwarded to
  SetupScript() — confirm whether per-arch setup was intended here."""
  return GetVSVersion(generator_flags).SetupScript()
def ExpandMacros(string, expansions):
  """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
  for the canonical way to retrieve a suitable dict.

  Asserts that no replacement value itself contains a macro, which would
  make the result order-dependent."""
  if '$' in string:
    # .items() behaves identically to the Python-2-only .iteritems() here
    # while keeping the helper portable to Python 3.
    for old, new in expansions.items():
      assert '$(' not in new, new
      string = string.replace(old, new)
  return string
def _ExtractImportantEnvironment(output_of_set):
  """Extracts environment variables required for the toolchain to run from
  a textual dump output by the cmd.exe 'set' command.

  Returns a dict mapping upper-cased variable names to their values; raises
  when SYSTEMROOT/TEMP/TMP are missing from the dump."""
  # Entries are regex prefixes matched against the lower-cased 'NAME=value'
  # line, so e.g. 'path' does not accidentally match 'pathext='.
  envvars_to_save = (
      'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
      'include',
      'lib',
      'libpath',
      'path',
      'pathext',
      'systemroot',
      'temp',
      'tmp',
      )
  env = {}
  for line in output_of_set.splitlines():
    for envvar in envvars_to_save:
      if re.match(envvar + '=', line.lower()):
        var, setting = line.split('=', 1)
        if envvar == 'path':
          # Our own rules (for running gyp-win-tool) and other actions in
          # Chromium rely on python being in the path. Add the path to this
          # python here so that if it's not in the path when ninja is run
          # later, python will still be found.
          setting = os.path.dirname(sys.executable) + os.pathsep + setting
        env[var.upper()] = setting
        break
  for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
    if required not in env:
      raise Exception('Environment variable "%s" '
                      'required to be set to valid path' % required)
  return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
                             system_includes, open_out):
  """It's not sufficient to have the absolute path to the compiler, linker,
  etc. on Windows, as those tools rely on .dlls being in the PATH. We also
  need to support both x86 and x64 compilers within the same build (to support
  msvs_target_platform hackery). Different architectures require a different
  compiler binary, and different supporting environment variables (INCLUDE,
  LIB, LIBPATH). So, we extract the environment here, wrap all invocations
  of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
  sets up the environment, and then we do not prefix the compiler with
  an absolute path, instead preferring something like "cl.exe" in the rule
  which will then run whichever the environment setup has put in the path.
  When the following procedure to generate environment files does not
  meet your requirement (e.g. for custom toolchains), you can pass
  "-G ninja_use_custom_environment_files" to the gyp to suppress file
  generation and use custom environment files prepared by yourself.

  Returns a dict mapping arch ('x86'/'x64') to the cl.exe path (or the bare
  'cl.exe' in custom-environment-files mode)."""
  archs = ('x86', 'x64')
  if generator_flags.get('ninja_use_custom_environment_files', 0):
    cl_paths = {}
    for arch in archs:
      cl_paths[arch] = 'cl.exe'
    return cl_paths
  vs = GetVSVersion(generator_flags)
  cl_paths = {}
  for arch in archs:
    # Extract environment variables for subprocesses.
    args = vs.SetupScript(arch)
    args.extend(('&&', 'set'))
    # NOTE(review): the subprocess return code is not checked; a failing
    # setup script surfaces later as missing env vars.
    popen = subprocess.Popen(
        args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    variables, _ = popen.communicate()
    env = _ExtractImportantEnvironment(variables)

    # Inject system includes from gyp files into INCLUDE.
    if system_includes:
      system_includes = system_includes | OrderedSet(
                                              env.get('INCLUDE', '').split(';'))
      env['INCLUDE'] = ';'.join(system_includes)

    env_block = _FormatAsEnvironmentBlock(env)
    f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
    f.write(env_block)
    f.close()

    # Find cl.exe location for this architecture.
    args = vs.SetupScript(arch)
    args.extend(('&&',
      'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
    popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
    output, _ = popen.communicate()
    cl_paths[arch] = _ExtractCLPath(output)
  return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
  """Emulate behavior of msvs_error_on_missing_sources present in the msvs
  generator: Check that all regular source files, i.e. not created at run
  time, exist on disk. Missing files cause needless recompilation when
  building via VS, and we want this check to match for people/bots that build
  using ninja, so they're not surprised when the VS build fails.

  Raises an Exception listing the missing files when the
  msvs_error_on_missing_sources generator flag is set; otherwise a no-op."""
  if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
    # Sources containing '$' are created at build time; skip them.
    no_specials = [s for s in sources if '$' not in s]
    relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
    # List comprehensions instead of py2 filter(): identical on Python 2,
    # and the `if missing:` truthiness test stays correct on Python 3
    # (a filter object is always truthy).
    missing = [path for path in relative if not os.path.exists(path)]
    if missing:
      # They'll look like out\Release\..\..\stuff\things.cc, so normalize the
      # path for a slightly less crazy looking output.
      cleaned_up = [os.path.normpath(x) for x in missing]
      raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
  """Populate MSVS_VERSION and MSVS_OS_BITS in default_variables."""
  generator_flags = params.get('generator_flags', {})

  # Set a variable so conditions can be based on msvs_version.
  msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
  default_variables['MSVS_VERSION'] = msvs_version.ShortName()

  # To determine processor word size on Windows, in addition to checking
  # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running thru WOW64).
  if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
      '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
    default_variables['MSVS_OS_BITS'] = 64
  else:
    default_variables['MSVS_OS_BITS'] = 32
|
popazerty/obh-sh4 | refs/heads/master | lib/python/Screens/ScreenSaver.py | 40 | from Screens.Screen import Screen
from Components.MovieList import AUDIO_EXTENSIONS
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Pixmap import Pixmap
from enigma import ePoint, eTimer, iPlayableService
import os, random
class Screensaver(Screen):
	"""Full-screen screensaver that periodically moves a picture to a random
	position (burn-in avoidance).  Hides itself automatically when a
	non-audio service starts playing."""
	def __init__(self, session):
		self.skin = """
	<screen name="Screensaver" position="fill" flags="wfNoBorder">
		<eLabel position="fill" backgroundColor="#54000000" zPosition="0"/>
		<widget name="picture" pixmap="skin_default/screensaverpicture.png" position="0,0" size="119,139" alphatest="blend" transparent="1" zPosition="1"/>
	</screen>"""
		Screen.__init__(self, session)
		# Timer that relocates the picture while the screensaver is shown.
		self.moveLogoTimer = eTimer()
		self.moveLogoTimer.callback.append(self.doMovePicture)
		self.onShow.append(self.__onShow)
		self.onHide.append(self.__onHide)
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evStart: self.serviceStarted
			})
		self["picture"] = Pixmap()
		self.onLayoutFinish.append(self.layoutFinished)

	def layoutFinished(self):
		"""Compute the movement range from screen/picture sizes, then place."""
		picturesize = self["picture"].getSize()
		self.maxx = self.instance.size().width() - picturesize[0]
		self.maxy = self.instance.size().height() - picturesize[1]
		self.doMovePicture()

	def __onHide(self):
		# Stop moving while hidden.
		self.moveLogoTimer.stop()

	def __onShow(self):
		# Re-arm: move the picture 5 seconds after becoming visible.
		self.moveLogoTimer.startLongTimer(5)

	def serviceStarted(self):
		"""Hide the screensaver when a non-audio service starts playing.

		NOTE(review): assumes field 10 of the ':'-split service reference is
		the path whose extension identifies audio playback — confirm."""
		if self.shown:
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			if ref:
				ref = ref.toString().split(":")
				if not os.path.splitext(ref[10])[1].lower() in AUDIO_EXTENSIONS:
					self.hide()

	def doMovePicture(self):
		"""Move the picture to a random position and re-arm the 5s timer."""
		self.posx = random.randint(1,self.maxx)
		self.posy = random.randint(1,self.maxy)
		self["picture"].instance.move(ePoint(self.posx, self.posy))
		self.moveLogoTimer.startLongTimer(5)
|
ScottBuchanan/eden | refs/heads/master | modules/tests/volunteer/create_volunteer_skill.py | 23 | """ Sahana Eden Automated Test - HRM001 Create Volunteer Skill
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from tests.web2unittest import SeleniumUnitTest
class CreateVolunteerSkill(SeleniumUnitTest):
    def test_hrm001_create_volunteer_skill(self):
        """
        Create a volunteer skill record.

        @case: HRM001
        @description: Create Volunteer Skill
        @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
        @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
        """
        print("\n")

        settings = current.deployment_settings
        self.login(account="admin", nexturl="vol/skill/create")

        # Field/value pairs submitted to the skill create form.
        fields = [
            ("name", "Technical"),
            ("comments", "Comment/Description of the skill goes here."),
        ]
        # Skill types are an optional deployment feature.
        if settings.get_hrm_skill_types():
            fields.append(("skill_type_id", "Computer"))

        self.create("hrm_skill", fields)
|
eunchong/build | refs/heads/master | masters/master.tryserver.libyuv/master_site_config.py | 1 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ActiveMaster definition."""
from config_bootstrap import Master
class LibyuvTryServer(Master.Master4):
    """Buildbot master definition for the Libyuv try server."""
    project_name = 'Libyuv Try Server'
    # Network ports for the master web UI, slave connections, the alternate
    # master port and try-job submissions.
    master_port = 8006
    slave_port = 8106
    master_port_alt = 8206
    try_job_port = 8306
    # Addresses used for build-result notification mail.
    from_address = 'libyuv-cb-watchlist@google.com'
    reply_to = 'chrome-troopers+tryserver@google.com'
    code_review_site = 'https://codereview.chromium.org'
    # SVN mirror polled for incoming try jobs, and the public status URL.
    svn_url = 'svn://svn-mirror.golo.chromium.org/chrome-try/try-libyuv'
    buildbot_url = 'http://build.chromium.org/p/tryserver.libyuv/'
    # Buildbucket integration: service-account credentials and bucket name.
    service_account_file = 'service-account-libyuv.json'
    buildbucket_bucket = 'master.tryserver.libyuv'
|
megraf/asuswrt-merlin | refs/heads/master | release/src/router/samba-3.5.8/source3/stf/info3cache.py | 98 | #!/usr/bin/python
#
# Upon a winbindd authentication, test that an info3 record is cached in
# netsamlogon_cache.tdb and cache records are removed from winbindd_cache.tdb
#
import comfychair, stf
from samba import tdb, winbind
#
# We want to implement the following test on a win2k native mode domain.
#
# 1. trash netsamlogon_cache.tdb
# 2. wbinfo -r DOMAIN\Administrator [FAIL]
# 3. wbinfo --auth-crap DOMAIN\Administrator%password [PASS]
# 4. wbinfo -r DOMAIN\Administrator [PASS]
#
# Also for step 3 we want to try 'wbinfo --auth-smbd' and
# 'wbinfo --auth-plaintext'
#
#
# TODO: To implement this test we need to be able to
#
# - pass username%password combination for an invidivual winbindd request
# (so we can get the administrator SID so we can clear the info3 cache)
#
# - start/restart winbindd (to trash the winbind cache)
#
# - from samba import dynconfig (to find location of info3 cache)
#
# - be able to modify the winbindd cache (to set/reset individual winbind
# cache entries)
#
# - have --auth-crap present in HEAD
#
class WinbindAuthCrap(comfychair.TestCase):
    """Placeholder: authenticate via 'wbinfo --auth-crap' (step 3 above)."""
    def runtest(self):
        # Depends on --auth-crap being present in HEAD; see the TODO list.
        raise comfychair.NotRunError, "not implemented"
class WinbindAuthSmbd(comfychair.TestCase):
    """Placeholder: authenticate via 'wbinfo --auth-smbd'."""
    def runtest(self):
        # Grr - winbindd in HEAD doesn't contain the auth_smbd function
        raise comfychair.NotRunError, "no auth_smbd in HEAD"
class WinbindAuthPlaintext(comfychair.TestCase):
    """Placeholder: authenticate via 'wbinfo --auth-plaintext'."""
    def runtest(self):
        # Not yet implemented; see the TODO list at the top of the module.
        raise comfychair.NotRunError, "not implemented"
# Test cases exported to (and run by) the comfychair harness, in order.
tests = [WinbindAuthCrap, WinbindAuthSmbd, WinbindAuthPlaintext]

if __name__ == "__main__":
    comfychair.main(tests)
|
garoose/eecs494.p2 | refs/heads/master | jni/external/freetype2/src/tools/chktrcmp.py | 381 | #!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009
#
# This code is explicitly into the public domain.
import sys
import os
import re
# Accumulators filled in below:
#   USED_COMPONENT  maps a trace component name -> list of "file:line" uses.
#   KNOWN_COMPONENT maps a trace component name -> "file:line" of its FT_TRACE_DEF.
SRC_FILE_LIST = []
USED_COMPONENT = {}
KNOWN_COMPONENT = {}

# Defaults; both can be overridden on the command line below.
SRC_FILE_DIRS = [ "src" ]
TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ]


# --------------------------------------------------------------
# Parse command line options
#

for i in range( 1, len( sys.argv ) ):
    if sys.argv[i].startswith( "--help" ):
        print "Usage: %s [option]" % sys.argv[0]
        print "Search used-but-defined and defined-but-not-used trace_XXX macros"
        print ""
        print "  --help:"
        print "    Show this help"
        print ""
        print "  --src-dirs=dir1:dir2:..."
        print "    Specify the directories of C source files to be checked"
        print "    Default is %s" % ":".join( SRC_FILE_DIRS )
        print ""
        print "  --def-files=file1:file2:..."
        print "    Specify the header files including FT_TRACE_DEF()"
        print "    Default is %s" % ":".join( TRACE_DEF_FILES )
        print ""
        exit(0)
    if sys.argv[i].startswith( "--src-dirs=" ):
        SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
    elif sys.argv[i].startswith( "--def-files=" ):
        TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )


# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#

# Matches *.c / *.h filenames, and '#define FT_COMPONENT trace_XXX' lines.
c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )

for d in SRC_FILE_DIRS:
    for ( p, dlst, flst ) in os.walk( d ):
        for f in flst:
            if c_pathname_pat.match( f ) != None:
                src_pathname = os.path.join( p, f )
                line_num = 0
                for src_line in open( src_pathname, 'r' ):
                    line_num = line_num + 1
                    src_line = src_line.strip()
                    if trace_use_pat.match( src_line ) != None:
                        # Stripping the matched prefix leaves the component name.
                        component_name = trace_use_pat.sub( '', src_line )
                        if component_name in USED_COMPONENT:
                            USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
                        else:
                            USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]


# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#

# Matches the text before/after the component name in 'FT_TRACE_DEF( name )'.
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )

for f in TRACE_DEF_FILES:
    line_num = 0
    for hdr_line in open( f, 'r' ):
        line_num = line_num + 1
        hdr_line = hdr_line.strip()
        if trace_def_pat_opn.match( hdr_line ) != None:
            component_name = trace_def_pat_opn.sub( '', hdr_line )
            component_name = trace_def_pat_cls.sub( '', component_name )
            if component_name in KNOWN_COMPONENT:
                print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
                    ( component_name, KNOWN_COMPONENT[component_name], line_num )
            else:
                KNOWN_COMPONENT[component_name] = "%s:%d" % \
                    ( os.path.basename( f ), line_num )


# --------------------------------------------------------------
# Compare the used and defined trace macros.
#

print "# Trace component used in the implementations but not defined in fttrace.h."
cmpnt = USED_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
    if c not in KNOWN_COMPONENT:
        print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )

print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
    if c not in USED_COMPONENT:
        # 'any' is a catch-all component and is expected to be unused.
        if c != "any":
            print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
|
ngeiswei/ardour | refs/heads/master | tools/cstyle.py | 12 | #!/usr/bin/python -tt
#
# Copyright (C) 2005-2012 Erik de Castro Lopo <erikd@mega-nerd.com>
#
# Released under the 2 clause BSD license.
"""
This program checks C code for compliance to coding standards used in
libsndfile and other projects I run.
"""
import re
import sys
class Preprocessor:
    """
    Preprocess lines of C code to make it easier for the CStyleChecker class to
    test for correctness. Preprocessing works on a single line at a time but
    maintains state between consecutive lines so it can preprocessess multi-line
    comments.

    Preprocessing involves:
      - Strip C++ style comments from a line.
      - Strip C comments from a series of lines. When a C comment starts and
        ends on the same line it will be replaced with 'comment'.
      - Replace arbitrary C strings with the zero length string.
      - Replace '#define f(x)' with '#define f (c)' (The C #define requires that
        there be no space between defined macro name and the open paren of the
        argument list).

    Used by the CStyleChecker class.
    """
    def __init__ (self):
        # comment_nest tracks whether we are currently inside a /* ... */
        # comment that spans lines (0 = outside, >0 = inside).
        self.comment_nest = 0
        self.leading_space_re = re.compile ('^(\t+| )')
        self.trailing_space_re = re.compile ('(\t+| )$')
        self.define_hack_re = re.compile ("(#\s*define\s+[a-zA-Z0-9_]+)\(")

    def comment_nesting (self):
        """
        Return the currect comment nesting. At the start and end of the file,
        this value should be zero. Inside C comments it should be 1 or
        (possibly) more.
        """
        return self.comment_nest

    def __call__ (self, line):
        """
        Strip the provided line of C and C++ comments. Stripping of multi-line
        C comments works as expected.
        """
        # Insert a space before the '(' of '#define f(x)' so the checker's
        # "missing space before open parenthesis" rule does not fire on it.
        line = self.define_hack_re.sub (r'\1 (', line)

        line = self.process_strings (line)

        # Strip C++ style comments.
        if self.comment_nest == 0:
            line = re.sub ("( |\t*)//.*", '', line)

        # Strip C style comments.
        open_comment = line.find ('/*')
        close_comment = line.find ('*/')

        if self.comment_nest > 0 and close_comment < 0:
            # Inside a comment block that does not close on this line.
            return ""

        if open_comment >= 0 and close_comment < 0:
            # A comment begins on this line but doesn't close on this line.
            self.comment_nest += 1
            return self.trailing_space_re.sub ('', line [:open_comment])

        if open_comment < 0 and close_comment >= 0:
            # Currently open comment ends on this line.
            self.comment_nest -= 1
            return self.trailing_space_re.sub ('', line [close_comment + 2:])

        if open_comment >= 0 and close_comment > 0 and self.comment_nest == 0:
            # Comment begins and ends on this line. Replace it with 'comment'
            # so we don't need to check whitespace before and after the comment
            # we're removing.
            newline = line [:open_comment] + "comment" + line [close_comment + 2:]
            # Recurse in case more comments remain on the rewritten line.
            return self.__call__ (newline)

        return line

    def process_strings (self, line):
        """
        Given a line of C code, return a string where all literal C strings have
        been replaced with the empty string literal "".
        """
        # Find the first unescaped double quote, then the matching closing
        # quote, and recurse on the remainder of the line.
        for k in range (0, len (line)):
            if line [k] == '"':
                start = k
                for k in range (start + 1, len (line)):
                    if line [k] == '"' and line [k - 1] != '\\':
                        return line [:start + 1] + '"' + self.process_strings (line [k + 1:])
        return line
class CStyleChecker:
    """
    A class for checking the whitespace and layout of a C code.
    """
    def __init__ (self, debug):
        # When debug is True, the offending source line is printed after each
        # error/warning message.
        self.debug = debug
        self.filename = None
        self.error_count = 0
        self.line_num = 1
        self.orig_line = ''
        self.trailing_newline_re = re.compile ('[\r\n]+$')
        self.indent_re = re.compile ("^\s*")
        # NOTE(review): initialised to a string but later assigned the *length*
        # of the indent (an int) in line_checks — the first comparison against
        # it can therefore never be equal; confirm this is intentional.
        self.last_line_indent = ""
        self.last_line_indent_curly = False
        # (regex, message) pairs; error_checks bump the exit status,
        # warning_checks are informational only (but see warning()'s count).
        self.error_checks = \
            [ ( re.compile ("^ "), "leading space as indentation instead of tab - use tabs to indent, spaces to align" )
            ]
        self.warning_checks = \
            [ ( re.compile ("{[^\s]"), "missing space after open brace" )
            , ( re.compile ("[^\s]}"), "missing space before close brace" )
            , ( re.compile ("^[ \t]+$"), "empty line contains whitespace" )
            , ( re.compile ("[^\s][ \t]+$"), "contains trailing whitespace" )
            , ( re.compile (",[^\s\n]"), "missing space after comma" )
            , ( re.compile (";[a-zA-Z0-9]"), "missing space after semi-colon" )
            , ( re.compile ("=[^\s\"'=]"), "missing space after assignment" )
            # Open and close parenthesis.
            , ( re.compile ("[^_\s\(\[\*&']\("), "missing space before open parenthesis" )
            , ( re.compile ("\)(-[^>]|[^;,'\s\n\)\]-])"), "missing space after close parenthesis" )
            , ( re.compile ("\( [^;]"), "space after open parenthesis" )
            , ( re.compile ("[^;] \)"), "space before close parenthesis" )
            # Open and close square brace.
            , ( re.compile ("\[ "), "space after open square brace" )
            , ( re.compile (" \]"), "space before close square brace" )
            # Space around operators.
            , ( re.compile ("[^\s][\*/%+-][=][^\s]"), "missing space around opassign" )
            , ( re.compile ("[^\s][<>!=^/][=]{1,2}[^\s]"), "missing space around comparison" )
            # Parens around single argument to return.
            , ( re.compile ("\s+return\s+\([a-zA-Z0-9_]+\)\s+;"), "parens around return value" )
            ]

    def get_error_count (self):
        """
        Return the current error count for this CStyleChecker object.
        """
        return self.error_count

    def check_files (self, files):
        """
        Run the style checker on all the specified files.
        """
        for filename in files:
            self.check_file (filename)

    def check_file (self, filename):
        """
        Run the style checker on the specified file.
        """
        self.filename = filename
        try:
            cfile = open (filename, "r")
        except IOError as e:
            # Unreadable files are silently skipped so a batch run continues.
            return

        self.line_num = 1

        preprocess = Preprocessor ()
        while 1:
            line = cfile.readline ()
            if not line:
                break

            line = self.trailing_newline_re.sub ('', line)
            self.orig_line = line

            self.line_checks (preprocess (line))

            self.line_num += 1

        cfile.close ()
        self.filename = None

        # Check for errors finding comments.
        if preprocess.comment_nesting () != 0:
            print ("Weird, comments nested incorrectly.")
            sys.exit (1)

        return

    def line_checks (self, line):
        """
        Run the style checker on provided line of text, but within the context
        of how the line fits within the file.
        """
        indent = len (self.indent_re.search (line).group ())
        if re.search ("^\s+}", line):
            if not self.last_line_indent_curly and indent != self.last_line_indent:
                # Close-curly indent check is disabled on purpose; the call is
                # kept (commented) for reference.
                None # self.error ("bad indent on close curly brace")
            self.last_line_indent_curly = True
        else:
            self.last_line_indent_curly = False

        # Now all the stylistic warnings regex checks.
        for (check_re, msg) in self.warning_checks:
            if check_re.search (line):
                self.warning (msg)

        # Now all the stylistic error regex checks.
        for (check_re, msg) in self.error_checks:
            if check_re.search (line):
                self.error (msg)

        if re.search ("[a-zA-Z0-9_][<>!=^/&\|]{1,2}[a-zA-Z0-9_]", line):
            # ignore #include <foo.h> and C++ templates with indirection/pointer/reference operators
            if not re.search (".*#include.*[a-zA-Z0-9]/[a-zA-Z]", line) and not re.search ("[a-zA-Z0-9_]>[&\*]*\s", line):
                self.error ("missing space around operator")

        self.last_line_indent = indent
        return

    def error (self, msg):
        """
        Print an error message and increment the error count.
        """
        print ("%s (%d) : STYLE ERROR %s" % (self.filename, self.line_num, msg))
        if self.debug:
            print ("'" + self.orig_line + "'")
        self.error_count += 1

    def warning (self, msg):
        """
        Print a warning message and increment the error count.
        """
        print ("%s (%d) : STYLE WARNING %s" % (self.filename, self.line_num, msg))
        if self.debug:
            print ("'" + self.orig_line + "'")
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Entry point: parse the optional -d/--debug flag, run the checker over the
# remaining file arguments, and exit non-zero if any style errors were found.
#
# Bug fix: the original guard was `len (sys.argv) < 1`, which can never be
# true because sys.argv always contains at least the program name, so a
# no-argument invocation fell through and raised IndexError at sys.argv[1].
# Require at least one real argument instead, and print a usable usage line.
if len (sys.argv) < 2:
    print ("Usage : %s [-d | --debug] <C source file> ..." % sys.argv [0])
    sys.exit (1)

# Create a new CStyleChecker object
if sys.argv [1] == '-d' or sys.argv [1] == '--debug':
    cstyle = CStyleChecker (True)
    cstyle.check_files (sys.argv [2:])
else:
    cstyle = CStyleChecker (False)
    cstyle.check_files (sys.argv [1:])

if cstyle.get_error_count ():
    sys.exit (1)

sys.exit (0)
|
SHASHANKB/spark | refs/heads/master | python/pyspark/mllib/classification.py | 34 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import exp
import warnings
import numpy
from numpy import array
from pyspark import RDD, since
from pyspark.streaming import DStream
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py
from pyspark.mllib.linalg import DenseVector, SparseVector, _convert_to_vector
from pyspark.mllib.regression import (
LabeledPoint, LinearModel, _regression_train_wrapper,
StreamingLinearAlgorithm)
from pyspark.mllib.util import Saveable, Loader, inherit_doc
__all__ = ['LogisticRegressionModel', 'LogisticRegressionWithSGD', 'LogisticRegressionWithLBFGS',
'SVMModel', 'SVMWithSGD', 'NaiveBayesModel', 'NaiveBayes',
'StreamingLogisticRegressionWithSGD']
class LinearClassificationModel(LinearModel):
    """
    A private abstract class representing a multiclass classification
    model. The categories are represented by int values: 0, 1, 2, etc.
    """
    def __init__(self, weights, intercept):
        super(LinearClassificationModel, self).__init__(weights, intercept)
        # None means "no thresholding": predict() returns raw scores.
        # Concrete subclasses install their own default (e.g. 0.5 or 0.0).
        self._threshold = None

    @since('1.4.0')
    def setThreshold(self, value):
        """
        Sets the threshold that separates positive predictions from
        negative predictions. An example with prediction score greater
        than or equal to this threshold is identified as a positive,
        and negative otherwise. It is used for binary classification
        only.
        """
        self._threshold = value

    @property
    @since('1.4.0')
    def threshold(self):
        """
        Returns the threshold (if any) used for converting raw
        prediction scores into 0/1 predictions. It is used for
        binary classification only.
        """
        return self._threshold

    @since('1.4.0')
    def clearThreshold(self):
        """
        Clears the threshold so that `predict` will output raw
        prediction scores. It is used for binary classification only.
        """
        self._threshold = None

    @since('1.4.0')
    def predict(self, test):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        """
        # Abstract: concrete models (logistic regression, SVM) implement this.
        raise NotImplementedError
class LogisticRegressionModel(LinearClassificationModel):
    """
    Classification model trained using Multinomial/Binary Logistic
    Regression.

    :param weights:
      Weights computed for every feature.
    :param intercept:
      Intercept computed for this model. (Only used in Binary Logistic
      Regression. In Multinomial Logistic Regression, the intercepts will
      not be a single value, so the intercepts will be part of the
      weights.)
    :param numFeatures:
      The dimension of the features.
    :param numClasses:
      The number of possible outcomes for k classes classification problem
      in Multinomial Logistic Regression. By default, it is binary
      logistic regression so numClasses will be set to 2.

    >>> data = [
    ...     LabeledPoint(0.0, [0.0, 1.0]),
    ...     LabeledPoint(1.0, [1.0, 0.0]),
    ... ]
    >>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data), iterations=10)
    >>> lrm.predict([1.0, 0.0])
    1
    >>> lrm.predict([0.0, 1.0])
    0
    >>> lrm.predict(sc.parallelize([[1.0, 0.0], [0.0, 1.0]])).collect()
    [1, 0]
    >>> lrm.clearThreshold()
    >>> lrm.predict([0.0, 1.0])
    0.279...

    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
    ... ]
    >>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data), iterations=10)
    >>> lrm.predict(array([0.0, 1.0]))
    1
    >>> lrm.predict(array([1.0, 0.0]))
    0
    >>> lrm.predict(SparseVector(2, {1: 1.0}))
    1
    >>> lrm.predict(SparseVector(2, {0: 1.0}))
    0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = LogisticRegressionModel.load(sc, path)
    >>> sameModel.predict(array([0.0, 1.0]))
    1
    >>> sameModel.predict(SparseVector(2, {0: 1.0}))
    0
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> multi_class_data = [
    ...     LabeledPoint(0.0, [0.0, 1.0, 0.0]),
    ...     LabeledPoint(1.0, [1.0, 0.0, 0.0]),
    ...     LabeledPoint(2.0, [0.0, 0.0, 1.0])
    ... ]
    >>> data = sc.parallelize(multi_class_data)
    >>> mcm = LogisticRegressionWithLBFGS.train(data, iterations=10, numClasses=3)
    >>> mcm.predict([0.0, 0.5, 0.0])
    0
    >>> mcm.predict([0.8, 0.0, 0.0])
    1
    >>> mcm.predict([0.0, 0.0, 0.3])
    2

    .. versionadded:: 0.9.0
    """
    def __init__(self, weights, intercept, numFeatures, numClasses):
        super(LogisticRegressionModel, self).__init__(weights, intercept)
        self._numFeatures = int(numFeatures)
        self._numClasses = int(numClasses)
        self._threshold = 0.5
        if self._numClasses == 2:
            # Binary case: a single weight vector plus scalar intercept.
            self._dataWithBiasSize = None
            self._weightsMatrix = None
        else:
            # Multinomial case: weights hold (numClasses - 1) rows of
            # per-class coefficients (with the bias folded into each row).
            self._dataWithBiasSize = self._coeff.size // (self._numClasses - 1)
            self._weightsMatrix = self._coeff.toArray().reshape(self._numClasses - 1,
                                                                self._dataWithBiasSize)

    @property
    @since('1.4.0')
    def numFeatures(self):
        """
        Dimension of the features.
        """
        return self._numFeatures

    @property
    @since('1.4.0')
    def numClasses(self):
        """
        Number of possible outcomes for k classes classification problem
        in Multinomial Logistic Regression.
        """
        return self._numClasses

    @since('0.9.0')
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        """
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))

        x = _convert_to_vector(x)
        if self.numClasses == 2:
            margin = self.weights.dot(x) + self._intercept
            # Two algebraically equal sigmoid forms, chosen by the sign of
            # the margin to avoid overflow in exp().
            if margin > 0:
                prob = 1 / (1 + exp(-margin))
            else:
                exp_margin = exp(margin)
                prob = exp_margin / (1 + exp_margin)
            if self._threshold is None:
                return prob
            else:
                return 1 if prob > self._threshold else 0
        else:
            # Multinomial: class 0 is the pivot with margin 0; return the
            # class with the largest margin.
            best_class = 0
            max_margin = 0.0
            if x.size + 1 == self._dataWithBiasSize:
                # Model was trained with an intercept: last column is bias.
                for i in range(0, self._numClasses - 1):
                    margin = x.dot(self._weightsMatrix[i][0:x.size]) + \
                        self._weightsMatrix[i][x.size]
                    if margin > max_margin:
                        max_margin = margin
                        best_class = i + 1
            else:
                for i in range(0, self._numClasses - 1):
                    margin = x.dot(self._weightsMatrix[i])
                    if margin > max_margin:
                        max_margin = margin
                        best_class = i + 1
            return best_class

    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel(
            _py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        numFeatures = java_model.numFeatures()
        numClasses = java_model.numClasses()
        threshold = java_model.getThreshold().get()
        model = LogisticRegressionModel(weights, intercept, numFeatures, numClasses)
        model.setThreshold(threshold)
        return model
class LogisticRegressionWithSGD(object):
    """
    Train a binary logistic regression model using Stochastic Gradient
    Descent.

    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.classification.LogisticRegression or
        LogisticRegressionWithLBFGS.
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
              initialWeights=None, regParam=0.01, regType="l2", intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a logistic regression model on the given data.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        # Use an explicit DeprecationWarning category so callers can filter or
        # promote this with the standard warnings machinery; a bare
        # warnings.warn(msg) would be emitted as a plain UserWarning.
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.classification.LogisticRegression or "
            "LogisticRegressionWithLBFGS.", DeprecationWarning)

        def train(rdd, i):
            # Bridge to the JVM implementation; `i` is the initial weights
            # vector supplied by _regression_train_wrapper.
            return callMLlibFunc("trainLogisticRegressionModelWithSGD", rdd, int(iterations),
                                 float(step), float(miniBatchFraction), i, float(regParam), regType,
                                 bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class LogisticRegressionWithLBFGS(object):
    """
    Train a classification model for Multinomial/Binary Logistic Regression
    using Limited-memory BFGS.

    .. versionadded:: 1.2.0
    """
    @classmethod
    @since('1.2.0')
    def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2",
              intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2):
        """
        Train a logistic regression model on the given data.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.0)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param corrections:
          The number of corrections used in the LBFGS update.
          If a known updater is used for binary classification,
          it calls the ml implementation and this parameter will
          have no effect. (default: 10)
        :param tolerance:
          The convergence tolerance of iterations for L-BFGS.
          (default: 1e-6)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param numClasses:
          The number of classes (i.e., outcomes) a label can take in
          Multinomial Logistic Regression.
          (default: 2)

        >>> data = [
        ...     LabeledPoint(0.0, [0.0, 1.0]),
        ...     LabeledPoint(1.0, [1.0, 0.0]),
        ... ]
        >>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10)
        >>> lrm.predict([1.0, 0.0])
        1
        >>> lrm.predict([0.0, 1.0])
        0
        """
        def train(rdd, i):
            # Bridge to the JVM implementation; `i` is the initial weights
            # vector supplied by _regression_train_wrapper.
            return callMLlibFunc("trainLogisticRegressionModelWithLBFGS", rdd, int(iterations), i,
                                 float(regParam), regType, bool(intercept), int(corrections),
                                 float(tolerance), bool(validateData), int(numClasses))

        if initialWeights is None:
            if numClasses == 2:
                initialWeights = [0.0] * len(data.first().features)
            else:
                # Multinomial: one row of weights per non-pivot class, with an
                # extra bias slot per row when an intercept is requested.
                if intercept:
                    initialWeights = [0.0] * (len(data.first().features) + 1) * (numClasses - 1)
                else:
                    initialWeights = [0.0] * len(data.first().features) * (numClasses - 1)
        return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class SVMModel(LinearClassificationModel):
    """
    Model for Support Vector Machines (SVMs).

    :param weights:
      Weights computed for every feature.
    :param intercept:
      Intercept computed for this model.

    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(1.0, [2.0]),
    ...     LabeledPoint(1.0, [3.0])
    ... ]
    >>> svm = SVMWithSGD.train(sc.parallelize(data), iterations=10)
    >>> svm.predict([1.0])
    1
    >>> svm.predict(sc.parallelize([[1.0]])).collect()
    [1]
    >>> svm.clearThreshold()
    >>> svm.predict(array([1.0]))
    1.44...

    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
    ... ]
    >>> svm = SVMWithSGD.train(sc.parallelize(sparse_data), iterations=10)
    >>> svm.predict(SparseVector(2, {1: 1.0}))
    1
    >>> svm.predict(SparseVector(2, {0: -1.0}))
    0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> svm.save(sc, path)
    >>> sameModel = SVMModel.load(sc, path)
    >>> sameModel.predict(SparseVector(2, {1: 1.0}))
    1
    >>> sameModel.predict(SparseVector(2, {0: -1.0}))
    0
    >>> from shutil import rmtree
    >>> try:
    ...    rmtree(path)
    ... except:
    ...    pass

    .. versionadded:: 0.9.0
    """
    def __init__(self, weights, intercept):
        super(SVMModel, self).__init__(weights, intercept)
        # SVM decision boundary: margin > 0.0 means the positive class.
        self._threshold = 0.0

    @since('0.9.0')
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        """
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))

        x = _convert_to_vector(x)
        margin = self.weights.dot(x) + self.intercept
        # With a cleared threshold, return the raw margin instead of 0/1.
        if self._threshold is None:
            return margin
        else:
            return 1 if margin > self._threshold else 0

    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel(
            _py2java(sc, self._coeff), self.intercept)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        threshold = java_model.getThreshold().get()
        model = SVMModel(weights, intercept)
        model.setThreshold(threshold)
        return model
class SVMWithSGD(object):
    """
    Train a Support Vector Machine (SVM) using Stochastic Gradient Descent.

    .. versionadded:: 0.9.0
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, regType="l2",
              intercept=False, validateData=True, convergenceTol=0.001):
        """
        Train a support vector machine on the given data.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regType:
          The type of regularizer used for training our model.
          Allowed values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e. whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        def train(rdd, i):
            # Bridge to the JVM implementation; `i` is the initial weights
            # vector supplied by _regression_train_wrapper.
            return callMLlibFunc("trainSVMModelWithSGD", rdd, int(iterations), float(step),
                                 float(regParam), float(miniBatchFraction), i, regType,
                                 bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(train, SVMModel, data, initialWeights)
@inherit_doc
class NaiveBayesModel(Saveable, Loader):
    """
    Model for Naive Bayes classifiers.

    :param labels:
      List of labels.
    :param pi:
      Log of class priors, whose dimension is C, number of labels.
    :param theta:
      Log of class conditional probabilities, whose dimension is C-by-D,
      where D is number of features.

    >>> data = [
    ...     LabeledPoint(0.0, [0.0, 0.0]),
    ...     LabeledPoint(0.0, [0.0, 1.0]),
    ...     LabeledPoint(1.0, [1.0, 0.0]),
    ... ]
    >>> model = NaiveBayes.train(sc.parallelize(data))
    >>> model.predict(array([0.0, 1.0]))
    0.0
    >>> model.predict(array([1.0, 0.0]))
    1.0
    >>> model.predict(sc.parallelize([[1.0, 0.0]])).collect()
    [1.0]
    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
    ... ]
    >>> model = NaiveBayes.train(sc.parallelize(sparse_data))
    >>> model.predict(SparseVector(2, {1: 1.0}))
    0.0
    >>> model.predict(SparseVector(2, {0: 1.0}))
    1.0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = NaiveBayesModel.load(sc, path)
    >>> sameModel.predict(SparseVector(2, {0: 1.0})) == model.predict(SparseVector(2, {0: 1.0}))
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    .. versionadded:: 0.9.0
    """
    def __init__(self, labels, pi, theta):
        self.labels = labels
        self.pi = pi
        self.theta = theta

    @since('0.9.0')
    def predict(self, x):
        """
        Return the most likely class for a data vector
        or an RDD of vectors
        """
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))
        x = _convert_to_vector(x)
        # argmax over log P(class) + sum of per-feature log likelihoods.
        return self.labels[numpy.argmax(self.pi + x.dot(self.theta.transpose()))]

    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        java_labels = _py2java(sc, self.labels.tolist())
        java_pi = _py2java(sc, self.pi.tolist())
        java_theta = _py2java(sc, self.theta.tolist())
        java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel(
            java_labels, java_pi, java_theta)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel.load(
            sc._jsc.sc(), path)
        # Can not unpickle array.array from Pyrolite in Python3 with "bytes"
        py_labels = _java2py(sc, java_model.labels(), "latin1")
        py_pi = _java2py(sc, java_model.pi(), "latin1")
        py_theta = _java2py(sc, java_model.theta(), "latin1")
        return NaiveBayesModel(py_labels, py_pi, numpy.array(py_theta))
class NaiveBayes(object):
    """
    Trainer for multinomial Naive Bayes classification models.

    .. versionadded:: 0.9.0
    """

    @classmethod
    @since('0.9.0')
    def train(cls, data, lambda_=1.0):
        """
        Train a Naive Bayes model given an RDD of (label, features)
        vectors.

        This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which
        can handle all kinds of discrete data. For example, by
        converting documents into TF-IDF vectors, it can be used for
        document classification. By making every vector a 0-1 vector,
        it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).
        The input feature values must be nonnegative.

        :param data:
          RDD of LabeledPoint.
        :param lambda_:
          The smoothing parameter.
          (default: 1.0)
        """
        sample = data.first()
        if not isinstance(sample, LabeledPoint):
            raise ValueError("`data` should be an RDD of LabeledPoint")
        # Fitting happens JVM-side; it returns the class labels, the log
        # priors (pi) and the log conditional probabilities (theta).
        labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_)
        return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
@inherit_doc
class StreamingLogisticRegressionWithSGD(StreamingLinearAlgorithm):
    """
    Train or predict a logistic regression model on streaming data.

    Every incoming batch from the DStream — an RDD of LabeledPoints —
    updates the model with Stochastic Gradient Descent.  Batch sizes may
    vary, but the number of features must stay constant, and an initial
    weight vector has to be supplied before training starts.

    :param stepSize:
      Step size for each iteration of gradient descent.
      (default: 0.1)
    :param numIterations:
      Number of iterations run for each batch of data.
      (default: 50)
    :param miniBatchFraction:
      Fraction of each batch of data to use for updates.
      (default: 1.0)
    :param regParam:
      L2 Regularization parameter.
      (default: 0.0)
    :param convergenceTol:
      Value used to determine when to terminate iterations.
      (default: 0.001)

    .. versionadded:: 1.5.0
    """

    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, regParam=0.0,
                 convergenceTol=0.001):
        self.stepSize = stepSize
        self.numIterations = numIterations
        self.miniBatchFraction = miniBatchFraction
        self.regParam = regParam
        self.convergenceTol = convergenceTol
        # No usable model until setInitialWeights() is called.
        self._model = None
        super(StreamingLogisticRegressionWithSGD, self).__init__(
            model=self._model)

    @since('1.5.0')
    def setInitialWeights(self, initialWeights):
        """
        Set the initial value of weights.

        This must be set before running trainOn and predictOn.
        """
        initialWeights = _convert_to_vector(initialWeights)

        # LogisticRegressionWithSGD does only binary classification.
        self._model = LogisticRegressionModel(
            initialWeights, 0, initialWeights.size, 2)
        return self

    @since('1.5.0')
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)

        def update(rdd):
            # LogisticRegressionWithSGD.train raises an error for an empty RDD.
            if rdd.isEmpty():
                return
            self._model = LogisticRegressionWithSGD.train(
                rdd, self.numIterations, self.stepSize,
                self.miniBatchFraction, self._model.weights,
                regParam=self.regParam, convergenceTol=self.convergenceTol)

        dstream.foreachRDD(update)
def _test():
    """Run this module's doctests against a local SparkSession.

    Exits the process with a non-zero status when any doctest fails so
    that CI runs of this file report failure correctly.
    """
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.mllib.classification
    globs = pyspark.mllib.classification.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.classification tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # Use sys.exit(): the bare `exit` builtin is injected by the `site`
        # module and is not guaranteed to exist (e.g. under `python -S`
        # or in frozen/embedded interpreters).
        sys.exit(-1)
# Running this file directly executes the module doctests.
if __name__ == "__main__":
    _test()
|
amilios/amilios.github.io | refs/heads/master | markdown_generator/pubsFromBib.py | 83 | #!/usr/bin/env python
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a set of bibtex of publications and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)).
#
# The core python code is also in `pubsFromBibs.py`.
# Run either from the `markdown_generator` folder after replacing updating the publist dictionary with:
# * bib file names
# * specific venue keys based on your bib file preferences
# * any specific pre-text for specific files
# * Collection Name (future feature)
#
# TODO: Make this work with other databases of citations,
# TODO: Merge this with the existing TSV parsing solution
from pybtex.database.input import bibtex
import pybtex.database.input.bibtex
from time import strptime
import string
import html
import os
import re
#todo: incorporate different collection types rather than a catch all publications, requires other changes to template
# Bibtex sources to render.  Each entry names the .bib file to parse, which
# bibtex field holds the venue, optional text prefixed to that venue, and
# the academicpages collection the generated markdown files belong to.
publist = {
    "proceeding": {
        "file" : "proceedings.bib",
        "venuekey": "booktitle",
        "venue-pretext": "In the proceedings of ",
        "collection" : {"name":"publications",
                        "permalink":"/publication/"}
    },
    "journal":{
        "file": "pubs.bib",
        "venuekey" : "journal",
        "venue-pretext" : "",
        "collection" : {"name":"publications",
                        "permalink":"/publication/"}
    }
}
# Characters that must be written as HTML entities in the generated
# markdown front matter (they would otherwise break YAML or HTML).
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}


def html_escape(text):
    """Return *text* with &, double and single quotes replaced by entities."""
    return "".join(html_escape_table.get(ch, ch) for ch in text)
# Main driver: for every bibtex source, emit one markdown file per entry
# under ../_publications/ with academicpages YAML front matter, built from
# the entry's title, authors, venue, date, note and url fields.
for pubsource in publist:
    parser = bibtex.Parser()
    bibdata = parser.parse_file(publist[pubsource]["file"])

    #loop through the individual references in a given bibtex file
    for bib_id in bibdata.entries:
        #reset default date
        pub_year = "1900"
        pub_month = "01"
        pub_day = "01"

        b = bibdata.entries[bib_id].fields

        try:
            pub_year = f'{b["year"]}'

            #todo: this hack for month and day needs some cleanup
            if "month" in b.keys():
                if(len(b["month"])<3):
                    # Short value is assumed numeric: zero-pad to two digits.
                    pub_month = "0"+b["month"]
                    pub_month = pub_month[-2:]
                elif(b["month"] not in range(12)):
                    # NOTE(review): b["month"] is a string, so `not in range(12)`
                    # is always True — this strptime branch effectively handles
                    # every 3+ character month value; confirm before cleaning up.
                    tmnth = strptime(b["month"][:3],'%b').tm_mon
                    pub_month = "{:02d}".format(tmnth)
                else:
                    pub_month = str(b["month"])
            if "day" in b.keys():
                pub_day = str(b["day"])

            pub_date = pub_year+"-"+pub_month+"-"+pub_day

            #strip out {} as needed (some bibtex entries that maintain formatting)
            clean_title = b["title"].replace("{", "").replace("}","").replace("\\","").replace(" ","-")

            # Slug keeps only url-safe characters; collapse doubled dashes.
            url_slug = re.sub("\\[.*\\]|[^a-zA-Z0-9_-]", "", clean_title)
            url_slug = url_slug.replace("--","-")

            md_filename = (str(pub_date) + "-" + url_slug + ".md").replace("--","-")
            html_filename = (str(pub_date) + "-" + url_slug).replace("--","-")

            #Build Citation from text
            citation = ""

            #citation authors - todo - add highlighting for primary author?
            for author in bibdata.entries[bib_id].persons["author"]:
                citation = citation+" "+author.first_names[0]+" "+author.last_names[0]+", "

            #citation title
            citation = citation + "\"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + ".\""

            #add venue logic depending on citation type
            venue = publist[pubsource]["venue-pretext"]+b[publist[pubsource]["venuekey"]].replace("{", "").replace("}","").replace("\\","")

            citation = citation + " " + html_escape(venue)
            citation = citation + ", " + pub_year + "."

            ## YAML variables
            md = "---\ntitle: \"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + '"\n'

            md += """collection: """ + publist[pubsource]["collection"]["name"]

            md += """\npermalink: """ + publist[pubsource]["collection"]["permalink"] + html_filename

            # Only include an excerpt when the note field is non-trivial.
            note = False
            if "note" in b.keys():
                if len(str(b["note"])) > 5:
                    md += "\nexcerpt: '" + html_escape(b["note"]) + "'"
                    note = True

            md += "\ndate: " + str(pub_date)

            md += "\nvenue: '" + html_escape(venue) + "'"

            url = False
            if "url" in b.keys():
                if len(str(b["url"])) > 5:
                    md += "\npaperurl: '" + b["url"] + "'"
                    url = True

            md += "\ncitation: '" + html_escape(citation) + "'"

            md += "\n---"

            ## Markdown description for individual page
            if note:
                md += "\n" + html_escape(b["note"]) + "\n"

            if url:
                md += "\n[Access paper here](" + b["url"] + "){:target=\"_blank\"}\n"
            else:
                md += "\nUse [Google Scholar](https://scholar.google.com/scholar?q="+html.escape(clean_title.replace("-","+"))+"){:target=\"_blank\"} for full citation"

            md_filename = os.path.basename(md_filename)

            with open("../_publications/" + md_filename, 'w') as f:
                f.write(md)
            print(f'SUCESSFULLY PARSED {bib_id}: \"', b["title"][:60],"..."*(len(b['title'])>60),"\"")
        # field may not exist for a reference
        except KeyError as e:
            print(f'WARNING Missing Expected Field {e} from entry {bib_id}: \"', b["title"][:30],"..."*(len(b['title'])>30),"\"")
            continue
|
Zhongqilong/mykbengineer | refs/heads/master | kbe/res/scripts/common/Lib/site-packages/setuptools/command/bdist_egg.py | 286 | """setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
    # Python 2.7 or >=3.2
    from sysconfig import get_path, get_python_version

    def _get_purelib():
        # Directory where pure-Python packages are installed (site-packages).
        return get_path("purelib")
except ImportError:
    # Older interpreters: fall back to the distutils equivalents.
    from distutils.sysconfig import get_python_lib, get_python_version

    def _get_purelib():
        return get_python_lib(False)
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.compat import basestring, next
from setuptools.extension import Library
def strip_module(filename):
    """Return *filename* without its extension and any trailing 'module' tag.

    'foomodule.so' and 'foomodule' both become 'foo'; names without either
    part are returned unchanged.
    """
    base = filename
    if '.' in base:
        base = os.path.splitext(base)[0]
    if base.endswith('module'):
        base = base[:-len('module')]
    return base
def write_stub(resource, pyfile):
    """Write a stub loader to *pyfile*.

    On import, the stub locates the real extension module *resource* via
    pkg_resources and loads it in place of itself with imp.load_dynamic.
    """
    f = open(pyfile,'w')
    f.write('\n'.join([
        "def __bootstrap__():",
        " global __bootstrap__, __loader__, __file__",
        " import sys, pkg_resources, imp",
        " __file__ = pkg_resources.resource_filename(__name__,%r)"
        % resource,
        " __loader__ = None; del __bootstrap__, __loader__",
        " imp.load_dynamic(__name__,__file__)",
        "__bootstrap__()",
        "" # terminal \n
    ]))
    f.close()
# stub __init__.py for packages distributed without one: declares the
# package as a pkg_resources namespace package at import time.
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
    """setuptools command that builds a ".egg" (zipimport-able) distribution."""

    description = "create an \"egg\" distribution"

    user_options = [
        ('bdist-dir=', 'b',
            "temporary directory for creating the distribution"),
        ('plat-name=', 'p',
            "platform name to embed in generated filenames "
            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
            "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
            "keep the pseudo-installation tree around after " +
            "creating the distribution archive"),
        ('dist-dir=', 'd',
            "directory to put final built distributions in"),
        ('skip-build', None,
            "skip rebuilding everything (for testing/debugging)"),
    ]

    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]

    def initialize_options (self):
        # All options start unset; finalize_options supplies the defaults.
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = 0
        self.egg_output = None
        self.exclude_source_files = None

    def finalize_options(self):
        """Derive unset options from the egg_info and bdist commands."""
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info

        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')

        if self.plat_name is None:
            self.plat_name = get_build_platform()

        self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))

        if self.egg_output is None:
            # Compute filename of the output egg
            basename = Distribution(
                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
                get_python_version(),
                self.distribution.has_ext_modules() and self.plat_name
            ).egg_name()
            self.egg_output = os.path.join(self.dist_dir, basename+'.egg')

    def do_install_data(self):
        # Hack for packages that install data to install's --install-lib
        self.get_finalized_command('install').install_lib = self.bdist_dir

        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
        old, self.distribution.data_files = self.distribution.data_files,[]

        for item in old:
            if isinstance(item,tuple) and len(item)==2:
                if os.path.isabs(item[0]):
                    realpath = os.path.realpath(item[0])
                    normalized = os.path.normcase(realpath)
                    if normalized==site_packages or normalized.startswith(
                        site_packages+os.sep
                    ):
                        # Rewrite absolute site-packages targets as paths
                        # relative to the egg root.
                        item = realpath[len(site_packages)+1:], item[1]
                    # XXX else: raise ???
            self.distribution.data_files.append(item)

        try:
            log.info("installing package data to %s" % self.bdist_dir)
            self.call_command('install_data', force=0, root=None)
        finally:
            # Restore the original data_files no matter what happened above.
            self.distribution.data_files = old

    def get_outputs(self):
        # The only artifact this command produces is the final .egg file.
        return [self.egg_output]

    def call_command(self,cmdname,**kw):
        """Invoke reinitialized command `cmdname` with keyword args"""
        # Point every install-directory option at the egg build tree.
        for dirname in INSTALL_DIRECTORY_ATTRS:
            kw.setdefault(dirname,self.bdist_dir)
        kw.setdefault('skip_build',self.skip_build)
        kw.setdefault('dry_run', self.dry_run)
        cmd = self.reinitialize_command(cmdname, **kw)
        self.run_command(cmdname)
        return cmd

    def run(self):
        """Build the egg: install the project into bdist_dir, generate
        stubs and EGG-INFO, then zip everything into self.egg_output."""
        # Generate metadata first
        self.run_command("egg_info")
        # We run install_lib before install_data, because some data hacks
        # pull their data path from the install_lib command.
        log.info("installing library code to %s" % self.bdist_dir)
        instcmd = self.get_finalized_command('install')
        old_root = instcmd.root; instcmd.root = None
        if self.distribution.has_c_libraries() and not self.skip_build:
            self.run_command('build_clib')
        cmd = self.call_command('install_lib', warn_dir=0)
        instcmd.root = old_root

        all_outputs, ext_outputs = self.get_ext_outputs()
        self.stubs = []
        to_compile = []
        # Write a .py stub loader next to every compiled extension so the
        # extension can be resource-extracted when run from a zipped egg.
        for (p,ext_name) in enumerate(ext_outputs):
            filename,ext = os.path.splitext(ext_name)
            pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
            self.stubs.append(pyfile)
            log.info("creating stub loader for %s" % ext_name)
            if not self.dry_run:
                write_stub(os.path.basename(ext_name), pyfile)
            to_compile.append(pyfile)
            ext_outputs[p] = ext_name.replace(os.sep,'/')

        to_compile.extend(self.make_init_files())
        if to_compile:
            cmd.byte_compile(to_compile)

        if self.distribution.data_files:
            self.do_install_data()

        # Make the EGG-INFO directory
        archive_root = self.bdist_dir
        egg_info = os.path.join(archive_root,'EGG-INFO')
        self.mkpath(egg_info)
        if self.distribution.scripts:
            script_dir = os.path.join(egg_info, 'scripts')
            log.info("installing scripts to %s" % script_dir)
            self.call_command('install_scripts',install_dir=script_dir,no_ep=1)

        self.copy_metadata_to(egg_info)
        # Record native extensions so zip installs know what to extract.
        native_libs = os.path.join(egg_info, "native_libs.txt")
        if all_outputs:
            log.info("writing %s" % native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
                libs_file = open(native_libs, 'wt')
                libs_file.write('\n'.join(all_outputs))
                libs_file.write('\n')
                libs_file.close()
        elif os.path.isfile(native_libs):
            log.info("removing %s" % native_libs)
            if not self.dry_run:
                os.unlink(native_libs)

        write_safety_flag(
            os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
        )

        if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
            log.warn(
                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
                "Use the install_requires/extras_require setup() args instead."
            )

        if self.exclude_source_files:
            self.zap_pyfiles()

        # Make the archive
        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
                          dry_run=self.dry_run, mode=self.gen_header())
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)

        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution,'dist_files',[]).append(
            ('bdist_egg',get_python_version(),self.egg_output))

    def zap_pyfiles(self):
        """Delete the .py sources from the build tree (--exclude-source-files)."""
        log.info("Removing .py files from temporary directory")
        for base,dirs,files in walk_egg(self.bdist_dir):
            for name in files:
                if name.endswith('.py'):
                    path = os.path.join(base,name)
                    log.debug("Deleting %s", path)
                    os.unlink(path)

    def zip_safe(self):
        """Return the project's zip-safety flag, analyzing the build if unset."""
        safe = getattr(self.distribution,'zip_safe',None)
        if safe is not None:
            return safe
        log.warn("zip_safe flag not set; analyzing archive contents...")
        return analyze_egg(self.bdist_dir, self.stubs)

    def make_init_files(self):
        """Create missing package __init__ files"""
        init_files = []
        for base,dirs,files in walk_egg(self.bdist_dir):
            if base==self.bdist_dir:
                # don't put an __init__ in the root
                continue
            for name in files:
                if name.endswith('.py'):
                    if '__init__.py' not in files:
                        pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
                        if self.distribution.has_contents_for(pkg):
                            log.warn("Creating missing __init__.py for %s",pkg)
                            filename = os.path.join(base,'__init__.py')
                            if not self.dry_run:
                                f = open(filename,'w'); f.write(NS_PKG_STUB)
                                f.close()
                            init_files.append(filename)
                    break
            else:
                # not a package, don't traverse to subdirectories
                dirs[:] = []

        return init_files

    def gen_header(self):
        """Return the zipfile open mode, first writing a shell header when
        the project declares an 'eggsecutable' entry point."""
        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
        ep = epm.get('setuptools.installation',{}).get('eggsecutable')
        if ep is None:
            return 'w' # not an eggsecutable, do it the usual way.

        if not ep.attrs or ep.extras:
            raise DistutilsSetupError(
                "eggsecutable entry point (%r) cannot have 'extras' "
                "or refer to a module" % (ep,)
            )

        pyver = sys.version[:3]
        pkg = ep.module_name
        full = '.'.join(ep.attrs)
        base = ep.attrs[0]
        basename = os.path.basename(self.egg_output)
        header = (
            "#!/bin/sh\n"
            'if [ `basename $0` = "%(basename)s" ]\n'
            'then exec python%(pyver)s -c "'
            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
            '" "$@"\n'
            'else\n'
            ' echo $0 is not the correct name for this egg file.\n'
            ' echo Please rename it back to %(basename)s and try again.\n'
            ' exec false\n'
            'fi\n'
        ) % locals()
        if not self.dry_run:
            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
            f = open(self.egg_output, 'w')
            f.write(header)
            f.close()
        # Header written; the zip data is appended after it ('a' mode).
        return 'a'

    def copy_metadata_to(self, target_dir):
        "Copy metadata (egg info) to the target_dir"
        # normalize the path (so that a forward-slash in egg_info will
        # match using startswith below)
        norm_egg_info = os.path.normpath(self.egg_info)
        prefix = os.path.join(norm_egg_info,'')
        for path in self.ei_cmd.filelist.files:
            if path.startswith(prefix):
                target = os.path.join(target_dir, path[len(prefix):])
                ensure_directory(target)
                self.copy_file(path, target)

    def get_ext_outputs(self):
        """Get a list of relative paths to C extensions in the output distro"""

        all_outputs = []
        ext_outputs = []

        # Map each visited directory to its egg-relative prefix.
        paths = {self.bdist_dir:''}
        for base, dirs, files in os.walk(self.bdist_dir):
            for filename in files:
                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                    all_outputs.append(paths[base]+filename)
            for filename in dirs:
                paths[os.path.join(base,filename)] = paths[base]+filename+'/'

        if self.distribution.has_ext_modules():
            build_cmd = self.get_finalized_command('build_ext')
            for ext in build_cmd.extensions:
                if isinstance(ext,Library):
                    continue
                fullname = build_cmd.get_ext_fullname(ext.name)
                filename = build_cmd.get_ext_filename(fullname)
                if not os.path.basename(filename).startswith('dl-'):
                    if os.path.exists(os.path.join(self.bdist_dir,filename)):
                        ext_outputs.append(filename)

        return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
    """Yield os.walk-style (base, dirs, files) triples for an unpacked egg,
    pruning the EGG-INFO metadata directory from the traversal."""
    walker = os.walk(egg_dir)
    top, subdirs, filenames = next(walker)
    try:
        # Mutating the dirs list here stops os.walk from descending into it.
        subdirs.remove('EGG-INFO')
    except ValueError:
        pass
    yield top, subdirs, filenames
    for entry in walker:
        yield entry
def analyze_egg(egg_dir, stubs):
    """Decide whether an unpacked egg is safe to run zipped.

    An explicit flag file inside EGG-INFO wins outright; otherwise every
    compiled module in the tree is scanned for zip-unsafe constructs.
    """
    # An existing zip-safe / not-zip-safe marker overrides any analysis.
    for verdict, marker in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', marker)):
            return verdict
    if not can_scan():
        return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith(('.pyc', '.pyo')):
                # always scan, even if we already know we're not safe
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
def write_safety_flag(egg_dir, safe):
    """Materialize the zip-safety verdict as flag file(s) in *egg_dir*.

    Creates the marker matching *safe*, removes the contradictory one, and
    removes both when *safe* is None (unknown).
    """
    for verdict, marker in safety_flags.items():
        path = os.path.join(egg_dir, marker)
        if os.path.exists(path):
            # Drop a stale or contradictory marker file.
            if safe is None or bool(safe) != verdict:
                os.unlink(path)
        elif safe is not None and bool(safe) == verdict:
            handle = open(path, 'wt')
            handle.write('\n')
            handle.close()
# Marker file names written into EGG-INFO; the key is the verdict encoded.
safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff"""
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module
    pkg = base[len(egg_dir)+1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    # Size of the .pyc header preceding the marshalled code object:
    #   < 3.3   : magic + mtime                          (8 bytes)
    #   3.3-3.6 : magic + mtime + source size            (12 bytes)
    #   >= 3.7  : magic + flags + mtime + size, PEP 552  (16 bytes)
    # The original code used 12 for every Python >= 3.3, which misreads
    # the stream on 3.7+.
    if sys.version_info < (3, 3):
        skip = 8
    elif sys.version_info < (3, 7):
        skip = 12
    else:
        skip = 16
    f = open(filename, 'rb')
    f.read(skip)
    code = marshal.load(f)
    f.close()
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        for bad in [
            # NOTE: the original list was missing a comma after 'getfile',
            # which silently concatenated it with 'getsourcelines' and made
            # both names go undetected.
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
        if sys.version[:3] == "2.4":  # -m works w/zipfiles in 2.5
            log.warn("%s: top-level module may be 'python -m' script", module)
            safe = False
    return safe
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects"""
    for sym in code.co_names:
        yield sym
    for const in code.co_consts:
        if isinstance(const, basestring):
            yield const
        elif isinstance(const, CodeType):
            # Recurse into nested functions, lambdas and comprehensions.
            for sym in iter_symbols(const):
                yield sym
def can_scan():
    """Return True when compiled bytecode can be analyzed on this interpreter."""
    if sys.platform.startswith('java') or sys.platform == 'cli':
        # Jython / IronPython do not produce CPython bytecode, so the
        # marshal-based scan in scan_module cannot work there.
        log.warn("Unable to analyze compiled code on this platform.")
        log.warn("Please ask the author to include a 'zip_safe'"
                 " setting (either True or False) in the package's setup.py")
        return
    return True
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
# (bdist_egg.call_command defaults each of these to self.bdist_dir).
INSTALL_DIRECTORY_ATTRS = [
    'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
                 mode='w'):
    """Zip every regular file under *base_dir* into *zip_filename*.

    Entries are stored with paths relative to *base_dir*.  When *dry_run*
    is true, the walk and logging still happen but nothing is written.
    Returns the name of the output zip file.
    """
    import zipfile

    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    def visit(z, dirname, names):
        # Add each regular file using its path relative to base_dir.
        for fname in names:
            full = os.path.normpath(os.path.join(dirname, fname))
            if os.path.isfile(full):
                relative = full[len(base_dir)+1:]
                if not dry_run:
                    z.write(full, relative)
                log.debug("adding '%s'" % relative)

    if compress is None:
        # avoid 2.3 zipimport bug when 64 bits
        compress = (sys.version >= "2.4")

    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    archive = None
    if not dry_run:
        archive = zipfile.ZipFile(zip_filename, mode, compression=compression)
    for dirname, dirs, files in os.walk(base_dir):
        visit(archive, dirname, files)
    if archive is not None:
        archive.close()
    return zip_filename
#
|
areski/django | refs/heads/master | tests/migrations/test_migrations_no_changes/0001_initial.py | 2995 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema for the migrations test fixture: two unrelated tables
    # used by the "no changes" autodetector tests.

    operations = [
        migrations.CreateModel(
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=255)),
                ("slug", models.SlugField(null=True)),
                ("age", models.IntegerField(default=0)),
                ("silly_field", models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            "Tribble",
            [
                ("id", models.AutoField(primary_key=True)),
                ("fluffy", models.BooleanField(default=True)),
            ],
        )
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.