repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
dablak/boto | boto/ec2/reservedinstance.py | 17 | 12948 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
from boto.ec2.ec2object import EC2Object
from boto.utils import parse_ts
class ReservedInstancesOffering(EC2Object):
    """A Reserved Instance offering, as returned by EC2's
    DescribeReservedInstancesOfferings call.

    Instances are normally populated by boto's XML SAX parser through
    :meth:`startElement`/:meth:`endElement` rather than constructed
    directly with keyword arguments.
    """

    def __init__(self, connection=None, id=None, instance_type=None,
                 availability_zone=None, duration=None, fixed_price=None,
                 usage_price=None, description=None, instance_tenancy=None,
                 currency_code=None, offering_type=None,
                 recurring_charges=None, pricing_details=None):
        super(ReservedInstancesOffering, self).__init__(connection)
        self.id = id
        self.instance_type = instance_type
        self.availability_zone = availability_zone
        self.duration = duration
        self.fixed_price = fixed_price
        self.usage_price = usage_price
        self.description = description
        self.instance_tenancy = instance_tenancy
        self.currency_code = currency_code
        self.offering_type = offering_type
        self.recurring_charges = recurring_charges
        self.pricing_details = pricing_details
        # BUG FIX: 'marketplace' was previously only assigned inside
        # endElement, so reading it before/without parsing the
        # <marketplace> element raised AttributeError.
        self.marketplace = None

    def __repr__(self):
        return 'ReservedInstanceOffering:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Container elements delegate parsing to nested ResultSets.
        if name == 'recurringCharges':
            self.recurring_charges = ResultSet([('item', RecurringCharge)])
            return self.recurring_charges
        elif name == 'pricingDetailsSet':
            self.pricing_details = ResultSet([('item', PricingDetail)])
            return self.pricing_details
        return None

    def endElement(self, name, value, connection):
        # Leaf elements: copy the text value onto the matching attribute.
        if name == 'reservedInstancesOfferingId':
            self.id = value
        elif name == 'instanceType':
            self.instance_type = value
        elif name == 'availabilityZone':
            self.availability_zone = value
        elif name == 'duration':
            # Duration is reported in seconds by the API.
            self.duration = int(value)
        elif name == 'fixedPrice':
            self.fixed_price = value
        elif name == 'usagePrice':
            self.usage_price = value
        elif name == 'productDescription':
            self.description = value
        elif name == 'instanceTenancy':
            self.instance_tenancy = value
        elif name == 'currencyCode':
            self.currency_code = value
        elif name == 'offeringType':
            self.offering_type = value
        elif name == 'marketplace':
            self.marketplace = value == 'true'

    def describe(self):
        """Print a human-readable summary of this offering.

        BUG FIX: converted Python-2-only ``print`` statements to the
        function form, which is valid on both Python 2 and Python 3.
        """
        print('ID=%s' % self.id)
        print('\tInstance Type=%s' % self.instance_type)
        print('\tZone=%s' % self.availability_zone)
        print('\tDuration=%s' % self.duration)
        print('\tFixed Price=%s' % self.fixed_price)
        print('\tUsage Price=%s' % self.usage_price)
        print('\tDescription=%s' % self.description)

    def purchase(self, instance_count=1, dry_run=False):
        """Purchase ``instance_count`` instances of this offering via the
        attached connection; returns the connection's result object."""
        return self.connection.purchase_reserved_instance_offering(
            self.id,
            instance_count,
            dry_run=dry_run
        )
class RecurringCharge(object):
    """A single recurring charge (frequency + amount) attached to a
    Reserved Instance offering; populated by the XML SAX handlers."""

    def __init__(self, connection=None, frequency=None, amount=None):
        # ``connection`` is accepted for parser compatibility; not kept.
        self.frequency, self.amount = frequency, amount

    def startElement(self, name, attrs, connection):
        # No nested container elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Mirror every leaf element straight onto the instance.
        setattr(self, name, value)
class PricingDetail(object):
    """One (price, count) pair from an offering's pricingDetailsSet."""

    def __init__(self, connection=None, price=None, count=None):
        # ``connection`` is accepted for parser compatibility; not kept.
        self.price, self.count = price, count

    def startElement(self, name, attrs, connection):
        # Leaf-only element: nothing nested to parse.
        return None

    def endElement(self, name, value, connection):
        # Store each leaf element under its own XML name.
        setattr(self, name, value)
class ReservedInstance(ReservedInstancesOffering):
    """A purchased Reserved Instance (DescribeReservedInstances)."""

    def __init__(self, connection=None, id=None, instance_type=None,
                 availability_zone=None, duration=None, fixed_price=None,
                 usage_price=None, description=None,
                 instance_count=None, state=None):
        super(ReservedInstance, self).__init__(connection, id, instance_type,
                                               availability_zone, duration,
                                               fixed_price, usage_price,
                                               description)
        self.instance_count = instance_count
        self.state = state
        # Start timestamp; filled in by endElement when parsing.
        self.start = None

    def __repr__(self):
        return 'ReservedInstance:%s' % self.id

    def endElement(self, name, value, connection):
        # BUG FIX: the original used two separate ``if`` statements, so a
        # 'reservedInstancesId' element set self.id and then ALSO fell
        # through the second chain's ``else`` into the parent handler.
        # A single if/elif chain dispatches each element exactly once.
        if name == 'reservedInstancesId':
            self.id = value
        elif name == 'instanceCount':
            self.instance_count = int(value)
        elif name == 'state':
            self.state = value
        elif name == 'start':
            self.start = value
        else:
            super(ReservedInstance, self).endElement(name, value, connection)
class ReservedInstanceListing(EC2Object):
    """A Reserved Instance Marketplace listing
    (DescribeReservedInstancesListings)."""

    def __init__(self, connection=None, listing_id=None, id=None,
                 create_date=None, update_date=None,
                 status=None, status_message=None, client_token=None):
        self.connection = connection
        self.listing_id = listing_id
        self.id = id
        self.create_date = create_date
        self.update_date = update_date
        self.status = status
        self.status_message = status_message
        self.client_token = client_token

    def startElement(self, name, attrs, connection):
        # Container elements delegate to nested ResultSets.
        if name == 'instanceCounts':
            self.instance_counts = ResultSet([('item', InstanceCount)])
            return self.instance_counts
        if name == 'priceSchedules':
            self.price_schedules = ResultSet([('item', PriceSchedule)])
            return self.price_schedules
        return None

    # Map XML leaf element names onto instance attribute names; any
    # unmapped element falls back to its own XML name.
    _LEAF_ATTRS = {
        'reservedInstancesListingId': 'listing_id',
        'reservedInstancesId': 'id',
        'createDate': 'create_date',
        'updateDate': 'update_date',
        'status': 'status',
        'statusMessage': 'status_message',
    }

    def endElement(self, name, value, connection):
        setattr(self, self._LEAF_ATTRS.get(name, name), value)
class InstanceCount(object):
    """Pairs a listing state with how many instances are in that state."""

    def __init__(self, connection=None, state=None, instance_count=None):
        # ``connection`` is accepted for parser compatibility; not kept.
        self.state = state
        self.instance_count = instance_count

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'instanceCount':
            # The count is the only numeric leaf element.
            self.instance_count = int(value)
        elif name == 'state':
            self.state = value
        else:
            # Unknown leaves are kept verbatim under their XML name.
            setattr(self, name, value)
class PriceSchedule(object):
    """One term/price step of a Reserved Instance listing's schedule."""

    def __init__(self, connection=None, term=None, price=None,
                 currency_code=None, active=None):
        self.connection = connection
        self.term = term
        self.price = price
        self.currency_code = currency_code
        self.active = active

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Two leaves need conversion; everything else is stored as text.
        if name == 'term':
            self.term = int(value)
            return
        if name == 'active':
            self.active = value == 'true'
            return
        attr = {'price': 'price', 'currencyCode': 'currency_code'}.get(name, name)
        setattr(self, attr, value)
class ReservedInstancesConfiguration(object):
    """Target configuration used when modifying Reserved Instances."""

    # XML leaf element name -> attribute name mapping.
    _LEAF_ATTRS = {
        'availabilityZone': 'availability_zone',
        'platform': 'platform',
        'instanceCount': 'instance_count',
        'instanceType': 'instance_type',
    }

    def __init__(self, connection=None, availability_zone=None, platform=None,
                 instance_count=None, instance_type=None):
        self.connection = connection
        self.availability_zone = availability_zone
        self.platform = platform
        self.instance_count = instance_count
        self.instance_type = instance_type

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'instanceCount':
            # The count is the only numeric leaf.
            value = int(value)
        setattr(self, self._LEAF_ATTRS.get(name, name), value)
class ModifyReservedInstancesResult(object):
    """Holds the modification id returned by ModifyReservedInstances."""

    def __init__(self, connection=None, modification_id=None):
        self.connection = connection
        self.modification_id = modification_id

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Only one known leaf; anything else is kept under its XML name.
        target = ('modification_id'
                  if name == 'reservedInstancesModificationId' else name)
        setattr(self, target, value)
class ModificationResult(object):
    """One per-configuration result inside a Reserved Instances
    modification response."""

    # XML leaf element name -> attribute name mapping.
    _LEAF_ATTRS = {
        'reservedInstancesModificationId': 'modification_id',
        'availabilityZone': 'availability_zone',
        'platform': 'platform',
        'instanceCount': 'instance_count',
        'instanceType': 'instance_type',
    }

    def __init__(self, connection=None, modification_id=None,
                 availability_zone=None, platform=None, instance_count=None,
                 instance_type=None):
        self.connection = connection
        self.modification_id = modification_id
        self.availability_zone = availability_zone
        self.platform = platform
        self.instance_count = instance_count
        self.instance_type = instance_type

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'instanceCount':
            # The count is the only numeric leaf.
            value = int(value)
        setattr(self, self._LEAF_ATTRS.get(name, name), value)
class ReservedInstancesModification(object):
    """A Reserved Instances modification request and its current status
    (DescribeReservedInstancesModifications)."""

    # Leaves carrying ISO timestamps that are parsed into datetimes.
    _DATE_LEAVES = {
        'createDate': 'create_date',
        'updateDate': 'update_date',
        'effectiveDate': 'effective_date',
    }
    # Plain-text leaves; unmapped names fall back to the XML name.
    _PLAIN_LEAVES = {
        'reservedInstancesModificationId': 'modification_id',
        'status': 'status',
        'statusMessage': 'status_message',
        'clientToken': 'client_token',
    }

    def __init__(self, connection=None, modification_id=None,
                 reserved_instances=None, modification_results=None,
                 create_date=None, update_date=None, effective_date=None,
                 status=None, status_message=None, client_token=None):
        self.connection = connection
        self.modification_id = modification_id
        self.reserved_instances = reserved_instances
        self.modification_results = modification_results
        self.create_date = create_date
        self.update_date = update_date
        self.effective_date = effective_date
        self.status = status
        self.status_message = status_message
        self.client_token = client_token

    def startElement(self, name, attrs, connection):
        # Container elements delegate to nested ResultSets.
        if name == 'reservedInstancesSet':
            self.reserved_instances = ResultSet([
                ('item', ReservedInstance)
            ])
            return self.reserved_instances
        if name == 'modificationResultSet':
            self.modification_results = ResultSet([
                ('item', ModificationResult)
            ])
            return self.modification_results
        return None

    def endElement(self, name, value, connection):
        if name in self._DATE_LEAVES:
            setattr(self, self._DATE_LEAVES[name], parse_ts(value))
        else:
            setattr(self, self._PLAIN_LEAVES.get(name, name), value)
| mit |
afaheem88/tempest_neutron | tempest/api/volume/test_volumes_actions.py | 2 | 6440 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesV2ActionsTest(base.BaseVolumeTest):
    """Cinder v2 volume action tests: attach/detach to a server, upload
    to Glance, reserve/unreserve, and the read-only metadata flag.

    One server and one volume are shared by every test in the class.
    """

    @classmethod
    def resource_setup(cls):
        """Boot one shared server and create one shared volume."""
        super(VolumesV2ActionsTest, cls).resource_setup()
        cls.client = cls.volumes_client
        cls.image_client = cls.os.image_client

        # Create a test shared instance
        srv_name = data_utils.rand_name(cls.__name__ + '-Instance-')
        resp, cls.server = cls.servers_client.create_server(srv_name,
                                                            cls.image_ref,
                                                            cls.flavor_ref)
        cls.servers_client.wait_for_server_status(cls.server['id'], 'ACTIVE')

        # Create a test shared volume for attach/detach tests
        cls.volume = cls.create_volume()

    def _delete_image_with_wait(self, image_id):
        # Helper: delete a Glance image and block until it is gone.
        self.image_client.delete_image(image_id)
        self.image_client.wait_for_resource_deletion(image_id)

    @classmethod
    def resource_cleanup(cls):
        """Tear down the shared server (the volume is handled by the base
        class's cleanup)."""
        # Delete the test instance
        cls.servers_client.delete_server(cls.server['id'])
        cls.servers_client.wait_for_server_termination(cls.server['id'])
        super(VolumesV2ActionsTest, cls).resource_cleanup()

    @test.stresstest(class_setup_per='process')
    @test.attr(type='smoke')
    @test.services('compute')
    def test_attach_detach_volume_to_instance(self):
        # Volume is attached and detached successfully from an instance
        mountpoint = '/dev/vdc'
        _, body = self.client.attach_volume(self.volume['id'],
                                            self.server['id'],
                                            mountpoint)
        self.client.wait_for_volume_status(self.volume['id'], 'in-use')
        _, body = self.client.detach_volume(self.volume['id'])
        self.client.wait_for_volume_status(self.volume['id'], 'available')

    @test.stresstest(class_setup_per='process')
    @test.attr(type='gate')
    @test.services('compute')
    def test_get_volume_attachment(self):
        # Verify that a volume's attachment information is retrieved
        mountpoint = '/dev/vdc'
        _, body = self.client.attach_volume(self.volume['id'],
                                            self.server['id'],
                                            mountpoint)
        self.client.wait_for_volume_status(self.volume['id'], 'in-use')
        # NOTE(gfidente): added in reverse order because functions will be
        # called in reverse order to the order they are added (LIFO)
        self.addCleanup(self.client.wait_for_volume_status,
                        self.volume['id'],
                        'available')
        self.addCleanup(self.client.detach_volume, self.volume['id'])
        _, volume = self.client.get_volume(self.volume['id'])
        self.assertIn('attachments', volume)
        attachment = self.client.get_attachment_from_volume(volume)
        self.assertEqual(mountpoint, attachment['device'])
        self.assertEqual(self.server['id'], attachment['server_id'])
        self.assertEqual(self.volume['id'], attachment['id'])
        self.assertEqual(self.volume['id'], attachment['volume_id'])

    @test.attr(type='gate')
    @test.services('image')
    def test_volume_upload(self):
        # NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
        # it is shared with the other tests. After it is uploaded in Glance,
        # there is no way to delete it from Cinder, so we delete it from Glance
        # using the Glance image_client and from Cinder via tearDownClass.
        image_name = data_utils.rand_name('Image-')
        _, body = self.client.upload_volume(self.volume['id'],
                                            image_name,
                                            CONF.volume.disk_format)
        image_id = body["image_id"]
        self.addCleanup(self.image_client.delete_image, image_id)
        self.image_client.wait_for_image_status(image_id, 'active')
        self.client.wait_for_volume_status(self.volume['id'], 'available')

    @test.attr(type='gate')
    def test_reserve_unreserve_volume(self):
        # Mark volume as reserved.
        _, body = self.client.reserve_volume(self.volume['id'])
        # To get the volume info
        _, body = self.client.get_volume(self.volume['id'])
        self.assertIn('attaching', body['status'])
        # Unmark volume as reserved.
        _, body = self.client.unreserve_volume(self.volume['id'])
        # To get the volume info
        _, body = self.client.get_volume(self.volume['id'])
        self.assertIn('available', body['status'])

    def _is_true(self, val):
        # Accept both string and boolean representations of truth, since
        # the API returns the readonly flag as metadata text.
        return val in ['true', 'True', True]

    @test.attr(type='gate')
    def test_volume_readonly_update(self):
        # Update volume readonly true
        readonly = True
        _, body = self.client.update_volume_readonly(self.volume['id'],
                                                     readonly)
        # Get Volume information
        _, fetched_volume = self.client.get_volume(self.volume['id'])
        bool_flag = self._is_true(fetched_volume['metadata']['readonly'])
        self.assertEqual(True, bool_flag)

        # Update volume readonly false
        readonly = False
        _, body = self.client.update_volume_readonly(self.volume['id'],
                                                     readonly)
        # Get Volume information
        _, fetched_volume = self.client.get_volume(self.volume['id'])
        bool_flag = self._is_true(fetched_volume['metadata']['readonly'])
        self.assertEqual(False, bool_flag)
class VolumesV1ActionsTest(VolumesV2ActionsTest):
    """Run the same volume action tests against the Cinder v1 API."""
    _api_version = 1
| apache-2.0 |
impowski/servo | tests/wpt/web-platform-tests/tools/manifest/manifest.py | 26 | 14426 | import json
import os
from collections import defaultdict, OrderedDict
from six import iteritems
from .item import item_types, ManualTest, WebdriverSpecTest, Stub, RefTest, TestharnessTest
from .log import get_logger
from .sourcefile import SourceFile
from .utils import from_os_path, to_os_path
CURRENT_VERSION = 3
class ManifestError(Exception):
    """Base error for malformed or inconsistent manifest data."""
    pass
class ManifestVersionMismatch(ManifestError):
    """Raised when a stored manifest's version != CURRENT_VERSION."""
    pass
class Manifest(object):
    """In-memory representation of the web-platform-tests manifest.

    Committed tests are stored per item type as {path: set(items)}, with
    a :class:`LocalChanges` overlay recording uncommitted additions and
    deletions on top of that committed state.  Reftests are kept in a
    separate node graph so that top-level tests (nodes with no incoming
    reference links) can be recomputed.

    NOTE: this code targets Python 2 (``dict.itervalues`` is used below).
    """

    def __init__(self, git_rev=None, url_base="/"):
        # Dict of item_type: {path: set(manifest_items)}
        self._data = dict((item_type, defaultdict(set))
                          for item_type in item_types)
        self.rev = git_rev
        self.url_base = url_base
        self.local_changes = LocalChanges(self)
        # reftest nodes arranged as {path: set(manifest_items)}
        self.reftest_nodes = defaultdict(set)
        self.reftest_nodes_by_url = {}

    def _included_items(self, include_types=None):
        """Yield (item_type, {path: items}) pairs with the local-changes
        overlay applied on top of the committed data."""
        if include_types is None:
            include_types = item_types
        for item_type in include_types:
            # Work on a copy so the committed data is never mutated.
            paths = self._data[item_type].copy()
            for local_types, local_paths in self.local_changes.itertypes(item_type):
                for path, items in iteritems(local_paths):
                    paths[path] = items
            for path in self.local_changes.iterdeleted():
                if path in paths:
                    del paths[path]
            if item_type == "reftest":
                # Locally-deleted reftests are removed item-by-item, and
                # the path dropped entirely once no items remain.
                for path, items in self.local_changes.iterdeletedreftests():
                    paths[path] -= items
                    if len(paths[path]) == 0:
                        del paths[path]
            yield item_type, paths

    def contains_path(self, path):
        """True if any item type includes ``path`` after local changes."""
        return any(path in paths for _, paths in self._included_items())

    def add(self, item):
        """Add a manifest item; RefTests go into the reftest node graph
        instead of the per-type data."""
        if item is None:
            return
        if isinstance(item, RefTest):
            self.reftest_nodes[item.path].add(item)
            self.reftest_nodes_by_url[item.url] = item
        else:
            self._add(item)
        item.manifest = self

    def _add(self, item):
        # Unconditional insert into the committed per-type store.
        self._data[item.item_type][item.path].add(item)

    def extend(self, items):
        for item in items:
            self.add(item)

    def remove_path(self, path):
        """Drop all committed (non-reftest) items stored under ``path``."""
        for item_type in item_types:
            if path in self._data[item_type]:
                del self._data[item_type][path]

    def itertypes(self, *types):
        """Iterate sorted (path, items) pairs, optionally restricted to
        the given item types."""
        if not types:
            types = None
        for item_type, items in self._included_items(types):
            for item in sorted(iteritems(items)):
                yield item

    def __iter__(self):
        for item in self.itertypes():
            yield item

    def __getitem__(self, path):
        for _, paths in self._included_items():
            if path in paths:
                return paths[path]
        raise KeyError

    def get_reference(self, url):
        """Look up a reftest node by URL, preferring local changes."""
        if url in self.local_changes.reftest_nodes_by_url:
            return self.local_changes.reftest_nodes_by_url[url]
        if url in self.reftest_nodes_by_url:
            return self.reftest_nodes_by_url[url]
        return None

    def _committed_with_path(self, rel_path):
        """All committed items (including reftest nodes) for rel_path."""
        rv = set()

        for paths_items in self._data.itervalues():  # Python 2 dict API
            rv |= paths_items.get(rel_path, set())

        if rel_path in self.reftest_nodes:
            rv |= self.reftest_nodes[rel_path]

        return rv

    def _committed_paths(self):
        """The set of all paths with committed (non-reftest) items."""
        rv = set()
        for paths_items in self._data.itervalues():  # Python 2 dict API
            rv |= set(paths_items.keys())
        return rv

    def update(self,
               tests_root,
               url_base,
               new_rev,
               committed_changes=None,
               local_changes=None,
               remove_missing_local=False):
        """Refresh the manifest.

        ``committed_changes`` / ``local_changes`` are iterables/dicts of
        (rel_path, status) where status is "modified" or a deletion.
        Committed modifications replace stored items; local modifications
        are recorded as an overlay in ``self.local_changes``.
        """
        if local_changes is None:
            local_changes = {}

        if committed_changes is not None:
            for rel_path, status in committed_changes:
                self.remove_path(rel_path)
                if status == "modified":
                    # If the file also changed locally, parse the
                    # committed (git) version rather than the worktree.
                    use_committed = rel_path in local_changes
                    source_file = SourceFile(tests_root,
                                             rel_path,
                                             url_base,
                                             use_committed=use_committed)
                    self.extend(source_file.manifest_items())

        # Rebuild the overlay from scratch for the new local state.
        self.local_changes = LocalChanges(self)

        local_paths = set()
        for rel_path, status in iteritems(local_changes):
            local_paths.add(rel_path)

            if status == "modified":
                existing_items = self._committed_with_path(rel_path)
                source_file = SourceFile(tests_root,
                                         rel_path,
                                         url_base,
                                         use_committed=False)
                # Only record items that differ from the committed ones.
                local_items = set(source_file.manifest_items())

                updated_items = local_items - existing_items
                self.local_changes.extend(updated_items)
            else:
                self.local_changes.add_deleted(rel_path)

        if remove_missing_local:
            for path in self._committed_paths() - local_paths:
                self.local_changes.add_deleted(path)

        self.update_reftests()

        if new_rev is not None:
            self.rev = new_rev
        self.url_base = url_base

    def update_reftests(self):
        """Recompute which reftest nodes are top-level tests, splitting
        the result between committed data, local additions and local
        removals."""
        default_reftests = self.compute_reftests(self.reftest_nodes)

        # Merge committed nodes with the local overlay, minus deletions.
        all_reftest_nodes = self.reftest_nodes.copy()
        all_reftest_nodes.update(self.local_changes.reftest_nodes)
        for item in self.local_changes.iterdeleted():
            if item in all_reftest_nodes:
                del all_reftest_nodes[item]
        modified_reftests = self.compute_reftests(all_reftest_nodes)

        added_reftests = modified_reftests - default_reftests
        # The interesting case here is not when the file is deleted,
        # but when a reftest like A == B is changed to the form
        # C == A == B, so that A still exists but is now a ref rather than
        # a test.
        removed_reftests = default_reftests - modified_reftests

        dests = [(default_reftests, self._data["reftest"]),
                 (added_reftests, self.local_changes._data["reftest"]),
                 (removed_reftests, self.local_changes._deleted_reftests)]

        #TODO: Warn if there exist unreachable reftest nodes
        for source, target in dests:
            for item in source:
                target[item.path].add(item)

    def compute_reftests(self, reftest_nodes):
        """Given a set of reftest_nodes, return a set of all the nodes that are top-level
        tests i.e. don't have any incoming reference links."""
        reftests = set()

        # First pass: collect every URL that is referenced by some node.
        has_inbound = set()
        for path, items in iteritems(reftest_nodes):
            for item in items:
                for ref_url, ref_type in item.references:
                    has_inbound.add(ref_url)

        # Second pass: anything never referenced is a top-level test.
        for path, items in iteritems(reftest_nodes):
            for item in items:
                if item.url in has_inbound:
                    continue
                reftests.add(item)

        return reftests

    def to_json(self):
        """Serialise to the CURRENT_VERSION JSON structure."""
        out_items = {
            item_type: sorted(
                test.to_json()
                for _, tests in iteritems(items)
                for test in tests
            )
            for item_type, items in iteritems(self._data)
        }

        # Sorted OrderedDict keeps the on-disk representation stable.
        reftest_nodes = OrderedDict()
        for key, value in sorted(iteritems(self.reftest_nodes)):
            reftest_nodes[from_os_path(key)] = [v.to_json() for v in value]

        rv = {"url_base": self.url_base,
              "rev": self.rev,
              "local_changes": self.local_changes.to_json(),
              "items": out_items,
              "reftest_nodes": reftest_nodes,
              "version": CURRENT_VERSION}
        return rv

    @classmethod
    def from_json(cls, tests_root, obj):
        """Rebuild a Manifest from its JSON form.

        Raises ManifestVersionMismatch for any other manifest version.
        """
        version = obj.get("version")
        if version != CURRENT_VERSION:
            raise ManifestVersionMismatch

        self = cls(git_rev=obj["rev"],
                   url_base=obj.get("url_base", "/"))
        # NOTE(review): hasattr(obj, "items") is always True for a dict
        # (the .items method exists); this guard presumably meant
        # '"items" in obj' -- confirm before relying on it.
        if not hasattr(obj, "items"):
            raise ManifestError

        item_classes = {"testharness": TestharnessTest,
                        "reftest": RefTest,
                        "manual": ManualTest,
                        "stub": Stub,
                        "wdspec": WebdriverSpecTest}

        # Shared cache so each SourceFile is constructed only once.
        source_files = {}

        for k, values in iteritems(obj["items"]):
            if k not in item_types:
                raise ManifestError
            for v in values:
                manifest_item = item_classes[k].from_json(self, tests_root, v,
                                                          source_files=source_files)
                self._add(manifest_item)

        for path, values in iteritems(obj["reftest_nodes"]):
            path = to_os_path(path)
            for v in values:
                item = RefTest.from_json(self, tests_root, v,
                                         source_files=source_files)
                self.reftest_nodes[path].add(item)
                self.reftest_nodes_by_url[v["url"]] = item

        self.local_changes = LocalChanges.from_json(self,
                                                    tests_root,
                                                    obj["local_changes"],
                                                    source_files=source_files)

        self.update_reftests()
        return self
class LocalChanges(object):
    """Overlay tracking uncommitted test additions, deletions and
    reftest-graph changes on top of a Manifest's committed data."""

    def __init__(self, manifest):
        self.manifest = manifest
        # item_type -> {path: set(items)} for locally added/updated tests.
        self._data = dict((item_type, defaultdict(set)) for item_type in item_types)
        self._deleted = set()
        self.reftest_nodes = defaultdict(set)
        self.reftest_nodes_by_url = {}
        # Reftest items that exist in the committed data but are no
        # longer top-level tests (or were removed) locally.
        self._deleted_reftests = defaultdict(set)

    def add(self, item):
        """Record a locally added/updated item; RefTests go into the
        local reftest node graph."""
        if item is None:
            return
        if isinstance(item, RefTest):
            self.reftest_nodes[item.path].add(item)
            self.reftest_nodes_by_url[item.url] = item
        else:
            self._add(item)
        # Items report the owning Manifest, not this overlay.
        item.manifest = self.manifest

    def _add(self, item):
        self._data[item.item_type][item.path].add(item)

    def extend(self, items):
        for item in items:
            self.add(item)

    def add_deleted(self, path):
        self._deleted.add(path)

    def is_deleted(self, path):
        return path in self._deleted

    def itertypes(self, *types):
        # Unlike Manifest.itertypes, no types means nothing is yielded.
        for item_type in types:
            yield item_type, self._data[item_type]

    def iterdeleted(self):
        for item in self._deleted:
            yield item

    def iterdeletedreftests(self):
        # Yields (path, set(items)) pairs.
        for item in iteritems(self._deleted_reftests):
            yield item

    def __getitem__(self, item_type):
        return self._data[item_type]

    def to_json(self):
        """Serialise this overlay for embedding in the manifest JSON."""
        reftest_nodes = {from_os_path(key): [v.to_json() for v in value]
                         for key, value in iteritems(self.reftest_nodes)}

        deleted_reftests = {from_os_path(key): [v.to_json() for v in value]
                            for key, value in iteritems(self._deleted_reftests)}

        rv = {"items": defaultdict(dict),
              "reftest_nodes": reftest_nodes,
              "deleted": [from_os_path(path) for path in self._deleted],
              "deleted_reftests": deleted_reftests}

        for test_type, paths in iteritems(self._data):
            for path, tests in iteritems(paths):
                path = from_os_path(path)
                rv["items"][test_type][path] = [test.to_json() for test in tests]

        return rv

    @classmethod
    def from_json(cls, manifest, tests_root, obj, source_files=None):
        """Rebuild a LocalChanges overlay from its JSON form."""
        self = cls(manifest)
        # NOTE(review): hasattr(obj, "items") is always True for a dict
        # (the .items method exists); this guard presumably meant
        # '"items" in obj' -- confirm before relying on it.
        if not hasattr(obj, "items"):
            raise ManifestError

        item_classes = {"testharness": TestharnessTest,
                        "reftest": RefTest,
                        "manual": ManualTest,
                        "stub": Stub,
                        "wdspec": WebdriverSpecTest}

        for test_type, paths in iteritems(obj["items"]):
            for path, tests in iteritems(paths):
                for test in tests:
                    manifest_item = item_classes[test_type].from_json(manifest,
                                                                      tests_root,
                                                                      test,
                                                                      source_files=source_files)
                    self.add(manifest_item)

        for path, values in iteritems(obj["reftest_nodes"]):
            path = to_os_path(path)
            for v in values:
                item = RefTest.from_json(self.manifest, tests_root, v,
                                         source_files=source_files)
                self.reftest_nodes[path].add(item)
                self.reftest_nodes_by_url[item.url] = item

        for item in obj["deleted"]:
            self.add_deleted(to_os_path(item))

        # "deleted_reftests" is optional for forward-compatibility.
        for path, values in iteritems(obj.get("deleted_reftests", {})):
            path = to_os_path(path)
            for v in values:
                item = RefTest.from_json(self.manifest, tests_root, v,
                                         source_files=source_files)
                self._deleted_reftests[path].add(item)

        return self
def load(tests_root, manifest):
    """Load a Manifest from a path or a file-like object.

    A path that cannot be opened yields a fresh empty Manifest rather
    than raising; a file-like object is parsed directly.
    """
    logger = get_logger()

    # "manifest" is a path or file-like object.
    if isinstance(manifest, basestring):  # Python 2 only name
        if os.path.exists(manifest):
            logger.debug("Opening manifest at %s" % manifest)
        else:
            logger.debug("Creating new manifest at %s" % manifest)
        try:
            with open(manifest) as f:
                rv = Manifest.from_json(tests_root, json.load(f))
        except IOError:
            # Missing/unreadable file: start from an empty manifest.
            rv = Manifest(None)
        return rv

    return Manifest.from_json(tests_root, json.load(manifest))
def write(manifest, manifest_path):
    """Serialise ``manifest`` as sorted, pretty-printed JSON at
    ``manifest_path``, with a trailing newline.

    NOTE(review): the file is opened in binary mode while json.dump
    emits text -- fine on Python 2, would fail on Python 3; confirm the
    intended interpreter.
    """
    with open(manifest_path, "wb") as f:
        json.dump(manifest.to_json(), f, sort_keys=True, indent=2, separators=(',', ': '))
        f.write("\n")
| mpl-2.0 |
atiqueahmedziad/addons-server | src/olympia/github/tests/test_tasks.py | 3 | 2079 | import json
from django.test.utils import override_settings
import mock
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import AMOPaths
from olympia.amo.urlresolvers import reverse
from olympia.files.models import FileUpload
from olympia.github.tasks import process_results, process_webhook
from olympia.github.tests.test_github import GithubBaseTestCase
@override_settings(GITHUB_API_USER='key', GITHUB_API_TOKEN='token')
class TestGithub(AMOPaths, GithubBaseTestCase):
    """Tests for the GitHub integration tasks (process_results and
    process_webhook), checking the commit status posted back to GitHub."""

    def get_url(self, upload_uuid):
        """Absolute devhub URL for an upload's validation detail page."""
        return absolutify(
            reverse('devhub.upload_detail', args=[upload_uuid]))

    def test_good_results(self):
        # A passing validation posts a 'success' status with a link back
        # to the upload detail page.
        upload = FileUpload.objects.create(
            validation=json.dumps({'success': True, 'errors': 0})
        )
        process_results(upload.pk, self.data)
        self.check_status('success', target_url=self.get_url(upload.uuid))

    def test_failed_results(self):
        # No validation stored at all is reported as a 'failure'.
        upload = FileUpload.objects.create()
        process_results(upload.pk, self.data)
        self.check_status('failure', description=mock.ANY)

    def test_error_results(self):
        # Validation errors produce per-error annotations plus an 'error'
        # commit status.
        upload = FileUpload.objects.create(
            validation=json.dumps({
                'errors': 1,
                'messages': [{
                    'description': ['foo'],
                    'file': 'some/file',
                    'line': 3,
                    'type': 'error'
                }]
            })
        )
        process_results(upload.pk, self.data)
        error = self.requests.post.call_args_list[0]
        self.check_status(
            'error',
            call=error, description=mock.ANY,
            target_url=self.get_url(upload.uuid))

    def test_webhook(self):
        upload = FileUpload.objects.create()
        self.response = mock.Mock()
        # BUG FIX: the original `open(...).read()` leaked the fixture
        # file handle (ResourceWarning / fd leak); close it
        # deterministically with a context manager.
        with open(self.xpi_path('github-repo')) as fh:
            self.response.content = fh.read()
        self.requests.get.return_value = self.response
        process_webhook(upload.pk, self.data)
        self.check_status('success', target_url=self.get_url(upload.uuid))
| bsd-3-clause |
ebesson/ansible-roles | library/apm.py | 2 | 4966 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Hiroaki Nakamura <hnakamur@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apm
short_description: Manage atom packages with apm
description:
- Manage atom packages with Atom Package Manager (apm)
version_added: 1.6
author: Hiroaki Nakamura
options:
name:
description:
- The name of a atom library to install
required: true
version:
description:
- The version to be installed
required: false
executable:
description:
- The executable location for apm.
- This is useful if apm is not in the PATH.
required: false
state:
description:
- The state of the atom library
required: false
default: present
choices: [ "present", "absent", "latest" ]
'''
EXAMPLES = '''
description: Install "project-manager" atom package.
- apm: name=project-manager state=present
description: Update the package "project-manager" to the latest version.
- apm: name=project-manager state=latest
description: Remove the package "project-manager".
- apm: name=project-manager state=absent
'''
import os
class Apm(object):
    """Thin wrapper around the Atom Package Manager (``apm``) CLI.

    NOTE(review): the regexes below use ``re``, which is not imported in
    this file; it is presumably provided by the trailing
    ``from ansible.module_utils.basic import *`` -- confirm.
    """

    def __init__(self, module, **kwargs):
        # module: the AnsibleModule instance used to run commands.
        self.module = module
        self.name = kwargs['name']
        self.version = kwargs['version']
        if kwargs['executable']:
            self.executable = kwargs['executable']
        else:
            # Fall back to locating apm on the PATH (required=True).
            self.executable = module.get_bin_path('apm', True)
        if kwargs['version']:
            # apm addresses a pinned version as "name@version".
            self.name_version = self.name + '@' + self.version
        else:
            self.name_version = self.name

    def _exec(self, args, run_in_check_mode=False, check_rc=True):
        """Run ``apm <args> [name]`` and return stdout.

        In check mode nothing is executed (and '' is returned) unless
        ``run_in_check_mode`` is set, which read-only commands use.
        """
        if not self.module.check_mode or (
                self.module.check_mode and run_in_check_mode):
            cmd = [self.executable] + args
            if self.name:
                cmd.append(self.name_version)
            rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
            return out
        return ''

    def list(self):
        """Return (installed, missing) name lists from ``apm list``."""
        cmd = ['list']
        installed = list()
        missing = list()
        data = self._exec(cmd, True, False)
        # Matches the UTF-8 encoded tree glyphs apm prints before each
        # "name@version" line; captures the package name.
        pattern = re.compile('^(?:\xe2\x94\x9c|\xe2\x94\x94)\xe2\x94\x80\xe2\x94\x80\s+(\S+)@')
        for dep in data.splitlines():
            m = pattern.match(dep)
            if m:
                installed.append(m.group(1))
        if self.name not in installed:
            missing.append(self.name)
        return installed, missing

    def install(self):
        """Install the configured package (``apm install name[@ver]``)."""
        return self._exec(['install'])

    def update(self):
        """Update the configured package (``apm update``)."""
        return self._exec(['update'])

    def uninstall(self):
        """Remove the configured package (``apm uninstall``)."""
        return self._exec(['uninstall'])

    def list_outdated(self):
        """Return names of outdated packages from ``apm outdated``."""
        outdated = list()
        data = self._exec(['outdated'], True, False)
        # Same tree-glyph prefix as list(); '(empty)' marks no results.
        pattern = re.compile('^(?:\xe2\x94\x9c|\xe2\x94\x94)\xe2\x94\x80\xe2\x94\x80\s+(\S+)')
        for dep in data.splitlines():
            m = pattern.match(dep)
            if m and m.group(1) != '(empty)':
                outdated.append(m.group(1))
        return outdated
def main():
    """Ansible module entry point: parse args and converge the requested
    package state (present / latest / absent)."""
    arg_spec = dict(
        name=dict(default=None),
        version=dict(default=None),
        executable=dict(default=None),
        state=dict(default='present', choices=['present', 'absent', 'latest'])
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        supports_check_mode=True
    )

    name = module.params['name']
    version = module.params['version']
    executable = module.params['executable']
    state = module.params['state']

    if not name:
        module.fail_json(msg='name must be specified')

    apm = Apm(module, name=name, version=version, executable=executable)
    changed = False
    if state == 'present':
        # Install only if the package is not already present.
        installed, missing = apm.list()
        if len(missing):
            changed = True
            apm.install()
    elif state == 'latest':
        # Install if missing OR outdated.
        installed, missing = apm.list()
        outdated = apm.list_outdated()
        if len(missing) or len(outdated):
            changed = True
            apm.install()
    else:  # absent
        installed, missing = apm.list()
        if name in installed:
            changed = True
            apm.uninstall()
    module.exit_json(changed=changed)


# import module snippets
# (star import is the classic Ansible module pattern; it also supplies
# names like ``re`` used above)
from ansible.module_utils.basic import *
main()
| apache-2.0 |
mtekel/digitalmarketplace-api | migrations/versions/597e346723ee_.py | 3 | 1493 | """empty message
Revision ID: 597e346723ee
Revises: 56b57f01c4b4
Create Date: 2015-03-25 16:36:11.552342
"""
# revision identifiers, used by Alembic.
revision = '597e346723ee'
down_revision = '56b57f01c4b4'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import column, table
from sqlalchemy import String
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Add a nullable 'status' column to both the live and archived
    # services tables.
    op.add_column('archived_services', sa.Column('status', sa.String(), nullable=True))
    op.add_column('services', sa.Column('status', sa.String(), nullable=True))
    # Constrain the new column to the three supported lifecycle states.
    op.create_check_constraint(
        "ck_services_status",
        "services",
        "status in ('disabled', 'enabled', 'published')"
    )
    op.create_check_constraint(
        "ck_archived_services_status",
        "archived_services",
        "status in ('disabled', 'enabled', 'published')"
    )
    # Lightweight table stubs so we can issue UPDATEs without importing
    # the application models.
    services = table('services', column('status', String))
    archived_services = table('archived_services', column('status', String))
    # Backfill every existing row as 'enabled'.
    op.execute(
        services.update(). \
            values({'status': op.inline_literal('enabled')})
    )
    op.execute(
        archived_services.update(). \
            values({'status': op.inline_literal('enabled')})
    )
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Dropping the columns also removes the dependent check constraints.
    op.drop_column('services', 'status')
    op.drop_column('archived_services', 'status')
    ### end Alembic commands ###
| mit |
adamsumm/SMBRNN | LevelParsing/lpBiSML.py | 2 | 6406 | import cv2
import json
import numpy as np
import json
tileSize = 8
def clamp(val, minimum, maximum):
    """Return *val* limited to the range [minimum, maximum]."""
    capped = min(val, maximum)
    return max(capped, minimum)
def findSubImageLocations(image, subImages, confidence):
    """Template-match each sub-image against *image*.

    Returns ``[row_coords, col_coords]`` (two 1-D float arrays) of every
    location whose normalized match score exceeds *confidence*, across
    all templates.
    """
    rows = np.array([])
    cols = np.array([])
    for template in subImages:
        scores = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
        hits = np.arange(scores.size)[(scores > confidence).flatten()]
        ys, xs = np.unravel_index(hits, scores.shape)
        # Row coordinates are shifted by (template height - tileSize) —
        # presumably to anchor taller templates on the tile grid; TODO
        # confirm against the callers.
        rows = np.concatenate((rows, ys + (template.shape[0] - tileSize)))
        cols = np.concatenate((cols, xs))
    return [rows, cols]
def tileToString(t):
    """Map a numeric tile id (anything int() accepts) to its name."""
    names = ('empty', 'solid', 'breakable', 'pickup', 'goodBlock',
             'enemy', 'pipe', 'coin', 'bullet')
    return names[int(t)]
def parseLevel(levelname, tiles):
    """Parse a Super Mario Land level screenshot into the text corpus.

    Template-matches every known tile sprite against the level image,
    rasterises the matches onto a tile grid, then appends two
    boustrophedon ("snaked") column-major text encodings of the level to
    ``input.txt`` — one starting downward, one starting upward.

    ``tiles`` is accepted for signature compatibility but is unused.
    """
    level = cv2.imread(levelname)

    # Tile legend: output character -> sprite basenames under SMLTiles/.
    # This replaces twelve near-identical prefix/postfix loops that built
    # the same mapping by hand.
    # NOTE(review): 'solid44' was absent from the original hand-written
    # list; the gap is preserved here.
    sprite_names = {
        'X': (['ground%d' % i for i in range(1, 5)] +
              ['solid%d' % i for i in range(1, 48) if i != 44]),
        '?': ['MBlock', 'MBlock2'],
        'S': ['Bblock1', 'Bblock2', 'Bblock3', 'Bblock4', 'Bblock'],
        'Q': ['QBlock'],
        'E': (['enemy'] + ['enemy%d' % i for i in range(2, 12)] +
              ['fish', 'goomba', 'koopa', 'sphinx1', 'sphinx2', 'spike',
               'seahorse', 'koopa1', 'fly1', 'fly2']),
        'B': ['BL'],
        'b': ['Bbase'],
        'o': ['coin1'],
        '[': ['pipeL'],
        '>': ['pipeUR'],
        '<': ['pipeUL'],
        ']': ['pipeR'],
    }
    # Minimum template-match score required per tile type.
    tileToCertainty = {'X': .75, '?': .75, 'S': .85, 'Q': .85, 'E': .6,
                       'o': .7, 'B': .8, 'b': .8, '<': .85, '>': .85,
                       '[': .95, ']': .85}

    levelMap = {}
    maxX = -1
    maxY = -1
    # Later tile types overwrite earlier ones on grid-cell collisions, so
    # this order is significant (same order as the original code).
    for tile in ['X', 'E', 'Q', 'S', '?', 'o', 'B', 'b', '[', ']', '<', '>']:
        images = ['SMLTiles/%s.png' % n for n in sprite_names[tile]]
        locs = findSubImageLocations(level, map(cv2.imread, images),
                                     tileToCertainty[tile])
        for yy, xx in zip(locs[0], locs[1]):
            xx = int(round(xx / tileSize))
            yy = int(round(yy / tileSize))
            levelMap[(xx, yy)] = tile
            maxX = max(maxX, xx)
            maxY = max(maxY, yy)

    # Rasterise the sparse match map onto a dense character grid;
    # '-' marks empty cells.
    levelStr = [['-' for _ in range(maxX + 1)] for _ in range(maxY + 1)]
    for (xx, yy), tile in levelMap.items():
        levelStr[yy][xx] = tile

    with open('input.txt', 'a') as outputFile:
        # Two passes with opposite starting directions (the original code
        # duplicated this loop verbatim apart from the initial values).
        _writeSnakedPass(outputFile, levelStr, maxX, maxY, 1, 0)
        _writeSnakedPass(outputFile, levelStr, maxX, maxY, -1, 15)


def _writeSnakedPass(outputFile, levelStr, maxX, maxY, direction, offset):
    """Write one snaked column-major encoding of the level, one line per
    column.

    Columns alternate between downward reads (direction=1, '(' prefix)
    and upward reads (direction=-1, ')' prefix) of 16 rows each; rows
    beyond the level's height repeat the bottom row.

    NOTE(review): ``range(maxX)`` skips the last column, as in the
    original — possibly intentional, left unchanged.
    """
    for xx in range(maxX):
        line = '(' if direction == 1 else ')'
        for dy in range(16):
            yy = offset + direction * dy
            if yy <= maxY:
                line += levelStr[yy][xx]
            else:
                line += levelStr[maxY][xx]
        # Flip the scan direction for the next column.
        if direction == 1:
            direction = -1
            offset = 15
        else:
            direction = 1
            offset = 0
        outputFile.write(line)
        outputFile.write('\n')
# Level screenshots to convert; parseLevel appends each level's text
# encoding to input.txt.
levels = [
    'SML/super_mario_land_11.png',
    'SML/super_mario_land_12.png',
    'SML/super_mario_land_13.png',
    'SML/super_mario_land_21.png',
    'SML/super_mario_land_22.png',
    'SML/super_mario_land_31.png',
    'SML/super_mario_land_32.png',
    'SML/super_mario_land_33.png',
    'SML/super_mario_land_41.png',
    'SML/super_mario_land_42.png',
]
tiles = []  # placeholder: parseLevel accepts but ignores this argument
for levelFile in levels:
    parseLevel(levelFile, tiles)
| mit |
mesheven/pyOCD | pyocd/board/mbed_board.py | 1 | 2220 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2018 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .board import Board
from .board_ids import BOARD_ID_TO_INFO
import logging
log = logging.getLogger('mbed_board')
class MbedBoard(Board):
    """
    This class inherits from Board and is specific to mbed boards.
    Particularly, this class allows you to dynamically determine
    the type of all boards connected based on the id board
    """

    def __init__(self, session, target=None):
        """
        Init the board.

        The board type is looked up from the first four characters of the
        debug probe's unique ID; unknown IDs get the name "Unknown Board"
        and, absent any override, fall back to the generic 'cortex_m'
        target.
        """
        # A 'target_override' session option takes precedence over the
        # `target` argument.
        target = session.options.get('target_override', target)

        unique_id = session.probe.unique_id
        try:
            # First four characters of the unique ID encode the board type.
            board_id = unique_id[0:4]
            board_info = BOARD_ID_TO_INFO[board_id]
            self._name = board_info.name
            self.native_target = board_info.target
        except KeyError:
            board_info = None
            self._name = "Unknown Board"
            self.native_target = None

        # Unless overridden use the native target
        if target is None:
            target = self.native_target

        if target is None:
            log.warning("Unsupported board found %s", board_id)
            target = "cortex_m"

        super(MbedBoard, self).__init__(session, target)

        # Set test binary if not already set.
        if (board_info is not None) and (self._test_binary is None):
            self._test_binary = board_info.binary

    @property
    def name(self):
        """
        Return board name
        """
        return self._name

    @property
    def description(self):
        """
        Return info on the board (name plus target type).
        """
        return self.name + " [" + self.target_type + "]"
| apache-2.0 |
sumihai-tekindo/account_sicepat | invoice_supplier_dept_seq/models/account_invoice.py | 1 | 9250 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 STI (<https://github.com/sumihai-tekindo>).
# @author Pambudi Satria <pambudi.satria@yahoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import osv, fields
from openerp.osv.expression import get_unaccent_wrapper
from openerp.tools.translate import _
class account_invoice(osv.Model):
    """Extend invoices with a department and department-prefixed
    supplier-invoice numbering."""
    _inherit = "account.invoice"

    _columns = {
        # Department owning the invoice; editable only while draft.
        'department_id': fields.many2one('account.invoice.department', 'Department', readonly=True, states={'draft': [('readonly', False)]}, ondelete='set null'),
    }

    def create(self, cr, uid, vals, context=None):
        """Create the invoice.

        For supplier invoices/refunds, draws the next number from the
        journal's sequence and prefixes it with the department code
        ('<DEPT>/<number>').  The department defaults to the journal's
        department when not supplied.

        Raises osv.except_osv if the journal has no sequence.
        """
        if context is None:
            context = {}
        obj_sequence = self.pool.get('ir.sequence')
        obj_journal = self.pool.get('account.journal')
        obj_dept = self.pool.get('account.invoice.department')
        number = ''
        journal = self._default_journal(cr, uid, context)
        date_invoice = vals.get('date_invoice', fields.date.context_today(self, cr, uid, context))
        if vals.get('journal_id') and vals['journal_id']:
            journal = obj_journal.browse(cr, uid, vals['journal_id'])
        if ('department_id' not in vals) or ('department_id' in vals and not vals['department_id']):
            # Fall back to the journal's department.
            vals['department_id'] = journal.department_id and journal.department_id.id or False
        if vals.get('department_id') and vals['department_id']:
            department = obj_dept.browse(cr, uid, vals['department_id'])
        if context.get('type', False) in ('in_invoice', 'in_refund') or (vals.get('type') and vals['type'] in ('in_invoice', 'in_refund')):
            if journal.sequence_id:
                # Date the sequence draw on the invoice date.
                ctx = dict(context)
                ctx['ir_sequence_date'] = date_invoice
                number = obj_sequence.next_by_id(cr, uid, journal.sequence_id.id, ctx)
            else:
                # BUG FIX: `_` was never imported in this module, so this
                # error path raised NameError instead of the intended
                # warning; `_` is now imported from openerp.tools.translate.
                raise osv.except_osv(_('Error!'), _('Please define a sequence on the journal.'))
        if number:
            # NOTE(review): `department` is only bound when department_id
            # is truthy; a journal with a sequence but no department would
            # raise NameError here.  Behaviour preserved — confirm whether
            # journals always carry a department (the field is required on
            # account.journal below).
            number = "%s/%s" % (department.name, number)
            vals['internal_number'] = number
        res_id = super(account_invoice, self).create(cr, uid, vals, context)
        if context.get('type', False) in ('in_invoice', 'in_refund') or (vals.get('type') and vals['type'] in ('in_invoice', 'in_refund')):
            self.write(cr, uid, [res_id], {'number': number})
        return res_id

    @api.multi
    def action_cancel(self):
        """Cancel the invoice, restoring the department-prefixed internal
        number on supplier documents."""
        res = super(account_invoice, self).action_cancel()
        if self.type in ('in_invoice', 'in_refund'):
            self.write({'number': self.internal_number})
        return res

    @api.multi
    def finalize_invoice_move_lines(self, move_lines):
        """ finalize_invoice_move_lines(move_lines) -> move_lines

        Propagate the invoice's department onto every generated journal
        item.

        :param move_lines: list of (0, 0, vals) tuples for account.move.line
        :return: the updated move_lines
        """
        for move in move_lines:
            move[2]['department_id'] = self.department_id and self.department_id.id or False
        return move_lines
class account_journal(osv.osv):
    """Extend journals with a mandatory department, used to prefix
    supplier invoice numbers and to default invoice departments."""
    _inherit = "account.journal"

    _columns = {
        'department_id': fields.many2one('account.invoice.department', 'Department', ondelete='set null', required=True),
    }
class account_move_line(osv.osv):
    """Extend journal items with the invoice department and make the
    reporting query helper department/analytic aware."""
    _inherit = "account.move.line"

    _columns = {
        # Department propagated from the journal (or the invoice).
        'department_id': fields.many2one('account.invoice.department', 'Department', ondelete='set null'),
    }

    def _query_get(self, cr, uid, obj='l', context=None):
        """Extend the standard move-line filter clause with optional
        'department_ids' and 'analytic_ids' context filters.

        An empty list under either key filters on NULL instead of a set
        of ids.  Returns the SQL WHERE fragment as a string.
        """
        result = super(account_move_line, self)._query_get(cr, uid, obj=obj, context=context)
        context = dict(context or {})
        query = ''
        query_params = {}
        if 'department_ids' in context:
            if context.get('department_ids'):
                query_params['department_ids'] = tuple(context['department_ids'])
                query += ' AND ' + obj + '.department_id IN %(department_ids)s'
            else:
                query += ' AND ' + obj + '.department_id IS null'
        if 'analytic_ids' in context:
            if context.get('analytic_ids'):
                # Include all children of the requested analytic accounts.
                analytics = self.pool.get('account.analytic.account').search(cr, uid, [('parent_id', 'child_of', context['analytic_ids'])], context=context)
                query_params['analytic_ids'] = tuple(analytics)
                query += ' AND ' + obj + '.analytic_account_id IN %(analytic_ids)s'
            else:
                query += ' AND ' + obj + '.analytic_account_id IS null'
        if query:
            # mogrify interpolates the bound parameters safely.
            result += cr.mogrify(query, query_params)
        return result

    def create(self, cr, uid, vals, context=None, check=True):
        """Create the move line, defaulting 'department_id' to the
        journal's department when not provided in *vals*."""
        result = super(account_move_line, self).create(cr, uid, vals, context=context)
        if result and ('department_id' not in vals) and ('journal_id' in vals) and vals['journal_id']:
            journal = self.pool.get('account.journal').browse(cr, uid, vals['journal_id'], context=context)
            department = journal.department_id
            self.write(cr, uid, result, {'department_id': department and department.id or False}, context=context)
        return result
class account_invoice_department(osv.Model):
    """Invoice department: a short code plus description, used to prefix
    supplier invoice numbers and tag journal items."""
    _name = "account.invoice.department"
    _order = "description asc"

    _columns = {
        'name': fields.char('Code', size=4, required=True, copy=False),
        'description': fields.char('Description'),
        'active': fields.boolean('Active'),
        'user_id': fields.many2one('res.users', string='Manager'),
    }
    _defaults = {
        'active': True,
    }

    def name_get(self, cr, uid, ids, context=None):
        """Display the code by default; the description when the context
        flag 'description_only' is set."""
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = []
        for record in self.browse(cr, uid, ids, context=context):
            name = record.name
            if context.get('description_only'):
                name = record.description
            res.append((record.id, name))
        return res

    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Search on both code and description with a single SQL query.

        Only equality/like-style operators take the custom path; anything
        else falls through to the standard implementation.  Record rules
        and access rights are applied via _where_calc/_apply_ir_rules.
        """
        if not args:
            args = []
        if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
            self.check_access_rights(cr, uid, 'read')
            where_query = self._where_calc(cr, uid, args, context=context)
            self._apply_ir_rules(cr, uid, where_query, 'read', context=context)
            from_clause, where_clause, where_clause_params = where_query.get_sql()
            where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '

            # search on the name of the contacts and of its company
            search_name = name
            if operator in ('ilike', 'like'):
                search_name = '%%%s%%' % name
            if operator in ('=ilike', '=like'):
                operator = operator[1:]

            unaccent = get_unaccent_wrapper(cr)

            # Only the whitelisted operators above and unaccent-wrapped
            # column names are interpolated into the SQL; the search value
            # itself is passed as a bound parameter.
            query = """SELECT id
                         FROM account_invoice_department
                      {where} ({name} {operator} {percent}
                           OR {description} {operator} {percent})
                     ORDER BY {description}
                    """.format(where=where_str, operator=operator,
                               name=unaccent('name'),
                               description=unaccent('description'),
                               percent=unaccent('%s'))
            where_clause_params += [search_name, search_name]
            if limit:
                query += ' limit %s'
                where_clause_params.append(limit)
            cr.execute(query, where_clause_params)
            ids = map(lambda x: x[0], cr.fetchall())
            if ids:
                return self.name_get(cr, uid, ids, context)
            else:
                return []
        return super(account_invoice_department, self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
| gpl-3.0 |
iterativ/djangojames | djangojames/templatetags/truncate.py | 1 | 2059 | # -*- coding: utf-8 -*-
#
# ITerativ GmbH
# http://www.iterativ.ch/
#
# Copyright (c) 2012 ITerativ GmbH. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Created on Mar 2, 2012
# @author: github.com/maersu
from __future__ import unicode_literals
from django.template import Library
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.template.defaultfilters import stringfilter
import re
register = Library()
def truncate_string(s, num):
    """Truncate *s* to roughly *num* characters, respecting word
    boundaries.

    When truncation occurs the result ends in ' ...'.  Strings no longer
    than *num* are returned unchanged (apart from unicode coercion).
    """
    s = force_unicode(s)
    newlength = int(num)
    if len(s) > newlength:
        length = newlength - 3  # reserve room for the ' ...' suffix
        if s[length-1] == ' ' or s[length] == ' ':
            # Cut falls on a word boundary: just trim trailing spaces.
            s = s[:length].strip()
        else:
            # Cut falls mid-word: drop the (possibly partial) last word.
            # ' +' replaces the original ' *', which could match the empty
            # string — re.split rejects/changes behaviour for such patterns
            # in modern Pythons; on runs of spaces the two are equivalent.
            words = re.split(' +', s[:length])
            if len(words) > 1:
                del words[-1]
                s = ' '.join(words)
        s += ' ...'
    return s
# Lazy wrapper so truncation also works on lazy translation strings.
truncate_chars = allow_lazy(truncate_string, unicode)
@register.filter
@stringfilter
def truncatestring(value, arg):
    """
    Truncates the string after a number of characters. It respects word boundaries and keeps newlines.
    Argument: Number of characters.
    """
    try:
        max_chars = int(arg)
    except ValueError:
        # Non-integer argument: fail silently, per Django filter convention.
        return value
    return truncate_chars(value, max_chars)
truncatestring.is_safe = True
dimasad/numpy | numpy/distutils/command/build.py | 187 | 1618 | from __future__ import division, absolute_import, print_function
import os
import sys
from distutils.command.build import build as old_build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
class build(old_build):
    """numpy's ``build`` command: adds Fortran-compiler configuration,
    generated-source building and a ``--parallel`` option on top of the
    standard distutils ``build``."""

    # Run compiler configuration and source generation before the stock
    # distutils build steps.
    sub_commands = [('config_cc', lambda *args: True),
                    ('config_fc', lambda *args: True),
                    ('build_src', old_build.has_ext_modules),
                    ] + old_build.sub_commands

    user_options = old_build.user_options + [
        ('fcompiler=', None,
         "specify the Fortran compiler type"),
        ('parallel=', 'j',
         "number of parallel jobs"),
        ]

    help_options = old_build.help_options + [
        ('help-fcompiler', None, "list available Fortran compilers",
         show_fortran_compilers),
        ]

    def initialize_options(self):
        old_build.initialize_options(self)
        self.fcompiler = None
        self.parallel = None

    def finalize_options(self):
        """Validate --parallel and compute the scripts build directory."""
        if self.parallel:
            try:
                self.parallel = int(self.parallel)
            except ValueError:
                raise ValueError("--parallel/-j argument must be an integer")
        build_scripts = self.build_scripts
        old_build.finalize_options(self)
        # BUG FIX: sys.version[0:3] truncates "3.10.x" to "3.1"; use
        # version_info so the platform specifier is correct on Python
        # 3.10+.  Identical output for earlier versions.
        plat_specifier = ".%s-%d.%d" % ((get_platform(),) + sys.version_info[:2])
        if build_scripts is None:
            self.build_scripts = os.path.join(self.build_base,
                                              'scripts' + plat_specifier)

    def run(self):
        old_build.run(self)
| bsd-3-clause |
XueqingLin/tensorflow | tensorflow/contrib/training/python/training/bucket_ops.py | 7 | 16249 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for bucketing data into groups.
The classes and functions in this module are used to queue up data into
buckets conditional on side information (e.g. sequence length).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_as_original_type = input_py._as_original_type
_as_tensor_list = input_py._as_tensor_list
_deserialize_sparse_tensors = input_py._deserialize_sparse_tensors
_dtypes = input_py._dtypes
_serialize_sparse_tensors = input_py._serialize_sparse_tensors
_shapes = input_py._shapes
_which_queue = input_py._which_queue
# pylint: enable=protected-access
def _validate_bucket(tensor_list):
  """Convert the inputs to tensors, rejecting an empty list."""
  converted = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
  if not converted:
    raise ValueError("Expected at least one tensor in bucket().")
  return converted
def bucket(tensors,
           which_bucket,
           batch_size,
           num_buckets,
           num_threads=1,
           capacity=32,
           shapes=None,
           dynamic_pad=False,
           allow_smaller_final_batch=False,
           keep_input=None,
           shared_name=None,
           name=None):
  """Lazy bucketing of input tensors according to `which_bucket`.

  The argument `tensors` can be a list or a dictionary of tensors.
  The value returned by the function will be of the same type
  as `tensors`.

  The tensors entering this function are put into the bucket given by
  `which_bucket`.  Each bucket has its own queue.  When a bucket contains
  `batch_size` elements, this minibatch is pushed onto a top queue.  The
  tensors returned from this function are a the result of dequeueing the
  next minibatch from this top queue.

  This function is implemented using several queues. A `QueueRunner` for the
  queues is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  As the returned tensors are the result of of a dequeue operation, evaluating
  them will throw a `tf.errors.OutOfRangeError` when the input queue is
  exhausted.  If these tensors are feeding another input queue, its queue runner
  will catch this exception, however, if they are used in your main thread
  you are responsible for catching this yourself.

  *N.B.:* If `dynamic_pad` is `False`, you must ensure that either
  (i) the `shapes` argument is passed, or (ii) all of the tensors in
  `tensors` must have fully-defined shapes. `ValueError` will be
  raised if neither of these conditions holds.

  If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
  tensors is known, but individual dimensions may have shape `None`.
  In this case, for each enqueue the dimensions with value `None`
  may have a variable length; upon dequeue, the output tensors will be padded
  on the right to the maximum shape of the tensors in the current minibatch.
  For numbers, this padding takes value 0.  For strings, this padding is
  the empty string.  See `PaddingFIFOQueue` for more info.

  If `allow_smaller_final_batch` is `True`, a smaller batch value than
  `batch_size` is returned when the queues are closed and there are not enough
  elements to fill the batch, otherwise the pending elements are discarded.
  In addition, all output tensors' static shapes, as accessed via the
  `get_shape()` method will have a 0th `Dimension` value of `None`, and
  operations that depend on fixed batch_size would fail.

  Args:
    tensors: The list or dictionary of tensors, representing a single element,
      to bucket.  Nested lists are not supported.
    which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.
    batch_size: The new batch size pulled from the queue
      (python int or int32 scalar).
    num_buckets: A python integer, the number of buckets.
    num_threads: An integer.  The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of minibatches in the top queue,
      and also the maximum number of elements within each bucket.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean.  Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
      batches to be smaller if there are insufficient items left in the queues.
    keep_input: (Optional).  A `bool` scalar Tensor.  If provided, this tensor
      controls whether the input is added to the queue or not.  If it evaluates
      `True`, then `tensors` are added to the bucket; otherwise they are
      dropped.  This tensor essentially acts as a filtering mechanism.
      The default behavior is to assume `keep_input=True`.
    shared_name: (Optional). If set, the queues will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A tuple `(bucket, outputs)` where `bucket` is
    a `int32` scalar tensor and `outputs` is a list or
    dictionary of batched outputs corresponding to elements of `tensors`.
    Every step will receive a new bucket of outputs.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors`.
  """
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "bucket", tensor_list) as name:
    tensor_list = _validate_bucket(tensor_list)
    # Sparse tensors are serialized into dense string tensors so they can
    # flow through the queues; they are deserialized again after dequeue.
    (tensor_list, sparse_info) = _serialize_sparse_tensors(
        tensor_list, enqueue_many=False)

    # Round-trip batch_size to a tensor, and possibly back
    batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int32, name="batch_size")
    static_batch_size = tensor_util.constant_value(batch_size)
    batch_size = (
        static_batch_size if static_batch_size is not None else batch_size)

    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many=False)

    which_bucket = ops.convert_to_tensor(
        which_bucket, dtype=dtypes.int32, name="which_bucket")

    # One queue per bucket; dynamic_pad selects a PaddingFIFOQueue.
    queue_creator = _which_queue(dynamic_pad)
    bucket_queues = []
    for i in range(num_buckets):
      shared_name_i = (
          "%s_%d" % (shared_name, i) if shared_name is not None else None)
      bucket_queues.append(
          queue_creator(capacity=capacity,
                        dtypes=types,
                        shapes=shapes,
                        shared_name=shared_name_i, name="bucket_queue_%d" % i))

    # With allow_smaller_final_batch the leading (batch) dimension is
    # unknown, hence None.
    maybe_static_batch_size = (
        None if allow_smaller_final_batch else static_batch_size)

    bucket_shapes = [tensor_shape.vector(maybe_static_batch_size).concatenate(s)
                     for s in bucket_queues[0].shapes]
    # top_queue is a PaddingFIFOQueue even if the bucket queues are regular FIFO
    # queues because if we use allow_smaller_final_batch, shapes will
    # contain Nones in their first entry; as a result, a regular
    # FIFOQueue would die when being passed shapes that are not fully defined.
    top_queue = data_flow_ops.PaddingFIFOQueue(
        capacity=capacity,
        dtypes=[dtypes.int32] + types,
        shapes=[tensor_shape.scalar()] + bucket_shapes,
        shared_name=shared_name, name="top_queue")

    def enqueue_which():
      # Build one conditional enqueue per bucket; only the one whose index
      # equals which_bucket actually enqueues.
      def enqueue_single(i):
        return bucket_queues[i].enqueue(tensor_list)
      enqueues = [
          control_flow_ops.cond(
              math_ops.equal(which_bucket, i),
              functools.partial(enqueue_single, i),
              control_flow_ops.no_op)
          for i in range(num_buckets)]
      return control_flow_ops.group(*enqueues, name="group_enqueues")

    if keep_input is not None:
      # TODO(ebrevdo): Expand keep_input param to core training
      # methods, and pipe through to _serialize_sparse_tensors; so
      # that expensive serialization is guarded by keep_input.
      maybe_enqueue = control_flow_ops.cond(
          keep_input,
          enqueue_which,
          control_flow_ops.no_op)
    else:
      maybe_enqueue = enqueue_which()

    bucket_enqueue_ops = [maybe_enqueue] * num_threads

    if allow_smaller_final_batch:
      which_dequeue = lambda q: q.dequeue_up_to
    else:
      which_dequeue = lambda q: q.dequeue_many

    # Each bucket feeds (bucket_index, minibatch) into the shared top queue.
    enqueues_to_top = [
        top_queue.enqueue(
            [constant_op.constant(i)] +
            which_dequeue(q)(batch_size, name="read_bucket_%d" % i),
            name="enqueue_from_bucket_%d" % i)
        for i, q in enumerate(bucket_queues)]

    for i, q in enumerate(bucket_queues):
      queue_runner.add_queue_runner(queue_runner.QueueRunner(
          q, [enqueues_to_top[i]],
          queue_closed_exception_types=(
              errors.OutOfRangeError, errors.CancelledError)))
    queue_runner.add_queue_runner(queue_runner.QueueRunner(
        top_queue, bucket_enqueue_ops,
        queue_closed_exception_types=(
            errors.OutOfRangeError, errors.CancelledError)))

    # NOTE(review): both summaries report top_queue.size() — the per-bucket
    # "bucket/%s/size" summary never reads q.size(); looks unintended, but
    # behaviour is preserved here.  Confirm against upstream.
    for q in bucket_queues:
      logging_ops.scalar_summary(
          "bucket/%s/size" % q.name,
          math_ops.cast(top_queue.size(), dtypes.float32))
    logging_ops.scalar_summary(
        "bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
        math_ops.cast(top_queue.size(), dtypes.float32) * (1. / capacity))

    dequeued = top_queue.dequeue(name="dequeue_top")
    which_bucket_dequeued = dequeued[0]
    dequeued = dequeued[1:]
    dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
    return (which_bucket_dequeued, _as_original_type(tensors, dequeued))
def bucket_by_sequence_length(input_length,
                              tensors,
                              batch_size,
                              bucket_boundaries,
                              num_threads=1,
                              capacity=32,
                              shapes=None,
                              dynamic_pad=False,
                              allow_smaller_final_batch=False,
                              keep_input=None,
                              shared_name=None,
                              name=None):
  """Lazy bucketing of inputs according to their length.

  This method calls `tf.contrib.training.bucket` under the hood, after first
  subdividing the bucket boundaries into separate buckets and identifying which
  bucket the given `input_length` belongs to.  See the documentation for
  `which_bucket` for details of the other arguments.

  Args:
    input_length: `int32` scalar `Tensor`, the sequence length of tensors.
    tensors: The list or dictionary of tensors, representing a single element,
      to bucket.  Nested lists are not supported.
    batch_size: The new batch size pulled from the queue
      (python int or int32 scalar).
    bucket_boundaries: int list, increasing non-negative numbers.
      The edges of the buckets to use when bucketing tensors.  Two extra buckets
      are created, one for `input_length < bucket_boundaries[0]` and
      one for `input_length >= bucket_boundaries[-1]`.
    num_threads: An integer.  The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of minibatches in the top queue,
      and also the maximum number of elements within each bucket.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean.  Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
      batches to be smaller if there are insufficient items left in the queues.
    keep_input: (Optional).  A `bool` scalar Tensor.  If provided, this tensor
      controls whether the input is added to the queue or not.  If it evaluates
      `True`, then `tensors` are added to the bucket; otherwise they are
      dropped.  This tensor essentially acts as a filtering mechanism.
      The default behavior is to assume `keep_input=True`.
    shared_name: (Optional). If set, the queues will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A tuple `(sequence_length, outputs)` where `sequence_length` is
    a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
    of batched, bucketed, outputs corresponding to elements of `tensors`.

  Raises:
    TypeError: if `bucket_boundaries` is not a list of python integers.
    ValueError: if `bucket_boundaries` is empty or contains non-increasing
      values.
  """
  tensor_list = _as_tensor_list(tensors)
  if not isinstance(bucket_boundaries, (list, tuple)):
    raise TypeError(
        "bucket_boundaries must be a list or tuple, but received: %s"
        % bucket_boundaries)
  if not bucket_boundaries:
    raise ValueError("bucket_boundaries must not be empty")
  # Validate that boundaries are integer and strictly increasing.
  for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
    if not isinstance(s, int) or not isinstance(e, int):
      raise TypeError(
          "bucket boundaries must be integers, but saw: %s and %s" % (s, e))
    if s >= e:
      raise ValueError(
          "Buckets must contain sequential increasing lengths, but saw: "
          "%d before %d" % (s, e))

  with ops.name_scope(name, "bucket_by_sequence_length",
                      [input_length] + tensor_list) as name:
    input_length = ops.convert_to_tensor(
        input_length, dtype=dtypes.int32, name="input_length")
    # Bucketing conditions are:
    #   l < b[0]
    #   b[0] <= l < b[1]
    #   b[1] <= l < b[2]
    #   ...
    #   b[N-2] <= l < b[N-1]
    #   b[N-1] <= l
    # Equivalent to:
    #   [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
    buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
    buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
    conditions_c = math_ops.logical_and(
        math_ops.less_equal(buckets_min, input_length),
        math_ops.less(input_length, buckets_max))
    # Exactly one condition is true; its index is the bucket number.
    which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
    which_bucket = math_ops.to_int32(which_bucket)

    if shapes is not None:
      # Account for the scalar input_length prepended below.
      shapes = [tensor_shape.scalar()] + shapes

    # Prepend input_length so it is batched alongside the data and can be
    # returned per-example.
    _, dequeued = bucket(
        tensors=[input_length] + tensor_list,
        which_bucket=which_bucket,
        batch_size=batch_size,
        num_buckets=len(bucket_boundaries) + 1,
        num_threads=num_threads,
        capacity=capacity,
        shapes=shapes,
        dynamic_pad=dynamic_pad,
        allow_smaller_final_batch=allow_smaller_final_batch,
        keep_input=keep_input,
        shared_name=shared_name)

    return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
__all__ = [
"bucket",
"bucket_by_sequence_length"
]
| apache-2.0 |
gem/oq-engine | openquake/hazardlib/tests/acceptance/_peer_test_data.py | 1 | 12043 | """
Data is taken from the report "PEER 2010/106 - Verification of Probabilistic
Seismic Hazard Analysis Computer Programs" by Patricia Thomas, Ivan Wong,
Norman Abrahamson, see
`http://peer.berkeley.edu/publications/peer_reports/reports_2010/web_PEER_10106_THOMASetal.pdf`_.
"""
from openquake.hazardlib.geo import Polygon, Point, Line
from openquake.hazardlib.site import Site
from openquake.hazardlib.imt import PGA
from openquake.hazardlib.mfd import TruncatedGRMFD, EvenlyDiscretizedMFD
from openquake.hazardlib.source import SimpleFaultSource
from openquake.hazardlib.pmf import PMF
import numpy
IMT = PGA()
# page 12
SET1_RUPTURE_ASPECT_RATIO = 2.0
# page A-3
SET1_CASE10_SOURCE_POLYGON = SET1_CASE11_SOURCE_POLYGON = Polygon([
Point(-122.000, 38.901),
Point(-121.920, 38.899),
Point(-121.840, 38.892),
Point(-121.760, 38.881),
Point(-121.682, 38.866),
Point(-121.606, 38.846),
Point(-121.532, 38.822),
Point(-121.460, 38.794),
Point(-121.390, 38.762),
Point(-121.324, 38.727),
Point(-121.261, 38.688),
Point(-121.202, 38.645),
Point(-121.147, 38.600),
Point(-121.096, 38.551),
Point(-121.050, 38.500),
Point(-121.008, 38.446),
Point(-120.971, 38.390),
Point(-120.940, 38.333),
Point(-120.913, 38.273),
Point(-120.892, 38.213),
Point(-120.876, 38.151),
Point(-120.866, 38.089),
Point(-120.862, 38.026),
Point(-120.863, 37.963),
Point(-120.869, 37.900),
Point(-120.881, 37.838),
Point(-120.899, 37.777),
Point(-120.921, 37.717),
Point(-120.949, 37.658),
Point(-120.982, 37.601),
Point(-121.020, 37.545),
Point(-121.063, 37.492),
Point(-121.110, 37.442),
Point(-121.161, 37.394),
Point(-121.216, 37.349),
Point(-121.275, 37.308),
Point(-121.337, 37.269),
Point(-121.403, 37.234),
Point(-121.471, 37.203),
Point(-121.542, 37.176),
Point(-121.615, 37.153),
Point(-121.690, 37.133),
Point(-121.766, 37.118),
Point(-121.843, 37.108),
Point(-121.922, 37.101),
Point(-122.000, 37.099),
Point(-122.078, 37.101),
Point(-122.157, 37.108),
Point(-122.234, 37.118),
Point(-122.310, 37.133),
Point(-122.385, 37.153),
Point(-122.458, 37.176),
Point(-122.529, 37.203),
Point(-122.597, 37.234),
Point(-122.663, 37.269),
Point(-122.725, 37.308),
Point(-122.784, 37.349),
Point(-122.839, 37.394),
Point(-122.890, 37.442),
Point(-122.937, 37.492),
Point(-122.980, 37.545),
Point(-123.018, 37.601),
Point(-123.051, 37.658),
Point(-123.079, 37.717),
Point(-123.101, 37.777),
Point(-123.119, 37.838),
Point(-123.131, 37.900),
Point(-123.137, 37.963),
Point(-123.138, 38.026),
Point(-123.134, 38.089),
Point(-123.124, 38.151),
Point(-123.108, 38.213),
Point(-123.087, 38.273),
Point(-123.060, 38.333),
Point(-123.029, 38.390),
Point(-122.992, 38.446),
Point(-122.950, 38.500),
Point(-122.904, 38.551),
Point(-122.853, 38.600),
Point(-122.798, 38.645),
Point(-122.739, 38.688),
Point(-122.676, 38.727),
Point(-122.610, 38.762),
Point(-122.540, 38.794),
Point(-122.468, 38.822),
Point(-122.394, 38.846),
Point(-122.318, 38.866),
Point(-122.240, 38.881),
Point(-122.160, 38.892),
Point(-122.080, 38.899),
])
# page A-3
SET1_CASE10_SITE1 = SET1_CASE11_SITE1 = Site(
location=Point(-122.0, 38.0), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
SET1_CASE10_SITE2 = SET1_CASE11_SITE2 = Site(
location=Point(-122.0, 37.550), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
SET1_CASE10_SITE3 = SET1_CASE11_SITE3 = Site(
location=Point(-122.0, 37.099), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
SET1_CASE10_SITE4 = SET1_CASE11_SITE4 = Site(
location=Point(-122.0, 36.874), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
# page 14
SET1_CASE10_MFD = TruncatedGRMFD(a_val=3.1, b_val=0.9, min_mag=5.0,
max_mag=6.5, bin_width=0.1)
SET1_CASE11_MFD = SET1_CASE10_MFD
# page 14
SET1_CASE10_HYPOCENTER_DEPTH = 5.0
# page A-15
SET1_CASE10_IMLS = [0.001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4]
SET1_CASE10_SITE1_POES = [
3.87E-02, 2.19E-02, 2.97E-03, 9.22E-04, 3.59E-04,
1.31E-04, 4.76E-05, 1.72E-05, 5.38E-06, 1.18E-06
]
SET1_CASE10_SITE2_POES = [
3.87E-02, 1.82E-02, 2.96E-03, 9.21E-04, 3.59E-04,
1.31E-04, 4.76E-05, 1.72E-05, 5.37E-06, 1.18E-06
]
SET1_CASE10_SITE3_POES = [
3.87E-02, 9.32E-03, 1.39E-03, 4.41E-04, 1.76E-04,
6.47E-05, 2.27E-05, 8.45E-06, 2.66E-06, 5.84E-07
]
SET1_CASE10_SITE4_POES = [
3.83E-02, 5.33E-03, 1.25E-04, 1.63E-06, 0,
0, 0, 0, 0, 0
]
# page 21
SET1_CASE11_HYPOCENTERS = [5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
# page A-16
SET1_CASE11_IMLS = [0.001, 0.01, 0.05, 0.1, 0.15,
0.2, 0.25, 0.3, 0.35, 0.4, 0.45]
SET1_CASE11_SITE1_POES = [
3.87E-02, 2.18E-02, 2.83E-03, 7.91E-04, 2.43E-04,
7.33E-05, 2.23E-05, 6.42E-06, 1.31E-06, 1.72E-07,
3.05E-09
]
SET1_CASE11_SITE2_POES = [
3.87E-02, 1.81E-02, 2.83E-03, 7.90E-04, 2.44E-04,
7.32E-05, 2.21E-05, 6.50E-06, 1.30E-06, 1.60E-07,
3.09E-09
]
SET1_CASE11_SITE3_POES = [
3.87E-02, 9.27E-03, 1.32E-03, 3.79E-04, 1.18E-04,
3.60E-05, 1.08E-05, 2.95E-06, 6.18E-07, 7.92E-08,
1.34E-09
]
SET1_CASE11_SITE4_POES = [
3.84E-02, 5.33E-03, 1.18E-04, 1.24E-06, 0,
0, 0, 0, 0, 0,
0
]
# Starting from the input data as defined in the PEER Report page 13:
#
# magnitude = 6.0
# b_value = -0.9
# slip_rate = 2e-3 # m/year
# rigidity = 3e10 # N/m^2
# fault_length = 25.0 * 1e3 # m
# fault_width = 12.0 * 1e3 # m
#
# The total seismic moment rate can be computed as:
#
# seismic_moment_rate = rigidity * fault_length * fault_width * slip_rate
#
# From which we can derive the incremental a value:
#
# a_incremental = log10(seismic_moment_rate) - (1.5 + b_value) * magnitude - 9.05
#
# and finally the rate:
#
# rate = 10 ** (a_incremental + b_value * magnitude)
SET1_CASE2_MFD = EvenlyDiscretizedMFD(min_mag=6.0, bin_width=0.01,
occurrence_rates=[0.0160425168864])
SET1_CASE1TO9_RAKE = 0
# page A-3
SET1_CASE1TO9_FAULT_TRACE = Line([Point(-122.0, 38.0),
Point(-122.0, 38.22480)])
# page A-17
SET1_CASE1TO9_UPPER_SEISMOGENIC_DEPTH = 0.0
SET1_CASE1TO9_LOWER_SEISMOGENIC_DEPTH = 12.0
SET1_CASE1TO9_DIP = 90
# page A-3
SET1_CASE1TO9_SITE1 = Site(
location=Point(-122.000, 38.113), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
SET1_CASE1TO9_SITE2 = Site(
location=Point(-122.114, 38.113), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
SET1_CASE1TO9_SITE3 = Site(
location=Point(-122.570, 38.111), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
SET1_CASE1TO9_SITE4 = Site(
location=Point(-122.000, 38.000), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
SET1_CASE1TO9_SITE5 = Site(
location=Point(-122.000, 37.910), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
SET1_CASE1TO9_SITE6 = Site(
location=Point(-122.000, 38.225), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
SET1_CASE1TO9_SITE7 = Site(
location=Point(-121.886, 38.113), vs30=800.0, vs30measured=True,
z1pt0=1.0, z2pt5=2.0
)
# page A-8
SET1_CASE2_IMLS = [0.001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3,
0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65]
SET1_CASE2_SITE1_POES = [
1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02,
1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02, 1.18E-02,
8.23E-03, 5.23E-03, 2.64E-03, 3.63E-04, 0.00E+00
]
SET1_CASE2_SITE2_POES = [
1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02,
1.59E-02, 0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00,
0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00
]
SET1_CASE2_SITE3_POES = [
1.59E-02, 1.59E-02, 0.00E+00, 0.00E+00, 0.00E+00,
0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00,
0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00
]
SET1_CASE2_SITE4_POES = [
1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02,
1.58E-02, 1.20E-02, 8.64E-03, 5.68E-03, 3.09E-03,
1.51E-03, 6.08E-04, 1.54E-04, 2.92E-06, 0.00E+00
]
SET1_CASE2_SITE5_POES = [
1.59E-02, 1.59E-02, 1.59E-02, 1.56E-02, 7.69E-03,
1.60E-03, 0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00,
0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00
]
SET1_CASE2_SITE6_POES = [
1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02,
1.58E-02, 1.20E-02, 8.64E-03, 5.68E-03, 3.09E-03,
1.51E-03, 6.08E-04, 1.54E-04, 2.92E-06, 0.00E+00
]
SET1_CASE2_SITE7_POES = [
1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02, 1.59E-02,
1.59E-02, 0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00,
0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00
]
# page 13
SET1_CASE5_MFD = TruncatedGRMFD(a_val=3.1292, b_val=0.9, min_mag=5.0,
max_mag=6.5, bin_width=0.1)
# page A-9
SET1_CASE5_IMLS = [0.001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4,
0.45, 0.5, 0.55, 0.6, 0.7, 0.8]
SET1_CASE5_SITE4_POES = [
3.99E-02, 3.99E-02, 3.98E-02, 2.99E-02, 2.00E-02,
1.30E-02, 8.58E-03, 5.72E-03, 3.88E-03, 2.69E-03,
1.91E-03, 1.37E-03, 9.74E-04, 6.75E-04, 2.52E-04,
0.00E+00
]
SET1_CASE5_SITE5_POES = [
3.99E-02, 3.99E-02, 3.14E-02, 1.21E-02, 4.41E-03,
1.89E-03, 7.53E-04, 1.25E-04, 0.00E+00, 0.00E+00,
0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00, 0.00E+00,
0.00E+00
]
SET1_CASE5_SITE6_POES = [
3.99E-02, 3.99E-02, 3.98E-02, 2.99E-02, 2.00E-02,
1.30E-02, 8.58E-03, 5.72E-03, 3.88E-03, 2.69E-03,
1.91E-03, 1.37E-03, 9.74E-04, 6.75E-04, 2.52E-04,
0.00E+00
]
# page A-11
SET1_CASE5_SITE1_POES = [
4.00E-02, 4.00E-02, 4.00E-02, 3.99E-02, 3.46E-02,
2.57E-02, 1.89E-02, 1.37E-02, 9.88E-03, 6.93E-03,
4.84E-03, 3.36E-03, 2.34E-03, 1.52E-03, 5.12E-04,
0
]
SET1_CASE5_SITE2_POES = [
4.00E-02, 4.00E-02, 4.00E-02, 3.31E-02, 1.22E-02,
4.85E-03, 1.76E-03, 2.40E-04, 0, 0,
0, 0, 0, 0, 0,
0
]
SET1_CASE5_SITE3_POES = [
4.00E-02, 4.00E-02, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0
]
SET1_CASE5_SITE7_POES = [
4.00E-02, 4.00E-02, 4.00E-02, 3.31E-02, 1.22E-02,
4.85E-03, 1.76E-03, 2.40E-04, 0, 0,
0, 0, 0, 0, 0,
0
]
# rupture-related data for case 2 source: expected rupture enumeration used
# to check SimpleFaultSource rupture generation against the PEER report.
SET1_CASE2_SOURCE_DATA = {
    # 12 rupture positions along strike x 6 along dip
    # (matching the 'lats' and 'depths' lists below)
    'num_rups_strike': 12,
    'num_rups_dip': 6,
    'mag': 6.,
    'rake': 0.,
    'tectonic_region_type': 'Active Shallow Crust',
    # probability mass function for the number of occurrences (0 or 1)
    'pmf': PMF([(0.9997772, 0), (0.0002228, 1)]),
    # rupture surface meshes: every mesh point shares longitude -122
    'lons': numpy.zeros((8, 15)) - 122.,
    # one 8x15 latitude mesh per along-strike rupture position
    'lats': [
        numpy.tile(numpy.linspace(38.0, 38.126, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.009, 38.135, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.018, 38.144, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.027, 38.153, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.036, 38.162, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.045, 38.171, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.054, 38.180, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.063, 38.189, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.072, 38.198, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.081, 38.207, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.090, 38.216, 15), (8, 1)),
        numpy.tile(numpy.linspace(38.099, 38.225, 15), (8, 1)),
    ],
    # one 8x15 depth mesh per along-dip rupture position
    'depths':[
        numpy.tile(numpy.linspace(0., 7., 8).reshape(-1, 1), (1, 15)),
        numpy.tile(numpy.linspace(1., 8., 8).reshape(-1, 1), (1, 15)),
        numpy.tile(numpy.linspace(2., 9., 8).reshape(-1, 1), (1, 15)),
        numpy.tile(numpy.linspace(3., 10., 8).reshape(-1, 1), (1, 15)),
        numpy.tile(numpy.linspace(4., 11., 8).reshape(-1, 1), (1, 15)),
        numpy.tile(numpy.linspace(5., 12., 8).reshape(-1, 1), (1, 15)),
    ],
    # hypocenter grids: (num_rups_dip, num_rups_strike) shaped
    'hypo_lons': numpy.zeros((6, 12)) - 122.,
    'hypo_lats': numpy.tile(numpy.linspace(38.063, 38.162, 12), (6, 1)),
    'hypo_depths': \
        numpy.tile(numpy.linspace(3.5, 8.5, 6).reshape(-1, 1), (1, 12))
}
| agpl-3.0 |
sysadminmatmoz/OCB | addons/hr_timesheet_sheet/hr_timesheet_sheet.py | 9 | 32596 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
import pytz
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.exceptions import UserError
class hr_timesheet_sheet(osv.osv):
_name = "hr_timesheet_sheet.sheet"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_table = 'hr_timesheet_sheet_sheet'
_order = "id desc"
_description = "Timesheet"
def _total(self, cr, uid, ids, name, args, context=None):
""" Compute the attendances, analytic lines timesheets and differences between them
for all the days of a timesheet and the current day
"""
res = dict.fromkeys(ids, {
'total_attendance': 0.0,
'total_timesheet': 0.0,
'total_difference': 0.0,
})
cr.execute("""
SELECT sheet_id as id,
sum(total_attendance) as total_attendance,
sum(total_timesheet) as total_timesheet,
sum(total_difference) as total_difference
FROM hr_timesheet_sheet_sheet_day
WHERE sheet_id IN %s
GROUP BY sheet_id
""", (tuple(ids),))
res.update(dict((x.pop('id'), x) for x in cr.dictfetchall()))
return res
def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
if len(ids_signin) != len(ids_signout):
raise UserError(_('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
return True
def copy(self, cr, uid, ids, *args, **argv):
raise UserError(_('You cannot duplicate a timesheet.'))
def create(self, cr, uid, vals, context=None):
if 'employee_id' in vals:
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
raise UserError(_('In order to create a timesheet for this employee, you must link him/her to a user.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'employee_id' in vals:
new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
if not new_user_id:
raise UserError(_('In order to create a timesheet for this employee, you must link him/her to a user.'))
if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
raise UserError(_('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise UserError(_('In order to create a timesheet for this employee, you must link the employee to a product.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
# In addition to the date order, deleting attendances are done before inserting attendances
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
if vals.get('attendances_ids'):
for timesheet in self.browse(cr, uid, ids):
if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
raise UserError(_('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
return res
def sort_attendances(self, cr, uid, attendance_tuples, context=None):
date_attendances = []
for att_tuple in attendance_tuples:
if att_tuple[0] in [0,1,4]:
if att_tuple[0] in [0,1]:
if att_tuple[2] and att_tuple[2].has_key('name'):
name = att_tuple[2]['name']
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
date_attendances.append((1, name, att_tuple))
elif att_tuple[0] in [2,3]:
date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
else:
date_attendances.append((0, False, att_tuple))
date_attendances.sort()
return [att[2] for att in date_attendances]
def button_confirm(self, cr, uid, ids, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
di = sheet.user_id.company_id.timesheet_max_difference
if (abs(sheet.total_difference) < di) or not di:
sheet.signal_workflow('confirm')
else:
raise UserError(_('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
return True
def attendance_action_change(self, cr, uid, ids, context=None):
hr_employee = self.pool.get('hr.employee')
employee_ids = []
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
def _count_attendances(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
attendances_groups = self.pool['hr.attendance'].read_group(cr, uid, [('sheet_id' , 'in' , ids)], ['sheet_id'], 'sheet_id', context=context)
for attendances in attendances_groups:
res[attendances['sheet_id'][0]] = attendances['sheet_id_count']
return res
_columns = {
'name': fields.char('Note', select=1,
states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'timesheet_ids' : fields.one2many('account.analytic.line', 'sheet_id',
'Timesheet lines',
readonly=True, states={
'draft': [('readonly', False)],
'new': [('readonly', False)]}
),
'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
'state' : fields.selection([
('new', 'New'),
('draft','Open'),
('confirm','Waiting Approval'),
('done','Approved')], 'Status', select=True, required=True, readonly=True,
track_visibility='onchange',
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
\n* The \'Confirmed\' status is used for to confirm the timesheet by user. \
\n* The \'Done\' status is used when users timesheet is accepted by his/her senior.'),
'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
'company_id': fields.many2one('res.company', 'Company'),
'department_id':fields.many2one('hr.department','Department'),
'attendance_count': fields.function(_count_attendances, type='integer', string="Attendances"),
}
def _default_date_from(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return time.strftime('%Y-%m-01')
elif r=='week':
return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-01-01')
return fields.date.context_today(self, cr, uid, context)
def _default_date_to(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
elif r=='week':
return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-12-31')
return fields.date.context_today(self, cr, uid, context)
def _default_employee(self, cr, uid, context=None):
emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
return emp_ids and emp_ids[0] or False
_defaults = {
'date_from' : _default_date_from,
'date_to' : _default_date_to,
'state': 'new',
'employee_id': _default_employee,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
}
def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
new_user_id = forced_user_id or sheet.employee_id.user_id and sheet.employee_id.user_id.id
if new_user_id:
cr.execute('SELECT id \
FROM hr_timesheet_sheet_sheet \
WHERE (date_from <= %s and %s <= date_to) \
AND user_id=%s \
AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
if cr.fetchall():
return False
return True
_constraints = [
(_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
]
def action_set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
self.create_workflow(cr, uid, ids)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
# week number according to ISO 8601 Calendar
return [(r['id'], _('Week ')+str(datetime.strptime(r['date_from'], '%Y-%m-%d').isocalendar()[1])) \
for r in self.read(cr, uid, ids, ['date_from'],
context=context, load='_classic_write')]
def unlink(self, cr, uid, ids, context=None):
sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
for sheet in sheets:
if sheet['state'] in ('confirm', 'done'):
raise UserError(_('You cannot delete a timesheet which is already confirmed.'))
elif sheet['total_attendance'] <> 0.00:
raise UserError(_('You cannot delete a timesheet which have attendance entries.'))
toremove = []
analytic_timesheet = self.pool.get('account.analytic.line')
for sheet in self.browse(cr, uid, ids, context=context):
for timesheet in sheet.timesheet_ids:
toremove.append(timesheet.id)
analytic_timesheet.unlink(cr, uid, toremove, context=context)
return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
department_id = False
user_id = False
if employee_id:
empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
department_id = empl_id.department_id.id
user_id = empl_id.user_id.id
return {'value': {'department_id': department_id, 'user_id': user_id,}}
# ------------------------------------------------
# OpenChatter methods and notifications
# ------------------------------------------------
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'state' in init_values and record.state == 'confirm':
return 'hr_timesheet_sheet.mt_timesheet_confirmed'
elif 'state' in init_values and record.state == 'done':
return 'hr_timesheet_sheet.mt_timesheet_approved'
return super(hr_timesheet_sheet, self)._track_subtype(cr, uid, ids, init_values, context=context)
    def _needaction_domain_get(self, cr, uid, context=None):
        """Domain of sheets requiring the current user's action: sheets in
        state 'confirm' whose employee has this user as manager
        (``parent_id.user_id``). Returns False when the user manages nobody.
        """
        emp_obj = self.pool.get('hr.employee')
        empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
        if not empids:
            return False
        dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
        return dom
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
    def _get_default_date(self, cr, uid, context=None):
        """Default date for a new analytic line: the super() default
        (today), clamped into the timesheet period when that period is
        passed through the context.
        """
        if context is None:
            context = {}
        # get the default date (should be: today)
        res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
        # if we got the dates from and to from the timesheet and if the default date is in between, we use the default
        # but if the default isn't included in those dates, we use the date start of the timesheet as default
        if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
            if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
                return res
            return context.get('timesheet_date_from')
        # if we don't get the dates from the timesheet, we return the default value from super()
        return res
    def _sheet(self, cursor, user, ids, name, args, context=None):
        """Function field: link each timesheet analytic line to the open
        ('draft'/'new') sheet covering the line's date for the line's user.

        Returns {line_id: (sheet_id, sheet_display_name) or False}.
        """
        sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
        res = {}.fromkeys(ids, False)
        for ts_line in self.browse(cursor, user, ids, context=context):
            # only analytic lines flagged as timesheet lines belong to a sheet
            if not ts_line.is_timesheet:
                continue
            sheet_ids = sheet_obj.search(cursor, user,
                [('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
                 ('employee_id.user_id', '=', ts_line.user_id.id),
                 ('state', 'in', ['draft', 'new'])],
                context=context)
            if sheet_ids:
                # [0] because only one sheet possible for an employee between 2 dates
                res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
        return res
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
ts_line_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT l.id
FROM account_analytic_line l
WHERE %(date_to)s >= l.date
AND %(date_from)s <= l.date
AND %(user_id)s = l.user_id
AND l.is_timesheet = True
GROUP BY l.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
ts_line_ids.extend([row[0] for row in cr.fetchall()])
return ts_line_ids
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet', select="1",
type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'account.analytic.line': (lambda self,cr,uid,ids,context=None: ids, ['user_id', 'date'], 10),
},
),
}
def write(self, cr, uid, ids, values, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(account_analytic_line, self).write(cr, uid, ids, values,context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(account_analytic_line,self).unlink(cr, uid, ids,*args, **kwargs)
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise UserError(_('You cannot modify an entry in a confirmed timesheet.'))
return True
class hr_attendance(osv.osv):
_inherit = "hr.attendance"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
if 'name' in context:
return context['name'] + time.strftime(' %H:%M:%S')
return time.strftime('%Y-%m-%d %H:%M:%S')
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
attendance_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT a.id
FROM hr_attendance a
INNER JOIN hr_employee e
INNER JOIN resource_resource r
ON (e.resource_id = r.id)
ON (a.employee_id = e.id)
LEFT JOIN res_users u
ON r.user_id = u.id
LEFT JOIN res_partner p
ON u.partner_id = p.id
WHERE %(date_to)s >= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))
AND %(date_from)s <= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))
AND %(user_id)s = r.user_id
GROUP BY a.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
attendance_ids.extend([row[0] for row in cr.fetchall()])
return attendance_ids
    def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None):
        """ Simulate timesheet in employee timezone

        Return the attendance date in string format in the employee
        tz converted from utc timezone as we consider date of employee
        timesheet is in employee timezone
        """
        employee_obj = self.pool['hr.employee']
        tz = False
        if employee_id:
            employee = employee_obj.browse(cr, uid, employee_id, context=context)
            # the employee timezone lives on the related user's partner record
            tz = employee.user_id.partner_id.tz
        if not date:
            # no attendance datetime given: fall back to "now" (server UTC time)
            date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        att_tz = timezone(tz or 'utc')
        attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)
        # interpret the stored naive datetime as UTC, then shift it into the
        # employee timezone
        att_tz_dt = pytz.utc.localize(attendance_dt)
        att_tz_dt = att_tz_dt.astimezone(att_tz)
        # We take only the date, omitting the hours, as we compare with timesheet
        # date_from which is a date format thus using hours would lead to
        # be out of scope of timesheet
        att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT)
        return att_tz_date_str
def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None):
sheet_obj = self.pool['hr_timesheet_sheet.sheet']
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, employee_id,
date=date, context=context)
sheet_ids = sheet_obj.search(cr, uid,
[('date_from', '<=', att_tz_date_str),
('date_to', '>=', att_tz_date_str),
('employee_id', '=', employee_id)],
limit=1, context=context)
return sheet_ids and sheet_ids[0] or False
def _sheet(self, cursor, user, ids, name, args, context=None):
res = {}.fromkeys(ids, False)
for attendance in self.browse(cursor, user, ids, context=context):
res[attendance.id] = self._get_current_sheet(
cursor, user, attendance.employee_id.id, attendance.name,
context=context)
return res
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet',
type='many2one', relation='hr_timesheet_sheet.sheet',
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
},
)
}
_defaults = {
'name': _get_default_date,
}
    def create(self, cr, uid, vals, context=None):
        """Create an attendance after validating it against its timesheet.

        Raises UserError when the matching sheet is already submitted, or
        when the attendance date (in the employee timezone) falls outside
        the sheet's period.
        """
        if context is None:
            context = {}
        # the sheet may be forced through the context (editing from the sheet form)
        sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context)
        if sheet_id:
            att_tz_date_str = self._get_attendance_employee_tz(
                cr, uid, vals.get('employee_id'),
                date=vals.get('name'), context=context)
            ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context)
            if ts.state not in ('draft', 'new'):
                raise UserError(_('You can not enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.'))
            elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str:
                raise UserError(_('You can not enter an attendance date outside the current timesheet dates.'))
        return super(hr_attendance,self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
if 'sheet_id' in context:
for attendance in self.browse(cr, uid, ids, context=context):
if context['sheet_id'] != attendance.sheet_id.id:
raise UserError(_('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise UserError(_('You cannot modify an entry in a confirmed timesheet'))
return True
class hr_timesheet_sheet_sheet_day(osv.osv):
    """Read-only reporting model: one row per (day, sheet) with the total
    timesheet hours, total attendance hours and their difference.

    Backed by a SQL view (``_auto = False``) created in :meth:`init`.
    """
    _name = "hr_timesheet_sheet.sheet.day"
    _description = "Timesheets by Period"
    _auto = False  # no table: the model is backed by the SQL view below
    _order='name'
    _columns = {
        'name': fields.date('Date', readonly=True),
        'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
        'total_timesheet': fields.float('Total Timesheet', readonly=True),
        'total_attendance': fields.float('Attendance', readonly=True),
        'total_difference': fields.float('Difference', readonly=True),
    }
    # Source models/fields whose changes must invalidate/refresh this view.
    _depends = {
        'account.analytic.line': ['date', 'unit_amount'],
        'hr.attendance': ['action', 'name', 'sheet_id'],
    }

    def init(self, cr):
        """(Re)create the SQL view aggregating timesheet lines and
        attendances per day and per sheet, converting attendance timestamps
        to the employee's timezone.
        """
        # NOTE(review): attendance minutes are summed signed (sign_in = -1,
        # sign_out = +1); a day with unmatched sign_ins ("orphan_attendances"
        # != 0) is closed at "now" for the current day or at midnight
        # (1440 min) for past days — confirm against hr.attendance usage.
        cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
            SELECT
                id,
                name,
                sheet_id,
                total_timesheet,
                total_attendance,
                cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
            FROM
                ((
                    SELECT
                        MAX(id) as id,
                        name,
                        sheet_id,
                        timezone,
                        SUM(total_timesheet) as total_timesheet,
                        CASE WHEN SUM(orphan_attendances) != 0
                            THEN (SUM(total_attendance) +
                                CASE WHEN current_date <> name
                                    THEN 1440
                                    ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC')) * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC'))
                                END
                                )
                            ELSE SUM(total_attendance)
                        END /60 as total_attendance
                    FROM
                        ((
                            select
                                min(l.id) as id,
                                p.tz as timezone,
                                l.date::date as name,
                                s.id as sheet_id,
                                sum(l.unit_amount) as total_timesheet,
                                0 as orphan_attendances,
                                0.0 as total_attendance
                            from
                                account_analytic_line l
                                LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = l.sheet_id
                                JOIN hr_employee e ON s.employee_id = e.id
                                JOIN resource_resource r ON e.resource_id = r.id
                                LEFT JOIN res_users u ON r.user_id = u.id
                                LEFT JOIN res_partner p ON u.partner_id = p.id
                            group by l.date::date, s.id, timezone
                        ) union (
                            select
                                -min(a.id) as id,
                                p.tz as timezone,
                                (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date as name,
                                s.id as sheet_id,
                                0.0 as total_timesheet,
                                SUM(CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END) as orphan_attendances,
                                SUM(((EXTRACT(hour FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))) * 60) + EXTRACT(minute FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC')))) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
                            from
                                hr_attendance a
                                LEFT JOIN hr_timesheet_sheet_sheet s
                                    ON s.id = a.sheet_id
                                JOIN hr_employee e
                                    ON a.employee_id = e.id
                                JOIN resource_resource r
                                    ON e.resource_id = r.id
                                LEFT JOIN res_users u
                                    ON r.user_id = u.id
                                LEFT JOIN res_partner p
                                    ON u.partner_id = p.id
                            WHERE action in ('sign_in', 'sign_out')
                            group by (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date, s.id, timezone
                        )) AS foo
                    GROUP BY name, sheet_id, timezone
                )) AS bar""")
class hr_timesheet_sheet_sheet_account(osv.osv):
    """Read-only reporting model: total timesheet hours per analytic
    account and per sheet, backed by a SQL view (``_auto = False``)."""
    _name = "hr_timesheet_sheet.sheet.account"
    _description = "Timesheets by Period"
    _auto = False  # no table: the model is backed by the SQL view below
    _order='name'
    _columns = {
        'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
        'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
        'total': fields.float('Total Time', digits=(16,2), readonly=True),
    }
    # Source models/fields whose changes must invalidate/refresh this view.
    _depends = {
        'account.analytic.line': ['account_id', 'date', 'unit_amount', 'user_id'],
        'hr_timesheet_sheet.sheet': ['date_from', 'date_to', 'user_id'],
    }

    def init(self, cr):
        """(Re)create the view; an analytic line is matched to a sheet when
        its date falls within the sheet's period and both share a user."""
        cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
            select
                min(l.id) as id,
                l.account_id as name,
                s.id as sheet_id,
                sum(l.unit_amount) as total
            from
                account_analytic_line l
                LEFT JOIN hr_timesheet_sheet_sheet s
                    ON (s.date_to >= l.date
                        AND s.date_from <= l.date
                        AND s.user_id = l.user_id)
            group by l.account_id, s.id
        )""")
class res_company(osv.osv):
    """Extend companies with timesheet validation settings."""
    _inherit = 'res.company'
    _columns = {
        # Length of one timesheet period (validation granularity).
        'timesheet_range': fields.selection(
            [('day','Day'),('week','Week'),('month','Month')], 'Timesheet range',
            help="Periodicity on which you validate your timesheets."),
        # Tolerated gap between attendance and encoded hours; 0 disables the check.
        'timesheet_max_difference': fields.float('Timesheet allowed difference(Hours)',
            help="Allowed difference in hours between the sign in/out and the timesheet " \
                "computation for one sheet. Set this to 0 if you do not want any control."),
    }
    _defaults = {
        'timesheet_range': lambda *args: 'week',
        'timesheet_max_difference': lambda *args: 0.0
    }
class hr_employee(osv.osv):
    """Extend employees with a timesheet counter (used by smart buttons)."""
    _inherit = 'hr.employee'
    _description = 'Employee'

    def _timesheet_count(self, cr, uid, ids, field_name, arg, context=None):
        # One search_count per employee; returns {employee_id: count}.
        sheet_obj = self.pool['hr_timesheet_sheet.sheet']
        counts = {}
        for employee_id in ids:
            counts[employee_id] = sheet_obj.search_count(
                cr, uid, [('employee_id', '=', employee_id)], context=context)
        return counts

    _columns = {
        'timesheet_count': fields.function(_timesheet_count, type='integer', string='Timesheets'),
    }
| agpl-3.0 |
zouyapeng/horizon-newtouch | openstack_dashboard/api/fwaas.py | 22 | 7990 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.utils.datastructures import SortedDict
from openstack_dashboard.api import neutron
neutronclient = neutron.neutronclient
class Rule(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron firewall rule."""

    def get_dict(self):
        # Expose the API dict with an extra 'rule_id' alias of 'id'.
        data = self._apidict
        data['rule_id'] = data['id']
        return data
class Policy(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron firewall policy."""

    def get_dict(self):
        # Expose the API dict with an extra 'policy_id' alias of 'id'.
        data = self._apidict
        data['policy_id'] = data['id']
        return data
class Firewall(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron firewall."""

    def get_dict(self):
        # Expose the API dict with an extra 'firewall_id' alias of 'id'.
        data = self._apidict
        data['firewall_id'] = data['id']
        return data
def rule_create(request, **kwargs):
    """Create a firewall rule.

    :param request: request context
    :param name: name for rule
    :param description: description for rule
    :param protocol: protocol for rule
    :param action: action for rule
    :param source_ip_address: source IP address or subnet
    :param source_port: integer in [1, 65535] or range in a:b
    :param destination_ip_address: destination IP address or subnet
    :param destination_port: integer in [1, 65535] or range in a:b
    :param shared: boolean (default false)
    :param enabled: boolean (default true)
    :return: Rule object
    """
    response = neutronclient(request).create_firewall_rule(
        {'firewall_rule': kwargs})
    return Rule(response.get('firewall_rule'))
def rule_list(request, **kwargs):
    """List firewall rules, with each rule's owning policy expanded."""
    return _rule_list(request, True, **kwargs)
def _rule_list(request, expand_policy, **kwargs):
    """Fetch firewall rules; optionally attach each rule's Policy object."""
    rules = neutronclient(request).list_firewall_rules(
        **kwargs).get('firewall_rules')
    if expand_policy:
        # Index policies by id so each rule lookup is O(1).
        policies_by_id = SortedDict(
            (policy.id, policy)
            for policy in _policy_list(request, expand_rule=False))
        for rule in rules:
            rule['policy'] = policies_by_id.get(rule['firewall_policy_id'])
    return [Rule(rule) for rule in rules]
def rule_get(request, rule_id):
    """Fetch one firewall rule, with its owning policy expanded."""
    return _rule_get(request, rule_id, True)
def _rule_get(request, rule_id, expand_policy):
    """Fetch one rule; optionally attach its Policy (or None if orphaned)."""
    rule = neutronclient(request).show_firewall_rule(
        rule_id).get('firewall_rule')
    if expand_policy:
        policy_id = rule['firewall_policy_id']
        rule['policy'] = (
            _policy_get(request, policy_id, expand_rule=False)
            if policy_id else None)
    return Rule(rule)
def rule_delete(request, rule_id):
    """Delete the firewall rule identified by ``rule_id``."""
    neutronclient(request).delete_firewall_rule(rule_id)
def rule_update(request, rule_id, **kwargs):
    """Update a firewall rule with the given attributes; return the Rule."""
    payload = {'firewall_rule': kwargs}
    updated = neutronclient(request).update_firewall_rule(
        rule_id, payload).get('firewall_rule')
    return Rule(updated)
def policy_create(request, **kwargs):
    """Create a firewall policy.

    :param request: request context
    :param name: name for policy
    :param description: description for policy
    :param firewall_rules: ordered list of rules in policy
    :param shared: boolean (default false)
    :param audited: boolean (default false)
    :return: Policy object
    """
    response = neutronclient(request).create_firewall_policy(
        {'firewall_policy': kwargs})
    return Policy(response.get('firewall_policy'))
def policy_list(request, **kwargs):
    """List firewall policies, with each policy's rules expanded."""
    return _policy_list(request, True, **kwargs)
def _policy_list(request, expand_rule, **kwargs):
    """Fetch policies; optionally attach the ordered Rule objects."""
    policies = neutronclient(request).list_firewall_policies(
        **kwargs).get('firewall_policies')
    if expand_rule:
        # Index rules by id so the ordered expansion below is O(1) per rule.
        rules_by_id = SortedDict(
            (r.id, r) for r in _rule_list(request, expand_policy=False))
        for policy in policies:
            policy['rules'] = [rules_by_id.get(rule_id)
                               for rule_id in policy['firewall_rules']]
    return [Policy(p) for p in policies]
def policy_get(request, policy_id):
    """Fetch one firewall policy, with its rules expanded."""
    return _policy_get(request, policy_id, True)
def _policy_get(request, policy_id, expand_rule):
    """Fetch one policy; optionally attach its ordered Rule objects."""
    policy = neutronclient(request).show_firewall_policy(
        policy_id).get('firewall_policy')
    if expand_rule:
        rule_ids = policy['firewall_rules']
        if rule_ids:
            rules = _rule_list(request, expand_policy=False,
                               firewall_policy_id=policy_id)
            rules_by_id = SortedDict((r.id, r) for r in rules)
            # Preserve the policy's own rule ordering.
            policy['rules'] = [rules_by_id.get(rid) for rid in rule_ids]
        else:
            policy['rules'] = []
    return Policy(policy)
def policy_delete(request, policy_id):
    """Delete the firewall policy identified by ``policy_id``."""
    neutronclient(request).delete_firewall_policy(policy_id)
def policy_update(request, policy_id, **kwargs):
    """Update a firewall policy; return the updated Policy."""
    payload = {'firewall_policy': kwargs}
    updated = neutronclient(request).update_firewall_policy(
        policy_id, payload).get('firewall_policy')
    return Policy(updated)
def policy_insert_rule(request, policy_id, **kwargs):
    """Insert a rule into a policy; kwargs carry the rule id and position."""
    updated = neutronclient(request).firewall_policy_insert_rule(
        policy_id, kwargs)
    return Policy(updated)
def policy_remove_rule(request, policy_id, **kwargs):
    """Remove a rule from a policy; kwargs carry the rule id."""
    updated = neutronclient(request).firewall_policy_remove_rule(
        policy_id, kwargs)
    return Policy(updated)
def firewall_create(request, **kwargs):
    """Create a firewall for specified policy.

    :param request: request context
    :param name: name for firewall
    :param description: description for firewall
    :param firewall_policy_id: policy id used by firewall
    :param shared: boolean (default false)
    :param admin_state_up: boolean (default true)
    :return: Firewall object
    """
    response = neutronclient(request).create_firewall({'firewall': kwargs})
    return Firewall(response.get('firewall'))
def firewall_list(request, **kwargs):
    """List firewalls, with each firewall's policy expanded."""
    return _firewall_list(request, True, **kwargs)
def _firewall_list(request, expand_policy, **kwargs):
    """Fetch firewalls; optionally attach each firewall's Policy object."""
    firewalls = neutronclient(request).list_firewalls(
        **kwargs).get('firewalls')
    if expand_policy:
        policies_by_id = SortedDict(
            (p.id, p) for p in _policy_list(request, expand_rule=False))
        for firewall in firewalls:
            firewall['policy'] = policies_by_id.get(
                firewall['firewall_policy_id'])
    return [Firewall(f) for f in firewalls]
def firewall_get(request, firewall_id):
    """Fetch one firewall, with its policy expanded."""
    return _firewall_get(request, firewall_id, True)
def _firewall_get(request, firewall_id, expand_policy):
    """Fetch one firewall; optionally attach its Policy (or None)."""
    firewall = neutronclient(request).show_firewall(
        firewall_id).get('firewall')
    if expand_policy:
        policy_id = firewall['firewall_policy_id']
        firewall['policy'] = (
            _policy_get(request, policy_id, expand_rule=False)
            if policy_id else None)
    return Firewall(firewall)
def firewall_delete(request, firewall_id):
    """Delete the firewall identified by ``firewall_id``."""
    neutronclient(request).delete_firewall(firewall_id)
def firewall_update(request, firewall_id, **kwargs):
    """Update a firewall; return the updated Firewall."""
    payload = {'firewall': kwargs}
    updated = neutronclient(request).update_firewall(
        firewall_id, payload).get('firewall')
    return Firewall(updated)
| apache-2.0 |
felix-d/boto | scripts/rebuild_endpoints.py | 79 | 1281 | import json
from pyquery import PyQuery as pq
import requests
class FetchError(Exception):
    """Raised when the endpoints XML cannot be downloaded."""
def fetch_endpoints():
    """Download the region/endpoint XML the AWS Java SDK publishes.

    :return: the raw XML document as text
    :raises FetchError: if the HTTP response status is not 200
    """
    # We utilize what the Java SDK publishes as a baseline.
    resp = requests.get('https://raw2.github.com/aws/aws-sdk-java/master/src/main/resources/etc/regions.xml')
    if int(resp.status_code) != 200:
        # BUG FIX: ``requests.Response`` has no ``status``/``body``
        # attributes, so the old code raised AttributeError here instead
        # of the intended FetchError. Use ``status_code`` and ``text``.
        raise FetchError("Failed to fetch the endpoints. Got {0}: {1}".format(
            resp.status_code,
            resp.text
        ))
    return resp.text
def parse_xml(raw_xml):
    """Parse the raw document into a PyQuery tree using the XML parser."""
    return pq(raw_xml, parser='xml')
def build_data(doc):
    """Collect a {service_name: {region_name: endpoint}} mapping.

    Walks every ``Region`` element; each region lists its service
    endpoints, which is all the data we need.
    """
    data = {}
    for region_elem in doc('Regions').find('Region'):
        region = pq(region_elem, parser='xml')
        region_name = region.find('Name').text()
        for endpoint_elem in region.find('Endpoint'):
            service = endpoint_elem.find('ServiceName').text
            hostname = endpoint_elem.find('Hostname').text
            data.setdefault(service, {})[region_name] = hostname
    return data
def main():
    """Fetch, parse and print the endpoint map as sorted, indented JSON."""
    doc = parse_xml(fetch_endpoints())
    print(json.dumps(build_data(doc), indent=4, sort_keys=True))


if __name__ == '__main__':
    main()
| mit |
lmprice/ansible | test/runner/lib/cloud/opennebula.py | 16 | 1681 | """OpenNebula plugin for integration tests."""
import os
from lib.cloud import (
CloudProvider,
CloudEnvironment
)
from lib.util import (
find_executable,
ApplicationError,
display,
is_shippable,
)
class OpenNebulaCloudProvider(CloudProvider):
    """Checks if a configuration file has been passed or fixtures are going to be used for testing"""

    def filter(self, targets, exclude):
        """No need to filter modules; they can run from a config file or from fixtures."""
        pass

    def setup(self):
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super(OpenNebulaCloudProvider, self).setup()
        # A static config file takes precedence; otherwise fall back to fixtures.
        if self._use_static_config():
            return
        self._setup_dynamic()

    def _setup_dynamic(self):
        """Fill the config template with local fixture/replay settings."""
        display.info('No config file provided, will run test from fixtures')
        template = self._read_config_template()
        settings = {
            'URL': 'http://localhost/RPC2',
            'USERNAME': 'oneadmin',
            'PASSWORD': 'onepass',
            'FIXTURES': 'true',
            'REPLAY': 'true',
        }
        self._write_config(self._populate_config_template(template, settings))
class OpenNebulaCloudEnvironment(CloudEnvironment):
    """
    Updates integration test environment after delegation. Will setup the config file as parameter.
    """

    def configure_environment(self, env, cmd):
        """
        :type env: dict[str, str]
        :type cmd: list[str]
        """
        # Pass the generated config file and the resource prefix as extra vars.
        cmd.extend(['-e', '@%s' % self.config_path])
        cmd.extend(['-e', 'resource_prefix=%s' % self.resource_prefix])
| gpl-3.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/ete2/parser/fasta.py | 3 | 4335 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
import os
import string
import textwrap
from sys import stderr as STDERR
def read_fasta(source, obj=None, header_delimiter="\t", fix_duplicates=True):
    """ Reads a collection of sequences encoded in FASTA format.

    ``source`` may be a path to a (optionally gzipped) FASTA file or the
    FASTA text itself. Fills and returns ``obj`` (a SeqGroup-like object;
    a new SeqGroup is created when None), or None when the last entry has
    an empty sequence. Duplicated names are renamed (when
    ``fix_duplicates``) with a numeric prefix and a warning on stderr.
    """
    if obj is None:
        from ete2.coretype import seqgroup
        SC = seqgroup.SeqGroup()
    else:
        SC = obj
    names = set([])
    seq_id = -1
    # Prepares handle from which read sequences
    if os.path.isfile(source):
        if source.endswith('.gz'):
            import gzip
            _source = gzip.open(source)
        else:
            _source = open(source, "rU")
    else:
        # Not a file on disk: treat ``source`` as the FASTA text itself.
        _source = iter(source.split("\n"))
    seq_name = None
    for line in _source:
        line = line.strip()
        if line.startswith('#') or not line:
            continue
        # Reads seq number
        elif line.startswith('>'):
            # Checks if previous name had seq
            if seq_id>-1 and SC.id2seq[seq_id] == "":
                raise Exception, "No sequence found for "+seq_name
            seq_id += 1
            # Takes header info
            seq_header_fields = map(string.strip, line[1:].split(header_delimiter))
            seq_name = seq_header_fields[0]
            # Checks for duplicated seq names
            if fix_duplicates and seq_name in names:
                tag = str(len([k for k in SC.name2id.keys() if k.endswith(seq_name)]))
                old_name = seq_name
                seq_name = tag+"_"+seq_name
                print >>STDERR, "Duplicated entry [%s] was renamed to [%s]" %(old_name, seq_name)
            # stores seq_name
            SC.id2seq[seq_id] = ""
            SC.id2name[seq_id] = seq_name
            SC.name2id[seq_name] = seq_id
            SC.id2comment[seq_id] = seq_header_fields[1:]
            names.add(seq_name)
        else:
            if seq_name is None:
                raise Exception, "Error reading sequences: Wrong format."
            # removes all white spaces in line
            s = line.strip().replace(" ","")
            # append to seq_string
            SC.id2seq[seq_id] += s
    if seq_name and SC.id2seq[seq_id] == "":
        print >>STDERR, seq_name,"has no sequence"
        return None
    # Everything ok
    return SC
def write_fasta(sequences, outfile = None, seqwidth = 80):
    """ Writes a SeqGroup python object using FASTA format.

    :param sequences: iterable of (name, sequence, comment_fields) tuples
    :param outfile: when given, the FASTA text is written to this path and
        nothing is returned; otherwise the text is returned
    :param seqwidth: maximum number of residues per sequence line
    """
    wrapper = textwrap.TextWrapper()
    wrapper.break_on_hyphens = False
    wrapper.replace_whitespace = False
    wrapper.expand_tabs = False
    wrapper.break_long_words = True
    # BUG FIX: honour the ``seqwidth`` argument (it was previously ignored
    # and the width was hard-coded to 80).
    wrapper.width = seqwidth
    text = '\n'.join([">%s\n%s\n" %( "\t".join([name]+comment), wrapper.fill(seq)) for
                      name, seq, comment in sequences])
    if outfile is not None:
        OUT = open(outfile,"w")
        OUT.write(text)
        OUT.close()
    else:
        return text
| mit |
OCA/bank-statement-import | account_bank_statement_import_txt_xlsx/wizards/account_bank_statement_import_sheet_mapping_wizard.py | 1 | 6596 | # Copyright 2019 ForgeFlow, S.L.
# Copyright 2020 CorporateHub (https://corporatehub.eu)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models, _
from base64 import b64decode
import json
from os import path
class AccountBankStatementImportSheetMappingWizard(models.TransientModel):
    """Multi-step wizard that infers a column mapping from an uploaded
    sheet (CSV/XLSX) and creates a reusable
    ``account.bank.statement.import.sheet.mapping`` record from it."""
    _name = 'account.bank.statement.import.sheet.mapping.wizard'
    _description = 'Account Bank Statement Import Sheet Mapping Wizard'
    _inherit = ['multi.step.wizard.mixin']

    # Uploaded sample statement used to detect the header columns.
    data_file = fields.Binary(
        string='Bank Statement File',
        required=True,
    )
    filename = fields.Char()
    # JSON-encoded list of header column names parsed from data_file.
    header = fields.Char()
    file_encoding = fields.Selection(
        string='Encoding',
        selection=lambda self: self._selection_file_encoding(),
    )
    delimiter = fields.Selection(
        string='Delimiter',
        selection=lambda self: self._selection_delimiter(),
    )
    quotechar = fields.Char(
        string='Text qualifier',
        size=1,
    )
    timestamp_column = fields.Char(
        string='Timestamp column',
    )
    currency_column = fields.Char(
        string='Currency column',
        help=(
            'In case statement is multi-currency, column to get currency of '
            'transaction from'
        ),
    )
    amount_column = fields.Char(
        string='Amount column',
        help='Amount of transaction in journal\'s currency',
    )
    balance_column = fields.Char(
        string='Balance column',
        help='Balance after transaction in journal\'s currency',
    )
    original_currency_column = fields.Char(
        string='Original currency column',
        help=(
            'In case statement provides original currency for transactions '
            'with automatic currency conversion, column to get original '
            'currency of transaction from'
        ),
    )
    original_amount_column = fields.Char(
        string='Original amount column',
        help=(
            'In case statement provides original currency for transactions '
            'with automatic currency conversion, column to get original '
            'transaction amount in original transaction currency from'
        ),
    )
    debit_credit_column = fields.Char(
        string='Debit/credit column',
        # BUG FIX: a space was missing between 'sign' and 'of', producing
        # the user-visible help text "indicate signof the transaction".
        help=(
            'Some statement formats use absolute amount value and indicate '
            'sign of the transaction by specifying if it was a debit or a '
            'credit one'
        ),
    )
    debit_value = fields.Char(
        string='Debit value',
        help='Value of debit/credit column that indicates if it\'s a debit',
        default='D',
    )
    credit_value = fields.Char(
        string='Credit value',
        help='Value of debit/credit column that indicates if it\'s a credit',
        default='C',
    )
    transaction_id_column = fields.Char(
        string='Unique transaction ID column',
    )
    description_column = fields.Char(
        string='Description column',
    )
    notes_column = fields.Char(
        string='Notes column',
    )
    reference_column = fields.Char(
        string='Reference column',
    )
    partner_name_column = fields.Char(
        string='Partner Name column',
    )
    bank_name_column = fields.Char(
        string='Bank Name column',
        help='Partner\'s bank',
    )
    bank_account_column = fields.Char(
        string='Bank Account column',
        help='Partner\'s bank account',
    )

    @api.model
    def _selection_file_encoding(self):
        """Reuse the encoding selection declared on the mapping model."""
        return self.env['account.bank.statement.import.sheet.mapping']._fields[
            'file_encoding'
        ].selection

    @api.model
    def _selection_delimiter(self):
        """Reuse the delimiter selection declared on the mapping model."""
        return self.env['account.bank.statement.import.sheet.mapping']._fields[
            'delimiter'
        ].selection

    @api.onchange('data_file')
    def _onchange_data_file(self):
        """Parse the uploaded file's header row and cache it as JSON."""
        Parser = self.env['account.bank.statement.import.sheet.parser']
        Mapping = self.env['account.bank.statement.import.sheet.mapping']
        if not self.data_file:
            return
        csv_options = {}
        if self.delimiter:
            csv_options['delimiter'] = \
                Mapping._decode_column_delimiter_character(self.delimiter)
        if self.quotechar:
            csv_options['quotechar'] = self.quotechar
        header = Parser.parse_header(
            b64decode(self.data_file),
            self.file_encoding,
            csv_options
        )
        self.header = json.dumps(header)

    @api.model
    def statement_columns(self):
        """Return [(column, column)] selection pairs from the cached header."""
        header = self.env.context.get('header')
        if not header:
            return []
        return [(x, x) for x in json.loads(header)]

    @api.multi
    def _get_mapping_values(self):
        """Hook for extension: values used to create the mapping record."""
        self.ensure_one()
        return {
            'name': _('Mapping from %s') % path.basename(self.filename),
            'float_thousands_sep': 'comma',
            'float_decimal_sep': 'dot',
            'file_encoding': self.file_encoding,
            'delimiter': self.delimiter,
            'quotechar': self.quotechar,
            'timestamp_format': '%d/%m/%Y',
            'timestamp_column': self.timestamp_column,
            'currency_column': self.currency_column,
            'amount_column': self.amount_column,
            'balance_column': self.balance_column,
            'original_currency_column': self.original_currency_column,
            'original_amount_column': self.original_amount_column,
            'debit_credit_column': self.debit_credit_column,
            'debit_value': self.debit_value,
            'credit_value': self.credit_value,
            'transaction_id_column': self.transaction_id_column,
            'description_column': self.description_column,
            'notes_column': self.notes_column,
            'reference_column': self.reference_column,
            'partner_name_column': self.partner_name_column,
            'bank_name_column': self.bank_name_column,
            'bank_account_column': self.bank_account_column,
        }

    @api.multi
    def import_mapping(self):
        """Create the mapping record and open it in a form view."""
        self.ensure_one()
        mapping = self.env['account.bank.statement.import.sheet.mapping']\
            .create(self._get_mapping_values())
        return {
            'type': 'ir.actions.act_window',
            'name': _('Imported Mapping'),
            'res_model': 'account.bank.statement.import.sheet.mapping',
            'res_id': mapping.id,
            'view_mode': 'form',
            'view_id': False,
            'target': 'current',
        }
| agpl-3.0 |
mvaled/sentry | src/sentry/south_migrations/0274_auto__add_index_commit_repository_id_date_added.py | 1 | 87281 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Create the composite ('repository_id', 'date_added') index on sentry_commit."""
    indexed_fields = ['repository_id', 'date_added']
    db.create_index('sentry_commit', indexed_fields)
def backwards(self, orm):
    """Drop the composite ('repository_id', 'date_added') index on sentry_commit."""
    indexed_fields = ['repository_id', 'date_added']
    db.delete_index('sentry_commit', indexed_fields)
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True'
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2016, 10, 17, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together': "(('organization_id', 'name'),)",
'object_name': 'Repository'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'M3q5J1slH8D6cKCdQ80XjNpgY9lanKSB'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
| bsd-3-clause |
matthewwardrop/python-hdf5storage | hdf5storage/datatypes.py | 1 | 6963 | import os, math, copy
import tables
import numpy as np
from .interfaces import DataNode,DataGroup,DataLeaf,HDF5Node,HDF5Group,HDF5Leaf,HDF5LeafTable,HDF5LeafArray
from .data import Storage
#################### DATA TYPE CLASSES #########################################
#
# Get the appropriate data type for the input data
def getDataType(name, data=None, dtype=None, attrs=None):
    '''Return a DataNode wrapper around `data` registered under `name`.

    The concrete wrapper type is taken from `dtype` when given, otherwise
    inferred from the runtime type of `data`. Raises ValueError when no
    wrapper type can be determined.
    '''
    # Build the default per call: a dict literal in the signature would be
    # shared across calls and mutated later via set_attrs.
    if attrs is None:
        attrs = {}
    if isinstance(data, DataNode):
        # Re-wrap an existing node under a new name without mutating the
        # caller's object. set_name is a method; it must be called, not
        # overwritten with a string (previous code did `data.set_name = name`).
        data = copy.copy(data)
        data.set_name(name)
        return data
    if dtype == 'array' or dtype is None and isinstance(data, np.ndarray):
        return DataArray(name, data, attrs=attrs)
    if dtype == 'storage' or dtype is None and isinstance(data, Storage):
        # Deep-copy the children so the new Storage is independent of `data`.
        children = copy.deepcopy(data._children)
        dataObj = Storage(name=name, attrs=attrs)
        dataObj._children = children
        return dataObj
    if dtype == 'dict' or dtype is None and isinstance(data, dict):
        return DataDict(name, data, attrs=attrs)
    if dtype == 'list' or dtype is None and isinstance(data, list):
        return DataList(name, data, attrs=attrs)
    # Last resort: try to coerce to a numpy array and re-dispatch. Guard the
    # recursion so an ndarray with an unrecognised `dtype` string falls
    # through to the ValueError instead of recursing forever.
    if not isinstance(data, np.ndarray):
        try:
            return getDataType(name, np.array(data), dtype, attrs=attrs)
        except Exception:  # coercion/dispatch failed; report the error below
            pass
    raise ValueError("Unknown data type for type %s (%s)" % (str(type(data)), str(data)))
#
# Return true if populateDataType has handled all children of a node
def populateDataType(dataObj,hdfNode,extractOnly = False,prefix=''):
    '''Reconstruct the python object stored at `hdfNode` and, unless
    `extractOnly` is set, attach it to `dataObj` under the node's path
    relative to `prefix`.

    Returns the extracted {'data': ..., 'args': ...} dict when
    `extractOnly` is true, True after attaching, or None/False when the
    node carries no 'type' attribute (i.e. was not written by this module).
    '''
    try:
        # NOTE: `type` shadows the builtin for the rest of this function.
        type = hdfNode._f_getAttr('type')
    except AttributeError, e:
        # Node was not written by this library; signal "not handled".
        if extractOnly:
            return None
        return False
    # Path of the node relative to `prefix`; basename/dirname split it into
    # the child name and its parent group ("/" for top-level nodes).
    path = hdfNode._v_pathname[len(prefix):]
    name = os.path.basename(path)
    node = os.path.dirname(path) if os.path.dirname(path) != '' else "/"
    if not hdfNode._v_pathname.startswith(prefix):
        raise ValueError, "Invalid node (outside prefix)."
    # Map the stored 'type' tag back to the wrapper class and the dtype
    # string understood by getDataType/add_node.
    if type == 'storage':
        obj = Storage
        dtype = 'storage'
    elif type == 'data_dict':
        obj = DataDict
        dtype = 'dict'
    elif type == 'data_array':
        obj = DataArray
        dtype = 'array'
    elif type == 'data_variablearray':
        # NOTE(review): DataVariableArray does not appear to be defined in
        # this module -- confirm this branch is reachable/working.
        obj = DataVariableArray
        dtype = 'var_array'
    elif type == "data_list":
        obj = DataList
        dtype = "list"
    else:
        raise ValueError, "Unknown data type %s"%type
    if issubclass(obj,HDF5Node):
        # Each wrapper knows how to extract its own data from the HDF5 node.
        extracted = obj._hdf5_populate(hdfNode)
    else:
        raise Exception("Unknown type")
    if extractOnly:
        return extracted
    dataObj.add_node(name=name,parent=node,data=extracted['data'],dtype=dtype,attrs=extracted['args'])
    return True
class DataDict(HDF5LeafTable,DataLeaf):
    '''Persist a flat python dict (string or numeric keys, float values)
    as an HDF5 table with one row per key/value pair.'''

    # Fixed column width for string keys; longer keys are truncated by the
    # underlying table column.
    KEY_LENGTH = 30

    def __init__(self, name, data, attrs=None):
        self.set_name(name)
        self.__dict = data
        # Per-instance default avoids the shared-mutable-default pitfall
        # (set_attrs mutates this dict in place).
        self.__props = {} if attrs is None else attrs

    ############## HDF5 Methods ############################################
    @property
    def _hdf5_desc(self):
        return ""

    @property
    def _hdf5_attrs(self):
        # 'type' tags the node so populateDataType can reconstruct it.
        attrs = {'type': 'data_dict'}
        attrs.update(self.__props)
        return attrs

    @property
    def _hdf5_leaf_table_structure(self):
        # One row per entry: exactly one of key_string/key_float is used.
        class DictTable(tables.IsDescription):
            key_string = tables.StringCol(self.KEY_LENGTH)
            key_float = tables.Float64Col()
            value_float = tables.Float64Col()
        return DictTable

    @property
    def _hdf5_leaf_table_entries(self):
        entries = []
        for key, value in self.__dict.items():
            if isinstance(key, str):
                entries.append({'key_string': key, 'key_float': None, 'value_float': value})
            else:
                entries.append({'key_string': None, 'key_float': key, 'value_float': value})
        return entries

    @classmethod
    def _hdf5_populate(cls, hdfNode):
        '''Rebuild {'data': dict, 'args': node attrs} from a stored table.'''
        d = {}
        for row in hdfNode.iterrows():
            # Rows with a NaN key_float hold string-keyed entries.
            if math.isnan(row['key_float']):
                d[row['key_string']] = row['value_float']
            else:
                d[row['key_float']] = row['value_float']
        args = {}
        for attr in hdfNode._v_attrs._f_list():
            args[attr] = getattr(hdfNode._v_attrs, attr)
        return {'data': d, 'args': args}

    ############## DataLeaf Methods #######################################
    @property
    def value(self):
        return self.__dict

    def set_value(self, value):
        # BUG FIX: this was decorated @property (making it uncallable) and
        # assigned the builtin `dict` instead of the argument.
        self.__dict = value

    @property
    def attrs(self):
        return self.__props

    def set_attrs(self, **kwargs):
        # BUG FIX: this was erroneously decorated @property as well.
        self.__props.update(kwargs)
class DataList(HDF5Group,DataLeaf):
    '''Persist a python list as an HDF5 group whose children are stored
    under the names "index_<position>".'''

    def __init__(self, name, data, attrs=None):
        self.set_name(name)
        self.set_value(data)
        # Per-instance default avoids the shared-mutable-default pitfall
        # (set_attrs mutates this dict in place).
        self.__props = {} if attrs is None else attrs

    ######### HDF5 Methods #################################################
    def _node(self, node):
        '''Resolve a child by its numeric name (list index).'''
        if node.isdigit():
            return self.__list[int(node)]
        # BUG FIX: `errors` was never imported (NameError at runtime);
        # PyTables exposes this exception as tables.NoSuchNodeError.
        raise tables.NoSuchNodeError("'%s'" % node)

    @property
    def _hdf5_desc(self):
        return ""

    @property
    def _hdf5_group_children(self):
        return self.__list

    @property
    def _hdf5_attrs(self):
        # 'type' tags the node so populateDataType can reconstruct it.
        attrs = {'type': 'data_list'}
        attrs.update(self.__props)
        return attrs

    @classmethod
    def _hdf5_populate(cls, hdfNode):
        '''Rebuild {'data': list, 'args': node attrs} from a stored group.'''
        found = {}
        for key, value in hdfNode._v_children.items():
            extracted = populateDataType(dataObj=None, hdfNode=value, extractOnly=True)
            found[key] = extracted['data']
        # Children are keyed "index_<i>"; rebuild the list in index order.
        final = []
        for i in range(len(found)):
            final.append(found["index_" + str(i)])
        args = {}
        for attr in hdfNode._v_attrs._f_list():
            args[attr] = getattr(hdfNode._v_attrs, attr)
        return {'data': final, 'args': args}

    ######## DataLeaf #####################################################
    @property
    def value(self):
        # Unwrap DataLeaf children to their raw values.
        return [x.value if isinstance(x, DataLeaf) else x for x in self.__list]

    def set_value(self, value):
        self.__list = []
        for item in value:
            self.append(item)

    def append(self, value, dtype=None, **args):
        # Each appended item is wrapped under the next "index_<n>" name.
        self.__list.append(getDataType("index_" + str(len(self.__list)), value, dtype, **args))

    @property
    def attrs(self):
        return self.__props

    def set_attrs(self, **kwargs):
        self.__props.update(kwargs)
class DataArray(HDF5LeafArray,DataLeaf):
    '''Persist a numpy array as an HDF5 array leaf.'''

    def __init__(self, name, data=None, attrs={}):
        self.set_name(name)
        self.__array = np.array(data)
        self.__attributes = attrs

    ######### HDF5 Methods #################################################
    @property
    def _hdf5_desc(self):
        return ''

    @property
    def _hdf5_leaf_array(self):
        return self.__array

    @property
    def _hdf5_attrs(self):
        # 'type' tags the node so populateDataType can reconstruct it;
        # caller-supplied attributes are layered on top.
        merged = {'type': 'data_array'}
        merged.update(self.__attributes)
        return merged

    @classmethod
    def _hdf5_populate(cls, hdfNode):
        '''Rebuild {'data': array, 'args': node attrs} from a stored leaf.'''
        node_attrs = hdfNode._v_attrs
        args = dict((attr, getattr(node_attrs, attr)) for attr in node_attrs._f_list())
        return {'data': hdfNode.read(), 'args': args}

    ######### Data Value Methods ###########################################
    @property
    def value(self):
        return self.__array

    def set_value(self, value):
        self.__array = np.array(value)

    @property
    def attrs(self):
        return self.__attributes

    def set_attrs(self, **kwargs):
        self.__attributes.update(kwargs)
| mit |
yotchang4s/cafebabepy | src/main/python/idlelib/searchbase.py | 7 | 7449 | '''Define SearchDialogBase used by Search, Replace, and Grep dialogs.'''
from tkinter import Toplevel, Frame
from tkinter.ttk import Entry, Label, Button, Checkbutton, Radiobutton
class SearchDialogBase:
    '''Create most of a 3 or 4 row, 3 column search dialog.

    The left and wide middle column contain:
    1 or 2 labeled text entry lines (make_entry, create_entries);
    a row of standard Checkbuttons (make_frame, create_option_buttons),
    each of which corresponds to a search engine Variable;
    a row of dialog-specific Check/Radiobuttons (create_other_buttons).

    The narrow right column contains command buttons
    (make_button, create_command_buttons).
    These are bound to functions that execute the command.

    Except for command buttons, this base class is not limited to items
    common to all three subclasses.  Rather, it is the Find dialog minus
    the "Find Next" command, its execution function, and the
    default_command attribute needed in create_widgets.  The other
    dialogs override attributes and methods, the latter to replace and
    add widgets.
    '''
    # Class attributes; subclasses override to customize the dialog.
    title = "Search Dialog" # replace in subclasses
    icon = "Search"
    needwrapbutton = 1 # not in Find in Files
    def __init__(self, root, engine):
        '''Initialize root, engine, and top attributes.

        top (level widget): set in create_widgets() called from open().
        text (Text searched): set in open(), only used in subclasses().
        ent (ry): created in make_entry() called from create_entries().
        row (of grid): 0 in create_widgets(), +1 in make_entry/frame().
        default_command: set in subclasses, used in create_widgets().

        title (of dialog): class attribute, override in subclasses.
        icon (of dialog): ditto, use unclear if cannot minimize dialog.
        '''
        self.root = root
        self.engine = engine
        # Toplevel is created lazily, on the first open() call.
        self.top = None
    def open(self, text, searchphrase=None):
        "Make dialog visible on top of others and ready to use."
        self.text = text
        if not self.top:
            self.create_widgets()
        else:
            # Dialog already exists; un-hide it and bring it forward.
            self.top.deiconify()
            self.top.tkraise()
        if searchphrase:
            # Replace any previous pattern with the supplied one.
            self.ent.delete(0,"end")
            self.ent.insert("end",searchphrase)
        self.ent.focus_set()
        self.ent.selection_range(0, "end")
        self.ent.icursor(0)
        # Make the dialog modal until closed.
        self.top.grab_set()
    def close(self, event=None):
        "Put dialog away for later use."
        if self.top:
            self.top.grab_release()
            # Hide rather than destroy so open() can re-show it cheaply.
            self.top.withdraw()
    def create_widgets(self):
        '''Create basic 3 row x 3 col search (find) dialog.

        Other dialogs override subsidiary create_x methods as needed.
        Replace and Find-in-Files add another entry row.
        '''
        top = Toplevel(self.root)
        top.bind("<Return>", self.default_command)
        top.bind("<Escape>", self.close)
        top.protocol("WM_DELETE_WINDOW", self.close)
        top.wm_title(self.title)
        top.wm_iconname(self.icon)
        self.top = top
        self.bell = top.bell
        # self.row tracks the next free grid row; make_entry/make_frame
        # increment it as they add rows.
        self.row = 0
        self.top.grid_columnconfigure(0, pad=2, weight=0)
        self.top.grid_columnconfigure(1, pad=2, minsize=100, weight=100)
        self.create_entries() # row 0 (and maybe 1), cols 0, 1
        self.create_option_buttons() # next row, cols 0, 1
        self.create_other_buttons() # next row, cols 0, 1
        self.create_command_buttons() # col 2, all rows
    def make_entry(self, label_text, var):
        '''Return (entry, label) for a new gridded entry row.

        entry - gridded labeled Entry for text entry.
        label - Label widget, returned for testing.
        '''
        label = Label(self.top, text=label_text)
        label.grid(row=self.row, column=0, sticky="nw")
        entry = Entry(self.top, textvariable=var, exportselection=0)
        entry.grid(row=self.row, column=1, sticky="nwe")
        # Advance to the next grid row.
        self.row = self.row + 1
        return entry, label
    def create_entries(self):
        "Create one or more entry lines with make_entry."
        self.ent = self.make_entry("Find:", self.engine.patvar)[0]
    def make_frame(self,labeltext=None):
        '''Return (frame, label) for a new gridded button row.

        frame - gridded labeled Frame for option or other buttons.
        label - Label widget (or '' when no labeltext), returned for testing.
        '''
        if labeltext:
            label = Label(self.top, text=labeltext)
            label.grid(row=self.row, column=0, sticky="nw")
        else:
            label = ''
        frame = Frame(self.top)
        frame.grid(row=self.row, column=1, columnspan=1, sticky="nwe")
        # Advance to the next grid row.
        self.row = self.row + 1
        return frame, label
    def create_option_buttons(self):
        '''Return (filled frame, options) for testing.

        Options is a list of searchengine booleanvar, label pairs.
        A gridded frame from make_frame is filled with a Checkbutton
        for each pair, bound to the var, with the corresponding label.
        '''
        frame = self.make_frame("Options")[0]
        engine = self.engine
        options = [(engine.revar, "Regular expression"),
                   (engine.casevar, "Match case"),
                   (engine.wordvar, "Whole word")]
        if self.needwrapbutton:
            options.append((engine.wrapvar, "Wrap around"))
        for var, label in options:
            btn = Checkbutton(frame, variable=var, text=label)
            btn.pack(side="left", fill="both")
        return frame, options
    def create_other_buttons(self):
        '''Return (frame, others) for testing.

        Others is a list of value, label pairs.
        A gridded frame from make_frame is filled with radio buttons.
        '''
        frame = self.make_frame("Direction")[0]
        var = self.engine.backvar
        others = [(1, 'Up'), (0, 'Down')]
        for val, label in others:
            btn = Radiobutton(frame, variable=var, value=val, text=label)
            btn.pack(side="left", fill="both")
        return frame, others
    def make_button(self, label, command, isdef=0):
        "Return command button gridded in command frame."
        b = Button(self.buttonframe,
                   text=label, command=command,
                   # old-style conditional: "active" if isdef else "normal"
                   default=isdef and "active" or "normal")
        # Place the new button in the next free row of the button frame
        # and stretch the frame to span the added row.
        cols,rows=self.buttonframe.grid_size()
        b.grid(pady=1,row=rows,column=0,sticky="ew")
        self.buttonframe.grid(rowspan=rows+1)
        return b
    def create_command_buttons(self):
        "Place buttons in vertical command frame gridded on right."
        f = self.buttonframe = Frame(self.top)
        f.grid(row=0,column=2,padx=2,pady=2,ipadx=2,ipady=2)
        b = self.make_button("close", self.close)
        # NOTE(review): .lower() adjusts the widget stacking order --
        # presumably to keep 'close' beneath later buttons; confirm.
        b.lower()
class _searchbase(SearchDialogBase):  # htest #
    """Auto-opening dialog with no text connection, for human testing."""

    def __init__(self, parent):
        import re
        from idlelib import searchengine
        self.root = parent
        self.engine = searchengine.get(parent)
        self.create_widgets()
        print(parent.geometry())
        # Parse "<width>x<height>+<x>+<y>" and offset the dialog from the
        # parent window.
        _width, _height, x, y = (int(part)
                                 for part in re.split('[x+]', parent.geometry()))
        self.top.geometry("+%d+%d" % (x + 40, y + 175))

    def default_command(self, dummy):
        pass
if __name__ == '__main__':
    # Run the module's unit tests first (without exiting the process)...
    import unittest
    unittest.main('idlelib.idle_test.test_searchbase', verbosity=2, exit=False)
    # ...then launch the dialog for interactive human testing.
    from idlelib.idle_test.htest import run
    run(_searchbase)
| bsd-3-clause |
gsehub/edx-platform | cms/envs/devstack_with_worker.py | 23 | 1050 | """
This config file follows the devstack environment, but adds the
requirement of a celery worker running in the background to process
celery tasks.
When testing locally, run lms/cms with this settings file as well, to test queueing
of tasks onto the appropriate workers.
In two separate processes on devstack:
paver devstack studio --settings=devstack_with_worker
./manage.py cms celery worker --settings=devstack_with_worker
"""
import os

# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
if 'BOK_CHOY_HOSTNAME' in os.environ:
    from cms.envs.devstack_docker import *
else:
    from cms.envs.devstack import *

# Tasks must be queued to a real worker process instead of executing
# eagerly in-process.
CELERY_ALWAYS_EAGER = False

# Disable transaction management because we are using a worker. Views
# that request a task and wait for the result will deadlock otherwise.
for database_config in DATABASES.values():
    database_config['ATOMIC_REQUESTS'] = False
bikong2/django | tests/test_client_regress/views.py | 143 | 5161 | import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.test import Client
from django.test.client import CONTENT_TYPE_RE
from django.test.utils import setup_test_environment
from django.utils.six.moves.urllib.parse import urlencode
class CustomTestException(Exception):
    """Marker exception raised by views to signal a deliberate failure."""
    pass


def no_template_view(request):
    """Respond with fixed plain content; no template is rendered."""
    return HttpResponse("No template used. Sample content: twice once twice. Content ends.")


def staff_only_view(request):
    """Allow staff users only; anyone else triggers CustomTestException."""
    if not request.user.is_staff:
        raise CustomTestException()
    return HttpResponse('')


@login_required
def get_view(request):
    """A simple login protected view."""
    return HttpResponse("Hello world")
def request_data(request, template='base.html', data='sausage'):
    """Render *template* with selected GET/POST parameters in the context."""
    context = {
        'get-foo': request.GET.get('foo'),
        'get-bar': request.GET.get('bar'),
        'post-foo': request.POST.get('foo'),
        'post-bar': request.POST.get('bar'),
        'data': data,
    }
    return render_to_response(template, context)
def view_with_argument(request, name):
    """A view that takes a string argument.

    The purpose of this view is to check that if a space is provided in
    the argument, the test framework unescapes the %20 before passing
    the value to the view.
    """
    greeting = 'Hi, Arthur' if name == 'Arthur Dent' else 'Howdy, %s' % name
    return HttpResponse(greeting)
def nested_view(request):
    """Use a fresh test client to request another view, then render."""
    setup_test_environment()
    client = Client()
    client.get("/no_template_view/")
    return render_to_response('base.html', {'nested': 'yes'})
@login_required
def login_protected_redirect_view(request):
    """Redirect all requests to the GET view; requires login."""
    return HttpResponseRedirect('/get_view/')


def redirect_to_self_with_changing_query_view(request):
    """Redirect back to this view with '0' appended to the counter param."""
    query = request.GET.copy()
    query['counter'] += '0'
    target = '/redirect_to_self_with_changing_query_view/?%s' % urlencode(query)
    return HttpResponseRedirect(target)


def set_session_view(request):
    """Store a marker value in the session."""
    request.session['session_var'] = 'YES'
    return HttpResponse('set_session')


def check_session_view(request):
    """Report the session marker value, or 'NO' when unset."""
    return HttpResponse(request.session.get('session_var', 'NO'))


def request_methods_view(request):
    """Echo the HTTP method of the request."""
    return HttpResponse('request method: %s' % request.method)
def return_unicode(request):
    """Render the unicode test template."""
    return render_to_response('unicode.html')


def return_undecodable_binary(request):
    """Return raw bytes that cannot be decoded as text."""
    return HttpResponse(
        b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document http://www.reportlab.com'
    )


def return_json_response(request):
    """Return a trivial JSON payload."""
    return JsonResponse({'key': 'value'})


def return_json_file(request):
    """Parse the request body as JSON and return it as a JSON attachment."""
    match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
    charset = match.group(1) if match else settings.DEFAULT_CHARSET
    # Round-trip through json to confirm the uploaded data is valid JSON.
    obj_dict = json.loads(request.body.decode(charset))
    obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder, ensure_ascii=False)
    response = HttpResponse(
        obj_json.encode(charset),
        status=200,
        content_type='application/json; charset=%s' % charset,
    )
    response['Content-Disposition'] = 'attachment; filename=testfile.json'
    return response


def check_headers(request):
    """Echo the value of the X-ARG-CHECK request header."""
    value = request.META.get('HTTP_X_ARG_CHECK', 'Undefined')
    return HttpResponse('HTTP_X_ARG_CHECK: %s' % value)
def body(request):
    """Echo the raw request body (GET request accessing request.body). Refs #14753."""
    return HttpResponse(request.body)


def read_all(request):
    """Echo the body obtained via request.read()."""
    return HttpResponse(request.read())


def read_buffer(request):
    """Echo the body obtained via request.read(LARGE_BUFFER)."""
    return HttpResponse(request.read(99999))


def request_context_view(request):
    """Render using a RequestContext carrying a special request attribute."""
    # Special attribute that won't be present on a plain HttpRequest.
    request.special_path = request.path
    context = RequestContext(request, {})
    return render_to_response('request_context.html', context_instance=context)


def render_template_multiple_times(request):
    """Render the same template twice and concatenate the results."""
    first = render_to_string('base.html')
    second = render_to_string('base.html')
    return HttpResponse(first + second)
| bsd-3-clause |
aorti017/rcrawler | src/page_interface.py | 1 | 2577 | import urllib2
import re
from bs4 import BeautifulSoup
# Open and read the page at the given URL; return its contents as a string,
# or the sentinel string "404" when the HTTP request fails.
def open_link(url):
    try:
        #print "Opening page - " + url
        #returns the contents of the page
        return urllib2.urlopen(url).read()
    except urllib2.HTTPError, error:
        #print "unable to open " + url
        #prints the HTTPError code
        #print error.read()
        print "Error: " + url
        # NOTE(review): only HTTPError is caught; connection-level failures
        # raise urllib2.URLError and would propagate -- confirm whether
        # callers expect that.
        #if unable to open or connect return the string "404" signifying that
        #the HTTP response code was indicative of an error causing an exception
        return "404"
#adds all of the words found on the page to the index with their update associated values
def add_to_index(page_url, page, index):
    """Record every word of *page* in *index* under *page_url*.

    Non-alphanumeric characters act as separators.  Each word maps to the
    list of page URLs it appears on; a URL is stored at most once per word.
    """
    # Collapse every run of non-word characters into a single space, then split.
    tokens = re.sub(r'\W+', ' ', page).split()
    for token in tokens:
        if token in index:
            # Known word: append this page's URL only if it is not listed yet.
            if page_url not in index[token]:
                index[token].append(page_url)
        else:
            # New word: start its URL list with this page.
            index[token] = [page_url]
#retrieves and returns all of the links on the passed in page
def get_all_links(page_url, page, links):
    """Extract anchors from *page* and append new URLs to *links*.

    URLs are lowercased; image links (.jpg/.png) and bare "/" are skipped;
    relative URLs are prefixed with *page_url*.  Returns the updated list.
    """
    #print "getting all links"
    #initialize the page as a BeautifulSoup object
    soup = BeautifulSoup(page)
    #for every element in the BeautifulSoup object which has an 'a' tag and an 'href' value
    for l in soup.find_all('a', href=True):
        #url is equal to the 'href' value
        url = l['href']
        #the URL is set to all lower case
        url = url.lower()
        #if the URL is not a link to an image and it is not '/'
        if url != "/" and not(url.endswith(".jpg") or url.endswith(".png")):
            #if the URL starts with '/' prefix the missing part of the URL
            if url.startswith('/'):
                url = page_url + url
            #if the URL does not start with 'http' prefix 'http/'
            # NOTE(review): the comment says "prefix 'http/'" but the code
            # prefixes page_url + '/', which looks like the actual intent.
            elif not url.startswith('http'):
                url = page_url + '/' + url
            #if the URL is not already in the list of links to crawl add it
            if url not in links:
                links.append(url)
    #return the new list of links to crawl
    return links
| bsd-3-clause |
pythonvietnam/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))

# Color samples: mix the i.i.d. features through a random matrix so the
# true covariance is coloring_matrix.T . coloring_matrix.
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)

###############################################################################
# Compute the likelihood on test data

# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
                    for s in shrinkages]

# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))

###############################################################################
# Compare different approaches to setting the parameter

# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)

# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)

# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)

###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
# Bug fix: corrected the axis-label typo "Regularizaton" -> "Regularization".
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')

# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")

plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
         label="Real covariance likelihood")

# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]

# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
           linewidth=3, label='Ledoit-Wolf estimate')

# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
           linewidth=3, label='OAS estimate')

# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
           -cv.best_estimator_.score(X_test), color='cyan',
           linewidth=3, label='Cross-validation best estimate')

plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()

plt.show()
| bsd-3-clause |
SpamExperts/OrangeAssassin | tests/profiling/test_match.py | 2 | 2226 | from __future__ import absolute_import, print_function
import os
import unittest
import platform
import tests.util
# Generic Test for Unsolicited Bulk Email: a standard string every spam
# filter is required to flag as spam.
GTUBE = "XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-EMAIL*C.34X"
# psutil-based profiling does not work on PyPy; tests below are skipped there.
IS_PYPY = "pypy" in platform.python_implementation().lower()
@unittest.skipIf(IS_PYPY, "Psutil doesn't work on PyPy")
class MemoryTest(tests.util.TestBase):
    """Base profiling test: runs a ham and a GTUBE check under one profiler."""

    # Profiler type handed to profile_pad; subclasses override this.
    ptype = "memory"
    # Per-test resource limits; None disables the corresponding check.
    limits = {
        "test_simple": {
            "ilimit": None,
            "elimit": None,
            "plimit": None,
            "inclimit": None,
        },
        "test_simple_gtube": {
            "ilimit": None,
            "elimit": None,
            "plimit": None,
            "inclimit": None,
        }
    }

    def test_simple(self):
        """Profile simple ham check."""
        limits = self.limits["test_simple"]
        name = "%s: Simple ham message check" % self.ptype.title()
        sname = "simple_%s" % self.ptype
        msg = "Subject: test\n\nTest abcd test."
        # Single body rule so the scan does real (but minimal) matching work.
        self.setup_conf(config="body TEST_RULE /abcd/",
                        pre_config="report _SCORE_")
        self.profile_pad(name, sname, msg, ptype=self.ptype, **limits)

    def test_simple_gtube(self):
        """Profile GTUBE spam check."""
        limits = self.limits["test_simple_gtube"]
        name = "%s: Simple GTUBE message check" % self.ptype.title()
        sname = "gtube_%s" % self.ptype
        msg = "Subject: test\n\n" + GTUBE
        self.setup_conf(pre_config="report _SCORE_")
        self.profile_pad(name, sname, msg, ptype=self.ptype, **limits)
# The subclasses below re-run MemoryTest's two checks under different
# profilers; only the `ptype` string passed to profile_pad changes.
@unittest.skipIf(IS_PYPY, "Psutil doesn't work on PyPy")
class MemoryUSSTest(MemoryTest):
    # Unique set size memory profiling.
    ptype = "memory-uss"


@unittest.skipIf(IS_PYPY, "Psutil doesn't work on PyPy")
class MemoryPSSTest(MemoryTest):
    # Proportional set size memory profiling.
    ptype = "memory-pss"


@unittest.skipIf(IS_PYPY, "Psutil doesn't work on PyPy")
class CPUTest(MemoryTest):
    # CPU-time profiling.
    ptype = "cpu"


@unittest.skipIf(IS_PYPY, "Psutil doesn't work on PyPy")
class IOWriteTest(MemoryTest):
    # Bytes-written I/O profiling.
    ptype = "io-write"


@unittest.skipIf(IS_PYPY, "Psutil doesn't work on PyPy")
class IOReadTest(MemoryTest):
    # Bytes-read I/O profiling.
    ptype = "io-read"


@unittest.skipIf(IS_PYPY, "Psutil doesn't work on PyPy")
class IOCountTest(MemoryTest):
    # I/O operation count profiling.
    ptype = "io-count"
| apache-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/sqlite3/test/py25tests.py | 127 | 2736 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/test/regression.py: pysqlite regression tests
#
# Copyright (C) 2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from __future__ import with_statement
import unittest
import sqlite3 as sqlite
# Module-level flag: set to True by MyConnection.rollback(); reset in
# ContextTests.setUp() so each test can observe whether a rollback occurred.
did_rollback = False
class MyConnection(sqlite.Connection):
    """Connection subclass that records every rollback in the module-global
    ``did_rollback`` flag before delegating to the real implementation."""

    def rollback(self):
        # Remember that a rollback happened, then perform the real rollback.
        global did_rollback
        did_rollback = True
        super(MyConnection, self).rollback()
class ContextTests(unittest.TestCase):
    """Tests for using the connection as a context manager (PEP 343):
    leaving the ``with`` block commits on success and rolls back on error."""

    def setUp(self):
        global did_rollback
        # Fresh in-memory database; the UNIQUE constraint lets us force
        # an IntegrityError on demand.
        self.con = sqlite.connect(":memory:", factory=MyConnection)
        self.con.execute("create table test(c unique)")
        did_rollback = False

    def tearDown(self):
        self.con.close()

    def CheckContextManager(self):
        """Can the connection be used as a context manager at all?"""
        with self.con:
            pass

    def CheckContextManagerCommit(self):
        """Is a commit called in the context manager?"""
        with self.con:
            self.con.execute("insert into test(c) values ('foo')")
        # If the insert was committed, a later rollback must not undo it.
        self.con.rollback()
        count = self.con.execute("select count(*) from test").fetchone()[0]
        self.assertEqual(count, 1)

    def CheckContextManagerRollback(self):
        """Is a rollback called in the context manager?"""
        global did_rollback
        self.assertEqual(did_rollback, False)
        try:
            with self.con:
                self.con.execute("insert into test(c) values (4)")
                # Second identical insert violates the UNIQUE constraint.
                self.con.execute("insert into test(c) values (4)")
        except sqlite.IntegrityError:
            pass
        self.assertEqual(did_rollback, True)
def suite():
    """Collect every Check* method of ContextTests into one test suite."""
    return unittest.TestSuite((unittest.makeSuite(ContextTests, "Check"),))
def test():
    """Run the context-manager suite with a plain text runner."""
    unittest.TextTestRunner().run(suite())
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    test()
| mit |
paulluo/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";

# Optional filter: restrict the report to one command name or one pid.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        # A numeric argument selects a pid ...
        for_pid = int(sys.argv[1])
    except:
        # ... anything else is treated as a command name.
        for_comm = sys.argv[1]

# Nested autovivifying map: comm -> pid -> syscall id -> return value -> count.
syscalls = autodict()

def trace_begin():
    # Called by perf before event processing starts (Python 2 print syntax).
    print "Press control+C to stop and show the summary"

def trace_end():
    # Called by perf after the last event; emit the accumulated summary.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, ret):
    """Per-event hook: count failed syscall exits, honoring the comm/pid filter."""
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return

    # Only failed syscalls (negative return values) are counted.
    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            # First failure seen for this (comm, pid, id, ret) combination.
            syscalls[common_comm][common_pid][id][ret] = 1

def syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    # Older perf event signature; delegate to the raw handler.
    # NOTE(review): locals() here does not contain common_callchain, which
    # raw_syscalls__sys_exit requires — confirm against perf's dispatch code.
    raw_syscalls__sys_exit(**locals())
def print_error_totals():
    """Print failed-syscall counts grouped by comm/pid, errors sorted by count.

    Python 2 code: uses print statements with trailing commas and the
    py2-only tuple-argument lambda in the sort key.
    """
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s  %10s\n" % ("comm [pid]", "count"),
    print "%-30s  %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print "  syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                # Sort per-errno counts in descending order of count.
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print "      err = %-20s  %10d\n" % (strerror(ret), val),
| gpl-2.0 |
amilan/mxcube3 | mxcube3/routes/Collection.py | 3 | 6894 | from flask import session, redirect, url_for, render_template, request, Response
from mxcube3 import app as mxcube
import logging
###----COLLECTION----###
@mxcube.route("/mxcube/api/v0.1/samples/<id>/collections/<colid>/mode", methods=['POST'])
def set_collection_method(method):
    """Define the collection method, standard collection, helical, mesh
    data={generic_data, "Method":method}
    return_data={"result": True/False}
    OBSOLETE BY ADD COLLECTION
    """
    # NOTE(review): the route declares <id> and <colid>, but the view takes a
    # single `method` argument, so Flask would raise a TypeError if this
    # endpoint were ever hit. The docstring marks it obsolete — consider
    # deleting it.
    # NOTE(review): Flask's request object has no `POST` attribute (that is
    # Django's API); the Flask equivalents are `request.form` /
    # `request.get_json()`.
    data = dict(request.POST.items())
    return mxcube.collection.defineCollectionMethod(data)

@mxcube.route("/mxcube/api/v0.1/samples/<id>/collections/<colid>", methods=['PUT'])
def update_collection(method):
    """update a collection into the sample queue ***asociated to a sample!
    data={generic_data, "Method":method, "SampleId": sampleid ,"CollectionId": id, parameters},
    for example for a standard data collection:
    data={generic_data, "Method":StandardCollection, "SampleId": sampleid, "CollectionId": colid, parameters:{
    osc_range: { label: "Oscillation range", default_value: 1.0, value: 0 },
    osc_start: { label: "Oscillation start", default_value: 0, value: 0 },
    exp_time: { label: "Exposure time", default_value: 10.0, value: 0 },
    n_images: { label: "Number of images", default_value: 1, value: 0 },
    energy: {label: "Energy", default_value: 12.3984, value: 0 },
    resolution: {label: "Resolution", default_value: 2.498, value: 0 },
    transmission: {label: "Transmission", default_value: 100.0, value: 0} },
    },
    return_data={"result": True/False}
    """
    # NOTE(review): same problems as set_collection_method above — the route
    # passes id/colid but the signature takes `method`, and `request.POST`
    # does not exist on Flask's request object.
    data = dict(request.POST.items())
    return mxcube.collection.updateCollection(data)
@mxcube.route("/mxcube/api/v0.1/samples/<id>/collections/<colid>", methods=['POST'])
def add_collection(id, colid):
    """Add a collection into the sample queue ***asociate to a sample!
    data={generic_data, "Method":method, "SampleId": sampleid ,"CollectionId": id, parameters},
    for example for a standard data collection:
    data={generic_data, "Method":StandardCollection, "SampleId": sampleid, "CollectionId": colid, parameters:{
    osc_range: { label: "Oscillation range", default_value: 1.0, value: 0 },
    osc_start: { label: "Oscillation start", default_value: 0, value: 0 },
    exp_time: { label: "Exposure time", default_value: 10.0, value: 0 },
    n_images: { label: "Number of images", default_value: 1, value: 0 },
    energy: {label: "Energy", default_value: 12.3984, value: 0 },
    resolution: {label: "Resolution", default_value: 2.498, value: 0 },
    transmission: {label: "Transmission", default_value: 100.0, value: 0} },
    },
    return_data={"result": True/False}
    """
    # NOTE(review): `request.POST` does not exist on Flask's request object
    # (that is Django's API) — use `request.form` or `request.get_json()`.
    data = dict(request.POST.items())
    # Python 2 debug print; consider logging.debug instead.
    print data
    return mxcube.collection.addCollection(data)
@mxcube.route("/mxcube/api/v0.1/samples/<id>/collections/<colid>", methods=['GET'])
def get_collection(id, colid):
    """get the collection with id:"colid"
    data={generic_data},
    for example for a standard data collection:
    return_data={"Method":StandardCollection, "SampleId": sampleid, "CollectionId": colid, parameters:{
    osc_range: { label: "Oscillation range", default_value: 1.0, value: 0 },
    osc_start: { label: "Oscillation start", default_value: 0, value: 0 },
    exp_time: { label: "Exposure time", default_value: 10.0, value: 0 },
    n_images: { label: "Number of images", default_value: 1, value: 0 },
    energy: {label: "Energy", default_value: 12.3984, value: 0 },
    resolution: {label: "Resolution", default_value: 2.498, value: 0 },
    transmission: {label: "Transmission", default_value: 100.0, value: 0} },
    },
    """
    # Bug fixes:
    # * The route declares <id> and <colid>, so Flask invokes the view with
    #   both keyword arguments; the previous signature (id only) raised a
    #   TypeError on every request.
    # * Flask's request object has no `POST` attribute (that is Django's
    #   API); for a GET endpoint the query parameters live in `request.args`.
    data = dict(request.args.items())
    return mxcube.collection.getCollection(data)
@mxcube.route("/mxcube/api/v0.1/samples/<id>/collections", methods=['GET'])
def get_collection_list(id):
    """get the collection with id:"id"
    data={generic_data},
    for example for a standard data collection:
    return_data={"Method":StandarCollection, "SampleId": sampleid, "CollectionId": colid, parameters:{
    osc_range: { label: "Oscillation range", default_value: 1.0, value: 0 },
    osc_start: { label: "Oscillation start", default_value: 0, value: 0 },
    exp_time: { label: "Exposure time", default_value: 10.0, value: 0 },
    n_images: { label: "Number of images", default_value: 1, value: 0 },
    energy: {label: "Energy", default_value: 12.3984, value: 0 },
    resolution: {label: "Resolution", default_value: 2.498, value: 0 },
    transmission: {label: "Transmission", default_value: 100.0, value: 0} },
    },
    """
    # NOTE(review): `data` is built but the view returns nothing, so the
    # endpoint always responds with an empty/None body. It presumably should
    # call and return a collection-list method on mxcube.collection — confirm
    # the intended target. Also, `request.POST` does not exist on Flask's
    # request object (Django API); a GET endpoint would use `request.args`.
    data = dict(request.POST.items())

@mxcube.route("/mxcube/api/v0.1/samples/<id>/collections/<colid>", methods=['DELETE'])
def delete_collection(id):
    """delete the collection with id:"id"
    data={generic_data, "CollectionId": id},
    return_data={"result": True/False}
    """
    # NOTE(review): the route declares <colid> as well, so Flask would raise
    # a TypeError calling this single-argument view. Additionally `samples`
    # is not defined in this module (NameError), and `getCollection` looks
    # wrong for a DELETE — probably mxcube.collection.deleteCollection(data).
    data = dict(request.POST.items())
    return samples.getCollection(data)

@mxcube.route("/mxcube/api/v0.1/samples/<id>/collections/status", methods=['GET'])
def get_collection_status(id):
    """get the status of all data collections, (running, stopped, cancelled, finished, ...)
    data={generic_data},
    return_data={ {"CollectionId": id1, "Status": status}, ..., {"CollectionId": idN, "Status": status} }
    """
    # NOTE(review): `request.POST` does not exist on Flask's request object;
    # for a GET endpoint use `request.args`.
    data = dict(request.POST.items())
    return mxcube.collection.getCollectionStatus(data)
@mxcube.route("/mxcube/api/v0.1/samples/<id>/collections/<colid>/status", methods=['GET'])
def get_collection_id_status(id, colid):
    """get the status of the collection with id:"id", (running, stopped, cancelled, finished, ...)
    data={generic_data},
    return_data={"CollectionId": id, "Status": status}
    """
    # Bug fixes:
    # * The route declares <id> and <colid>; the previous signature (id only)
    #   made Flask raise a TypeError on every request.
    # * Flask's request has no `POST` attribute (Django API); GET query
    #   parameters live in `request.args`.
    data = dict(request.args.items())
    return mxcube.collection.getCollectionStatus(data)
@mxcube.route("/mxcube/api/v0.1/samples/<sampleid>/collections/<colid>/run", methods=['POST'])
def run_collection(**args):
    """run the collection with id:"colid"
    data={generic_data},
    return_data={"CollectionId": id, "Status": status}
    """
    # Python 2 debug prints; the **args signature soaks up the route's
    # sampleid/colid keyword arguments.
    print "in run collection", args['sampleid'], args['colid']
    # NOTE(review): `request.POST` does not exist on Flask's request object
    # (Django API) — use `request.form` or `request.get_json()` instead.
    data = dict(request.POST.items())
    print data
    #return "collection ok"
    return mxcube.collection.executeCollection(data)
    #return collection.runCollectionStatus(data)
# @mxcube.route("/mxcube/api/v0.1/samples/<id>/collections/<colid>/run", methods='POST')
# def run_queue(id):
# """run the whole queue
# data={generic_data},
# return_data={"CollectionId": id, "Status": status}
# """
# data = dict(request.POST.items())
# print data
# #return collection.runCollectionStatus(data)
| gpl-2.0 |
auduny/chains | lib/chains/commandline/describe.py | 1 | 1436 | from chains.commandline import commands
from chains.common import utils, introspect
import os
def getCommands():
    """Map each command section to the list of command names found on disk.

    Scans every directory from commands.getCommandDirs() for Python modules
    (skipping names starting with '_') and splits each module name into
    (section, command) via utils.caseSplit.

    Returns:
        dict mapping section name -> list of command names.
    """
    result = {}
    for directory in commands.getCommandDirs():
        for entry in os.listdir(directory):
            # Skip private helpers such as __init__.py.
            if entry[0] == '_':
                continue
            parts = entry.split('.')
            # Only consider Python source files.
            if parts[-1:][0] != 'py':
                continue
            moduleName = '.'.join(parts[:-1])
            section, command = utils.caseSplit(moduleName)
            # Fixes: dict.has_key() is obsolete (removed in Python 3) —
            # setdefault covers both branches; also renamed locals that
            # shadowed the builtins `file` and `dir`.
            result.setdefault(section, []).append(command)
    return result
def getSections():
    """Return all known command sections, sorted alphabetically."""
    return sorted(getCommands())
def getSectionCommands(section):
    """Return the command names belonging to *section*, sorted alphabetically."""
    sectionCommands = getCommands().get(section)
    sectionCommands.sort()
    return sectionCommands
def getCommandHelp(section, command):
    """Describe the ``main`` entry point of the given section/command module."""
    loaded = commands.load(section, command)
    mainFunc = getattr(loaded[section][command], 'main')
    return introspect.describeMethod(mainFunc)
"""
def formatCommand(section, command, info):
txtCommand = '%s.%s' % (section, command)
txtArgs = ''
for arg in info['args']:
txtArgs += arg['key'] + ' '
txtDescr = info['info']
return '%-60s %s' % (txtCommand + ' ' + txtArgs, txtDescr)
"""
if __name__ == '__main__':
    import sys
    # Minimal CLI: ``describe.py sections`` lists the known command sections
    # (Python 2 print statement).
    if sys.argv[1] == 'sections':
        print ' '.join(getSections())
| gpl-2.0 |
viewdy/phantomjs2 | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/png.py | 170 | 3914 | # Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports checking WebKit style in png files."""
import os
import re
from webkitpy.common import checksvnconfigfile
from webkitpy.common import read_checksum_from_png
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.checkout.scm.detection import SCMDetector
class PNGChecker(object):
    """Check svn:mime-type for checking style"""

    # Style-error categories this checker can emit.
    categories = set(['image/png'])

    def __init__(self, file_path, handle_style_error, scm=None, host=None):
        # Path of the PNG file under inspection.
        self._file_path = file_path
        # Callback signature: (line_number, category, confidence, message).
        self._handle_style_error = handle_style_error
        self._host = host or SystemHost()
        self._fs = self._host.filesystem
        # SCM backend (git or svn); auto-detected from the cwd when not given.
        self._detector = scm or SCMDetector(self._fs, self._host.executive).detect_scm_system(self._fs.getcwd())

    def check(self, inline=None):
        """Report expected-PNG files that lack an embedded checksum and
        missing svn:mime-type / SVN auto-props configuration."""
        errorstr = ""
        config_file_path = ""
        detection = self._detector.display_name()

        # Expected results must carry the checksum written by run-webkit-tests.
        if self._fs.exists(self._file_path) and self._file_path.endswith("-expected.png"):
            with self._fs.open_binary_file_for_reading(self._file_path) as filehandle:
                if not read_checksum_from_png.read_checksum(filehandle):
                    self._handle_style_error(0, 'image/png', 5, "Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.")

        if detection == "git":
            # git checkouts rely on the user's SVN config for png auto-props.
            (file_missing, autoprop_missing, png_missing) = checksvnconfigfile.check(self._host, self._fs)
            config_file_path = checksvnconfigfile.config_file_path(self._host, self._fs)

            if file_missing:
                self._handle_style_error(0, 'image/png', 5, "There is no SVN config file. (%s)" % config_file_path)
            elif autoprop_missing and png_missing:
                self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_autoprop(config_file_path) + checksvnconfigfile.errorstr_png(config_file_path))
            elif autoprop_missing:
                self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_autoprop(config_file_path))
            elif png_missing:
                self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_png(config_file_path))

        elif detection == "svn":
            # svn checkouts can be queried for the property directly.
            prop_get = self._detector.propget("svn:mime-type", self._file_path)
            if prop_get != "image/png":
                errorstr = "Set the svn:mime-type property (svn propset svn:mime-type image/png %s)." % self._file_path
                self._handle_style_error(0, 'image/png', 5, errorstr)
| bsd-3-clause |
whn09/tensorflow | tensorflow/python/kernel_tests/norm_op_test.py | 53 | 4705 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.norm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test as test_lib
def _AddTest(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class NormOpTest(test_lib.TestCase):
  """Error-handling tests for tf.norm's `ord` and `axis` arguments."""

  def testBadOrder(self):
    # Unsupported norm orders must raise ValueError.
    matrix = [[0., 1.], [2., 3.]]
    for ord_ in "foo", -7, -1.1, 0:
      # NOTE(review): this first call ignores ord_ and always passes
      # ord="fro" (invalid without a matrix axis) — confirm intent.
      with self.assertRaisesRegexp(ValueError,
                                   "'ord' must be a supported vector norm"):
        linalg_ops.norm(matrix, ord="fro")

    for ord_ in "foo", -7, -1.1, 0:
      with self.assertRaisesRegexp(ValueError,
                                   "'ord' must be a supported vector norm"):
        linalg_ops.norm(matrix, ord=ord_, axis=-1)

    # Orders valid for vectors but not for matrix (two-axis) norms.
    for ord_ in 1.1, 2:
      with self.assertRaisesRegexp(ValueError,
                                   "'ord' must be a supported matrix norm"):
        linalg_ops.norm(matrix, ord=ord_, axis=[-2, -1])

  def testInvalidAxis(self):
    # axis must be None, an int, or a pair of distinct ints.
    matrix = [[0., 1.], [2., 3.]]
    for axis_ in [], [1, 2, 3], [[1]], [[1], [2]], [3.1415], [1, 1]:
      error_prefix = ("'axis' must be None, an integer, or a tuple of 2 unique "
                      "integers")
      with self.assertRaisesRegexp(ValueError, error_prefix):
        linalg_ops.norm(matrix, axis=axis_)
def _GetNormOpTest(dtype_, shape_, ord_, axis_, keep_dims_, use_static_shape_):
  """Build a test method comparing tf.norm against numpy.linalg.norm.

  Args:
    dtype_: numpy dtype for the random input matrix.
    shape_: full input shape, including optional batch dimensions.
    ord_: norm order to test.
    axis_: axis argument (None, int, or 2-tuple/list for matrix norms).
    keep_dims_: whether reduced dimensions are kept with size one.
    use_static_shape_: feed via a constant (static shape) or a placeholder.

  Returns:
    A bound-method-style function suitable for _AddTest.
  """

  def _CompareNorm(self, matrix):
    # Reference value computed by numpy.
    np_norm = np.linalg.norm(matrix, ord=ord_, axis=axis_, keepdims=keep_dims_)
    with self.test_session(use_gpu=True) as sess:
      if use_static_shape_:
        tf_matrix = constant_op.constant(matrix)
        tf_norm = linalg_ops.norm(
            tf_matrix, ord=ord_, axis=axis_, keep_dims=keep_dims_)
        tf_norm_val = sess.run(tf_norm)
      else:
        tf_matrix = array_ops.placeholder(dtype_)
        tf_norm = linalg_ops.norm(
            tf_matrix, ord=ord_, axis=axis_, keep_dims=keep_dims_)
        tf_norm_val = sess.run(tf_norm, feed_dict={tf_matrix: matrix})
      self.assertAllClose(np_norm, tf_norm_val)

  def Test(self):
    is_matrix_norm = (isinstance(axis_, tuple) or
                      isinstance(axis_, list)) and len(axis_) == 2
    is_fancy_p_norm = np.isreal(ord_) and np.floor(ord_) != ord_
    if ((not is_matrix_norm and ord_ == "fro") or
        (is_matrix_norm and is_fancy_p_norm)):
      self.skipTest("Not supported by neither numpy.linalg.norm nor tf.norm")
    if is_matrix_norm and ord_ == 2:
      self.skipTest("Not supported by tf.norm")
    # Bug fix: use the closed-over `shape_` parameter here; the previous code
    # read the module-level `shape` variable, which only exists when the file
    # is run as a script and holds whatever value the generation loop left.
    if ord_ == 'euclidean' or (axis_ is None and len(shape_) > 2):
      self.skipTest("Not supported by numpy.linalg.norm")
    matrix = np.random.randn(*shape_).astype(dtype_)
    _CompareNorm(self, matrix)

  return Test
# pylint: disable=redefined-builtin
if __name__ == "__main__":
  # Exhaustively generate one test method per combination of dtype, matrix
  # shape (with optional batch dimensions), norm order, reduction axis,
  # keep_dims flag and static/dynamic shape handling.
  for use_static_shape in False, True:
    for dtype in np.float32, np.float64, np.complex64, np.complex128:
      for rows in 2, 5:
        for cols in 2, 5:
          for batch in [], [2], [2, 3]:
            shape = batch + [rows, cols]
            for ord in "euclidean", "fro", 0.5, 1, 2, np.inf:
              for axis in [
                  None, (-2, -1), (-1, -2), -len(shape), 0, len(shape) - 1
              ]:
                for keep_dims in False, True:
                  # Encode every parameter in the generated test's name so
                  # failures identify the exact combination.
                  name = "%s_%s_ord_%s_axis_%s_%s_%s" % (
                      dtype.__name__, "_".join(map(str, shape)), ord, axis,
                      keep_dims, use_static_shape)
                  _AddTest(NormOpTest, "Norm_" + name,
                           _GetNormOpTest(dtype, shape, ord, axis, keep_dims,
                                          use_static_shape))

  test_lib.main()
| apache-2.0 |
whn09/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transform.py | 18 | 10559 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transform takes a list of `Series` and returns a namedtuple of `Series`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import collections
from .series import Series
from .series import TransformedSeries
from tensorflow.python.util import tf_inspect
def _make_list_of_series(x):
"""Converts `x` into a list of `Series` if possible.
Args:
x: a `Series`, a list of `Series` or `None`.
Returns:
`x` if it is a list of Series, `[x]` if `x` is a `Series`, `[]` if x is
`None`.
Raises:
TypeError: `x` is not a `Series` a list of `Series` or `None`.
"""
if x is None:
return []
elif isinstance(x, Series):
return [x]
elif isinstance(x, collections.Iterable):
for i, y in enumerate(x):
if not isinstance(y, Series):
raise TypeError(
"Expected a tuple or list of Series; entry %s has type %s." %
(i, type(y).__name__))
return list(x)
raise TypeError("Expected a Series or list of Series; got %s" %
type(x).__name__)
def _make_tuple_of_string(x):
"""Converts `x` into a list of `str` if possible.
Args:
x: a `str`, a list of `str`, a tuple of `str`, or `None`.
Returns:
`x` if it is a tuple of str, `tuple(x)` if it is a list of str,
`(x)` if `x` is a `str`, `()` if x is `None`.
Raises:
TypeError: `x` is not a `str`, a list or tuple of `str`, or `None`.
"""
if x is None:
return ()
elif isinstance(x, str):
return (x,)
elif isinstance(x, collections.Iterable):
for i, y in enumerate(x):
if not isinstance(y, str):
raise TypeError(
"Expected a tuple or list of strings; entry %s has type %s." %
(i, type(y).__name__))
return x
raise TypeError("Expected a string or list of strings or tuple of strings; " +
"got %s" % type(x).__name__)
def parameter(func):
  """Tag functions annotated with `@parameter` for later retrieval.

  Note that all `@parameter`s are automatically `@property`s as well.

  Args:
    func: the getter function to tag and wrap

  Returns:
    A `@property` whose getter function is marked with is_parameter = True
  """
  prop = property(func)
  # property keeps the original getter as fget, so tagging it here is
  # equivalent to tagging func before wrapping.
  prop.fget.is_parameter = True
  return prop
class Transform(object, metaclass=ABCMeta):
  """A function from a list of `Series` to a namedtuple of `Series`.

  Transforms map zero or more Series of a DataFrame to new Series.
  """

  # NOTE: the original declared `__metaclass__ = ABCMeta`, which is Python 2
  # syntax and is silently ignored by Python 3, so the abstract members below
  # were never enforced.  Declaring the metaclass in the class statement
  # restores enforcement.

  def __init__(self):
    # Cache for the namedtuple type built lazily by `return_type`.
    self._return_type = None

  @abstractproperty
  def name(self):
    """Name of the transform."""
    raise NotImplementedError()

  def parameters(self):
    """A dict of names to values of properties marked with `@parameter`."""
    property_param_names = [name
                            for name, func in tf_inspect.getmembers(type(self))
                            if (hasattr(func, "fget") and hasattr(
                                getattr(func, "fget"), "is_parameter"))]
    return {name: getattr(self, name) for name in property_param_names}

  @abstractproperty
  def input_valency(self):
    """The number of `Series` that the `Transform` should expect as input.

    `None` indicates that the transform can take a variable number of inputs.

    This function should depend only on `@parameter`s of this `Transform`.

    Returns:
      The number of expected inputs.
    """
    raise NotImplementedError()

  @property
  def output_names(self):
    """The names of `Series` output by the `Transform`.

    This function should depend only on `@parameter`s of this `Transform`.

    Returns:
      A tuple of names of outputs provided by this Transform.
    """
    return _make_tuple_of_string(self._output_names)

  @abstractproperty
  def _output_names(self):
    """The names of `Series` output by the `Transform`.

    This function should depend only on `@parameter`s of this `Transform`.

    Returns:
      Names of outputs provided by this Transform, as a string, tuple, or list.
    """
    raise NotImplementedError()

  @property
  def return_type(self):
    """Provides a namedtuple type which will be used for output.

    A Transform generates one or many outputs, named according to
    _output_names.  This method creates (and caches) a namedtuple type using
    those names as the keys.  The Transform output is then generated by
    instantiating an object of this type with corresponding values.

    Note this output type is used both for `__call__`, in which case the
    values are `TransformedSeries`, and for `apply_transform`, in which case
    the values are `Tensor`s.

    Returns:
      A namedtuple type fixing the order and names of the outputs of this
      transform.
    """
    if self._return_type is None:
      # TODO(soergel): pylint 3 chokes on this, but it is legit and preferred.
      # return_type_name = "%sReturnType" % type(self).__name__
      return_type_name = "ReturnType"
      self._return_type = collections.namedtuple(return_type_name,
                                                 self.output_names)
    return self._return_type

  def __str__(self):
    return self.name

  def __repr__(self):
    parameters_sorted = ["%s: %s" % (repr(k), repr(v))
                         for k, v in sorted(self.parameters().items())]
    parameters_joined = ", ".join(parameters_sorted)
    return "%s({%s})" % (self.name, parameters_joined)

  def __call__(self, input_series=None):
    """Apply this `Transform` to the provided `Series`, producing 'Series'.

    Args:
      input_series: None, a `Series`, or a list of input `Series`, acting as
        positional arguments.

    Returns:
      A namedtuple of the output `Series`.

    Raises:
      ValueError: `input_series` does not have expected length
    """
    input_series = _make_list_of_series(input_series)
    if len(input_series) != self.input_valency:
      raise ValueError("Expected %s input Series but received %s." %
                       (self.input_valency, len(input_series)))
    output_series = self._produce_output_series(input_series)
    # pylint: disable=not-callable
    return self.return_type(*output_series)

  @abstractmethod
  def _produce_output_series(self, input_series):
    """Applies the transformation to the `transform_input`.

    Args:
      input_series: a list of Series representing the input to
        the Transform.

    Returns:
      A list of Series representing the transformed output, in order
      corresponding to `_output_names`.
    """
    raise NotImplementedError()
class TensorFlowTransform(Transform, metaclass=ABCMeta):
  """A `Transform` whose outputs are computed by TensorFlow ops.

  Subclasses implement `_apply_transform`, mapping input `Tensor`s to output
  `Tensor`s; `build_transitive` recursively builds (and caches) the Tensors
  for the input `Series` first.
  """

  # NOTE: `__metaclass__ = ABCMeta` (Python 2 syntax) was silently ignored by
  # Python 3; declaring the metaclass in the class statement makes
  # `_apply_transform` actually abstract.

  def _check_output_tensors(self, output_tensors):
    """Helper for `build(...)`; verifies the output of `_build_transform`.

    Args:
      output_tensors: value returned by a call to `_build_transform`.

    Raises:
      TypeError: `output_tensors` is not an instance of this Transform's
        `return_type` namedtuple.
    """
    if not isinstance(output_tensors, self.return_type):
      raise TypeError(
          "Expected a NamedTuple of Tensors with elements %s; got %s." %
          (self.output_names, type(output_tensors).__name__))

  def _produce_output_series(self, input_series=None):
    """Construct one lazy `TransformedSeries` per declared output.

    Args:
      input_series: None, a `Series`, or a list of input `Series`, acting as
        positional arguments.

    Returns:
      A list of `TransformedSeries`, in `output_names` order.
    """
    return [TransformedSeries(input_series, self, output_name)
            for output_name in self.output_names]

  def build_transitive(self, input_series, cache=None, **kwargs):
    """Apply this `Transform` to the provided `Series`, producing 'Tensor's.

    Args:
      input_series: None, a `Series`, or a list of input `Series`, acting as
        positional arguments.
      cache: a dict from Series reprs to Tensors.
      **kwargs: Additional keyword arguments, unused here.

    Returns:
      A namedtuple of the output Tensors.

    Raises:
      ValueError: `input_series` does not have expected length
    """
    # pylint: disable=not-callable
    if cache is None:
      cache = {}
    if len(input_series) != self.input_valency:
      raise ValueError("Expected %s input Series but received %s." %
                       (self.input_valency, len(input_series)))
    # Build (or fetch from cache) the Tensors for the inputs first.
    input_tensors = [series.build(cache, **kwargs) for series in input_series]

    # Note we cache each output individually, not just the entire output
    # tuple.  This allows using the graph as the cache, since it can sensibly
    # cache only individual Tensors.
    output_reprs = [TransformedSeries.make_repr(input_series, self, output_name)
                    for output_name in self.output_names]
    output_tensors = [cache.get(output_repr) for output_repr in output_reprs]

    if None in output_tensors:
      # At least one output is missing from the cache: recompute all outputs
      # and cache each one under its repr.
      result = self._apply_transform(input_tensors, **kwargs)
      for output_name, output_repr in zip(self.output_names, output_reprs):
        cache[output_repr] = getattr(result, output_name)
    else:
      result = self.return_type(*output_tensors)

    self._check_output_tensors(result)
    return result

  @abstractmethod
  def _apply_transform(self, input_tensors, **kwargs):
    """Applies the transformation to the `transform_input`.

    Args:
      input_tensors: a list of Tensors representing the input to
        the Transform.
      **kwargs: Additional keyword arguments, unused here.

    Returns:
      A namedtuple of Tensors representing the transformed output.
    """
    raise NotImplementedError()
| apache-2.0 |
peterheim1/robbie_ros | robbie_bringup/nodes/right_follow_joint.py | 1 | 6251 | #!/usr/bin/env python
# freely inspired by http://www.ros.org/wiki/arbotix_python/follow_controller
import roslib
import rospy, actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from dynamixel_msgs.msg import JointState as JointControllerState
#from control_msgs.msg import JointControllerState
from std_msgs.msg import Float64
class JointSubscriber():
    """Subscribes to one dynamixel joint controller's state topic and caches
    the most recent state message."""

    def __init__(self, joint):
        self.ns = 'robbie'
        rospy.Subscriber('/' + joint + '/state', JointControllerState, self.joint_state_cb)
        rospy.loginfo('Subscribing for %s joint state.', joint)
        self.joint_name = joint
        # Placeholder until the first real message arrives.
        self.state = JointControllerState()
        # True once at least one state message has been received.
        self.received = False

    def joint_state_cb(self, msg):
        # Always keep the latest message.  The original guarded on
        # `self.received` and therefore stored only the FIRST message ever,
        # so get_position() returned a stale reading forever.
        self.state = msg
        self.received = True

    def get_position(self):
        """Return the most recently reported joint position."""
        return self.state.process_value
class JointCommander():
    """Publishes position setpoints to a single dynamixel joint controller."""

    def __init__(self, joint):
        self.joint_name = joint
        topic = '/' + joint + '/command'
        self.pub = rospy.Publisher(topic, Float64, queue_size=5)

    def command(self, pos):
        """Send one position setpoint to the joint's command topic."""
        message = 'publishing, joint ' + self.joint_name + ', value ' + str(pos)
        rospy.loginfo(message)
        self.pub.publish(pos)
class FollowController():
    """Action server that executes FollowJointTrajectory goals on the right
    arm by republishing each trajectory point to the per-joint dynamixel
    command topics.

    NOTE(review): execution is open-loop -- joint states are never read back
    to confirm a point was reached before the next one is sent.
    """

    def __init__(self):
        self.ns = 'right_arm_controller'
        # Joint controller names from the 'right_joints' ROS parameter
        # (defaults to '' if unset -- presumably always configured;
        # verify against the launch files).
        self.joints = rospy.get_param('right_joints', '')
        namespace = rospy.get_namespace()
        rospy.loginfo('Configured for ' + str(len(self.joints)) + 'joints')
        # One state subscriber and one command publisher per joint,
        # in the same order as self.joints.
        self.joint_subs = [JointSubscriber(name) for name in self.joints]
        self.joint_pubs = [JointCommander(name) for name in self.joints]
        self.joints_names = []
        for idx in range(0,len(self.joints)):
            self.joints_names.append(self.joints[idx])
        #rospy.logerr(self.joints_names)
        # action server
        #name = rospy.get_param('~controllers/'+name+'/action_name','follow_joint_trajectory')
        self.name = self.ns + '/follow_joint_trajectory'
        # auto_start=False: the server is started explicitly via startup().
        self.server = actionlib.SimpleActionServer(self.name, FollowJointTrajectoryAction, execute_cb=self.actionCb, auto_start=False)
        rospy.loginfo("Started FollowController ("+self.name+"). Joints: " + str(self.joints))

    def startup(self):
        """Start the action server (call once after construction)."""
        self.server.start()

    def actionCb(self, goal):
        """Action goal callback: validate the trajectory, execute it, and
        report success or abort on the action server."""
        rospy.loginfo(self.name + ": Action goal recieved.")
        traj = goal.trajectory
        # The goal must name exactly the joints this controller owns.
        if set(self.joints_names) != set(traj.joint_names):
            msg = "Trajectory joint names does not match action controlled joints." + str(traj.joint_names )
            rospy.logerr(msg)
            rospy.logerr(self.joints_names)
            self.server.set_aborted(text=msg)
            return
        if not traj.points:
            msg = "Trajectory empty."
            rospy.logerr(msg)
            self.server.set_aborted(text=msg)
            return
        # Early sanity check that every controlled joint appears in the goal;
        # the indexes are recomputed inside executeTrajectory().
        try:
            indexes = [traj.joint_names.index(joint) for joint in self.joints_names]
        except ValueError as val:
            msg = "Trajectory invalid."
            rospy.logerr(msg)
            self.server.set_aborted(text=msg)
            return
        if self.executeTrajectory(traj):
            self.server.set_succeeded()
            rospy.loginfo('Executed.')
        else:
            rospy.logerr('Execution failed.')
            self.server.set_aborted(text="Execution failed.")

    def executeTrajectory(self, traj):
        """Publish every trajectory point at its scheduled time.

        Returns True on (possibly preempted) completion, False if the
        trajectory does not contain all controlled joints.
        """
        rospy.loginfo("Executing trajectory with " + str(len(traj.points)) + ' point(s)')
        try:
            # Column index of each of our joints within the goal's ordering.
            indexes = [traj.joint_names.index(joint) for joint in self.joints_names]
        except ValueError as val:
            return False
        time = rospy.Time.now()
        start = traj.header.stamp
        #success = True
        for point in traj.points:
            if self.server.is_preempt_requested():
                rospy.loginfo('Stopping arm movement')
                self.server.set_preempted()
                #success = False
                break
            # Busy-wait (10 ms polls) until the trajectory's start stamp.
            while rospy.Time.now() + rospy.Duration(0.01) < start:
                rospy.sleep(0.01)
            # Re-order this point's positions to match our joint ordering.
            desired = [ point.positions[k] for k in indexes ]
            endtime = start + point.time_from_start
            # Send this point's setpoint to every joint at once.
            for i in range(0,len(self.joints)):
                self.joint_pubs[i].command(desired[i])
            # Busy-wait until this point's scheduled completion time.
            while rospy.Time.now() + rospy.Duration(0.01) < endtime:
                rospy.sleep(0.01)
        return True
#===============================================================================
# def active(self):
# """ Is controller overriding servo internal control? """
# return self.server.is_active() or self.executing
#
# def getDiagnostics(self):
# """ Get a diagnostics status. """
# msg = DiagnosticStatus()
# msg.name = self.name
# msg.level = DiagnosticStatus.OK
# msg.message = "OK"
# if self.active():
# msg.values.append(KeyValue("State", "Active"))
# else:
# msg.values.append(KeyValue("State", "Not Active"))
# return msg
#===============================================================================
def _main():
    """Initialize the ROS node, start the action server, and spin forever."""
    rospy.init_node('Right_arm_follow_joint_controller', anonymous=False)
    rospy.loginfo('right arm Follow joint action node.')
    controller = FollowController()
    rospy.loginfo('Starting right arm action server')
    controller.startup()
    rospy.loginfo('Spinning')
    rospy.spin()


if __name__ == '__main__':
    _main()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/networkx-1.7/build/lib.linux-i686-2.7/networkx/classes/multidigraph.py | 40 | 28598 | """Base class for MultiDiGraph."""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from copy import deepcopy
import networkx as nx
from networkx.classes.graph import Graph # for doctests
from networkx.classes.digraph import DiGraph
from networkx.classes.multigraph import MultiGraph
from networkx.exception import NetworkXError
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
class MultiDiGraph(MultiGraph,DiGraph):
"""A directed graph class that can store multiedges.
Multiedges are multiple edges between two nodes. Each edge
can hold optional data or attributes.
A MultiDiGraph holds directed edges. Self loops are allowed.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
DiGraph
MultiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.MultiDiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2,3])
>>> G.add_nodes_from(range(100,110))
>>> H=nx.Graph()
>>> H.add_path([0,1,2,3,4,5,6,7,8,9])
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
>>> G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. If an edge already exists, an additional
edge is created and stored using a key to identify the edge.
By default the key is the lowest unused integer.
>>> G.add_edges_from([(4,5,dict(route=282)), (4,5,dict(route=37))])
>>> G[4]
{5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.MultiDiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.node
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.node[1]
{'time': '5pm'}
>>> G.node[1]['room'] = 714
>>> del G.node[1]['room'] # remove attribute
>>> G.nodes(data=True)
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Warning: adding a node to G.node does not add it to the graph.
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3,4),(4,5)], color='red')
>>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2][0]['weight'] = 4.7
>>> G.edge[1][2][0]['weight'] = 4
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n<3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
>>> G[1] # adjacency dict keyed by neighbor to edge attributes
... # Note: you should not change this dict manually!
{2: {0: {'weight': 4}, 1: {'color': 'blue'}}}
The fastest way to traverse all edges of a graph is via
adjacency_iter(), but the edges() method is often more convenient.
>>> for n,nbrsdict in G.adjacency_iter():
... for nbr,keydict in nbrsdict.items():
... for key,eattr in keydict.items():
... if 'weight' in eattr:
... (n,nbr,eattr['weight'])
(1, 2, 4)
(2, 3, 8)
>>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ]
[(1, 2, 4), (2, 3, 8)]
**Reporting:**
Simple graph information is obtained using methods.
Iterator versions of many reporting methods exist for efficiency.
Methods exist for reporting nodes(), edges(), neighbors() and degree()
as well as the number of nodes and edges.
For details on these and other miscellaneous methods, see below.
"""
def add_edge(self, u, v, key=None, attr_dict=None, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. See examples below.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
key : hashable identifier, optional (default=lowest unused integer)
Used to distinguish multiedges between a pair of nodes.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
To replace/update edge data, use the optional key argument
to identify a unique edge. Otherwise a new edge will be created.
NetworkX algorithms designed for weighted graphs cannot use
multigraphs directly because it is not clear how to handle
multiedge weights. Convert to Graph using edge attribute
'weight' to enable weighted graph algorithms.
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.MultiDiGraph()
>>> e = (1,2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 2, key=0, weight=4) # update data for key=0
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
# add nodes
if u not in self.succ:
self.succ[u] = {}
self.pred[u] = {}
self.node[u] = {}
if v not in self.succ:
self.succ[v] = {}
self.pred[v] = {}
self.node[v] = {}
if v in self.succ[u]:
keydict=self.adj[u][v]
if key is None:
# find a unique integer key
# other methods might be better here?
key=len(keydict)
while key in keydict:
key+=1
datadict=keydict.get(key,{})
datadict.update(attr_dict)
keydict[key]=datadict
else:
# selfloops work this way without special treatment
if key is None:
key=0
datadict={}
datadict.update(attr_dict)
keydict={key:datadict}
self.succ[u][v] = keydict
self.pred[v][u] = keydict
def remove_edge(self, u, v, key=None):
"""Remove an edge between u and v.
Parameters
----------
u,v: nodes
Remove an edge between nodes u and v.
key : hashable identifier, optional (default=None)
Used to distinguish multiple edges between a pair of nodes.
If None remove a single (abritrary) edge between u and v.
Raises
------
NetworkXError
If there is not an edge between u and v, or
if there is no edge with the specified key.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_path([0,1,2,3])
>>> G.remove_edge(0,1)
>>> e = (1,2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
For multiple edges
>>> G = nx.MultiDiGraph()
>>> G.add_edges_from([(1,2),(1,2),(1,2)])
>>> G.remove_edge(1,2) # remove a single (arbitrary) edge
For edges with keys
>>> G = nx.MultiDiGraph()
>>> G.add_edge(1,2,key='first')
>>> G.add_edge(1,2,key='second')
>>> G.remove_edge(1,2,key='second')
"""
try:
d=self.adj[u][v]
except (KeyError):
raise NetworkXError(
"The edge %s-%s is not in the graph."%(u,v))
# remove the edge with specified data
if key is None:
d.popitem()
else:
try:
del d[key]
except (KeyError):
raise NetworkXError(
"The edge %s-%s with key %s is not in the graph."%(u,v,key))
if len(d)==0:
# remove the key entries if last edge
del self.succ[u][v]
del self.pred[v][u]
def edges_iter(self, nbunch=None, data=False, keys=False):
"""Return an iterator over the edges.
Edges are returned as tuples with optional data and keys
in the order (node, neighbor, key, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
See Also
--------
edges : return a list of edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_path([0,1,2,3])
>>> [e for e in G.edges_iter()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
[(0, 1, {}), (1, 2, {}), (2, 3, {})]
>>> list(G.edges_iter([0,2]))
[(0, 1), (2, 3)]
>>> list(G.edges_iter(0))
[(0, 1)]
"""
if nbunch is None:
nodes_nbrs = self.adj.items()
else:
nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,keydict in nbrs.items():
for key,data in keydict.items():
if keys:
yield (n,nbr,key,data)
else:
yield (n,nbr,data)
else:
for n,nbrs in nodes_nbrs:
for nbr,keydict in nbrs.items():
for key,data in keydict.items():
if keys:
yield (n,nbr,key)
else:
yield (n,nbr)
# alias out_edges to edges
out_edges_iter=edges_iter
    def out_edges(self, nbunch=None, keys=False, data=False):
        """Return a list of the outgoing edges.

        Edges are returned as tuples with optional data and keys
        in the order (node, neighbor, key, data).

        Parameters
        ----------
        nbunch : iterable container, optional (default= all nodes)
            A container of nodes.  The container will be iterated
            through once.
        data : bool, optional (default=False)
            If True, return edge attribute dict with each edge.
        keys : bool, optional (default=False)
            If True, return edge keys with each edge.

        Returns
        -------
        out_edges : list
            A list of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.

        Notes
        -----
        Nodes in nbunch that are not in the graph will be (quietly) ignored.
        For directed graphs edges() is the same as out_edges().

        See Also
        --------
        in_edges: return a list of incoming edges
        """
        # Simply materialize the out_edges_iter generator.
        return list(self.out_edges_iter(nbunch, keys=keys, data=data))
def in_edges_iter(self, nbunch=None, data=False, keys=False):
"""Return an iterator over the incoming edges.
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
in_edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
See Also
--------
edges_iter : return an iterator of edges
"""
if nbunch is None:
nodes_nbrs=self.pred.items()
else:
nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,keydict in nbrs.items():
for key,data in keydict.items():
if keys:
yield (nbr,n,key,data)
else:
yield (nbr,n,data)
else:
for n,nbrs in nodes_nbrs:
for nbr,keydict in nbrs.items():
for key,data in keydict.items():
if keys:
yield (nbr,n,key)
else:
yield (nbr,n)
    def in_edges(self, nbunch=None, keys=False, data=False):
        """Return a list of the incoming edges.

        Edges are returned as tuples with optional data and keys
        in the order (u, v, key, data).

        Parameters
        ----------
        nbunch : iterable container, optional (default= all nodes)
            A container of nodes.  The container will be iterated
            through once.
        data : bool, optional (default=False)
            If True, return edge attribute dict with each edge.
        keys : bool, optional (default=False)
            If True, return edge keys with each edge.

        Returns
        -------
        in_edges : list
            A list of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.

        See Also
        --------
        out_edges: return a list of outgoing edges
        """
        # Simply materialize the in_edges_iter generator.
        return list(self.in_edges_iter(nbunch, keys=keys, data=data))
def degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, degree).
The node degree is the number of edges adjacent to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.degree_iter([0,1]))
[(0, 1), (1, 2)]
"""
if nbunch is None:
nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items()))
else:
nodes_nbrs=zip(
((n,self.succ[n]) for n in self.nbunch_iter(nbunch)),
((n,self.pred[n]) for n in self.nbunch_iter(nbunch)))
if weight is None:
for (n,succ),(n2,pred) in nodes_nbrs:
indeg = sum([len(data) for data in pred.values()])
outdeg = sum([len(data) for data in succ.values()])
yield (n, indeg + outdeg)
else:
# edge weighted graph - degree is sum of nbr edge weights
for (n,succ),(n2,pred) in nodes_nbrs:
deg = sum([d.get(weight,1)
for data in pred.values()
for d in data.values()])
deg += sum([d.get(weight,1)
for data in succ.values()
for d in data.values()])
yield (n, deg)
def in_degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, in-degree).
The node in-degree is the number of edges pointing in to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, in-degree).
See Also
--------
degree, in_degree, out_degree, out_degree_iter
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.in_degree_iter(0)) # node 0 with degree 0
[(0, 0)]
>>> list(G.in_degree_iter([0,1]))
[(0, 0), (1, 1)]
"""
if nbunch is None:
nodes_nbrs=self.pred.items()
else:
nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
for n,nbrs in nodes_nbrs:
yield (n, sum([len(data) for data in nbrs.values()]) )
else:
# edge weighted graph - degree is sum of nbr edge weights
for n,pred in nodes_nbrs:
deg = sum([d.get(weight,1)
for data in pred.values()
for d in data.values()])
yield (n, deg)
def out_degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, out-degree).
The node out-degree is the number of edges pointing out of the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, out-degree).
See Also
--------
degree, in_degree, out_degree, in_degree_iter
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.out_degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.out_degree_iter([0,1]))
[(0, 1), (1, 1)]
"""
if nbunch is None:
nodes_nbrs=self.succ.items()
else:
nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
for n,nbrs in nodes_nbrs:
yield (n, sum([len(data) for data in nbrs.values()]) )
else:
for n,succ in nodes_nbrs:
deg = sum([d.get(weight,1)
for data in succ.values()
for d in data.values()])
yield (n, deg)
    def is_multigraph(self):
        """Return True if graph is a multigraph, False otherwise."""
        # Parallel edges are supported by this class, hence always True.
        return True
    def is_directed(self):
        """Return True if graph is directed, False otherwise."""
        # Edges carry direction (succ/pred are distinct), hence always True.
        return True
def to_directed(self):
"""Return a directed copy of the graph.
Returns
-------
G : MultiDiGraph
A deepcopy of the graph.
Notes
-----
If edges in both directions (u,v) and (v,u) exist in the
graph, attributes for the new undirected edge will be a combination of
the attributes of the directed edges. The edge data is updated
in the (arbitrary) order that the edges are encountered. For
more customized control of the edge attributes use add_edge().
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar G=DiGraph(D) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = nx.MultiDiGraph()
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)]
"""
return deepcopy(self)
def to_undirected(self, reciprocal=False):
"""Return an undirected representation of the digraph.
Parameters
----------
reciprocal : bool (optional)
If True only keep edges that appear in both directions
in the original digraph.
Returns
-------
G : MultiGraph
An undirected graph with the same name and nodes and
with edge (u,v,data) if either (u,v,data) or (v,u,data)
is in the digraph. If both edges exist in digraph and
their edge data is different, only one edge is created
with an arbitrary choice of which edge data to use.
You must check and correct for this manually if desired.
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
"""
H=MultiGraph()
H.name=self.name
H.add_nodes_from(self)
if reciprocal is True:
H.add_edges_from( (u,v,key,deepcopy(data))
for u,nbrs in self.adjacency_iter()
for v,keydict in nbrs.items()
for key,data in keydict.items()
if self.has_edge(v,u,key))
else:
H.add_edges_from( (u,v,key,deepcopy(data))
for u,nbrs in self.adjacency_iter()
for v,keydict in nbrs.items()
for key,data in keydict.items())
H.graph=deepcopy(self.graph)
H.node=deepcopy(self.node)
return H
def subgraph(self, nbunch):
"""Return the subgraph induced on nodes in nbunch.
The induced subgraph of the graph contains the nodes in nbunch
and the edges between those nodes.
Parameters
----------
nbunch : list, iterable
A container of nodes which will be iterated through once.
Returns
-------
G : Graph
A subgraph of the graph with the same edge attributes.
Notes
-----
The graph, edge or node attributes just point to the original graph.
So changes to the node or edge structure will not be reflected in
the original graph while changes to the attributes will.
To create a subgraph with its own copy of the edge/node attributes use:
nx.Graph(G.subgraph(nbunch))
If edge attributes are containers, a deep copy can be obtained using:
G.subgraph(nbunch).copy()
For an inplace reduction of a graph to a subgraph you can remove nodes:
G.remove_nodes_from([ n in G if n not in set(nbunch)])
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> H = G.subgraph([0,1,2])
>>> H.edges()
[(0, 1), (1, 2)]
"""
bunch = self.nbunch_iter(nbunch)
# create new graph and copy subgraph into it
H = self.__class__()
# copy node and attribute dictionaries
for n in bunch:
H.node[n]=self.node[n]
# namespace shortcuts for speed
H_succ=H.succ
H_pred=H.pred
self_succ=self.succ
self_pred=self.pred
# add nodes
for n in H:
H_succ[n]={}
H_pred[n]={}
# add edges
for u in H_succ:
Hnbrs=H_succ[u]
for v,edgedict in self_succ[u].items():
if v in H_succ:
# add both representations of edge: u-v and v-u
# they share the same edgedict
ed=edgedict.copy()
Hnbrs[v]=ed
H_pred[v][u]=ed
H.graph=self.graph
return H
def reverse(self, copy=True):
    """Return the reverse of the graph.

    The reverse is a graph with the same nodes and edges
    but with the directions of the edges reversed.

    Parameters
    ----------
    copy : bool optional (default=True)
        If True, return a new DiGraph holding the reversed edges.
        If False, the reverse graph is created in place using
        the original graph (this changes the original graph).
    """
    if not copy:
        # In-place reversal: swap the predecessor/successor tables and
        # re-point the adjacency alias at the (new) successor table.
        self.pred, self.succ = self.succ, self.pred
        self.adj = self.succ
        return self
    H = self.__class__(name="Reverse of (%s)" % self.name)
    H.add_nodes_from(self)
    H.add_edges_from(
        (v, u, k, deepcopy(d))
        for u, v, k, d in self.edges(keys=True, data=True))
    H.graph = deepcopy(self.graph)
    H.node = deepcopy(self.node)
    return H
| mit |
ZhaoCJ/django | django/contrib/admin/templatetags/log.py | 114 | 2125 | from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
    """Template node that stores recent admin LogEntry rows in a context var."""

    def __init__(self, limit, varname, user):
        # limit arrives as the raw token string (e.g. "10"); varname is the
        # context variable to populate; user is None, a numeric string, or the
        # name of a context variable holding a user object.
        self.limit, self.varname, self.user = limit, varname, user

    def __repr__(self):
        return "<GetAdminLog Node>"

    def render(self, context):
        if self.user is None:
            # BUG FIX: self.limit is a string token; slicing a queryset with a
            # string raises TypeError.  Coerce to int, as the branch below does.
            context[self.varname] = LogEntry.objects.all().select_related('content_type', 'user')[:int(self.limit)]
        else:
            user_id = self.user
            if not user_id.isdigit():
                # Not a literal id: resolve the context variable to a user.
                user_id = context[self.user].pk
            context[self.varname] = LogEntry.objects.filter(user__pk__exact=user_id).select_related('content_type', 'user')[:int(self.limit)]
        return ''
@register.tag
def get_admin_log(parser, token):
    """
    Populates a template variable with the admin log for the given criteria.

    Usage::

        {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}

    Examples::

        {% get_admin_log 10 as admin_log for_user 23 %}
        {% get_admin_log 10 as admin_log for_user user %}
        {% get_admin_log 10 as admin_log %}

    Note that ``context_var_containing_user_obj`` can be a hard-coded integer
    (user ID) or the name of a template context variable containing the user
    object whose ID you want.
    """
    bits = token.contents.split()
    if len(bits) < 4:
        raise template.TemplateSyntaxError(
            "'get_admin_log' statements require two arguments")
    if not bits[1].isdigit():
        raise template.TemplateSyntaxError(
            "First argument to 'get_admin_log' must be an integer")
    if bits[2] != 'as':
        raise template.TemplateSyntaxError(
            "Second argument to 'get_admin_log' must be 'as'")
    if len(bits) > 4 and bits[4] != 'for_user':
        raise template.TemplateSyntaxError(
            "Fourth argument to 'get_admin_log' must be 'for_user'")
    user_token = bits[5] if len(bits) > 5 else None
    return AdminLogNode(limit=bits[1], varname=bits[3], user=user_token)
| bsd-3-clause |
zhenshengcai/floodlight-hardware | src/main/python/debugserver.py | 110 | 2124 | #!/usr/bin/env python
import sys
from threading import currentThread
from SocketServer import BaseRequestHandler, TCPServer
from code import InteractiveConsole
_locals = None
class DebugLogger(object):
    """Fallback logger that prints everything to stdout.

    Used when the embedding application does not hand a real logger to
    run_server() via its ``locals`` mapping.
    """

    def do_print(self, *args):
        # Python 2 print statement: args space-separated, newline at the end.
        for i in args:
            print i,
        print

    # All severity levels share the same stdout implementation.
    info = do_print
    warn = do_print
    debug = do_print

# Module-level logger; run_server() replaces it if the caller supplies "log".
_log = DebugLogger()
class DebugConsole(InteractiveConsole):
    """Interactive Python console whose prompt and output travel over a socket."""

    def __init__(self, request):
        self.request = request
        InteractiveConsole.__init__(self, _locals)

    def raw_input(self, prompt):
        self.request.send(prompt)
        reply = self.request.recv(10000).rstrip()
        # A single EOT byte (Ctrl-D) means the client wants to disconnect.
        if len(reply) == 1 and ord(reply[0]) == 4:
            sys.exit()
        return reply

    def write(self, data):
        self.request.send(str(data))

    def write_nl(self, data):
        self.write("%s\r\n" % (data,))
class DebugServerHandler(BaseRequestHandler):
    """Handles one incoming debug connection with an interactive console."""

    def __init__(self, request, client_address, server):
        # Name the handling thread after the peer address BEFORE calling the
        # base __init__, because BaseRequestHandler.__init__ invokes handle()
        # synchronously.  NOTE(review): ``currentThread()._thread`` looks like
        # a Jython-specific attribute -- confirm against the target runtime.
        currentThread()._thread.setName("debugserver-%s:%d" % client_address)
        _log.debug('Open connection to DebugServer from %s:%d' % client_address)
        BaseRequestHandler.__init__(self, request, client_address, server)

    def handle(self):
        # Run an interactive console over the socket until the client exits.
        console = DebugConsole(self.request)
        # Route interactive expression results to the socket instead of stdout.
        sys.displayhook = console.write_nl
        console.interact('DebugServer')
        self.request.close()
class DebugServer(TCPServer):
    """TCP server that runs a DebugConsole for every connection."""

    allow_reuse_address = True
    daemon_threads = True

    def handle_error(self, request, client_address):
        # Treat any handler exception (including SystemExit from Ctrl-D) as a
        # normal disconnect.
        _log.debug('Closing connection to DebugServer from %s:%d' % client_address)
        request.close()
def run_server(port=6655, host='', locals=locals()):
    """Start the DebugServer and serve until interrupted.

    ``locals`` becomes the namespace of every debug console; if it contains a
    "log" entry, that object replaces the default stdout logger.  (The default
    value is evaluated once at definition time -- the module's own namespace.)
    """
    global _locals, _log
    currentThread()._thread.setName("debugserver-main")
    _locals = locals
    if "log" in locals:
        _log = locals["log"]
    _log.info("Starting DebugServer on %s:%d" % (host, port))
    server = DebugServer((host, port), DebugServerHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass

if __name__ == "__main__":
    run_server()
| apache-2.0 |
prataprc/tss | tss/tyrstyle.py | 1 | 3681 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
# Copyright (c) 2011 R Pratap Chakravarthy
from argparse import ArgumentParser
from os.path import isfile, join, dirname, basename
import pluggdapps
from pluggdapps.platform import Pluggdapps
from pluggdapps.plugin import ISettings
import tss
from tss.parser import TSSParser
def _option_parse() :
"""Parse the options and check whether the semantics are correct."""
parser = OptionParser(usage="usage: %prog [options] filename")
parser.add_option( '-o', '--outfile', dest='ofile', default=None,
help='Output html file to store translated result' )
parser.add_option( '-d', action='store_true', dest='dump',
help='Dump translation' )
parser.add_option( '-s', action='store_true', dest='show',
help='Show AST parse tree' )
parser.add_option( '-t', action='store_true', dest='generate',
help='Generate python executable' )
parser.add_option( '-x', action='store_true', dest='execute',
help='Executable and generate html' )
parser.add_option( '-a', dest='args', default='[]',
help='Argument to template' )
parser.add_option( '-c', dest='context', default='{}',
help='Context to template' )
parser.add_option( '-g', dest='debug', default='0',
help='Debug level for PLY parser' )
parser.add_option( '--version', action='store_true', dest='version',
help='Version information of the package' )
options, args = parser.parse_args()
return options, args
def main():
    """Original command-line entry point.

    NOTE(review): a second ``def main()`` later in this module shadows this
    definition; one of the two is presumably dead code -- confirm which entry
    point is intended before removing either.
    """
    options, args = _option_parse()
    if options.version:
        # BUG FIX: was a Python 2 ``print`` statement in a module that
        # otherwise uses the print() function -- a SyntaxError on Python 3.
        print(tss.__version__)
    elif args and isfile(args[0]):
        ttlloc = args.pop(0)
        tss.tss_cmdline(ttlloc, _options=options)
    elif int(options.debug):
        TSSParser(tssconfig={}, debug=int(options.debug))
def main():
    # BUG FIX: the original line read ``def main()`` with no colon, which is a
    # SyntaxError and made the whole module unimportable.
    # NOTE(review): this redefinition shadows the earlier main(); also
    # ``mainoptions``, ``lexical``, ``yaccer``, ``Context`` and ``tayra`` are
    # not defined or imported in this module -- confirm the intended helpers
    # (``tayra.translatefile`` is possibly meant to be ``tss``).
    argparser = mainoptions()
    options = argparser.parse_args()
    pa = Pluggdapps.boot(None)
    # Initialize plugins
    setts = {
        'lex_debug': int(options.debug),
        'yacc_debug': int(options.debug),
        'debug': True,
    }
    compiler = pa.query_plugin(pa, ISettings, 'tsscompiler', settings=setts)
    if options.version:
        print(tss.__version__)
    elif options.test:
        pass
    elif options.ttllex and options.ttlfile:
        print("Lexing file %r ..." % options.ttlfile)
        lexical(pa, options)
    elif options.dump and options.ttlfile:
        print("Parsing and dumping file %r ..." % options.ttlfile)
        ast = yaccer(pa, options, debuglevel=int(options.debug))
        dumptext = ast.dump(context=Context())
        text = open(options.ttlfile, encoding='utf-8-sig').read()
        if dumptext != text:
            # Persist both versions for manual diffing before failing hard.
            open('dump', 'w').write(dumptext)
            open('text', 'w').write(text)
            assert False
        print("Dump of AST matches the original text :)")
    elif options.show and options.ttlfile:
        print("Parsing and describing file %r ..." % options.ttlfile)
        ast = yaccer(pa, options, debuglevel=int(options.debug))
        ast.show()
    elif options.ttlfile and isfile(options.ttlfile):
        print("Translating file %r ..." % options.ttlfile)
        tayra.translatefile(options.ttlfile, compiler, options)

if __name__ == '__main__':
    main()
| gpl-3.0 |
deep0892/jitendrachatbot | node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_gyp.py | 2618 | 4756 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')

# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
  """Replace the text of a matched comment with '#' padding of equal length."""
  prefix, hash_char, body = matchobj.group(1), matchobj.group(2), matchobj.group(3)
  return prefix + hash_char + '#' * len(body)
def mask_comments(input):
  """Mask comment text ('#' to end of line) so brace counting skips it."""
  comment_re = re.compile(r'(.*?)(#)(.*)')
  return [comment_re.sub(comment_replace, line) for line in input]
def quote_replace(matchobj):
  """Replace the contents of a matched quoted string with 'x' padding."""
  prefix = matchobj.group(1)
  quote = matchobj.group(2)
  body = matchobj.group(3)
  return "%s%s%s%s" % (prefix, quote, 'x' * len(body), quote)
def mask_quotes(input):
  """Mask the contents of quoted strings so brace counting skips them."""
  quoted_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
  return [quoted_re.sub(quote_replace, line) for line in input]
def do_split(input, masked_input, search_re):
  """Split each line wherever search_re matches its masked counterpart.

  A literal two-character marker (backslash + 'n') is inserted after
  group(1) of every match, then lines are split on that marker.  Returns
  (split_lines, split_masked_lines).
  """
  marker = r'\n'
  out_lines = []
  out_masked = []
  for line, masked in zip(input, masked_input):
    match = search_re.match(masked)
    while match:
      cut = len(match.group(1))
      line = line[:cut] + marker + line[cut:]
      masked = masked[:cut] + marker + masked[cut:]
      match = search_re.match(masked)
    out_lines.extend(line.split(marker))
    out_masked.extend(masked.split(marker))
  return (out_lines, out_masked)
def split_double_braces(input):
  """Masks out the quotes and comments, and then splits appropriate
  lines (lines that match the double_*_brace re's above) before
  indenting them below.

  These are used to split lines which have multiple braces on them, so
  that the indentation looks prettier when all laid out (e.g. closing
  braces make a nice diagonal line).
  """
  double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
  double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

  # Masked copies keep match positions aligned with the real lines while
  # hiding braces inside strings and comments.
  masked_input = mask_quotes(input)
  masked_input = mask_comments(masked_input)

  (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
  (output, mask_output) = do_split(output, mask_output, double_close_brace_re)

  return output
def count_braces(line):
  """Return (delta, after) for a line.

  delta is the net open-minus-close brace count (comments and quoted
  strings excluded); after tells the caller whether the indent change
  should be applied after printing the line rather than before.
  """
  stripline = COMMENT_RE.sub(r'', line)
  stripline = QUOTE_RE.sub(r"''", stripline)
  delta = 0
  for ch in stripline:
    if ch in '[({':
      delta += 1
    elif ch in '])}':
      delta -= 1
  after = delta > 0
  # A closing brace preceded by other content on the line stays with the
  # previous indentation level, so defer the unindent until after printing.
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  if delta < 0 and closing_prefix_re.match(stripline):
    after = True
  return (delta, after)
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
  """Read GYP source from argv[1] (or stdin) and pretty-print it to stdout."""
  if len(sys.argv) > 1:
    # Fix: close the input file deterministically instead of leaking the
    # descriptor until interpreter shutdown.
    with open(sys.argv[1]) as f:
      data = f.read().splitlines()
  else:
    data = sys.stdin.read().splitlines()

  # Split up the double braces.
  lines = split_double_braces(data)

  # Indent and print the output.
  prettyprint_input(lines)
  return 0


if __name__ == '__main__':
  sys.exit(main())
| mit |
centrumholdings/cthulhubot | tests/example_project/tests/test_api.py | 1 | 2383 | from urllib2 import urlopen
import os
from cthulhubot.views import create_job_assignment
from django.conf import settings
from cthulhubot.models import Job, BuildComputer, ProjectClient
from django.utils.simplejson import dumps
from tests.helpers import BuildmasterTestCase
class TestMasterApi(BuildmasterTestCase):
    """
    Test our buildmaster scheduler plugin
    """

    def setUp(self):
        # Build the whole fixture: a localhost build computer, an
        # auto-discovered Debian-package job, and an assignment wiring
        # computer + job + project together; then boot master and client.
        super(TestMasterApi, self).setUp()
        self.computer_model = self.computer = BuildComputer.objects.create(hostname="localhost", basedir=self.base_directory)
        self.job = job = Job.objects.create(slug='cthulhubot-debian-package-creation').get_domain_object()
        self.job.auto_discovery()
        self.assignment = create_job_assignment(
            computer=self.computer_model,
            job=job,
            project=self.project,
            params={
                'commands': [
                    {
                        'command': 'cthulhubot-git',
                        'parameters': {
                            'repository': '/tmp/repo.git',
                        }
                    },
                    # Empty dicts keep the defaults for the middle commands.
                    {},
                    {},
                    {
                        'command': 'cthulhubot-debian-package-ftp-upload',
                        'parameters': {
                            'ftp_user': 'xxx',
                            'ftp_password': 'xxx',
                            'ftp_directory': '',
                            'ftp_host': ''
                        }
                    }
                ]}
        )
        self.project_client = self.assignment.get_client()
        self.build_directory = os.path.join(self.base_directory, self.project_client.get_identifier())
        # Commit before starting the master -- presumably so the separate
        # buildmaster process sees the fixture rows; verify against runner.
        self.transaction.commit()
        self.buildmaster.start()
        self.project_client.create_build_directory()
        self.project_client.start()

    def test_rewrite_this_test(self):
        # POST a force-build request to the master's HTTP API and expect a
        # literal 'OK' response body.
        port = self.buildmaster.api_port
        s = 'http://%s:%s/force_build' % (settings.BUILDMASTER_NETWORK_NAME, port)
        data = {
            'changeset': 'FETCH_HEAD',
            'builder': ProjectClient.objects.all()[0].get_identifier()
        }
        f = urlopen(s, dumps(data))
        self.assert_equals('OK', f.read())

    def tearDown(self):
        # Stop the client and remove the assignment before the base teardown
        # shuts the buildmaster down.
        self.project_client.stop()
        self.assignment.delete()
        super(TestMasterApi, self).tearDown()
| bsd-3-clause |
himleyb85/django | django/utils/timesince.py | 409 | 2671 | from __future__ import unicode_literals
import calendar
import datetime
from django.utils.html import avoid_wrapping
from django.utils.timezone import is_aware, utc
from django.utils.translation import ugettext, ungettext_lazy
# (seconds-per-unit, translatable format) pairs, ordered from the largest
# unit to the smallest; timesince() picks the first unit with a non-zero
# count and at most the one directly below it.
TIMESINCE_CHUNKS = (
    (60 * 60 * 24 * 365, ungettext_lazy('%d year', '%d years')),
    (60 * 60 * 24 * 30, ungettext_lazy('%d month', '%d months')),
    (60 * 60 * 24 * 7, ungettext_lazy('%d week', '%d weeks')),
    (60 * 60 * 24, ungettext_lazy('%d day', '%d days')),
    (60 * 60, ungettext_lazy('%d hour', '%d hours')),
    (60, ungettext_lazy('%d minute', '%d minutes'))
)
def timesince(d, now=None, reversed=False):
    """
    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
    then "0 minutes" is returned.

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    Adapted from
    http://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)

    if not now:
        # Match d's timezone-awareness so the subtraction below is valid.
        now = datetime.datetime.now(utc if is_aware(d) else None)

    delta = (d - now) if reversed else (now - d)
    # Deal with leap years by subtracting the number of leap days.
    delta -= datetime.timedelta(calendar.leapdays(d.year, now.year))
    # Ignore microseconds.
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return avoid_wrapping(ugettext('0 minutes'))
    # Find the largest unit with a non-zero count.
    for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
        count = since // seconds
        if count != 0:
            break
    result = avoid_wrapping(name % count)
    if i + 1 < len(TIMESINCE_CHUNKS):
        # Now get the second item, the unit directly below the first.
        seconds2, name2 = TIMESINCE_CHUNKS[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            result += ugettext(', ') + avoid_wrapping(name2 % count2)
    return result
def timeuntil(d, now=None):
    """
    Like timesince, but measures the time remaining until the given time
    instead of the time elapsed since it.
    """
    return timesince(d, now=now, reversed=True)
| bsd-3-clause |
darkenk/scripts | android/project_creator/venv/lib/python3.5/site-packages/pip/_vendor/requests/structures.py | 615 | 3012 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
from .compat import OrderedDict
# COMPAT FIX: ``MutableMapping``/``Mapping`` moved to ``collections.abc`` in
# Python 3.3 and were removed from ``collections`` in 3.10; fall back to the
# old location on Python 2.
try:
    from collections.abc import Mapping as _Mapping, MutableMapping as _MutableMapping
except ImportError:
    from collections import Mapping as _Mapping, MutableMapping as _MutableMapping


class CaseInsensitiveDict(_MutableMapping):
    """A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Maps lowercased key -> (actual key, value); OrderedDict preserves
        # insertion order of the case-sensitive keys.
        self._store = OrderedDict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, _Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
class LookupDict(dict):
    """Dictionary lookup object."""

    def __init__(self, name=None):
        super(LookupDict, self).__init__()
        self.name = name

    def __repr__(self):
        return '<lookup \'%s\'>' % (self.name)

    def __getitem__(self, key):
        # Lookups read the instance attribute namespace, never the dict
        # storage; missing keys fall through to None instead of raising.
        return self.__dict__.get(key)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| bsd-3-clause |
BT-ojossen/sale-workflow | sale_quotation_sourcing_stock_route_transit/model/sale_order_line.py | 34 | 1998 | # -*- coding: utf-8 -*-
#
# Author: Alexandre Fayolle, Leonardo Pistone
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, api
class SaleOrderLine(models.Model):
    _inherit = 'sale.order.line'

    @api.model
    def _get_po_location_usage(self, purchase_order_line):
        """Map a transit delivery location of the PO to a concrete usage.

        When the purchase order delivers to a transit stock.location:
        a warehouse that uses that location as its input transit location
        makes the usage 'internal'; a warehouse that uses it as its output
        transit location makes it 'customer'.  Any other usage computed by
        the parent implementation is returned unchanged.
        """
        usage = super(SaleOrderLine, self)._get_po_location_usage(
            purchase_order_line)
        if usage != 'transit':
            return usage
        Warehouse = self.env['stock.warehouse']
        location_id = purchase_order_line.order_id.location_id.id
        if Warehouse.search([('wh_transit_in_loc_id', '=', location_id)],
                            limit=1):
            return 'internal'
        if Warehouse.search([('wh_transit_out_loc_id', '=', location_id)],
                            limit=1):
            return 'customer'
        return usage
| agpl-3.0 |
sylarcp/anita | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/__init__.py | 838 | 1384 | '''
Debian and other distributions "unbundle" requests' vendored dependencies, and
rewrite all imports to use the global versions of ``urllib3`` and ``chardet``.
The problem with this is that not only requests itself imports those
dependencies, but third-party code outside of the distros' control too.
In reaction to these problems, the distro maintainers replaced
``requests.packages`` with a magical "stub module" that imports the correct
modules. The implementations were varying in quality and all had severe
problems. For example, a symlink (or hardlink) that links the correct modules
into place introduces problems regarding object identity, since you now have
two modules in `sys.modules` with the same API, but different identities::
requests.packages.urllib3 is not urllib3
With version ``2.5.2``, requests started to maintain its own stub, so that
distro-specific breakage would be reduced to a minimum, even though the whole
issue is not requests' fault in the first place. See
https://github.com/kennethreitz/requests/pull/2375 for the corresponding pull
request.
'''
from __future__ import absolute_import
import sys

try:
    from . import urllib3
except ImportError:
    # Unbundled (distro) install: fall back to the system-wide package.
    import urllib3
# Register under the vendored name so ``requests.packages.urllib3`` imports
# resolve to the same module object either way.
sys.modules['%s.urllib3' % __name__] = urllib3

try:
    from . import chardet
except ImportError:
    import chardet
sys.modules['%s.chardet' % __name__] = chardet
| mit |
makermade/arm_android-19_arm-linux-androideabi-4.8 | lib/python2.7/collections.py | 35 | 25883 | __all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from itertools import imap as _imap
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.

    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Re-initialization keeps the existing linked list intact.
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        return dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, _ = self.__map.pop(key)
        link_prev[1] = link_next                        # update link_prev[NEXT]
        link_next[0] = link_prev                        # update link_next[PREV]

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root[1]                                  # start at the first node
        while curr is not root:
            yield curr[2]                               # yield the curr[KEY]
            curr = curr[1]                              # move to next node

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root[0]                                  # start at the last node
        while curr is not root:
            yield curr[2]                               # yield the curr[KEY]
            curr = curr[0]                              # move to previous node

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        # Reset the sentinel to an empty circular list and drop the key map.
        root = self.__root
        root[:] = [root, root, None]
        self.__map.clear()
        dict.clear(self)

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) pairs in od'
        for k in self:
            yield (k, self[k])

    update = MutableMapping.update

    __update = update # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        key = next(reversed(self) if last else iter(self))
        value = self.pop(key)
        return key, value

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an OrderedDict
        # (directly or indirectly) contains itself; keyed per-instance and
        # per-thread.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Exclude the implementation details (__root/__map) from the pickle.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.

        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return dict.__eq__(self, other) and all(_imap(_eq, self, other))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        'od.__ne__(y) <==> od!=y'
        return not self == other

    # -- the following methods support python 3.x style dictionary views --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
################################################################################
### namedtuple
################################################################################
_class_template = '''\
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return '{typename}({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self))
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
{field_defs}
'''
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessable by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split()
field_names = map(str, field_names)
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not all(c.isalnum() or c=='_' for c in name)
or _iskeyword(name)
or not name
or name[0].isdigit()
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain '
'alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with '
'a number: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
class_definition = _class_template.format(
typename = typename,
field_names = tuple(field_names),
num_fields = len(field_names),
arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
repr_fmt = ', '.join(_repr_template.format(name=name)
for name in field_names),
field_defs = '\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names))
)
if verbose:
print class_definition
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
OrderedDict=OrderedDict, _property=property, _tuple=tuple)
try:
exec class_definition in namespace
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + class_definition)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
########################################################################
### Counter
########################################################################
class Counter(dict):
    '''Dict subclass for counting hashable items. Sometimes called a bag
    or multiset. Elements are stored as dictionary keys and their counts
    are stored as dictionary values.

    >>> c = Counter('abcdeabcdabcaba')  # count elements from a string

    >>> c.most_common(3)                # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c)                       # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements()))   # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values())                 # total of all counts
    15

    >>> c['a']                          # count of letter 'a'
    5
    >>> for elem in 'shazam':           # update counts from an iterable
    ...     c[elem] += 1                # by adding 1 to each element's count
    >>> c['a']                          # now there are seven 'a'
    7
    >>> del c['b']                      # remove all 'b'
    >>> c['b']                          # now there are zero 'b'
    0

    >>> d = Counter('simsalabim')       # make another counter
    >>> c.update(d)                     # add in the second counter
    >>> c['a']                          # now there are nine 'a'
    9

    >>> c.clear()                       # empty the counter
    >>> c
    Counter()

    Note: If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:

    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2                     # reduce the count of 'b' by two
    >>> c.most_common()                 # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]

    '''
    # References:
    #   http://en.wikipedia.org/wiki/Multiset
    #   http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    #   http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    #   http://code.activestate.com/recipes/259174/
    #   Knuth, TAOCP Vol. II section 4.6.3

    def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object. And if given, count elements
        from an input iterable. Or, initialize the count from another mapping
        of elements to their counts.

        >>> c = Counter()                 # a new, empty counter
        >>> c = Counter('gallahad')       # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
        >>> c = Counter(a=4, b=2)         # a new counter from keyword args

        '''
        super(Counter, self).__init__()
        self.update(iterable, **kwds)

    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0

    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least. If n is None, then list all element counts.

        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]

        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
        # For small n, a heap is cheaper than sorting the whole mapping.
        return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))

    def elements(self):
        '''Iterator over elements repeating each as many times as its count.

        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']

        # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements():     # loop over factors
        ...     product *= factor                       # and multiply them
        >>> product
        1836

        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.

        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        return _chain.from_iterable(_starmap(_repeat, self.iteritems()))

    # Override dict methods where necessary

    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined.  Use Counter(iterable) instead.')

    def update(self, iterable=None, **kwds):
        '''Like dict.update() but add counts instead of replacing them.

        Source can be an iterable, a dictionary, or another Counter instance.

        >>> c = Counter('which')
        >>> c.update('witch')           # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d)                 # add elements from another counter
        >>> c['h']                      # four 'h' in which, witch, and watch
        4

        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in some of the original untouched counts
        # being mixed-in with all of the other counts for a mishmash that
        # doesn't have a straightforward interpretation in most counting
        # contexts.  Instead, we implement straight-addition.  Both the inputs
        # and outputs are allowed to contain zero and negative counts.
        if iterable is not None:
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    for elem, count in iterable.iteritems():
                        self[elem] = self_get(elem, 0) + count
                else:
                    super(Counter, self).update(iterable) # fast path when counter is empty
            else:
                self_get = self.get
                for elem in iterable:
                    self[elem] = self_get(elem, 0) + 1
        if kwds:
            self.update(kwds)

    def subtract(self, iterable=None, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero.  Both the inputs and outputs are
        allowed to contain zero and negative counts.

        Source can be an iterable, a dictionary, or another Counter instance.

        >>> c = Counter('which')
        >>> c.subtract('witch')             # subtract elements from another iterable
        >>> c.subtract(Counter('watch'))    # subtract elements from another counter
        >>> c['h']                          # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w']                          # 1 in which, minus 1 in witch, minus 1 in watch
        -1

        '''
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)

    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)

    def __reduce__(self):
        # Pickle support: rebuild from a plain dict of counts.
        return self.__class__, (dict(self),)

    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            super(Counter, self).__delitem__(elem)

    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
        return '%s({%s})' % (self.__class__.__name__, items)

    # Multiset-style mathematical operations discussed in:
    #       Knuth TAOCP Volume II section 4.6.3 exercise 19
    #       and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    #       c += Counter()

    def __add__(self, other):
        '''Add counts from two counters.

        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        # Elements only present in `other` still contribute.
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.

        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        # Negative counts in `other` become positive in the difference.
        for elem, count in other.items():
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result

    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.

        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.

        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        # Only keys present in self can be in the intersection.
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result
if __name__ == '__main__':
    # Python 2 self-test / demo of namedtuple (uses print statements and
    # cPickle, so this block only runs under Python 2).
    # verify that instances can be pickled
    from cPickle import loads, dumps
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))

    # test and demonstrate ability to override methods
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()
        @property
        def hypot(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)

    for p in Point(3, 4), Point(14, 5/7.):
        print p

    # demonstrate subclassing that replaces _make()/_replace() with faster,
    # unchecked versions
    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))

    print Point(11, 22)._replace(x=100)

    # extend an existing named tuple with an extra field
    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print Point3D.__doc__

    # run the doctests embedded in this module
    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print TestResults(*doctest.testmod())
| gpl-2.0 |
bchavez/rethinkdb | test/common/http_support/jinja2/testsuite/filters.py | 394 | 19169 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.filters
~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Markup, Environment
from jinja2._compat import text_type, implements_to_string
env = Environment()
class FilterTestCase(JinjaTestCase):
    """Unit tests for Jinja2's built-in template filters.

    Each test renders a small template through the module-level ``env``
    (or a locally constructed ``Environment`` when autoescaping or other
    settings matter) and compares the rendered output with the expected
    string.
    """

    def test_filter_calling(self):
        rv = env.call_filter('sum', [1, 2, 3])
        self.assert_equal(rv, 6)

    def test_capitalize(self):
        tmpl = env.from_string('{{ "foo bar"|capitalize }}')
        assert tmpl.render() == 'Foo bar'

    def test_center(self):
        tmpl = env.from_string('{{ "foo"|center(9) }}')
        assert tmpl.render() == ' foo '

    def test_default(self):
        tmpl = env.from_string(
            "{{ missing|default('no') }}|{{ false|default('no') }}|"
            "{{ false|default('no', true) }}|{{ given|default('no') }}"
        )
        assert tmpl.render(given='yes') == 'no|False|no|yes'

    def test_dictsort(self):
        tmpl = env.from_string(
            '{{ foo|dictsort }}|'
            '{{ foo|dictsort(true) }}|'
            '{{ foo|dictsort(false, "value") }}'
        )
        out = tmpl.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
        assert out == ("[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]|"
                       "[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]|"
                       "[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]")

    def test_batch(self):
        tmpl = env.from_string("{{ foo|batch(3)|list }}|"
                               "{{ foo|batch(3, 'X')|list }}")
        out = tmpl.render(foo=list(range(10)))
        assert out == ("[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
                       "[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]")

    def test_slice(self):
        tmpl = env.from_string('{{ foo|slice(3)|list }}|'
                               '{{ foo|slice(3, "X")|list }}')
        out = tmpl.render(foo=list(range(10)))
        assert out == ("[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
                       "[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]")

    def test_escape(self):
        tmpl = env.from_string('''{{ '<">&'|escape }}''')
        out = tmpl.render()
        assert out == '<">&'

    def test_striptags(self):
        tmpl = env.from_string('''{{ foo|striptags }}''')
        out = tmpl.render(foo=' <p>just a small \n <a href="#">'
                          'example</a> link</p>\n<p>to a webpage</p> '
                          '<!-- <p>and some commented stuff</p> -->')
        assert out == 'just a small example link to a webpage'

    def test_filesizeformat(self):
        tmpl = env.from_string(
            '{{ 100|filesizeformat }}|'
            '{{ 1000|filesizeformat }}|'
            '{{ 1000000|filesizeformat }}|'
            '{{ 1000000000|filesizeformat }}|'
            '{{ 1000000000000|filesizeformat }}|'
            '{{ 100|filesizeformat(true) }}|'
            '{{ 1000|filesizeformat(true) }}|'
            '{{ 1000000|filesizeformat(true) }}|'
            '{{ 1000000000|filesizeformat(true) }}|'
            '{{ 1000000000000|filesizeformat(true) }}'
        )
        out = tmpl.render()
        self.assert_equal(out, (
            '100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|'
            '1000 Bytes|976.6 KiB|953.7 MiB|931.3 GiB'
        ))

    def test_filesizeformat_issue59(self):
        tmpl = env.from_string(
            '{{ 300|filesizeformat }}|'
            '{{ 3000|filesizeformat }}|'
            '{{ 3000000|filesizeformat }}|'
            '{{ 3000000000|filesizeformat }}|'
            '{{ 3000000000000|filesizeformat }}|'
            '{{ 300|filesizeformat(true) }}|'
            '{{ 3000|filesizeformat(true) }}|'
            '{{ 3000000|filesizeformat(true) }}'
        )
        out = tmpl.render()
        self.assert_equal(out, (
            '300 Bytes|3.0 kB|3.0 MB|3.0 GB|3.0 TB|300 Bytes|'
            '2.9 KiB|2.9 MiB'
        ))

    def test_first(self):
        tmpl = env.from_string('{{ foo|first }}')
        out = tmpl.render(foo=list(range(10)))
        assert out == '0'

    def test_float(self):
        tmpl = env.from_string('{{ "42"|float }}|'
                               '{{ "ajsghasjgd"|float }}|'
                               '{{ "32.32"|float }}')
        out = tmpl.render()
        assert out == '42.0|0.0|32.32'

    def test_format(self):
        tmpl = env.from_string('''{{ "%s|%s"|format("a", "b") }}''')
        out = tmpl.render()
        assert out == 'a|b'

    def test_indent(self):
        tmpl = env.from_string('{{ foo|indent(2) }}|{{ foo|indent(2, true) }}')
        text = '\n'.join([' '.join(['foo', 'bar'] * 2)] * 2)
        out = tmpl.render(foo=text)
        assert out == ('foo bar foo bar\n foo bar foo bar| '
                       'foo bar foo bar\n foo bar foo bar')

    def test_int(self):
        tmpl = env.from_string('{{ "42"|int }}|{{ "ajsghasjgd"|int }}|'
                               '{{ "32.32"|int }}')
        out = tmpl.render()
        assert out == '42|0|32'

    def test_join(self):
        tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
        out = tmpl.render()
        assert out == '1|2|3'
        # with autoescaping, unsafe items are escaped but Markup passes through
        env2 = Environment(autoescape=True)
        tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
        assert tmpl.render() == '<foo><span>foo</span>'

    def test_join_attribute(self):
        class User(object):
            def __init__(self, username):
                self.username = username
        tmpl = env.from_string('''{{ users|join(', ', 'username') }}''')
        assert tmpl.render(users=map(User, ['foo', 'bar'])) == 'foo, bar'

    def test_last(self):
        tmpl = env.from_string('''{{ foo|last }}''')
        out = tmpl.render(foo=list(range(10)))
        assert out == '9'

    def test_length(self):
        tmpl = env.from_string('''{{ "hello world"|length }}''')
        out = tmpl.render()
        assert out == '11'

    def test_lower(self):
        tmpl = env.from_string('''{{ "FOO"|lower }}''')
        out = tmpl.render()
        assert out == 'foo'

    def test_pprint(self):
        from pprint import pformat
        tmpl = env.from_string('''{{ data|pprint }}''')
        data = list(range(1000))
        assert tmpl.render(data=data) == pformat(data)

    def test_random(self):
        # random output cannot be pinned down; just check membership
        tmpl = env.from_string('''{{ seq|random }}''')
        seq = list(range(100))
        for _ in range(10):
            assert int(tmpl.render(seq=seq)) in seq

    def test_reverse(self):
        tmpl = env.from_string('{{ "foobar"|reverse|join }}|'
                               '{{ [1, 2, 3]|reverse|list }}')
        assert tmpl.render() == 'raboof|[3, 2, 1]'

    def test_string(self):
        x = [1, 2, 3, 4, 5]
        tmpl = env.from_string('''{{ obj|string }}''')
        assert tmpl.render(obj=x) == text_type(x)

    def test_title(self):
        tmpl = env.from_string('''{{ "foo bar"|title }}''')
        assert tmpl.render() == "Foo Bar"
        tmpl = env.from_string('''{{ "foo's bar"|title }}''')
        assert tmpl.render() == "Foo's Bar"
        tmpl = env.from_string('''{{ "foo bar"|title }}''')
        assert tmpl.render() == "Foo Bar"
        tmpl = env.from_string('''{{ "f bar f"|title }}''')
        assert tmpl.render() == "F Bar F"
        tmpl = env.from_string('''{{ "foo-bar"|title }}''')
        assert tmpl.render() == "Foo-Bar"
        tmpl = env.from_string('''{{ "foo\tbar"|title }}''')
        assert tmpl.render() == "Foo\tBar"
        tmpl = env.from_string('''{{ "FOO\tBAR"|title }}''')
        assert tmpl.render() == "Foo\tBar"

    def test_truncate(self):
        tmpl = env.from_string(
            '{{ data|truncate(15, true, ">>>") }}|'
            '{{ data|truncate(15, false, ">>>") }}|'
            '{{ smalldata|truncate(15) }}'
        )
        out = tmpl.render(data='foobar baz bar' * 1000,
                          smalldata='foobar baz bar')
        assert out == 'foobar baz barf>>>|foobar baz >>>|foobar baz bar'

    def test_upper(self):
        tmpl = env.from_string('{{ "foo"|upper }}')
        assert tmpl.render() == 'FOO'

    def test_urlize(self):
        tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
        assert tmpl.render() == 'foo <a href="http://www.example.com/">'\
            'http://www.example.com/</a> bar'

    def test_wordcount(self):
        tmpl = env.from_string('{{ "foo bar baz"|wordcount }}')
        assert tmpl.render() == '3'

    def test_block(self):
        tmpl = env.from_string('{% filter lower|escape %}<HEHE>{% endfilter %}')
        assert tmpl.render() == '<hehe>'

    def test_chaining(self):
        tmpl = env.from_string('''{{ ['<foo>', '<bar>']|first|upper|escape }}''')
        assert tmpl.render() == '<FOO>'

    def test_sum(self):
        tmpl = env.from_string('''{{ [1, 2, 3, 4, 5, 6]|sum }}''')
        assert tmpl.render() == '21'

    def test_sum_attributes(self):
        tmpl = env.from_string('''{{ values|sum('value') }}''')
        assert tmpl.render(values=[
            {'value': 23},
            {'value': 1},
            {'value': 18},
        ]) == '42'

    def test_sum_attributes_nested(self):
        tmpl = env.from_string('''{{ values|sum('real.value') }}''')
        assert tmpl.render(values=[
            {'real': {'value': 23}},
            {'real': {'value': 1}},
            {'real': {'value': 18}},
        ]) == '42'

    def test_sum_attributes_tuple(self):
        tmpl = env.from_string('''{{ values.items()|sum('1') }}''')
        assert tmpl.render(values={
            'foo': 23,
            'bar': 1,
            'baz': 18,
        }) == '42'

    def test_abs(self):
        tmpl = env.from_string('''{{ -1|abs }}|{{ 1|abs }}''')
        assert tmpl.render() == '1|1', tmpl.render()

    def test_round_positive(self):
        tmpl = env.from_string('{{ 2.7|round }}|{{ 2.1|round }}|'
                               "{{ 2.1234|round(3, 'floor') }}|"
                               "{{ 2.1|round(0, 'ceil') }}")
        assert tmpl.render() == '3.0|2.0|2.123|3.0', tmpl.render()

    def test_round_negative(self):
        tmpl = env.from_string('{{ 21.3|round(-1)}}|'
                               "{{ 21.3|round(-1, 'ceil')}}|"
                               "{{ 21.3|round(-1, 'floor')}}")
        assert tmpl.render() == '20.0|30.0|20.0',tmpl.render()

    def test_xmlattr(self):
        tmpl = env.from_string("{{ {'foo': 42, 'bar': 23, 'fish': none, "
                               "'spam': missing, 'blub:blub': '<?>'}|xmlattr }}")
        out = tmpl.render().split()
        # none/missing values must be dropped; order is not guaranteed
        assert len(out) == 3
        assert 'foo="42"' in out
        assert 'bar="23"' in out
        assert 'blub:blub="<?>"' in out

    def test_sort1(self):
        tmpl = env.from_string('{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}')
        assert tmpl.render() == '[1, 2, 3]|[3, 2, 1]'

    def test_sort2(self):
        tmpl = env.from_string('{{ "".join(["c", "A", "b", "D"]|sort) }}')
        assert tmpl.render() == 'AbcD'

    def test_sort3(self):
        tmpl = env.from_string('''{{ ['foo', 'Bar', 'blah']|sort }}''')
        assert tmpl.render() == "['Bar', 'blah', 'foo']"

    def test_sort4(self):
        @implements_to_string
        class Magic(object):
            def __init__(self, value):
                self.value = value
            def __str__(self):
                return text_type(self.value)
        tmpl = env.from_string('''{{ items|sort(attribute='value')|join }}''')
        assert tmpl.render(items=map(Magic, [3, 2, 4, 1])) == '1234'

    def test_groupby(self):
        tmpl = env.from_string('''
        {%- for grouper, list in [{'foo': 1, 'bar': 2},
                                  {'foo': 2, 'bar': 3},
                                  {'foo': 1, 'bar': 1},
                                  {'foo': 3, 'bar': 4}]|groupby('foo') -%}
            {{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
        {%- endfor %}''')
        assert tmpl.render().split('|') == [
            "1: 1, 2: 1, 1",
            "2: 2, 3",
            "3: 3, 4",
            ""
        ]

    def test_groupby_tuple_index(self):
        tmpl = env.from_string('''
        {%- for grouper, list in [('a', 1), ('a', 2), ('b', 1)]|groupby(0) -%}
            {{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
        {%- endfor %}''')
        assert tmpl.render() == 'a:1:2|b:1|'

    def test_groupby_multidot(self):
        class Date(object):
            def __init__(self, day, month, year):
                self.day = day
                self.month = month
                self.year = year
        class Article(object):
            def __init__(self, title, *date):
                self.date = Date(*date)
                self.title = title
        articles = [
            Article('aha', 1, 1, 1970),
            Article('interesting', 2, 1, 1970),
            Article('really?', 3, 1, 1970),
            Article('totally not', 1, 1, 1971)
        ]
        tmpl = env.from_string('''
        {%- for year, list in articles|groupby('date.year') -%}
            {{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
        {%- endfor %}''')
        assert tmpl.render(articles=articles).split('|') == [
            '1970[aha][interesting][really?]',
            '1971[totally not]',
            ''
        ]

    def test_filtertag(self):
        tmpl = env.from_string("{% filter upper|replace('FOO', 'foo') %}"
                               "foobar{% endfilter %}")
        assert tmpl.render() == 'fooBAR'

    def test_replace(self):
        env = Environment()
        tmpl = env.from_string('{{ string|replace("o", 42) }}')
        assert tmpl.render(string='<foo>') == '<f4242>'
        # with autoescaping both the needle and the replacement are escaped
        env = Environment(autoescape=True)
        tmpl = env.from_string('{{ string|replace("o", 42) }}')
        assert tmpl.render(string='<foo>') == '<f4242>'
        tmpl = env.from_string('{{ string|replace("<", 42) }}')
        assert tmpl.render(string='<foo>') == '42foo>'
        tmpl = env.from_string('{{ string|replace("o", ">x<") }}')
        assert tmpl.render(string=Markup('foo')) == 'f>x<>x<'

    def test_forceescape(self):
        tmpl = env.from_string('{{ x|forceescape }}')
        assert tmpl.render(x=Markup('<div />')) == u'<div />'

    def test_safe(self):
        env = Environment(autoescape=True)
        tmpl = env.from_string('{{ "<div>foo</div>"|safe }}')
        assert tmpl.render() == '<div>foo</div>'
        tmpl = env.from_string('{{ "<div>foo</div>" }}')
        assert tmpl.render() == '<div>foo</div>'

    def test_urlencode(self):
        env = Environment(autoescape=True)
        tmpl = env.from_string('{{ "Hello, world!"|urlencode }}')
        assert tmpl.render() == 'Hello%2C%20world%21'
        tmpl = env.from_string('{{ o|urlencode }}')
        assert tmpl.render(o=u"Hello, world\u203d") == "Hello%2C%20world%E2%80%BD"
        assert tmpl.render(o=(("f", 1),)) == "f=1"
        assert tmpl.render(o=(('f', 1), ("z", 2))) == "f=1&z=2"
        assert tmpl.render(o=((u"\u203d", 1),)) == "%E2%80%BD=1"
        assert tmpl.render(o={u"\u203d": 1}) == "%E2%80%BD=1"
        assert tmpl.render(o={0: 1}) == "0=1"

    def test_simple_map(self):
        env = Environment()
        tmpl = env.from_string('{{ ["1", "2", "3"]|map("int")|sum }}')
        self.assertEqual(tmpl.render(), '6')

    def test_attribute_map(self):
        class User(object):
            def __init__(self, name):
                self.name = name
        env = Environment()
        users = [
            User('john'),
            User('jane'),
            User('mike'),
        ]
        tmpl = env.from_string('{{ users|map(attribute="name")|join("|") }}')
        self.assertEqual(tmpl.render(users=users), 'john|jane|mike')

    def test_empty_map(self):
        env = Environment()
        tmpl = env.from_string('{{ none|map("upper")|list }}')
        self.assertEqual(tmpl.render(), '[]')

    def test_simple_select(self):
        env = Environment()
        tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|select("odd")|join("|") }}')
        self.assertEqual(tmpl.render(), '1|3|5')

    def test_bool_select(self):
        env = Environment()
        tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|select|join("|") }}')
        self.assertEqual(tmpl.render(), '1|2|3|4|5')

    def test_simple_reject(self):
        env = Environment()
        tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|reject("odd")|join("|") }}')
        self.assertEqual(tmpl.render(), '2|4')

    def test_bool_reject(self):
        env = Environment()
        tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|reject|join("|") }}')
        self.assertEqual(tmpl.render(), 'None|False|0')

    def test_simple_select_attr(self):
        class User(object):
            def __init__(self, name, is_active):
                self.name = name
                self.is_active = is_active
        env = Environment()
        users = [
            User('john', True),
            User('jane', True),
            User('mike', False),
        ]
        tmpl = env.from_string('{{ users|selectattr("is_active")|'
                               'map(attribute="name")|join("|") }}')
        self.assertEqual(tmpl.render(users=users), 'john|jane')

    def test_simple_reject_attr(self):
        class User(object):
            def __init__(self, name, is_active):
                self.name = name
                self.is_active = is_active
        env = Environment()
        users = [
            User('john', True),
            User('jane', True),
            User('mike', False),
        ]
        tmpl = env.from_string('{{ users|rejectattr("is_active")|'
                               'map(attribute="name")|join("|") }}')
        self.assertEqual(tmpl.render(users=users), 'mike')

    def test_func_select_attr(self):
        class User(object):
            def __init__(self, id, name):
                self.id = id
                self.name = name
        env = Environment()
        users = [
            User(1, 'john'),
            User(2, 'jane'),
            User(3, 'mike'),
        ]
        tmpl = env.from_string('{{ users|selectattr("id", "odd")|'
                               'map(attribute="name")|join("|") }}')
        self.assertEqual(tmpl.render(users=users), 'john|mike')

    def test_func_reject_attr(self):
        class User(object):
            def __init__(self, id, name):
                self.id = id
                self.name = name
        env = Environment()
        users = [
            User(1, 'john'),
            User(2, 'jane'),
            User(3, 'mike'),
        ]
        tmpl = env.from_string('{{ users|rejectattr("id", "odd")|'
                               'map(attribute="name")|join("|") }}')
        self.assertEqual(tmpl.render(users=users), 'jane')
def suite():
    """Assemble and return the unittest suite for the filter tests."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(FilterTestCase))
    return tests
| agpl-3.0 |
chokribr/PIST | modules/bibclassify/lib/bibclassify_acronym_analyzer.py | 32 | 9391 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Bibclassify acronym analyser.
"""
import re
ACRONYM_BRACKETS_REGEX = re.compile("[([] ?(([a-zA-Z]\.?){2,})s? ?[)\]]")
DOTS_REGEX = re.compile("\.")
MAXIMUM_LEVEL = 2
STOPLIST = ("and", "of", "for", "the", "to", "do", "de", "theory",
"model", "radiation", "scheme", "representation")
# INTERFACE
def get_acronyms(fulltext):
    """Find acronyms and their expansions in *fulltext*.

    Returns a dictionary mapping each acronym to a list of
    ``(expansion, level)`` pairs, where *level* is the confidence level
    (0 = best, 5 = weakest) of the matching heuristic that found the
    expansion.
    """
    acronyms = {}
    for m in ACRONYM_BRACKETS_REGEX.finditer(fulltext):
        # The acronym candidate is the bracketed token, inner dots removed.
        acronym = DOTS_REGEX.sub("", m.group(1))
        # The expansion candidate is the 80 characters before the bracket.
        potential_expansion = fulltext[m.start() - 80:m.start()].replace("\n",
            " ")
        # Strip
        # NOTE(review): "\1\2" and "\1" below are NOT raw strings, so they
        # insert control characters \x01\x02 instead of backreferences —
        # looks like a latent bug; confirm intent before changing behavior.
        potential_expansion = re.sub("(\W).(\W)", "\1\2", potential_expansion)
        potential_expansion = re.sub("(\w)\(s\)\W", "\1", potential_expansion)
        potential_expansion = re.sub("""[^\w'"]+$""", "", potential_expansion)
        potential_expansion = re.sub("[[(].+[\])]", "", potential_expansion)
        potential_expansion = re.sub(" {2,}", " ", potential_expansion)

        # LEVEL 0: expansion between quotes
        # Double quotes
        match = re.search(""""([^"]+)["]$""", potential_expansion)
        if match is None:
            # Single quotes
            match = re.search("""'([^"]+)[']$""", potential_expansion)
        if match is not None:
            if acronym in match.group(1):
                continue
            # Each acronym letter must start a word of the quoted text.
            pattern = ""
            for char in acronym[:-1]:
                pattern += "%s\w+\W*" % char
            pattern += "%s\w+" % acronym[-1]
            if re.search(pattern, match.group(1), re.I) is not None:
                _add_expansion_to_acronym_dict(acronym, match.group(1), 0,
                    acronyms)
                continue

        # Pattern matching words whose initials spell the acronym, anchored
        # at the end of the candidate expansion.
        pattern = "\W("
        for char in acronym[:-1]:
            pattern += "%s\w+\W+" % char
        pattern += "%s\w+)$" % acronym[-1]

        # LEVEL 1: expansion with uppercase initials
        match = re.search(pattern, potential_expansion)
        if match is not None:
            _add_expansion_to_acronym_dict(acronym, match.group(1), 1,
                acronyms)
            continue

        # LEVEL 2: expansion with initials (case-insensitive)
        match = re.search(pattern, potential_expansion, re.I)
        if match is not None:
            _add_expansion_to_acronym_dict(acronym, match.group(1), 2,
                acronyms)
            continue

        # LEVEL 3: expansion with initials and STOPLIST
        # Retry after removing common stop words from the candidate.
        potential_expansion_stripped = " ".join([word for word in
            _words(potential_expansion) if word not in STOPLIST])
        match = re.search(pattern, potential_expansion_stripped, re.I)
        if match is not None:
            first_expansion_word = re.search("\w+", match.group(1)).group()
            start = potential_expansion.lower().rfind(first_expansion_word)
            _add_expansion_to_acronym_dict(acronym,
                potential_expansion[start:], 3, acronyms)
            continue

        # LEVEL 4: expansion with fuzzy initials and stoplist
        # Walk the acronym letters and the candidate words backwards,
        # tolerating doubled letters and mid-word matches.
        reversed_words = _words(potential_expansion_stripped)
        reversed_words.reverse()
        reversed_acronym = list(acronym.lower())
        reversed_acronym.reverse()
        index0 = 0
        index1 = 0
        word = ""
        try:
            while index0 < len(reversed_acronym) and index1 < len(reversed_words):
                word = reversed_words[index1]
                if index0 + 1 < len(reversed_words):
                    next_word = reversed_words[index0 + 1]
                else:
                    next_word = "_"
                char = reversed_acronym[index0]
                if index0 + 1 < len(reversed_acronym):
                    next_char = reversed_acronym[index0 + 1]
                else:
                    next_char = "_"
                # Doubled acronym letter covered by one word (e.g. "oo" in "book").
                if char == next_char and \
                        word.startswith(char) and \
                        word.count(char) > 1 and \
                        not next_word.startswith(char):
                    index0 += 2
                    index1 += 1
                if word.startswith(char):
                    index0 += 1
                    index1 += 1
                elif char in word and \
                        not word.endswith(char) and \
                        word.startswith(next_char):
                    index0 += 2
                    index1 += 1
                else:
                    word = ""
                    break
        except IndexError:
            word = ""
        # NOTE(review): `char` is unbound here when the while loop never ran
        # (empty acronym or no candidate words) — would raise NameError; confirm.
        if not word.startswith(char):
            word = ""
        if word:
            start = potential_expansion.lower().rfind(word)
            _add_expansion_to_acronym_dict(acronym,
                potential_expansion[start:], 4, acronyms)
            continue

        # LEVEL 5: expansion with fuzzy initials (no stoplist)
        # Same walk as LEVEL 4 but against the unstripped candidate text.
        reversed_words = _words(potential_expansion.lower())
        reversed_words.reverse()
        reversed_acronym = list(acronym.lower())
        reversed_acronym.reverse()
        index0 = 0
        index1 = 0
        word = ""
        try:
            while index0 < len(reversed_acronym) and index1 < len(reversed_words):
                word = reversed_words[index1]
                if index0 + 1 < len(reversed_words):
                    next_word = reversed_words[index0 + 1]
                else:
                    next_word = ""
                char = reversed_acronym[index0]
                if index0 + 1 < len(reversed_acronym):
                    next_char = reversed_acronym[index0 + 1]
                else:
                    next_char = ""
                if char == next_char and \
                        word.startswith(char) and \
                        word.count(char) > 1 and \
                        not next_word.startswith(char):
                    index0 += 2
                    index1 += 1
                if word.startswith(char):
                    index0 += 1
                    index1 += 1
                elif char in word and \
                        not word.endswith(char) and \
                        word.startswith(next_char):
                    index0 += 2
                    index1 += 1
                else:
                    word = ""
                    break
        except IndexError:
            word = ""
        if not word.startswith(char):
            word = ""
        if word:
            start = potential_expansion.lower().rfind(word)
            _add_expansion_to_acronym_dict(acronym,
                potential_expansion[start:], 5, acronyms)
            continue
    return acronyms
# PRIVATE METHODS
def _words(expression):
"""Returns a list of words of the expression."""
return re.findall("\w+", expression.lower())
def _add_expansion_to_acronym_dict(acronym, expansion, level, dictionary):
"""Adds an acronym to the dictionary. Takes care of avoiding
duplicates and keeping the expansion marked with the best score."""
if len(acronym) >= len(expansion) or acronym in expansion:
return
for punctuation in re.findall("\W", expansion):
# The expansion contains non-basic punctuation. It is probable
# that it is invalid. Discard it.
if punctuation not in (",", " ", "-"):
return False
if acronym in dictionary:
add = True
for stored_expansion, stored_level in dictionary[acronym]:
if _equivalent_expansions(stored_expansion, expansion):
if level < stored_level:
dictionary[acronym].remove((stored_expansion, stored_level))
break
else:
add = False
if add:
dictionary[acronym].append((expansion, level))
return True
else:
dictionary.setdefault(acronym, []).append((expansion, level))
return True
return False
def _equivalent_expansions(expansion1, expansion2):
    """Return True when the two expansions are considered equivalent.

    Two expansions match when they split into exactly the same
    lower-cased words, or when those words agree on their first five
    characters (so minor suffix differences are ignored).
    """
    tokens1 = re.findall("\w+", expansion1.lower())
    tokens2 = re.findall("\w+", expansion2.lower())
    if tokens1 == tokens2:
        return True
    key1 = "".join(token[:5] for token in tokens1)
    key2 = "".join(token[:5] for token in tokens2)
    return key1 == key2
if __name__ == "__main__":
print get_acronyms("asymptomatically de Sitter(dS). and what one large relative symmetric (LRS) which always has general relativity (GR)")
| gpl-2.0 |
nickhdamico/py | lib/requests/auth.py | 120 | 6669 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
    """Returns a Basic Auth string."""
    # Join the credentials, base64-encode them as latin1 bytes, and
    # prefix the scheme name.
    credentials = '%s:%s' % (username, password)
    token = b64encode(credentials.encode('latin1')).strip()
    return 'Basic ' + to_native_string(token)
class AuthBase(object):
    """Base class that all auth implementations derive from"""

    def __call__(self, r):
        # Subclasses must attach their credentials to ``r`` and return it;
        # the base class only enforces the callable contract.
        raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __call__(self, r):
        # Overwrite any existing Authorization header with our own.
        credentials = _basic_auth_str(self.username, self.password)
        r.headers['Authorization'] = credentials
        return r
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same credentials as Basic auth, different header name.
        proxy_credentials = _basic_auth_str(self.username, self.password)
        r.headers['Proxy-Authorization'] = proxy_credentials
        return r
class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password
        # State carried across the 401 challenge/response round trips.
        self.last_nonce = ''
        self.nonce_count = 0
        self.chal = {}
        self.pos = None
        self.num_401_calls = 1

    def build_digest_header(self, method, url):
        """Build the ``Authorization`` header value for *method* and
        *url* from the server challenge stored in ``self.chal``.

        Returns None when the challenge uses an unsupported algorithm
        or an unsupported qop.
        """
        realm = self.chal['realm']
        nonce = self.chal['nonce']
        qop = self.chal.get('qop')
        algorithm = self.chal.get('algorithm')
        opaque = self.chal.get('opaque')

        if algorithm is None:
            _algorithm = 'MD5'
        else:
            _algorithm = algorithm.upper()
        # BUGFIX: initialise hash_utf8 so the ``is None`` check below
        # returns None for unsupported algorithms instead of raising
        # UnboundLocalError.
        hash_utf8 = None
        # lambdas assume digest modules are imported at the top level
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif _algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8

        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        if hash_utf8 is None:
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        path = p_parsed.path
        if p_parsed.query:
            path += '?' + p_parsed.query

        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        HA1 = hash_utf8(A1)
        HA2 = hash_utf8(A2)

        if nonce == self.last_nonce:
            self.nonce_count += 1
        else:
            self.nonce_count = 1
        ncvalue = '%08x' % self.nonce_count

        # Client nonce: hash of the request count, server nonce, current
        # time and some random bytes.
        s = str(self.nonce_count).encode('utf-8')
        s += nonce.encode('utf-8')
        s += time.ctime().encode('utf-8')
        s += os.urandom(8)

        cnonce = (hashlib.sha1(s).hexdigest()[:16])
        noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
        if _algorithm == 'MD5-SESS':
            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

        if qop is None:
            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
        elif qop == 'auth' or 'auth' in qop.split(','):
            respdig = KD(HA1, noncebit)
        else:
            # XXX handle auth-int.
            return None

        self.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if algorithm:
            base += ', algorithm="%s"' % algorithm
        if entdig:
            base += ', digest="%s"' % entdig
        if qop:
            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_redirect(self, r, **kwargs):
        """Reset num_401_calls counter on redirects."""
        if r.is_redirect:
            self.num_401_calls = 1

    def handle_401(self, r, **kwargs):
        """Takes the given response and tries digest-auth, if needed."""
        if self.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            r.request.body.seek(self.pos)
        num_401_calls = getattr(self, 'num_401_calls', 1)
        s_auth = r.headers.get('www-authenticate', '')

        if 'digest' in s_auth.lower() and num_401_calls < 2:
            self.num_401_calls += 1
            pat = re.compile(r'digest ', flags=re.IGNORECASE)
            self.chal = parse_dict_header(pat.sub('', s_auth, count=1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.raw.release_conn()
            prep = r.request.copy()
            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            prep.prepare_cookies(prep._cookies)

            prep.headers['Authorization'] = self.build_digest_header(
                prep.method, prep.url)
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        self.num_401_calls = 1
        return r

    def __call__(self, r):
        # If we have a saved nonce, skip the 401
        if self.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        try:
            self.pos = r.body.tell()
        except AttributeError:
            # In the case of HTTPDigestAuth being reused and the body of
            # the previous request was a file-like object, pos has the
            # file position of the previous body. Ensure it's set to
            # None.
            self.pos = None
        r.register_hook('response', self.handle_401)
        r.register_hook('response', self.handle_redirect)
        return r
| gpl-3.0 |
home-assistant/home-assistant | tests/components/powerwall/test_binary_sensor.py | 8 | 2556 | """The binary sensor tests for the powerwall platform."""
from unittest.mock import patch
from homeassistant.components.powerwall.const import DOMAIN
from homeassistant.const import CONF_IP_ADDRESS, STATE_ON
from .mocks import _mock_powerwall_with_fixtures
from tests.common import MockConfigEntry
async def test_sensors(hass):
    """Test creation of the binary sensors.

    The four per-sensor copies of the same state/attribute assertions
    are folded into one data-driven loop.
    """
    mock_powerwall = await _mock_powerwall_with_fixtures(hass)

    config_entry = MockConfigEntry(domain=DOMAIN, data={CONF_IP_ADDRESS: "1.2.3.4"})
    config_entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.powerwall.config_flow.Powerwall",
        return_value=mock_powerwall,
    ), patch(
        "homeassistant.components.powerwall.Powerwall", return_value=mock_powerwall
    ):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    # Map each expected entity to the attributes it must expose.
    expected_sensors = {
        "binary_sensor.grid_status": {
            "friendly_name": "Grid Status",
            "device_class": "power",
        },
        "binary_sensor.powerwall_status": {
            "friendly_name": "Powerwall Status",
            "device_class": "power",
        },
        "binary_sensor.powerwall_connected_to_tesla": {
            "friendly_name": "Powerwall Connected to Tesla",
            "device_class": "connectivity",
        },
        "binary_sensor.powerwall_charging": {
            "friendly_name": "Powerwall Charging",
            "device_class": "battery_charging",
        },
    }
    for entity_id, expected_attributes in expected_sensors.items():
        state = hass.states.get(entity_id)
        assert state.state == STATE_ON
        # Only test for a subset of attributes in case
        # HA changes the implementation and a new one appears
        assert all(
            item in state.attributes.items()
            for item in expected_attributes.items()
        )
| apache-2.0 |
breadchris/CTFd | CTFd/scoreboard.py | 1 | 2698 | from flask import current_app as app, session, render_template, jsonify, Blueprint, redirect, url_for, request
from CTFd.utils import unix_time, authed, get_config
from CTFd.models import db, Teams, Solves, Challenges
scoreboard = Blueprint('scoreboard', __name__)
@scoreboard.route('/scoreboard')
def scoreboard_view():
    """Render the HTML scoreboard page.

    Teams are ranked by total score (descending); ties are broken by
    which team reached its score first (earliest last-solve wins).
    """
    # Optionally hide the scoreboard from anonymous visitors.
    if get_config('view_scoreboard_if_authed') and not authed():
        return redirect(url_for('auth.login', next=request.path))
    score = db.func.sum(Challenges.value).label('score')
    quickest = db.func.max(Solves.date).label('quickest')
    # NOTE: ``== None`` is required by SQLAlchemy to emit "IS NULL";
    # banned teams are excluded from the ranking.
    teams = db.session.query(Solves.teamid, Teams.name, score)\
        .join(Teams)\
        .join(Challenges)\
        .filter(Teams.banned == None)\
        .group_by(Solves.teamid).order_by(score.desc(), quickest)
    db.session.close()
    return render_template('scoreboard.html', teams=teams)
@scoreboard.route('/scores')
def scores():
    """Return the current scoreboard standings as JSON."""
    if get_config('view_scoreboard_if_authed') and not authed():
        return redirect(url_for('auth.login', next=request.path))
    score = db.func.sum(Challenges.value).label('score')
    quickest = db.func.max(Solves.date).label('quickest')
    standings = db.session.query(Solves.teamid, Teams.name, score)\
        .join(Teams)\
        .join(Challenges)\
        .filter(Teams.banned == None)\
        .group_by(Solves.teamid).order_by(score.desc(), quickest)
    db.session.close()
    # Positions are 1-based in the emitted payload.
    json = {
        'standings': [
            {'pos': position + 1, 'id': team.teamid, 'team': team.name,
             'score': int(team.score)}
            for position, team in enumerate(standings)
        ]
    }
    return jsonify(json)
@scoreboard.route('/top/<count>')
def topteams(count):
    """Return the solve history of the top *count* teams as JSON.

    Unparsable values, values above 20 and negative values all fall
    back to a count of 10.
    """
    if get_config('view_scoreboard_if_authed') and not authed():
        return redirect(url_for('auth.login', next=request.path))
    try:
        count = int(count)
    except (ValueError, TypeError):
        # Narrowed from a bare ``except:`` so unrelated errors (e.g.
        # KeyboardInterrupt) are no longer swallowed.
        count = 10
    if count > 20 or count < 0:
        count = 10
    json = {'scores': {}}
    score = db.func.sum(Challenges.value).label('score')
    quickest = db.func.max(Solves.date).label('quickest')
    teams = db.session.query(Solves.teamid, Teams.name, score)\
        .join(Teams)\
        .join(Challenges)\
        .filter(Teams.banned == None)\
        .group_by(Solves.teamid).order_by(score.desc(), quickest)\
        .limit(count)
    for team in teams:
        solves = Solves.query.filter_by(teamid=team.teamid).all()
        json['scores'][team.name] = []
        for x in solves:
            json['scores'][team.name].append({
                'id': x.teamid,
                'chal': x.chalid,
                'team': x.teamid,
                'value': x.chal.value,
                'time': unix_time(x.date)
            })
    return jsonify(json)
| apache-2.0 |
timlinux/inasafe | safe/report/expressions/html_report.py | 1 | 23685 | # coding=utf-8
"""QGIS Expressions which are available in the QGIS GUI interface."""
import codecs
from os.path import dirname, join, exists
from xml.etree import ElementTree as ET
from qgis.core import (
qgsfunction,
QgsExpressionContextUtils,
QgsProject,
QgsLayerTreeGroup,
QgsLayerTreeLayer)
from safe.definitions.constants import MULTI_EXPOSURE_ANALYSIS_FLAG
from safe.definitions.exposure import (
exposure_population,
exposure_road,
exposure_structure,
exposure_place,
exposure_land_cover)
from safe.definitions.extra_keywords import extra_keyword_analysis_type
from safe.definitions.provenance import provenance_layer_analysis_impacted
from safe.definitions.reports.components import (
analysis_question_component,
general_report_component,
mmi_detail_component,
analysis_detail_component,
action_checklist_component,
notes_assumptions_component,
minimum_needs_component,
aggregation_result_component,
aggregation_postprocessors_component,
analysis_provenance_details_simplified_component)
from safe.utilities.i18n import tr
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.utilities import generate_expression_help
__copyright__ = "Copyright 2017, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
group = tr('InaSAFE - HTML Elements')
container_format = (
u'<div class="container">'
u' {section_content}'
u'</div>'
)
def get_analysis_dir(exposure_key=None):
    """Retrieve an output directory of an analysis/ImpactFunction from a
    multi exposure analysis/ImpactFunction based on exposure type.

    :param exposure_key: An exposure keyword.
    :type exposure_key: str

    :return: A directory containing analysis outputs, or None when no
        multi-exposure group or matching report layer exists.
    :rtype: str
    """
    keyword_io = KeywordIO()
    layer_tree_root = QgsProject.instance().layerTreeRoot()
    # Top-level groups of the current project's layer tree.
    all_groups = [
        child for child in layer_tree_root.children() if (
            isinstance(child, QgsLayerTreeGroup))]
    multi_exposure_group = None
    # Pick the first group flagged as a multi-exposure analysis.
    # NOTE(review): the loop variable shadows the module-level ``group``
    # label; harmless because it stays function-local.
    for group in all_groups:
        if group.customProperty(MULTI_EXPOSURE_ANALYSIS_FLAG):
            multi_exposure_group = group
            break
    if multi_exposure_group:
        multi_exposure_tree_layers = [
            child for child in multi_exposure_group.children() if (
                isinstance(child, QgsLayerTreeLayer))]
        exposure_groups = [
            child for child in multi_exposure_group.children() if (
                isinstance(child, QgsLayerTreeGroup))]

        def get_report_ready_layer(tree_layers):
            """Get a layer which has a report in its directory.

            :param tree_layers: A list of tree layer nodes (QgsLayerTreeLayer)
            :type tree_layers: list

            :return: A vector layer
            :rtype: QgsMapLayer
            """
            for tree_layer in tree_layers:
                layer = tree_layer.layer()
                keywords = keyword_io.read_keywords(layer)
                extra_keywords_found = keywords.get('extra_keywords')
                provenance = keywords.get('provenance_data')
                if provenance:
                    # Match a single-exposure child analysis by its
                    # exposure keyword.
                    exposure_keywords = provenance.get('exposure_keywords', {})
                    exposure_key_found = exposure_keywords.get('exposure')
                    if exposure_key_found and (
                            exposure_key == exposure_key_found):
                        return layer
                # With no exposure key requested, match the combined
                # multi-exposure layer itself.
                if not exposure_key and extra_keywords_found and (
                        extra_keywords_found[
                            extra_keyword_analysis_type['key']] == (
                            MULTI_EXPOSURE_ANALYSIS_FLAG)):
                    return layer
            return None

        layer = get_report_ready_layer(multi_exposure_tree_layers)
        if not layer:
            # Fall back to searching each per-exposure sub group.
            for exposure_group in exposure_groups:
                tree_layers = [
                    child for child in exposure_group.children() if (
                        isinstance(child, QgsLayerTreeLayer))]
                layer = get_report_ready_layer(tree_layers)
                if layer:
                    break
        if layer:
            return dirname(layer.source())
    return None
def get_impact_report_as_string(analysis_dir):
    """Retrieve an html string of table report (impact-report-output.html).

    :param analysis_dir: Directory of where the report located.
    :type analysis_dir: str

    :return: HTML string of the report, or None when no report product
        exists in the directory.
    :rtype: str
    """
    html_report_products = [
        'impact-report-output.html',
        'multi-exposure-impact-report-output.html']
    output_dir_path = join(analysis_dir, 'output')
    # Use the first report product that actually exists on disk.  This
    # replaces the original loop's fragile "reset path to None on every
    # miss" dance with a plain sentinel search (also correct if the
    # product list is ever empty).
    table_report_path = None
    for html_report_product in html_report_products:
        candidate_path = join(output_dir_path, html_report_product)
        if exists(candidate_path):
            table_report_path = candidate_path
            break
    if not table_report_path:
        return None

    # We can display an impact report.
    # We need to open the file in UTF-8, the HTML may have some accents
    with codecs.open(table_report_path, 'r', 'utf-8') as table_report_file:
        report = table_report_file.read()
    return report
def get_report_section(
        html_report, component_id, container_wrapper_format=container_format):
    """Get specific report section from InaSAFE analysis summary report.

    :param html_report: The html report.
    :type html_report: basestring

    :param component_id: The component key (matched against element ids).
    :type component_id: str

    :param container_wrapper_format: A string format for wrapping the section.
    :type container_wrapper_format: basestring

    :return: Requested report section as an html, or a translated error
        message when no element carries the requested id.
    :rtype: basestring
    """
    no_element_error = tr('No element match the tag or component id.')
    # XMLID parses the document and also returns an id -> element map.
    root_element, dict_of_elements = ET.XMLID(html_report)
    section_element = dict_of_elements.get(component_id)
    if section_element:
        # Serialise the matched element back to markup and wrap it.
        # NOTE(review): ``unicode`` makes this function Python 2 only.
        requested_section = container_wrapper_format.format(
            section_content=unicode(ET.tostring(section_element)))
        return requested_section
    else:
        return no_element_error
##
# For QGIS < 2.18.13 and QGIS < 2.14.19, docstrings are used in the QGIS GUI
# in the Expression dialog and also in the InaSAFE Help dialog.
#
# For QGIS >= 2.18.13, QGIS >= 2.14.19 and QGIS 3, the translated variable will
# be used in QGIS.
# help_text is used for QGIS 2.18 and 2.14
# helpText is used for QGIS 3 : https://github.com/qgis/QGIS/pull/5059
##
description = tr('Retrieve default InaSAFE HTML resources (style and script) '
                 'from InaSAFE analysis report of current selected analysis.')
examples = {
    'inasafe_html_resources()': None
}
help_message = generate_expression_help(description, examples)


@qgsfunction(
    args='auto', group=group, usesGeometry=False, referencedColumns=[],
    help_text=help_message.to_html(), helpText=help_message.to_html())
def default_inasafe_html_resources(feature, parent):
    """Retrieve default InaSAFE HTML resources (style and script).
    """
    _ = feature, parent  # NOQA
    project_context_scope = QgsExpressionContextUtils.projectScope()
    key = provenance_layer_analysis_impacted['provenance_key']
    # Bail out when no impact layer has been registered in the project.
    if not project_context_scope.hasVariable(key):
        return None
    # The report files live next to the impact layer's source file.
    analysis_dir = dirname(project_context_scope.variable(key))
    complete_html_report = get_impact_report_as_string(analysis_dir)
    # Extract only the shared style/script section, without the usual
    # container wrapper.
    requested_html_report = get_report_section(
        complete_html_report,
        component_id='inasafe-html-resources',
        container_wrapper_format=u'{section_content}')
    return requested_html_report
description = tr('Retrieve an HTML table report of current selected analysis.')
examples = {
    'analysis_summary_report()': None
}
help_message = generate_expression_help(description, examples)


@qgsfunction(
    args='auto', group=group, usesGeometry=False, referencedColumns=[],
    help_text=help_message.to_html(), helpText=help_message.to_html())
def analysis_summary_report(feature, parent):
    """Retrieve an HTML table report of current selected analysis.
    """
    _ = feature, parent  # NOQA
    project_context_scope = QgsExpressionContextUtils.projectScope()
    key = provenance_layer_analysis_impacted['provenance_key']
    # No registered impact layer means there is nothing to report on.
    if not project_context_scope.hasVariable(key):
        return None
    # The report files live next to the impact layer's source file.
    analysis_dir = dirname(project_context_scope.variable(key))
    return get_impact_report_as_string(analysis_dir)
description = tr('Retrieve the analysis question section from '
                 'InaSAFE analysis report of current selected analysis.')
examples = {
    'analysis_question_report()': None
}
help_message = generate_expression_help(description, examples)


@qgsfunction(
    args='auto', group=group, usesGeometry=False, referencedColumns=[],
    help_text=help_message.to_html(), helpText=help_message.to_html())
def analysis_question_report(feature, parent):
    """Retrieve the analysis question section from InaSAFE report.
    """
    _ = feature, parent  # NOQA
    project_context_scope = QgsExpressionContextUtils.projectScope()
    key = provenance_layer_analysis_impacted['provenance_key']
    # No registered impact layer means there is nothing to report on.
    if not project_context_scope.hasVariable(key):
        return None
    analysis_dir = dirname(project_context_scope.variable(key))
    complete_html_report = get_impact_report_as_string(analysis_dir)
    # Pull only the element whose id matches the component key.
    requested_html_report = get_report_section(
        complete_html_report, component_id=analysis_question_component['key'])
    return requested_html_report
description = tr('Retrieve the general report section from '
'InaSAFE analysis report of current selected analysis.')
examples = {
'general_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def general_report(feature, parent):
"""Retrieve the general report section from InaSAFE report.
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
key = provenance_layer_analysis_impacted['provenance_key']
if not project_context_scope.hasVariable(key):
return None
analysis_dir = dirname(project_context_scope.variable(key))
complete_html_report = get_impact_report_as_string(analysis_dir)
requested_html_report = get_report_section(
complete_html_report, component_id=general_report_component['key'])
return requested_html_report
description = tr('Retrieve the mmi detail section from '
'InaSAFE analysis report of current selected analysis.')
examples = {
'mmi_detail_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def mmi_detail_report(feature, parent):
"""Retrieve the mmi detail section from InaSAFE report.
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
key = provenance_layer_analysis_impacted['provenance_key']
if not project_context_scope.hasVariable(key):
return None
analysis_dir = dirname(project_context_scope.variable(key))
complete_html_report = get_impact_report_as_string(analysis_dir)
requested_html_report = get_report_section(
complete_html_report, component_id=mmi_detail_component['key'])
return requested_html_report
description = tr('Retrieve the analysis detail section from '
'InaSAFE analysis report of current selected analysis.')
examples = {
'analysis_detail_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def analysis_detail_report(feature, parent):
"""Retrieve the analysis detail section from InaSAFE report.
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
key = provenance_layer_analysis_impacted['provenance_key']
if not project_context_scope.hasVariable(key):
return None
analysis_dir = dirname(project_context_scope.variable(key))
complete_html_report = get_impact_report_as_string(analysis_dir)
requested_html_report = get_report_section(
complete_html_report, component_id=analysis_detail_component['key'])
return requested_html_report
description = tr('Retrieve the action checklist section from '
'InaSAFE analysis report of current selected analysis.')
examples = {
'action_checklist_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def action_checklist_report(feature, parent):
"""Retrieve the action checklist section from InaSAFE report.
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
key = provenance_layer_analysis_impacted['provenance_key']
if not project_context_scope.hasVariable(key):
return None
analysis_dir = dirname(project_context_scope.variable(key))
complete_html_report = get_impact_report_as_string(analysis_dir)
requested_html_report = get_report_section(
complete_html_report, component_id=action_checklist_component['key'])
return requested_html_report
description = tr('Retrieve the notes assumptions section from '
'InaSAFE analysis report of current selected analysis.')
examples = {
'notes_assumptions_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def notes_assumptions_report(feature, parent):
"""Retrieve the notes assumptions section from InaSAFE report.
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
key = provenance_layer_analysis_impacted['provenance_key']
if not project_context_scope.hasVariable(key):
return None
analysis_dir = dirname(project_context_scope.variable(key))
complete_html_report = get_impact_report_as_string(analysis_dir)
requested_html_report = get_report_section(
complete_html_report, component_id=notes_assumptions_component['key'])
return requested_html_report
description = tr('Retrieve the minimum needs section from '
'InaSAFE analysis report of current selected analysis.')
examples = {
'minimum_needs_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def minimum_needs_report(feature, parent):
"""Retrieve the minimum needs section from InaSAFE report.
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
key = provenance_layer_analysis_impacted['provenance_key']
if not project_context_scope.hasVariable(key):
return None
analysis_dir = dirname(project_context_scope.variable(key))
complete_html_report = get_impact_report_as_string(analysis_dir)
requested_html_report = get_report_section(
complete_html_report, component_id=minimum_needs_component['key'])
return requested_html_report
description = tr('Retrieve the aggregation result section from '
'InaSAFE analysis report of current selected analysis.')
examples = {
'aggregation_result_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def aggregation_result_report(feature, parent):
"""Retrieve the aggregation result section from InaSAFE report.
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
key = provenance_layer_analysis_impacted['provenance_key']
if not project_context_scope.hasVariable(key):
return None
analysis_dir = dirname(project_context_scope.variable(key))
complete_html_report = get_impact_report_as_string(analysis_dir)
requested_html_report = get_report_section(
complete_html_report, component_id=aggregation_result_component['key'])
return requested_html_report
description = tr('Retrieve the aggregation postprocessors section from '
'InaSAFE analysis report of current selected analysis.')
examples = {
'aggregation_postprocessors_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def aggregation_postprocessors_report(feature, parent):
"""Retrieve the aggregation postprocessors section from InaSAFE report.
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
key = provenance_layer_analysis_impacted['provenance_key']
if not project_context_scope.hasVariable(key):
return None
analysis_dir = dirname(project_context_scope.variable(key))
complete_html_report = get_impact_report_as_string(analysis_dir)
requested_html_report = get_report_section(
complete_html_report,
component_id=aggregation_postprocessors_component['key'])
return requested_html_report
description = tr('Retrieve the analysis provenance details section from '
'InaSAFE analysis report of current selected analysis.')
examples = {
'analysis_provenance_details_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def analysis_provenance_details_report(feature, parent):
"""Retrieve the analysis provenance details section from InaSAFE report.
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
key = provenance_layer_analysis_impacted['provenance_key']
if not project_context_scope.hasVariable(key):
return None
analysis_dir = dirname(project_context_scope.variable(key))
complete_html_report = get_impact_report_as_string(analysis_dir)
requested_html_report = get_report_section(
complete_html_report,
component_id=analysis_provenance_details_simplified_component['key'])
return requested_html_report
description = tr('Retrieve an HTML population analysis table report from '
                 'a multi exposure analysis.')
examples = {
    'population_analysis_summary_report()': None
}
help_message = generate_expression_help(description, examples)


@qgsfunction(
    args='auto', group=group, usesGeometry=False, referencedColumns=[],
    help_text=help_message.to_html(), helpText=help_message.to_html())
def population_analysis_summary_report(feature, parent):
    """Retrieve an HTML population analysis table report from a multi exposure
    analysis.
    """
    _ = feature, parent  # NOQA
    # Locate the population sub-analysis inside the multi-exposure group.
    analysis_dir = get_analysis_dir(exposure_population['key'])
    if analysis_dir:
        return get_impact_report_as_string(analysis_dir)
    return None
description = tr('Retrieve an HTML road analysis table report from '
'a multi exposure analysis.')
examples = {
'road_analysis_summary_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def road_analysis_summary_report(feature, parent):
"""Retrieve an HTML road analysis table report from a multi exposure
analysis.
"""
_ = feature, parent # NOQA
analysis_dir = get_analysis_dir(exposure_road['key'])
if analysis_dir:
return get_impact_report_as_string(analysis_dir)
return None
description = tr('Retrieve an HTML structure analysis table report from '
'a multi exposure analysis.')
examples = {
'structure_analysis_summary_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def structure_analysis_summary_report(feature, parent):
"""Retrieve an HTML structure analysis table report from a multi exposure
analysis.
"""
_ = feature, parent # NOQA
analysis_dir = get_analysis_dir(exposure_structure['key'])
if analysis_dir:
return get_impact_report_as_string(analysis_dir)
return None
description = tr('Retrieve an HTML place analysis table report from '
'a multi exposure analysis.')
examples = {
'place_analysis_summary_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def place_analysis_summary_report(feature, parent):
"""Retrieve an HTML place analysis table report from a multi exposure
analysis.
"""
_ = feature, parent # NOQA
analysis_dir = get_analysis_dir(exposure_place['key'])
if analysis_dir:
return get_impact_report_as_string(analysis_dir)
return None
description = tr('Retrieve an HTML land cover analysis table report from '
'a multi exposure analysis.')
examples = {
'land_cover_analysis_summary_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def land_cover_analysis_summary_report(feature, parent):
"""Retrieve an HTML land cover analysis table report from a multi exposure
analysis.
"""
_ = feature, parent # NOQA
analysis_dir = get_analysis_dir(exposure_land_cover['key'])
if analysis_dir:
return get_impact_report_as_string(analysis_dir)
return None
description = tr('Retrieve an HTML multi exposure analysis table report.')
examples = {
'multi_exposure_analysis_summary_report()': None
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def multi_exposure_analysis_summary_report(feature, parent):
"""Retrieve an HTML multi exposure analysis table report.
"""
_ = feature, parent # NOQA
analysis_dir = get_analysis_dir()
if analysis_dir:
return get_impact_report_as_string(analysis_dir)
return None
| gpl-3.0 |
moondrop-entertainment/django-nonrel-drawp | django/utils/unittest/main.py | 332 | 9388 | """Unittest main program"""
import sys
import os
import types
from django.utils.unittest import loader, runner
try:
from django.utils.unittest.signals import installHandler
except ImportError:
installHandler = None
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s test_module.TestClass - run tests from
test_module.TestClass
%(progName)s test_module.TestClass.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
class TestProgram(object):
    """A command-line program that runs a set of tests; this is primarily
       for making test modules conveniently executable.
    """
    USAGE = USAGE_FROM_MODULE

    # defaults for testing
    failfast = catchbreak = buffer = progName = None

    def __init__(self, module='__main__', defaultTest=None,
                 argv=None, testRunner=None,
                 testLoader=loader.defaultTestLoader, exit=True,
                 verbosity=1, failfast=None, catchbreak=None, buffer=None):
        """Parse *argv*, build the test suite and run it immediately.

        ``module`` may be a module object or a dotted module name; when a
        name is given it is imported and the leaf submodule is used.
        Constructing an instance has side effects: it parses arguments,
        runs the tests and (when ``exit`` is true) calls sys.exit().
        """
        if isinstance(module, basestring):
            # __import__ returns the top-level package; walk down to the
            # leaf submodule named by the dotted path.
            self.module = __import__(module)
            for part in module.split('.')[1:]:
                self.module = getattr(self.module, part)
        else:
            self.module = module
        if argv is None:
            argv = sys.argv

        self.exit = exit
        self.verbosity = verbosity
        self.failfast = failfast
        self.catchbreak = catchbreak
        self.buffer = buffer
        self.defaultTest = defaultTest
        self.testRunner = testRunner
        self.testLoader = testLoader
        self.progName = os.path.basename(argv[0])
        self.parseArgs(argv)
        self.runTests()

    def usageExit(self, msg=None):
        """Print *msg* (if any) plus the usage template, then exit(2).

        The failfast/catchbreak/buffer fragments are only shown for
        features that were not explicitly disabled (and, for catchbreak,
        only when the signal handler is importable).
        """
        if msg:
            print msg
        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
                 'buffer': ''}
        if self.failfast != False:
            usage['failfast'] = FAILFAST
        if self.catchbreak != False and installHandler is not None:
            usage['catchbreak'] = CATCHBREAK
        if self.buffer != False:
            usage['buffer'] = BUFFEROUTPUT
        print self.USAGE % usage
        sys.exit(2)

    def parseArgs(self, argv):
        """Handle command-line options and populate self.testNames.

        ``<prog> discover ...`` is delegated to _do_discovery(); otherwise
        getopt is used.  Options given on the command line never override
        values passed explicitly to the constructor (the ``is None``
        checks below).
        """
        if len(argv) > 1 and argv[1].lower() == 'discover':
            self._do_discovery(argv[2:])
            return

        import getopt
        long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
        try:
            options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
            for opt, value in options:
                if opt in ('-h','-H','--help'):
                    self.usageExit()
                if opt in ('-q','--quiet'):
                    self.verbosity = 0
                if opt in ('-v','--verbose'):
                    self.verbosity = 2
                if opt in ('-f','--failfast'):
                    if self.failfast is None:
                        self.failfast = True
                    # Should this raise an exception if -f is not valid?
                if opt in ('-c','--catch'):
                    if self.catchbreak is None and installHandler is not None:
                        self.catchbreak = True
                    # Should this raise an exception if -c is not valid?
                if opt in ('-b','--buffer'):
                    if self.buffer is None:
                        self.buffer = True
                    # Should this raise an exception if -b is not valid?

            if len(args) == 0 and self.defaultTest is None:
                # createTests will load tests from self.module
                self.testNames = None
            elif len(args) > 0:
                self.testNames = args
                if __name__ == '__main__':
                    # to support python -m unittest ...
                    self.module = None
            else:
                self.testNames = (self.defaultTest,)
            self.createTests()
        except getopt.error, msg:
            self.usageExit(msg)

    def createTests(self):
        """Build self.test, either from the whole module or from the
        dotted names collected by parseArgs()."""
        if self.testNames is None:
            self.test = self.testLoader.loadTestsFromModule(self.module)
        else:
            self.test = self.testLoader.loadTestsFromNames(self.testNames,
                                                           self.module)

    def _do_discovery(self, argv, Loader=loader.TestLoader):
        """Implement the ``discover`` sub-command: parse its own optparse
        options and build self.test via Loader().discover()."""
        # handle command line args for test discovery
        self.progName = '%s discover' % self.progName
        import optparse
        parser = optparse.OptionParser()
        parser.prog = self.progName
        parser.add_option('-v', '--verbose', dest='verbose', default=False,
                          help='Verbose output', action='store_true')
        if self.failfast != False:
            parser.add_option('-f', '--failfast', dest='failfast', default=False,
                              help='Stop on first fail or error',
                              action='store_true')
        if self.catchbreak != False and installHandler is not None:
            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
                              help='Catch ctrl-C and display results so far',
                              action='store_true')
        if self.buffer != False:
            parser.add_option('-b', '--buffer', dest='buffer', default=False,
                              help='Buffer stdout and stderr during tests',
                              action='store_true')
        parser.add_option('-s', '--start-directory', dest='start', default='.',
                          help="Directory to start discovery ('.' default)")
        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
                          help="Pattern to match tests ('test*.py' default)")
        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
                          help='Top level directory of project (defaults to start directory)')

        options, args = parser.parse_args(argv)
        if len(args) > 3:
            self.usageExit()

        # Positional arguments may override -s/-p/-t, in that order.
        for name, value in zip(('start', 'pattern', 'top'), args):
            setattr(options, name, value)

        # only set options from the parsing here
        # if they weren't set explicitly in the constructor
        if self.failfast is None:
            self.failfast = options.failfast
        if self.catchbreak is None and installHandler is not None:
            self.catchbreak = options.catchbreak
        if self.buffer is None:
            self.buffer = options.buffer

        if options.verbose:
            self.verbosity = 2

        start_dir = options.start
        pattern = options.pattern
        top_level_dir = options.top

        loader = Loader()
        self.test = loader.discover(start_dir, pattern, top_level_dir)

    def runTests(self):
        """Instantiate the runner, run self.test and (optionally) exit
        with a status reflecting the result."""
        if self.catchbreak:
            installHandler()
        if self.testRunner is None:
            self.testRunner = runner.TextTestRunner
        if isinstance(self.testRunner, (type, types.ClassType)):
            try:
                testRunner = self.testRunner(verbosity=self.verbosity,
                                             failfast=self.failfast,
                                             buffer=self.buffer)
            except TypeError:
                # didn't accept the verbosity, buffer or failfast arguments
                testRunner = self.testRunner()
        else:
            # it is assumed to be a TestRunner instance
            testRunner = self.testRunner
        self.result = testRunner.run(self.test)
        if self.exit:
            # Exit status 0 on success, 1 on any failure/error.
            sys.exit(not self.result.wasSuccessful())
# Public alias: unittest.main(...) is just TestProgram(...).
main = TestProgram

def main_():
    """Entry point for `python -m unittest`: switch to the command-line
    usage template and run with no implicit module."""
    TestProgram.USAGE = USAGE_AS_MAIN
    main(module=None)
| bsd-3-clause |
linkdesu/shadowsocks | tests/test.py | 1016 | 5029 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import signal
import select
import time
import argparse
from subprocess import Popen, PIPE
# Interpreter used to launch the client/server; replaced by a coverage
# wrapper when --with-coverage is given.
python = ['python']
default_url = 'http://localhost/'

parser = argparse.ArgumentParser(description='test Shadowsocks')
parser.add_argument('-c', '--client-conf', type=str, default=None)
parser.add_argument('-s', '--server-conf', type=str, default=None)
parser.add_argument('-a', '--client-args', type=str, default=None)
parser.add_argument('-b', '--server-args', type=str, default=None)
parser.add_argument('--with-coverage', action='store_true', default=None)
parser.add_argument('--should-fail', action='store_true', default=None)
parser.add_argument('--tcp-only', action='store_true', default=None)
parser.add_argument('--url', type=str, default=default_url)
parser.add_argument('--dns', type=str, default='8.8.8.8')

config = parser.parse_args()

if config.with_coverage:
    python = ['coverage', 'run', '-p', '-a']

client_args = python + ['shadowsocks/local.py', '-v']
server_args = python + ['shadowsocks/server.py', '-v']

# The server falls back to the client's config/args when no dedicated
# server config/args were supplied.
if config.client_conf:
    client_args.extend(['-c', config.client_conf])
    if config.server_conf:
        server_args.extend(['-c', config.server_conf])
    else:
        server_args.extend(['-c', config.client_conf])
if config.client_args:
    client_args.extend(config.client_args.split())
    if config.server_args:
        server_args.extend(config.server_args.split())
    else:
        server_args.extend(config.client_args.split())
if config.url == default_url:
    server_args.extend(['--forbidden-ip', ''])

# p1 = server, p2 = local client; p3/p4 are the curl/dig probes started
# later from the event loop below.
p1 = Popen(server_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p2 = Popen(client_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p3 = None
p4 = None
p3_fin = False
p4_fin = False

# State machine for the test sequence:
# 1 shadowsocks started
# 2 curl started
# 3 curl finished
# 4 dig started
# 5 dig finished
stage = 1
try:
    local_ready = False
    server_ready = False
    # Multiplex stdout/stderr of every child so log lines can be mirrored
    # to our stderr while driving the stage machine.
    fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr]
    while True:
        r, w, e = select.select(fdset, [], fdset)
        if e:
            break

        for fd in r:
            line = fd.readline()
            if not line:
                # EOF on a probe's stdout means that probe is done.
                if stage == 2 and fd == p3.stdout:
                    stage = 3
                if stage == 4 and fd == p4.stdout:
                    stage = 5
            if bytes != str:
                # Python 3: subprocess pipes yield bytes; decode for output.
                line = str(line, 'utf8')
            sys.stderr.write(line)
            # Readiness markers printed by local.py / server.py logs.
            if line.find('starting local') >= 0:
                local_ready = True
            if line.find('starting server') >= 0:
                server_ready = True

        if stage == 1:
            # Give both ends a moment to bind, then probe TCP through the
            # local SOCKS5 proxy with curl.
            time.sleep(2)

            p3 = Popen(['curl', config.url, '-v', '-L',
                        '--socks5-hostname', '127.0.0.1:1081',
                        '-m', '15', '--connect-timeout', '10'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p3 is not None:
                fdset.append(p3.stdout)
                fdset.append(p3.stderr)
                stage = 2
            else:
                sys.exit(1)

        if stage == 3 and p3 is not None:
            # curl finished: check its exit status (inverted when the test
            # is expected to fail), then move on to the UDP/DNS probe.
            fdset.remove(p3.stdout)
            fdset.remove(p3.stderr)
            r = p3.wait()
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
            else:
                if r != 0:
                    sys.exit(1)

            if config.tcp_only:
                break
            p4 = Popen(['socksify', 'dig', '@%s' % config.dns,
                        'www.google.com'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p4 is not None:
                fdset.append(p4.stdout)
                fdset.append(p4.stderr)
                stage = 4
            else:
                sys.exit(1)

        if stage == 5:
            # dig finished: same pass/fail logic as the curl stage.
            r = p4.wait()
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
                print('test passed (expecting failure)')
            else:
                if r != 0:
                    sys.exit(1)
                print('test passed')
            break
finally:
    # Always shut the client and server down, even on probe failure.
    for p in [p1, p2]:
        try:
            os.kill(p.pid, signal.SIGINT)
            os.waitpid(p.pid, 0)
        except OSError:
            pass
| apache-2.0 |
woodscn/scipy | tools/win32/build_scripts/pavement.py | 89 | 8536 | import os
from os.path import join as pjoin, normpath, exists as pexists, dirname
import subprocess
from shutil import rmtree, move as shmove
import re
from zipfile import ZipFile
from lib import get_svn_version, get_scipy_version
# When True, bdist_msi installers are produced instead of bdist_wininst.
BUILD_MSI = False
# Root of the scipy checkout (three levels up from this script).
SRC_ROOT = normpath(pjoin(os.getcwd(), os.pardir, os.pardir, os.pardir))
BUILD_ROOT = os.getcwd()

# Defaults for the configurable paver options below.
PYVER = '2.5'
ARCH = 'nosse'

# Interpreter used to build each supported python version.  Raw strings:
# the original literals only worked because '\p' happens not to be a valid
# escape sequence, which is deprecated and fragile (e.g. '\t' in a path
# would silently turn into a tab).
PYEXECS = {"2.5": r"C:\python25\python.exe",
           "2.4": r"C:\python24\python24.exe",
           "2.3": r"C:\python23\python23.exe"}

# site.cfg contents selecting the ATLAS build for each instruction set.
_SSE3_CFG = r"""[atlas]
library_dirs = C:\local\lib\yop\sse3"""
_SSE2_CFG = r"""[atlas]
library_dirs = C:\local\lib\yop\sse2"""
_NOSSE_CFG = r"""[atlas]
library_dirs = fakedirectorywhichhopefullydoesnotexist
[DEFAULT]
library_dirs = C:\local\lib\yop\nosse"""

SITECFG = {"sse2": _SSE2_CFG, "sse3": _SSE3_CFG, "nosse": _NOSSE_CFG}
# Paver option bundles (``options`` and ``Bunch`` are injected into the
# pavement namespace by paver itself).  Each Bunch provides the defaults
# read by the matching @task below via ``options.<name>``.
options(
    clean=Bunch(
        src_dir = SRC_ROOT,
        pyver = PYVER
    ),
    clean_bootstrap=Bunch(
        src_dir = SRC_ROOT,
        pyver = PYVER
    ),
    build_sdist=Bunch(
        src_dir = SRC_ROOT
    ),
    build_binary=Bunch(
        pyver = PYVER,
        arch = ARCH,
        src_root = SRC_ROOT
    ),
    bootstrap=Bunch(
        pyver = PYVER,
        src_root = SRC_ROOT
    ),
    bootstrap_arch=Bunch(
        pyver = PYVER,
        arch = ARCH
    ),
    bootstrap_nsis=Bunch(
        pyver = PYVER,
        src_root = SRC_ROOT
    )
)
# Clean everything, including bootstrap source tree
@task
def clean():
    raw_clean(options.src_dir, options.pyver)

# Clean the bootstrap source tree for a clean build from scratch
@task
def clean_bootstrap():
    raw_clean_bootstrap(options.pyver)

# Build the sdist zip that the bootstrap tree is extracted from.
@task
def build_sdist():
    raw_build_sdist(options.src_dir)

# Build the sdist, then unpack it into the bootstrap directory.
@task
@needs('build_sdist')
def bootstrap():
    raw_bootstrap(options.pyver, options.src_dir)

# Point the bootstrap tree's site.cfg at the ATLAS libs for one arch.
@task
def bootstrap_arch():
    pyver = options.pyver
    arch = options.arch
    set_bootstrap_sources(arch, pyver)

# Generate the NSIS superinstaller script inside the bootstrap tree.
@task
def bootstrap_nsis():
    pyver = options.pyver
    bdir = bootstrap_dir(options.pyver)
    prepare_nsis_script(bdir, pyver, get_scipy_version(options.src_root))

# Build one arch-specific windows installer from the bootstrap tree.
@task
def build_binary():
    pyver = options.pyver
    arch = options.arch
    raw_build_arch(pyver, arch, options.src_root)

# Full pipeline: clean, bootstrap, build every arch flavour, then wrap
# them all into the NSIS superpack installer.
@task
@needs('bootstrap')
@needs('clean')
def build_nsis():
    scipy_verstr = get_scipy_version(options.src_root)
    bdir = bootstrap_dir(options.pyver)
    prepare_nsis_script(bdir, options.pyver, scipy_verstr)
    for arch in ['nosse', 'sse2', 'sse3']:
        raw_clean_bootstrap(options.pyver)
        set_bootstrap_sources(arch, options.pyver)
        raw_build_arch(options.pyver, arch, options.src_root)
    raw_build_nsis(options.pyver)
# Helpers
def set_bootstrap_sources(arch, pyver):
    """Write a site.cfg in the *pyver* bootstrap tree selecting the
    ATLAS libraries built for *arch*."""
    write_site_cfg(arch, cwd=bootstrap_dir(pyver))
def get_sdist_tarball(src_root):
    """Return the filename of the zip produced by `setup.py sdist`.

    distutils hardcodes this naming scheme, so we must mirror it here.
    """
    return "scipy-%s.zip" % get_scipy_version(src_root)
def prepare_scipy_sources(src_root, bootstrap):
    """Extract the sdist-built zip into the *bootstrap* directory.

    Every archive member is written with the leading 'scipy-VERSION/'
    path component stripped.

    Fix over the original: the ZipFile and each output file handle are
    now closed deterministically — the original leaked every handle,
    which can leave partially flushed files on disk.
    """
    root = 'scipy-%s' % get_scipy_version(src_root)
    zid = ZipFile(pjoin(src_root, 'dist', get_sdist_tarball(src_root)))
    try:
        # From the sdist-built tarball, extract all files into bootstrap
        # directory, but removing the scipy-VERSION head path.
        for name in zid.namelist():
            cnt = zid.read(name)
            if name.startswith(root):
                # XXX: even on windows, the path sep in zip is '/' ?
                name = name.split('/', 1)[1]
            newname = pjoin(bootstrap, name)
            if not pexists(dirname(newname)):
                os.makedirs(dirname(newname))
            fid = open(newname, 'wb')
            try:
                fid.write(cnt)
            finally:
                fid.close()
    finally:
        zid.close()
def prepare_nsis_script(bdir, pyver, numver):
    """Instantiate the NSIS superinstaller template into *bdir*.

    Substitutes the installer name and the per-arch component installer
    names into nsis_scripts/scipy-superinstaller.nsi.in and writes the
    result as scipy-superinstaller.nsi in the bootstrap directory.

    Fix over the original: both file handles are now closed — the
    original never closed *target*, so the generated script could stay
    partially buffered until interpreter exit.
    """
    tpl = pjoin('nsis_scripts', 'scipy-superinstaller.nsi.in')
    source = open(tpl, 'r')
    try:
        cnt = "".join(source.readlines())
    finally:
        source.close()

    installer_name = 'scipy-%s-win32-superpack-python%s.exe' % (numver, pyver)
    cnt = cnt.replace('@SCIPY_INSTALLER_NAME@', installer_name)
    for arch in ['nosse', 'sse2', 'sse3']:
        cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
                          get_binary_name(arch, numver))

    target = open(pjoin(bdir, 'scipy-superinstaller.nsi'), 'w')
    try:
        target.write(cnt)
    finally:
        target.close()
def bootstrap_dir(pyver):
    """Return the per-python-version bootstrap build directory."""
    return pjoin(BUILD_ROOT, 'bootstrap-' + str(pyver))
def get_python_exec(ver):
    """Return the python executable path configured for version *ver*.

    Raises ValueError for versions with no configured interpreter.
    """
    # XXX Check that the file actually exists
    if ver not in PYEXECS:
        raise ValueError("Version %s not supported/recognized" % ver)
    return PYEXECS[ver]
def write_site_cfg(arch, cwd=None):
    """(Re)write site.cfg in *cwd* (default: current directory) with the
    ATLAS configuration for *arch* taken from SITECFG.

    Fixes over the original: the handle is closed in a finally clause,
    and the single config string is written with write() instead of
    writelines() (which iterates a plain string character by character).
    """
    if not cwd:
        cwd = os.getcwd()
    scfg = pjoin(cwd, "site.cfg")
    # Drop any stale file first so a fresh one is always created.
    if pexists(scfg):
        os.remove(scfg)
    f = open(scfg, 'w')
    try:
        f.write(SITECFG[arch])
    finally:
        f.close()
def move_binary(arch, pyver, cwd, scipy_verstr):
    """Move the installer produced by distutils from dist/ into the
    binaries/ directory, renaming it to the superpack component name."""
    dest_dir = pjoin(cwd, "binaries")
    if not pexists(dest_dir):
        os.makedirs(dest_dir)
    src = pjoin(cwd, 'dist', get_windist_exec(pyver, scipy_verstr))
    shmove(src, pjoin(dest_dir, get_binary_name(arch, scipy_verstr)))
def get_binary_name(arch, scipy_verstr):
    """Return the arch-specific component installer filename."""
    ext = '.msi' if BUILD_MSI else '.exe'
    return "scipy-%s-%s%s" % (scipy_verstr, arch, ext)
def get_windist_exec(pyver, scipy_verstr):
    """Return the installer filename produced by the wininst/msi command.

    distutils hardcodes this naming scheme, so it is mirrored here.
    """
    ext = '.msi' if BUILD_MSI else '.exe'
    return "scipy-%s.win32-py%s%s" % (scipy_verstr, pyver, ext)
def raw_clean(src_dir, pyver):
    """Remove all sdist build products in *src_dir* and the whole
    bootstrap tree for *pyver*."""
    # sdist leftovers: the dist/ directory and the generated MANIFEST.
    sdist_dir = pjoin(src_dir, "dist")
    if pexists(sdist_dir):
        rmtree(sdist_dir)
    manifest = pjoin(src_dir, "MANIFEST")
    if pexists(manifest):
        os.remove(manifest)
    # The per-version bootstrap tree.
    bootstrap = bootstrap_dir(pyver)
    if pexists(bootstrap):
        rmtree(bootstrap)
def raw_clean_bootstrap(pyver):
    """Reset the bootstrap tree's build products (build/, dist/ and
    site.cfg) while keeping the extracted sources in place."""
    bdir = bootstrap_dir(pyver)
    for sub in ("build", "dist"):
        target = pjoin(bdir, sub)
        if pexists(target):
            rmtree(target)
    scfg = pjoin(bdir, "site.cfg")
    if pexists(scfg):
        os.remove(scfg)
def raw_build_sdist(cwd):
    """Run `python setup.py sdist --format=zip` in *cwd*.

    All subprocess output is redirected into sdist.log; on a non-zero
    exit status an Exception pointing at that log is raised.
    """
    cmd = ["python", "setup.py", "sdist", "--format=zip"]

    build_log = "sdist.log"
    f = open(build_log, 'w')
    try:
        try:
            st = subprocess.call(cmd, #shell = True,
                                 stderr = subprocess.STDOUT, stdout = f,
                                 cwd=cwd)
            if st:
                raise RuntimeError("The cmd failed with status %d" % st)
        finally:
            # Close the log even when the build blows up.
            f.close()
    except (subprocess.CalledProcessError, RuntimeError), e:
        print e
        msg = """
There was an error while executing the following command:

    %s

Error was : %s

Look at the log (%s).""" % (cmd, str(e), build_log)
        raise Exception(msg)
def raw_bootstrap(pyver, src_dir):
    """Unpack the freshly built sdist into the *pyver* bootstrap tree."""
    prepare_scipy_sources(src_dir, bootstrap_dir(pyver))
def raw_build_arch(pyver, arch, src_root):
    """Build the windows installer for one (python version, arch) pair.

    Runs `setup.py build -c mingw32 bdist_wininst` (or bdist_msi when
    BUILD_MSI is set) inside the bootstrap tree, logging to a per-arch
    log file, then moves the result into binaries/ via move_binary().
    """
    scipy_verstr = get_scipy_version(src_root)
    bdir = bootstrap_dir(pyver)

    print "Building scipy (version %s) binary for python %s, arch is %s" % \
          (scipy_verstr, get_python_exec(pyver), arch)

    if BUILD_MSI:
        cmd = [get_python_exec(pyver), "setup.py", "build", "-c", "mingw32",
               "bdist_msi"]
    else:
        cmd = [get_python_exec(pyver), "setup.py", "build", "-c", "mingw32",
               "bdist_wininst"]
    build_log = "build-%s-%s.log" % (arch, pyver)
    f = open(build_log, 'w')
    try:
        try:
            st = subprocess.call(cmd, #shell = True,
                                 stderr = subprocess.STDOUT, stdout = f,
                                 cwd=bdir)
            if st:
                raise RuntimeError("The cmd failed with status %d" % st)
        finally:
            # Close the log even when the build blows up.
            f.close()
    except (subprocess.CalledProcessError, RuntimeError), e:
        print e
        msg = """
There was an error while executing the following command:

    %s

Error was : %s

Look at the build log (%s).""" % (cmd, str(e), build_log)
        raise Exception(msg)

    move_binary(arch, pyver, bdir, scipy_verstr)
def raw_build_nsis(pyver):
    """Run makensis on the generated superinstaller script for *pyver*.

    BUG FIX: the original resolved the bootstrap directory from the
    global ``options.pyver``, silently ignoring the *pyver* argument.
    Callers passing a different version would have built the wrong tree;
    the parameter is now honoured.
    """
    bdir = bootstrap_dir(pyver)
    st = subprocess.call(['makensis', 'scipy-superinstaller.nsi'],
                         cwd=bdir)
    if st:
        raise RuntimeError("Error while executing makensis command")
| bsd-3-clause |
voltaicsca/deluge | deluge/core/torrent.py | 1 | 40341 | #
# torrent.py
#
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
"""Internal Torrent class"""
import os
import time
import logging
from urllib import unquote
from urlparse import urlparse
from deluge._libtorrent import lt
import deluge.common
import deluge.component as component
from deluge.configmanager import ConfigManager, get_config_dir
from deluge.event import *
# Shorthand for the canonical torrent state strings shared across deluge.
TORRENT_STATE = deluge.common.TORRENT_STATE

# Module-level logger.
log = logging.getLogger(__name__)
def sanitize_filepath(filepath, folder=False):
    """
    Returns a sanitized filepath to pass to libtorrent rename_file().

    Backslashes are normalized to forward slashes, each path component is
    stripped of surrounding whitespace, and empty components (duplicate
    slashes, or components consisting solely of dots such as '.' / '..')
    are removed.  If `folder` is True a trailing slash is appended to the
    returned filepath.

    (Fixes the 'libotorrent' typo of the original docstring; behavior is
    unchanged.)
    """
    def clean_filename(filename):
        """Strip whitespace; a dots-only component collapses to ''."""
        filename = filename.strip()
        # '.' / '..' / '...' components are dropped so the path cannot
        # reference or escape the current directory.
        if not filename.replace('.', ''):
            return ''
        return filename

    if '\\' in filepath or '/' in filepath:
        components = [clean_filename(part)
                      for part in filepath.replace('\\', '/').split('/')]
        newfilepath = '/'.join(filter(None, components))
    else:
        newfilepath = clean_filename(filepath)

    if folder is True:
        return newfilepath + '/'
    return newfilepath
class TorrentOptions(dict):
    """Mapping of per-torrent options, seeded from the per-torrent
    defaults stored in core.conf."""

    def __init__(self):
        config = ConfigManager("core.conf").config
        # (torrent option key, core.conf key) pairs used for seeding.
        conf_key_for = {
            "max_connections": "max_connections_per_torrent",
            "max_upload_slots": "max_upload_slots_per_torrent",
            "max_upload_speed": "max_upload_speed_per_torrent",
            "max_download_speed": "max_download_speed_per_torrent",
            "prioritize_first_last_pieces": "prioritize_first_last_pieces",
            "sequential_download": "sequential_download",
            "compact_allocation": "compact_allocation",
            "download_location": "download_location",
            "auto_managed": "auto_managed",
            "stop_at_ratio": "stop_seed_at_ratio",
            "stop_ratio": "stop_seed_ratio",
            "remove_at_ratio": "remove_seed_at_ratio",
            "move_completed": "move_completed",
            "move_completed_path": "move_completed_path",
            "add_paused": "add_paused",
            "shared": "shared"
        }
        for opt_key in conf_key_for:
            self[opt_key] = config[conf_key_for[opt_key]]
        # Options with no core.conf counterpart start out empty.
        self["file_priorities"] = []
        self["mapped_files"] = {}
class Torrent(object):
"""Torrent holds information about torrents added to the libtorrent session.
"""
def __init__(self, handle, options, state=None, filename=None, magnet=None, owner=None):
log.debug("Creating torrent object %s", str(handle.info_hash()))
# Get the core config
self.config = ConfigManager("core.conf")
self.rpcserver = component.get("RPCServer")
# This dict holds previous status dicts returned for this torrent
# We use this to return dicts that only contain changes from the previous
# {session_id: status_dict, ...}
self.prev_status = {}
from twisted.internet.task import LoopingCall
self.prev_status_cleanup_loop = LoopingCall(self.cleanup_prev_status)
self.prev_status_cleanup_loop.start(10)
# Set the libtorrent handle
self.handle = handle
# Set the torrent_id for this torrent
self.torrent_id = str(handle.info_hash())
# Let's us know if we're waiting on a lt alert
self.waiting_on_resume_data = False
# Keep a list of file indexes we're waiting for file_rename alerts on
# This also includes the old_folder and new_folder to know what signal to send
# This is so we can send one folder_renamed signal instead of multiple
# file_renamed signals.
# [(old_folder, new_folder, [*indexes]), ...]
self.waiting_on_folder_rename = []
# We store the filename just in case we need to make a copy of the torrentfile
if not filename:
# If no filename was provided, then just use the infohash
filename = self.torrent_id
self.filename = filename
# Store the magnet uri used to add this torrent if available
self.magnet = magnet
# Holds status info so that we don't need to keep getting it from lt
self.status = self.handle.status()
try:
self.torrent_info = self.handle.get_torrent_info()
except RuntimeError:
self.torrent_info = None
# Default total_uploaded to 0, this may be changed by the state
self.total_uploaded = 0
# Set the default options
self.options = TorrentOptions()
self.options.update(options)
# We need to keep track if the torrent is finished in the state to prevent
# some weird things on state load.
self.is_finished = False
# Load values from state if we have it
if state:
# This is for saving the total uploaded between sessions
self.total_uploaded = state.total_uploaded
# Set the trackers
self.set_trackers(state.trackers)
# Set the filename
self.filename = state.filename
self.is_finished = state.is_finished
else:
# Tracker list
self.trackers = []
# Create a list of trackers
for value in self.handle.trackers():
if lt.version_minor < 15:
tracker = {}
tracker["url"] = value.url
tracker["tier"] = value.tier
else:
tracker = value
self.trackers.append(tracker)
# Various torrent options
self.handle.resolve_countries(True)
self.set_options(self.options)
# Status message holds error info about the torrent
self.statusmsg = "OK"
# The torrents state
self.update_state()
# The tracker status
self.tracker_status = ""
# This gets updated when get_tracker_host is called
self.tracker_host = None
if state:
self.time_added = state.time_added
else:
self.time_added = time.time()
# Keep track of the owner
if state:
self.owner = state.owner
else:
self.owner = owner
# Keep track of last seen complete
if state:
self._last_seen_complete = state.last_seen_complete or 0.0
else:
self._last_seen_complete = 0.0
# Keep track if we're forcing a recheck of the torrent so that we can
# re-pause it after its done if necessary
self.forcing_recheck = False
self.forcing_recheck_paused = False
log.debug("Torrent object created.")
## Options methods ##
def set_options(self, options):
OPTIONS_FUNCS = {
# Functions used for setting options
"auto_managed": self.set_auto_managed,
"download_location": self.set_save_path,
"file_priorities": self.set_file_priorities,
"max_connections": self.handle.set_max_connections,
"max_download_speed": self.set_max_download_speed,
"max_upload_slots": self.handle.set_max_uploads,
"max_upload_speed": self.set_max_upload_speed,
"prioritize_first_last_pieces": self.set_prioritize_first_last,
"sequential_download": self.set_sequential_download
}
for (key, value) in options.items():
if OPTIONS_FUNCS.has_key(key):
OPTIONS_FUNCS[key](value)
self.options.update(options)
def get_options(self):
return self.options
def get_name(self):
if self.handle.has_metadata():
name = self.torrent_info.file_at(0).path.split("/", 1)[0]
if not name:
name = self.torrent_info.name()
try:
return name.decode("utf8", "ignore")
except UnicodeDecodeError:
return name
elif self.magnet:
try:
keys = dict([k.split('=') for k in self.magnet.split('?')[-1].split('&')])
name = keys.get('dn')
if not name:
return self.torrent_id
name = unquote(name).replace('+', ' ')
try:
return name.decode("utf8", "ignore")
except UnicodeDecodeError:
return name
except:
pass
return self.torrent_id
def set_owner(self, account):
self.owner = account
def set_max_connections(self, max_connections):
self.options["max_connections"] = int(max_connections)
self.handle.set_max_connections(max_connections)
def set_max_upload_slots(self, max_slots):
self.options["max_upload_slots"] = int(max_slots)
self.handle.set_max_uploads(max_slots)
def set_max_upload_speed(self, m_up_speed):
self.options["max_upload_speed"] = m_up_speed
if m_up_speed < 0:
v = -1
else:
v = int(m_up_speed * 1024)
self.handle.set_upload_limit(v)
def set_max_download_speed(self, m_down_speed):
self.options["max_download_speed"] = m_down_speed
if m_down_speed < 0:
v = -1
else:
v = int(m_down_speed * 1024)
self.handle.set_download_limit(v)
def set_prioritize_first_last(self, prioritize):
self.options["prioritize_first_last_pieces"] = prioritize
if self.handle.has_metadata():
if self.options["compact_allocation"]:
log.debug("Setting first/last priority with compact "
"allocation does not work!")
return
paths = {}
ti = self.handle.get_torrent_info()
for n in range(ti.num_pieces()):
slices = ti.map_block(n, 0, ti.piece_size(n))
for slice in slices:
fe = ti.file_at(slice.file_index)
paths.setdefault(fe.path, []).append(n)
priorities = self.handle.piece_priorities()
for pieces in paths.itervalues():
two_percent = 2*100/len(pieces)
for piece in pieces[:two_percent]+pieces[-two_percent:]:
priorities[piece] = prioritize and 7 or 1
self.handle.prioritize_pieces(priorities)
def set_sequential_download(self, set_sequencial):
self.options["sequential_download"] = set_sequencial
self.handle.set_sequential_download(set_sequencial)
def set_auto_managed(self, auto_managed):
self.options["auto_managed"] = auto_managed
if not (self.handle.is_paused() and not self.handle.is_auto_managed()):
self.handle.auto_managed(auto_managed)
self.update_state()
def set_stop_ratio(self, stop_ratio):
self.options["stop_ratio"] = stop_ratio
def set_stop_at_ratio(self, stop_at_ratio):
self.options["stop_at_ratio"] = stop_at_ratio
def set_remove_at_ratio(self, remove_at_ratio):
self.options["remove_at_ratio"] = remove_at_ratio
def set_move_completed(self, move_completed):
self.options["move_completed"] = move_completed
def set_move_completed_path(self, move_completed_path):
self.options["move_completed_path"] = move_completed_path
def set_file_priorities(self, file_priorities):
if len(file_priorities) != len(self.get_files()):
log.debug("file_priorities len != num_files")
self.options["file_priorities"] = self.handle.file_priorities()
return
if self.options["compact_allocation"]:
log.debug("setting file priority with compact allocation does not work!")
self.options["file_priorities"] = self.handle.file_priorities()
return
log.debug("setting %s's file priorities: %s", self.torrent_id, file_priorities)
self.handle.prioritize_files(file_priorities)
if 0 in self.options["file_priorities"]:
# We have previously marked a file 'Do Not Download'
# Check to see if we have changed any 0's to >0 and change state accordingly
for index, priority in enumerate(self.options["file_priorities"]):
if priority == 0 and file_priorities[index] > 0:
# We have a changed 'Do Not Download' to a download priority
self.is_finished = False
self.update_state()
break
self.options["file_priorities"] = self.handle.file_priorities()
if self.options["file_priorities"] != list(file_priorities):
log.warning("File priorities were not set for this torrent")
# Set the first/last priorities if needed
self.set_prioritize_first_last(self.options["prioritize_first_last_pieces"])
def set_trackers(self, trackers):
"""Sets trackers"""
if trackers == None:
trackers = []
for value in self.handle.trackers():
tracker = {}
tracker["url"] = value.url
tracker["tier"] = value.tier
trackers.append(tracker)
self.trackers = trackers
self.tracker_host = None
return
log.debug("Setting trackers for %s: %s", self.torrent_id, trackers)
tracker_list = []
for tracker in trackers:
new_entry = lt.announce_entry(str(tracker["url"]))
new_entry.tier = tracker["tier"]
tracker_list.append(new_entry)
self.handle.replace_trackers(tracker_list)
# Print out the trackers
#for t in self.handle.trackers():
# log.debug("tier: %s tracker: %s", t["tier"], t["url"])
# Set the tracker list in the torrent object
self.trackers = trackers
if len(trackers) > 0:
# Force a re-announce if there is at least 1 tracker
self.force_reannounce()
self.tracker_host = None
### End Options methods ###
def set_save_path(self, save_path):
self.options["download_location"] = save_path
def set_tracker_status(self, status):
"""Sets the tracker status"""
self.tracker_status = self.get_tracker_host() + ": " + status
def update_state(self):
"""Updates the state based on what libtorrent's state for the torrent is"""
# Set the initial state based on the lt state
LTSTATE = deluge.common.LT_TORRENT_STATE
ltstate = int(self.handle.status().state)
# Set self.state to the ltstate right away just incase we don't hit some
# of the logic below
if ltstate in LTSTATE:
self.state = LTSTATE[ltstate]
else:
self.state = str(ltstate)
log.debug("set_state_based_on_ltstate: %s", deluge.common.LT_TORRENT_STATE[ltstate])
log.debug("session.is_paused: %s", component.get("Core").session.is_paused())
# First we check for an error from libtorrent, and set the state to that
# if any occurred.
if len(self.handle.status().error) > 0:
# This is an error'd torrent
self.state = "Error"
self.set_status_message(self.handle.status().error)
if self.handle.is_paused():
self.handle.auto_managed(False)
return
if ltstate == LTSTATE["Queued"] or ltstate == LTSTATE["Checking"]:
if self.handle.is_paused():
self.state = "Paused"
else:
self.state = "Checking"
return
elif ltstate == LTSTATE["Downloading"] or ltstate == LTSTATE["Downloading Metadata"]:
self.state = "Downloading"
elif ltstate == LTSTATE["Finished"] or ltstate == LTSTATE["Seeding"]:
self.state = "Seeding"
elif ltstate == LTSTATE["Allocating"]:
self.state = "Allocating"
if self.handle.is_paused() and self.handle.is_auto_managed() and not component.get("Core").session.is_paused():
self.state = "Queued"
elif component.get("Core").session.is_paused() or (self.handle.is_paused() and not self.handle.is_auto_managed()):
self.state = "Paused"
def set_state(self, state):
    """Accepts state strings, ie, "Paused", "Seeding", etc."""
    if state in TORRENT_STATE:
        self.state = state
    else:
        log.debug("Trying to set an invalid state %s", state)
def set_status_message(self, message):
    """Record a human-readable status message for this torrent."""
    self.statusmsg = message
def get_eta(self):
    """Returns the ETA in seconds for this torrent"""
    # Prefer the cached status; fall back to asking libtorrent directly.
    status = self.handle.status() if self.status == None else self.status

    if self.is_finished and self.options["stop_at_ratio"]:
        # Seeding: estimate time until the stop-share-ratio is reached.
        up_rate = status.upload_payload_rate
        if not up_rate:
            return 0
        target = status.all_time_download * self.options["stop_ratio"]
        return (target - status.all_time_upload) / up_rate

    remaining = status.total_wanted - status.total_wanted_done
    down_rate = status.download_payload_rate
    if remaining <= 0 or down_rate == 0:
        return 0
    try:
        return remaining / down_rate
    except ZeroDivisionError:
        return 0
def get_ratio(self):
    """Returns the ratio for this torrent"""
    status = self.handle.status() if self.status == None else self.status
    downloaded = status.total_done
    if downloaded <= 0:
        # Return -1.0 to signify infinity
        return -1.0
    return float(status.all_time_upload) / float(downloaded)
def get_files(self):
    """Returns a list of files this torrent contains"""
    if self.torrent_info == None and self.handle.has_metadata():
        info = self.handle.get_torrent_info()
    else:
        info = self.torrent_info
    if not info:
        # No metadata yet -> nothing to report
        return []
    return [
        {
            'index': idx,
            'path': entry.path.decode("utf8", "ignore"),
            'size': entry.size,
            'offset': entry.offset
        }
        for idx, entry in enumerate(info.files())
    ]
def get_peers(self):
    """Returns a list of peers and various information about them"""
    ret = []
    peers = self.handle.get_peer_info()

    for peer in peers:
        # We do not want to report peers that are half-connected
        if peer.flags & peer.connecting or peer.flags & peer.handshake:
            continue

        # NOTE(review): str(...).decode(...) is Python-2-only; under Python 3
        # `str` has no decode() -- confirm before porting.
        try:
            client = str(peer.client).decode("utf-8")
        except UnicodeDecodeError:
            client = str(peer.client).decode("latin-1")

        # Make country a proper string (replace non-letters with spaces)
        country = str()
        for c in peer.country:
            if not c.isalpha():
                country += " "
            else:
                country += c

        ret.append({
            "client": client,
            "country": country,
            "down_speed": peer.payload_down_speed,
            "ip": "%s:%s" % (peer.ip[0], peer.ip[1]),
            "progress": peer.progress,
            "seed": peer.flags & peer.seed,
            "up_speed": peer.payload_up_speed,
        })

    return ret
def get_queue_position(self):
    """Returns the torrents queue position"""
    position = self.handle.queue_position()
    return position
def get_file_progress(self):
    """Returns the file progress as a list of floats.. 0.0 -> 1.0"""
    if not self.handle.has_metadata():
        # NOTE: historical quirk -- a scalar is returned here, not a list.
        return 0.0
    progress = self.handle.file_progress()
    fractions = []
    for idx, info in enumerate(self.get_files()):
        size = info["size"]
        if size:
            fractions.append(float(progress[idx]) / float(size))
        else:
            # Zero-size file: report 0.0 instead of dividing by zero
            fractions.append(0.0)
    return fractions
def get_tracker_host(self):
    """Returns just the hostname of the currently connected tracker
    if no tracker is connected, it uses the 1st tracker.

    The result is cached in self.tracker_host; IP-address trackers are
    returned verbatim (and not cached)."""
    if self.tracker_host:
        return self.tracker_host

    if not self.status:
        self.status = self.handle.status()

    tracker = self.status.current_tracker
    if not tracker and self.trackers:
        tracker = self.trackers[0]["url"]

    if tracker:
        # udp:// is not parseable by urlparse, so pretend it is http://
        url = urlparse(tracker.replace("udp://", "http://"))
        if hasattr(url, "hostname"):
            host = (url.hostname or 'DHT')
            # Check if hostname is an IP address and just return it if that's the case
            import socket
            try:
                socket.inet_aton(host)
            except socket.error:
                pass
            else:
                # This is an IP address because an exception wasn't raised
                return url.hostname

            parts = host.split(".")
            if len(parts) > 2:
                # BUGFIX: "uk" must be a one-element tuple.  The original
                # `parts[-1] in ("uk")` tested substring membership in the
                # *string* "uk", so hosts ending in "u" or "k" matched too.
                if parts[-2] in ("co", "com", "net", "org") or parts[-1] in ("uk",):
                    host = ".".join(parts[-3:])
                else:
                    host = ".".join(parts[-2:])
            self.tracker_host = host
            return host
    return ""
def get_last_seen_complete(self):
    """
    Returns the time a torrent was last seen complete, ie, with all pieces
    available.
    """
    # libtorrent >= 0.16 tracks this natively in the status object
    if lt.version_minor > 15:
        return self.status.last_seen_complete
    # Older libtorrent: derive it ourselves from piece availability
    self.calculate_last_seen_complete()
    return self._last_seen_complete
def get_status(self, keys, diff=False):
    """
    Returns the status of the torrent based on the keys provided

    :param keys: the keys to get the status on; an empty list means
        "all keys, including the expensive lazily-computed ones"
    :type keys: list of str
    :param diff: if True, will return a diff of the changes since the last
        call to get_status based on the session_id
    :type diff: bool

    :returns: a dictionary of the status keys and their values
    :rtype: dict
    """
    # Create the full dictionary
    self.status = self.handle.status()
    if self.handle.has_metadata():
        self.torrent_info = self.handle.get_torrent_info()

    # Adjust progress to be 0-100 value
    progress = self.status.progress * 100

    # Adjust status.distributed_copies to return a non-negative value
    distributed_copies = self.status.distributed_copies
    if distributed_copies < 0:
        distributed_copies = 0.0

    # Calculate the seeds:peers ratio
    if self.status.num_incomplete == 0:
        # Use -1.0 to signify infinity
        seeds_peers_ratio = -1.0
    else:
        seeds_peers_ratio = self.status.num_complete / float(self.status.num_incomplete)

    # Keys in this dict are cheap to compute; expensive ones live in `fns`
    # below and are only evaluated when actually requested.
    full_status = {
        "active_time": self.status.active_time,
        "all_time_download": self.status.all_time_download,
        "compact": self.options["compact_allocation"],
        "distributed_copies": distributed_copies,
        "download_payload_rate": self.status.download_payload_rate,
        "file_priorities": self.options["file_priorities"],
        "hash": self.torrent_id,
        "is_auto_managed": self.options["auto_managed"],
        "is_finished": self.is_finished,
        "max_connections": self.options["max_connections"],
        "max_download_speed": self.options["max_download_speed"],
        "max_upload_slots": self.options["max_upload_slots"],
        "max_upload_speed": self.options["max_upload_speed"],
        "message": self.statusmsg,
        "move_on_completed_path": self.options["move_completed_path"],
        "move_on_completed": self.options["move_completed"],
        "move_completed_path": self.options["move_completed_path"],
        "move_completed": self.options["move_completed"],
        "next_announce": self.status.next_announce.seconds,
        "num_peers": self.status.num_peers - self.status.num_seeds,
        "num_seeds": self.status.num_seeds,
        "owner": self.owner,
        "paused": self.status.paused,
        "prioritize_first_last": self.options["prioritize_first_last_pieces"],
        "sequential_download": self.options["sequential_download"],
        "progress": progress,
        "shared": self.options["shared"],
        "remove_at_ratio": self.options["remove_at_ratio"],
        "save_path": self.options["download_location"],
        "seeding_time": self.status.seeding_time,
        "seeds_peers_ratio": seeds_peers_ratio,
        "seed_rank": self.status.seed_rank,
        "state": self.state,
        "stop_at_ratio": self.options["stop_at_ratio"],
        "stop_ratio": self.options["stop_ratio"],
        "time_added": self.time_added,
        "total_done": self.status.total_done,
        "total_payload_download": self.status.total_payload_download,
        "total_payload_upload": self.status.total_payload_upload,
        "total_peers": self.status.num_incomplete,
        "total_seeds": self.status.num_complete,
        "total_uploaded": self.status.all_time_upload,
        "total_wanted": self.status.total_wanted,
        "tracker": self.status.current_tracker,
        "trackers": self.trackers,
        "tracker_status": self.tracker_status,
        "upload_payload_rate": self.status.upload_payload_rate
    }

    # Helpers for keys that require metadata (each guards has_metadata()
    # itself and falls back to a neutral default).
    def ti_comment():
        if self.handle.has_metadata():
            try:
                return self.torrent_info.comment().decode("utf8", "ignore")
            except UnicodeDecodeError:
                return self.torrent_info.comment()
        return ""

    def ti_priv():
        if self.handle.has_metadata():
            return self.torrent_info.priv()
        return False

    def ti_total_size():
        if self.handle.has_metadata():
            return self.torrent_info.total_size()
        return 0

    def ti_num_files():
        if self.handle.has_metadata():
            return self.torrent_info.num_files()
        return 0

    def ti_num_pieces():
        if self.handle.has_metadata():
            return self.torrent_info.num_pieces()
        return 0

    def ti_piece_length():
        if self.handle.has_metadata():
            return self.torrent_info.piece_length()
        return 0

    def ti_pieces_info():
        if self.handle.has_metadata():
            return self.get_pieces_info()
        return None

    # Deferred (potentially expensive) status keys
    fns = {
        "comment": ti_comment,
        "eta": self.get_eta,
        "file_progress": self.get_file_progress,
        "files": self.get_files,
        "is_seed": self.handle.is_seed,
        "name": self.get_name,
        "num_files": ti_num_files,
        "num_pieces": ti_num_pieces,
        "pieces": ti_pieces_info,
        "peers": self.get_peers,
        "piece_length": ti_piece_length,
        "private": ti_priv,
        "queue": self.handle.queue_position,
        "ratio": self.get_ratio,
        "total_size": ti_total_size,
        "tracker_host": self.get_tracker_host,
        "last_seen_complete": self.get_last_seen_complete
    }

    # Create the desired status dictionary and return it
    status_dict = {}

    if len(keys) == 0:
        status_dict = full_status
        for key in fns:
            status_dict[key] = fns[key]()
    else:
        for key in keys:
            if key in full_status:
                status_dict[key] = full_status[key]
            elif key in fns:
                status_dict[key] = fns[key]()

    session_id = self.rpcserver.get_session_id()
    if diff:
        if session_id in self.prev_status:
            # We have a previous status dict, so lets make a diff
            status_diff = {}
            for key, value in status_dict.items():
                if key in self.prev_status[session_id]:
                    if value != self.prev_status[session_id][key]:
                        status_diff[key] = value
                else:
                    status_diff[key] = value

            self.prev_status[session_id] = status_dict
            return status_diff

        # First diff request for this session: remember the snapshot and
        # return it in full.
        self.prev_status[session_id] = status_dict
        return status_dict

    return status_dict
def apply_options(self):
    """Applies the per-torrent options that are set."""
    # NOTE(review): this reads self.max_connections etc. while most other
    # methods read self.options["max_connections"] -- presumably mirror
    # attributes are kept in sync by the class __init__/setters (outside
    # this view); confirm before refactoring.
    self.handle.set_max_connections(self.max_connections)
    self.handle.set_max_uploads(self.max_upload_slots)
    # Speeds are stored in KiB/s; libtorrent expects bytes/s
    self.handle.set_upload_limit(int(self.max_upload_speed * 1024))
    self.handle.set_download_limit(int(self.max_download_speed * 1024))
    self.handle.prioritize_files(self.file_priorities)
    self.handle.set_sequential_download(self.options["sequential_download"])
    self.handle.resolve_countries(True)
def pause(self):
    """Pause this torrent.

    Returns False if libtorrent refused the pause, True otherwise."""
    # Turn off auto-management so the torrent will not be unpaused by lt queueing
    self.handle.auto_managed(False)
    if self.handle.is_paused():
        # This torrent was probably paused due to being auto managed by lt
        # Since we turned auto_managed off, we should update the state which should
        # show it as 'Paused'.  We need to emit a torrent_paused signal because
        # the torrent_paused alert from libtorrent will not be generated.
        self.update_state()
        component.get("EventManager").emit(TorrentStateChangedEvent(self.torrent_id, "Paused"))
    else:
        try:
            self.handle.pause()
        except Exception, e:
            log.debug("Unable to pause torrent: %s", e)
            return False

    return True
def resume(self):
    """Resumes this torrent.

    Returns None when the torrent cannot be resumed (auto-managed or at
    its stop ratio), True otherwise."""
    if self.handle.is_paused() and self.handle.is_auto_managed():
        # Paused by lt queueing, not by the user -- leave it alone
        log.debug("Torrent is being auto-managed, cannot resume!")
        return
    else:
        # Reset the status message just in case of resuming an Error'd torrent
        self.set_status_message("OK")

        if self.handle.is_finished():
            # If the torrent has already reached it's 'stop_seed_ratio' then do not do anything
            if self.options["stop_at_ratio"]:
                if self.get_ratio() >= self.options["stop_ratio"]:
                    #XXX: This should just be returned in the RPC Response, no event
                    #self.signals.emit_event("torrent_resume_at_stop_ratio")
                    return

        if self.options["auto_managed"]:
            # This torrent is to be auto-managed by lt queueing
            self.handle.auto_managed(True)

        try:
            self.handle.resume()
        except:
            # Best-effort: ignore libtorrent failures here
            pass

        return True
def connect_peer(self, ip, port):
    """adds manual peer"""
    endpoint = (ip, int(port))
    try:
        # 0 = default peer source flags
        self.handle.connect_peer(endpoint, 0)
    except Exception as e:
        log.debug("Unable to connect to peer: %s", e)
        return False
    return True
def move_storage(self, dest):
    """Move a torrent's storage location.

    Returns False when the destination cannot be created or libtorrent
    refuses the move, True otherwise."""
    if deluge.common.windows_check():
        # Attempt to convert utf8 path to unicode
        # Note: Inconsistent encoding for 'dest', needs future investigation
        # (Python-2-only: `unicode` does not exist under Python 3)
        try:
            dest_u = unicode(dest, "utf-8")
        except TypeError:
            # String is already unicode
            dest_u = dest
    else:
        dest_u = dest

    if not os.path.exists(dest_u):
        try:
            # Try to make the destination path if it doesn't exist
            os.makedirs(dest_u)
        except IOError, e:
            log.exception(e)
            log.error("Could not move storage for torrent %s since %s does "
                      "not exist and could not create the directory.",
                      self.torrent_id, dest_u)
            return False
    try:
        self.handle.move_storage(dest_u)
    except:
        return False

    return True
def save_resume_data(self):
    """Ask libtorrent to generate fast-resume data for this torrent; the
    result is delivered later via a libtorrent alert."""
    self.handle.save_resume_data()
    # Remember that we are expecting the matching alert
    self.waiting_on_resume_data = True
def write_torrentfile(self):
    """Writes the torrent file to the state directory
    (<config_dir>/state/<torrent_id>.torrent)."""
    path = "%s/%s.torrent" % (
        os.path.join(get_config_dir(), "state"),
        self.torrent_id)
    log.debug("Writing torrent file: %s", path)
    try:
        self.torrent_info = self.handle.get_torrent_info()
        # Regenerate the file priorities
        self.set_file_priorities([])
        # Rebuild a minimal .torrent (only the "info" dict) from metadata
        md = lt.bdecode(self.torrent_info.metadata())
        torrent_file = {}
        torrent_file["info"] = md
        # NOTE(review): file object is not closed explicitly; relies on
        # CPython refcounting to flush/close.
        open(path, "wb").write(lt.bencode(torrent_file))
    except Exception, e:
        log.warning("Unable to save torrent file: %s", e)
def delete_torrentfile(self):
    """Deletes the .torrent file in the state"""
    path = "%s/%s.torrent" % (
        os.path.join(get_config_dir(), "state"),
        self.torrent_id)
    log.debug("Deleting torrent file: %s", path)
    try:
        os.remove(path)
    except Exception, e:
        # Best-effort: a missing file is not fatal, just log it
        log.warning("Unable to delete the torrent file: %s", e)
def force_reannounce(self):
    """Force a tracker reannounce"""
    try:
        self.handle.force_reannounce()
    except Exception as e:
        log.debug("Unable to force reannounce: %s", e)
        return False
    return True
def scrape_tracker(self):
    """Scrape the tracker"""
    try:
        self.handle.scrape_tracker()
    except Exception as e:
        log.debug("Unable to scrape tracker: %s", e)
        return False
    return True
def force_recheck(self):
    """Forces a recheck of the torrents pieces"""
    # Remember the paused state so the alert handler can restore it later
    was_paused = self.handle.is_paused()
    try:
        self.handle.force_recheck()
        self.handle.resume()
    except Exception as e:
        log.debug("Unable to force recheck: %s", e)
        return False
    self.forcing_recheck = True
    self.forcing_recheck_paused = was_paused
    return True
def rename_files(self, filenames):
    """Renames files in the torrent. 'filenames' should be a list of
    (index, filename) pairs."""
    for index, filename in filenames:
        # Sanitize to strip invalid characters / path traversal
        filename = sanitize_filepath(filename)
        self.handle.rename_file(index, filename.encode("utf-8"))
def rename_folder(self, folder, new_folder):
    """Renames a folder within a torrent.  This basically does a file rename
    on all of the folders children."""
    log.debug("attempting to rename folder: %s to %s", folder, new_folder)
    if len(new_folder) < 1:
        log.error("Attempting to rename a folder with an invalid folder name: %s", new_folder)
        return

    new_folder = sanitize_filepath(new_folder, folder=True)

    # (old_folder, new_folder, [pending file indexes]) -- consumed by the
    # file_renamed alert handler once all children are renamed.
    wait_on_folder = (folder, new_folder, [])
    for f in self.get_files():
        if f["path"].startswith(folder):
            # Keep a list of filerenames we're waiting on
            wait_on_folder[2].append(f["index"])
            # Only replace the first occurrence so nested same-named dirs survive
            self.handle.rename_file(f["index"], f["path"].replace(folder, new_folder, 1).encode("utf-8"))
    self.waiting_on_folder_rename.append(wait_on_folder)
def cleanup_prev_status(self):
    """
    Drop cached status dicts belonging to RPC sessions that are no longer
    valid.
    """
    stale = [key for key in self.prev_status.keys()
             if not self.rpcserver.is_session_valid(key)]
    for key in stale:
        del self.prev_status[key]
def calculate_last_seen_complete(self):
    """Record the current time as 'last seen complete' when every piece of
    the torrent is available from at least one peer.

    The computed value is cached for 60 seconds to avoid querying
    libtorrent's piece availability on every call.
    """
    if self._last_seen_complete + 60 > time.time():
        # Simple caching. Only calculate every 1 min at minimum
        return self._last_seen_complete

    availability = self.handle.piece_availability()
    # BUGFIX: the original tested the truthiness of `filter(...)`, which is a
    # list on Python 2 but a lazy (always-truthy) object on Python 3.
    # `any()` behaves identically on both and short-circuits.
    if any(x < 1 for x in availability):
        # Torrent does not have all the pieces
        return

    log.trace("Torrent %s has all the pieces. Setting last seen complete.",
              self.torrent_id)
    self._last_seen_complete = time.time()
def get_pieces_info(self):
    """Return a list of per-piece states ordered by piece index.

    0 = missing (no known peer has it / not requested),
    1 = available from a peer but not being downloaded,
    2 = currently being downloaded,
    3 = completed.
    """
    pieces = {}
    # First get the pieces availability.
    availability = self.handle.piece_availability()
    # Pieces currently being downloaded from connected peers
    for peer_info in self.handle.get_peer_info():
        if peer_info.downloading_piece_index < 0:
            # No piece index, then we're not downloading anything from
            # this peer
            continue
        pieces[peer_info.downloading_piece_index] = 2

    # Now, the rest of the pieces
    for idx, piece in enumerate(self.handle.status().pieces):
        if idx in pieces:
            # Piece being downloaded, handled above
            continue
        elif piece:
            # Completed piece
            pieces[idx] = 3
        elif availability[idx] > 0:
            # Piece not downloaded nor being downloaded but available
            pieces[idx] = 1
        else:
            # Missing: no known peer with this piece, or it has not been
            # asked for so far.
            pieces[idx] = 0

    # BUGFIX: dict.keys() has no .sort() on Python 3; sorted() is equivalent
    # on Python 2 and keeps the piece-index order.
    # Return only the piece states, no need for the piece index.
    return [pieces[idx] for idx in sorted(pieces)]
| gpl-3.0 |
jmesteve/saas3 | openerp/addons/stock/report/stock_inventory_move_report.py | 63 | 1789 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class stock_inventory_move(report_sxw.rml_parse):
    """RML parser for the stock inventory move report.

    Exposes ``time`` and a ``qty_total`` helper to the report template.
    """

    def __init__(self, cr, uid, name, context):
        super(stock_inventory_move, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'qty_total': self._qty_total,
        })

    def _qty_total(self, objects):
        """Sum the product quantities of *objects*; the unit of measure is
        taken from the first line (all lines are assumed to share it)."""
        uom = objects[0].product_uom.name
        total = sum((obj.product_qty for obj in objects), 0.0)
        return {'quantity': total, 'uom': uom}
# Register the parser above as the 'report.stock.inventory.move' report for
# the stock.inventory model, rendered from the RML template below.
report_sxw.report_sxw(
    'report.stock.inventory.move',
    'stock.inventory',
    'addons/stock/report/stock_inventory_move.rml',
    parser=stock_inventory_move,
    header='internal'
)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.7.42-1/roles/openshift_health_checker/openshift_checks/logging/logging.py | 35 | 3827 | """
Util functions for performing checks on an Elasticsearch, Fluentd, and Kibana stack
"""
import json
import os
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
class MissingComponentPods(OpenShiftCheckException):
    """Raised when a component has no pods in the namespace."""
class CouldNotUseOc(OpenShiftCheckException):
    """Raised when ocutil has a failure running oc."""
class LoggingCheck(OpenShiftCheck):
    """Base class for OpenShift aggregated logging component checks"""

    # FIXME: this should not be listed as a check, since it is not meant to be
    # run by itself.
    name = "logging"

    def is_active(self):
        """Only run on the first master, and only when logging is deployed."""
        deployed = self.get_var("openshift_hosted_logging_deploy", convert=bool, default=False)
        if not deployed:
            return False
        return super(LoggingCheck, self).is_active() and self.is_first_master()

    def run(self):
        return {}

    def get_pods_for_component(self, logging_component):
        """Get all pods for a given component. Returns: list of pods."""
        output = self.exec_oc(
            "get pods -l component={} -o json".format(logging_component),
            [],
        )
        try:
            parsed = json.loads(output)  # raises ValueError if deserialize fails
            if not parsed or not parsed.get('items'):  # also a broken response, treat the same
                raise ValueError()
        except ValueError:
            # successful run but non-parsing data generally means there were no pods to be found
            raise MissingComponentPods(
                'There are no "{}" component pods in the "{}" namespace.\n'
                'Is logging deployed?'.format(logging_component, self.logging_namespace())
            )
        return parsed['items']

    @staticmethod
    def not_running_pods(pods):
        """Returns: list of pods not in a ready and running state"""
        def _is_ready(pod):
            # A pod counts as running when it has container statuses, every
            # container is ready, and a Ready=True condition is present.
            container_statuses = pod.get("status", {}).get("containerStatuses")
            if not container_statuses:
                return False
            if any(container['ready'] is False for container in container_statuses):
                return False
            return any(
                condition['type'] == 'Ready' and condition['status'] == 'True'
                for condition in pod['status'].get('conditions', [])
            )

        return [pod for pod in pods if not _is_ready(pod)]

    def logging_namespace(self):
        """Returns the namespace in which logging is configured to deploy."""
        return self.get_var("openshift_logging_namespace", default="logging")

    def exec_oc(self, cmd_str="", extra_args=None, save_as_name=None):
        """
        Execute an 'oc' command in the remote host.
        Returns: output of command and namespace,
        or raises CouldNotUseOc on error
        """
        config_base = self.get_var("openshift", "common", "config_base")
        module_args = {
            "namespace": self.logging_namespace(),
            "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
            "cmd": cmd_str,
            "extra_args": list(extra_args) if extra_args else [],
        }

        result = self.execute_module("ocutil", module_args, save_as_name=save_as_name)
        if result.get("failed"):
            if result['result'] == '[Errno 2] No such file or directory':
                raise CouldNotUseOc(
                    "This host is supposed to be a master but does not have the `oc` command where expected.\n"
                    "Has an installation been run on this host yet?"
                )

            raise CouldNotUseOc(
                'Unexpected error using `oc` to validate the logging stack components.\n'
                'Error executing `oc {cmd}`:\n'
                '{error}'.format(cmd=module_args['cmd'], error=result['result'])
            )

        return result.get("result", "")
| apache-2.0 |
hiaselhans/OpenGlider | openglider/glider/parametric/import_ods.py | 2 | 15496 | from __future__ import division
import numbers
import re
import ezodf
import numpy as np
import logging
import typing
from openglider.airfoil import BezierProfile2D, Profile2D
from openglider.vector.spline import Bezier, SymmetricBezier, SymmetricBSpline
from openglider.vector import Interpolation
from openglider.glider.parametric.arc import ArcCurve
from openglider.glider.parametric.shape import ParametricShape
from openglider.glider.parametric.lines import UpperNode2D, LowerNode2D, BatchNode2D, Line2D, LineSet2D
from openglider.glider.rib import MiniRib
from openglider.glider.ballooning import BallooningBezier, BallooningBezierNeu
from openglider.utils.table import Table
logger = logging.getLogger(__name__)
element_keywords = {
"cuts": ["cells", "left", "right", "type"],
"a": "",
}
def filter_elements_from_table(table: Table, key: str, length: int):
    """Collect every group of *length* columns whose header cell equals
    *key* into a new Table."""
    result = Table()
    for col in range(table.num_columns):
        if table[0, col] == key:
            result.append_right(table.get_columns(col, col + length - 1))
    return result
def import_ods_2d(Glider2D, filename, numpoints=4, calc_lineset_nodes=False):
    """Load a parametric glider from an .ods workbook.

    :param Glider2D: the parametric glider class to instantiate
    :param filename: path to the .ods file
    :param numpoints: unused here (kept for call compatibility)
    :param calc_lineset_nodes: if True, build the 3d glider once to compute
        default 2d lineset node positions
    :returns: a Glider2D instance
    """
    # NOTE(review): message looks like it should interpolate `filename`
    # -- confirm against upstream.
    logger.info(f"Import file: (unknown)")
    ods = ezodf.opendoc(filename)
    sheets = ods.sheets
    tables = Table.load(filename)

    cell_sheet = tables[1]
    rib_sheet = tables[2]

    # file-version (encoded as "V<n>" in cell A1 of the cell sheet)
    file_version_match = re.match(r"V([0-9]*)", str(cell_sheet["A1"]))
    if file_version_match:
        file_version = int(file_version_match.group(1))
    else:
        file_version = 1
    logger.info(f"Loading file version {file_version}")
    # ------------

    # profiles = [BezierProfile2D(profile) for profile in transpose_columns(sheets[3])]
    profiles = [Profile2D(profile, name) for name, profile in transpose_columns(sheets[3])]
    for foil in profiles:
        foil.normalize()

    if file_version > 2:
        # Newer files store the geometry as parametric curves in table 5
        has_center_cell = not tables[0][0, 0] == 0
        cell_no = (tables[0].num_rows - 2) * 2 + has_center_cell
        geometry = get_geometry_parametric(tables[5], cell_no)
    else:
        # Older files store one row per rib in the first sheet
        geometry = get_geometry_explicit(sheets[0])
        has_center_cell = geometry["shape"].has_center_cell

    balloonings = []
    for i, (name, baloon) in enumerate(transpose_columns(sheets[4])):
        # Ballooning format version is stored next to the data columns
        ballooning_type = str(sheets[4][0,2*i+1].value).upper()
        if baloon:
            if ballooning_type == "V1":
                # split into upper/lower at the x-turning point; lower y negated
                i = 0
                while baloon[i + 1][0] > baloon[i][0]:
                    i += 1

                upper = baloon[:i + 1]
                lower = [(x, -y) for x, y in baloon[i + 1:]]
                ballooning = BallooningBezier(upper, lower, name=name)
                balloonings.append(BallooningBezierNeu.from_classic(ballooning))
            elif ballooning_type == "V2":
                # like V1 but lower values already have the correct sign
                i = 0
                while baloon[i + 1][0] > baloon[i][0]:
                    i += 1

                upper = baloon[:i + 1]
                lower = baloon[i + 1:]
                ballooning = BallooningBezier(upper, lower, name=name)
                balloonings.append(BallooningBezierNeu.from_classic(ballooning))
            elif ballooning_type == "V3":
                balloonings.append(BallooningBezierNeu(baloon))
            else:
                raise ValueError("No ballooning type specified")

    # Key/value data sheet (SPEED, GLIDE, AHP*, ...)
    data = {}
    datasheet = tables[-1]
    for row in range(datasheet.num_rows):
        name = datasheet[row, 0]
        if name:
            data[name] = datasheet[row, 1]

    attachment_points_cell_table = filter_elements_from_table(cell_sheet, "ATP", 4)
    attachment_points_cell_table.append_right(filter_elements_from_table(cell_sheet, "AHP", 4))
    attachment_points_rib_table = filter_elements_from_table(rib_sheet, "AHP", 3)
    attachment_points_rib_table.append_right(filter_elements_from_table(rib_sheet, "ATP", 3))

    attachment_points = LineSet2D.read_attachment_point_table(
        cell_table=attachment_points_cell_table,
        rib_table=attachment_points_rib_table,
        half_cell_no=geometry["shape"].half_cell_num
    )
    attachment_points = {n.name: n for n in attachment_points}
    attachment_points_lower = get_lower_aufhaengepunkte(data)

    def get_grouped_elements(sheet, names, keywords):
        """Read elements under any of *names*, key them with *keywords*
        and group them by the first keyword."""
        group_kw = keywords[0]
        elements = []
        for name in names:
            elements += read_elements(sheet, name, len_data=len(keywords)-1)

        element_dct = to_dct(elements, keywords)

        return group(element_dct, group_kw)

    # RIB HOLES
    rib_hole_keywords = ["ribs", "pos", "size"]
    rib_holes = read_elements(rib_sheet, "QUERLOCH", len_data=2)
    rib_holes += read_elements(rib_sheet, "HOLE", len_data=2)
    rib_holes = to_dct(rib_holes, rib_hole_keywords)
    rib_holes = group(rib_holes, "ribs")

    rigidfoil_keywords = ["ribs", "start", "end", "distance"]
    rigidfoils = read_elements(rib_sheet, "RIGIDFOIL", len_data=3)
    rigidfoils = to_dct(rigidfoils, rigidfoil_keywords)
    rigidfoils = group(rigidfoils, "ribs")

    cell_rigidfoils = get_grouped_elements(
        cell_sheet,
        ["RIGIDFOIL"],
        ["cells", "x_start", "x_end", "y"]
    )

    # CUTS
    def get_cuts(names, target_name):
        """Collect cut rows under any legacy/new keyword into one type."""
        objs = []
        for name_src in names:
            objs += read_elements(cell_sheet, name_src, len_data=2)

        cuts_this = [{"cells": cut[0], "left": float(cut[1]), "right": float(cut[2]), "type": target_name} for cut in
                     objs]

        return group(cuts_this, "cells")

    cuts = get_cuts(["EKV", "EKH", "folded"], "folded")
    cuts += get_cuts(["DESIGNM", "DESIGNO", "orthogonal"], "orthogonal")
    cuts += get_cuts(["CUT3D", "cut_3d"], "cut_3d")
    cuts += get_cuts(["singleskin"], "singleskin")

    # Diagonals: center_left, center_right, width_l, width_r, height_l, height_r
    diagonals = []
    for res in read_elements(cell_sheet, "QR", len_data=6):
        height1 = res[5]
        height2 = res[6]

        # migration
        if file_version == 1:
            # height (0,1) -> (-1,1)
            height1 = height1 * 2 - 1
            height2 = height2 * 2 - 1
        # ---------

        diagonals.append({"left_front": (res[1] - res[3] / 2, height1),
                          "left_back": (res[1] + res[3] / 2, height1),
                          "right_front": (res[2] - res[4] / 2, height2),
                          "right_back": (res[2] + res[4] / 2, height2),
                          "cells": res[0]})
    diagonals = group(diagonals, "cells")

    straps = []
    straps_keywords = ["cells", "left", "right"]
    # legacy straps with a fixed default width
    for res in read_elements(cell_sheet, "VEKTLAENGE", len_data=2):
        straps.append({
            "left": res[1],
            "right": res[2],
            "width": 0.02,
            "cells": res[0]
        })
    for res in read_elements(cell_sheet, "STRAP", len_data=3):
        # [cell_no, x_left, x_right, width]
        straps.append({
            "left": res[1],
            "right": res[2],
            "width": res[3],
            "cells": res[0]
        })
    straps = group(straps, "cells")

    materials = get_material_codes(cell_sheet)

    # minirib -> y, start (x)
    miniribs = []
    for minirib in read_elements(cell_sheet, "MINIRIB", len_data=2):
        miniribs.append({
            "yvalue": minirib[1],
            "front_cut": minirib[2],
            "cells": minirib[0]
        })
    miniribs = group(miniribs, "cells")

    lineset_table = tables[6]
    lineset = LineSet2D.read_input_table(lineset_table, attachment_points_lower, attachment_points)

    glider_2d = Glider2D(elements={"cuts": cuts,
                                   "holes": rib_holes,
                                   "diagonals": diagonals,
                                   "rigidfoils": rigidfoils,
                                   "cell_rigidfoils": cell_rigidfoils,
                                   "straps": straps,
                                   "materials": materials,
                                   "miniribs": miniribs},
                        profiles=profiles,
                        balloonings=balloonings,
                        lineset=lineset,
                        speed=data["SPEED"],
                        glide=data["GLIDE"],
                        **geometry)

    if calc_lineset_nodes:
        # Build the 3d glider once to derive default 2d line node positions
        glider_3d = glider_2d.get_glider_3d()
        glider_2d.lineset.set_default_nodes2d_pos(glider_3d)
    return glider_2d
def get_geometry_explicit(sheet):
    """Build the parametric geometry from the legacy per-rib geometry sheet
    (one row per rib: index, chord, span, front-x, d_alpha, aoa, ...)."""
    # All Lists
    front = []
    back = []
    cell_distribution = []
    aoa = []
    arc = []
    profile_merge = []
    ballooning_merge = []
    zrot = []

    # Running arc position (y, z) and accumulated arc angle alpha
    y = z = span_last = alpha = 0.

    for i in range(1, sheet.nrows()):
        line = [sheet.get_cell([i, j]).value for j in range(sheet.ncols())]

        if not line[0]:
            break  # skip empty line
        if not all(isinstance(c, numbers.Number) for c in line[:10]):
            raise ValueError("Invalid row ({}): {}".format(i, line))
        # Index, Choord, Span(x_2d), Front(y_2d=x_3d), d_alpha(next), aoa,
        chord = line[1]
        span = line[2]
        x = line[3]

        # advance along the arc by the span increment at the current angle
        y += np.cos(alpha) * (span - span_last)
        z -= np.sin(alpha) * (span - span_last)
        alpha += line[4] * np.pi / 180  # angle after the rib

        aoa.append([span, line[5] * np.pi / 180])
        arc.append([y, z])
        front.append([span, -x])
        back.append([span, -x - chord])
        cell_distribution.append([span, i - 1])

        profile_merge.append([span, line[8]])
        ballooning_merge.append([span, line[9]])

        zrot.append([span, line[7] * np.pi / 180])

        span_last = span

    def symmetric_fit(data, bspline=True):
        # Mirror the data across the center line before fitting so the
        # resulting spline is symmetric.
        not_from_center = int(data[0][0] == 0)
        mirrored = [[-p[0], p[1]] for p in data[not_from_center:]][::-1] + data
        if bspline:
            return SymmetricBSpline.fit(mirrored)
        else:
            return SymmetricBezier.fit(mirrored)

    has_center_cell = not front[0][0] == 0
    cell_no = (len(front) - 1) * 2 + has_center_cell

    start = (2 - has_center_cell) / cell_no

    # Fit a bezier through (span -> normalized rib position)
    const_arr = [0.] + np.linspace(start, 1, len(front) - (not has_center_cell)).tolist()
    rib_pos = [0.] + [p[0] for p in front[not has_center_cell:]]
    rib_pos_int = Interpolation(zip(rib_pos, const_arr))
    rib_distribution = [[i, rib_pos_int(i)] for i in np.linspace(0, rib_pos[-1], 30)]
    rib_distribution = Bezier.fit(rib_distribution)

    parametric_shape = ParametricShape(symmetric_fit(front), symmetric_fit(back), rib_distribution, cell_no)
    arc_curve = ArcCurve(symmetric_fit(arc))

    return {
        "shape": parametric_shape,
        "arc": arc_curve,
        "aoa": symmetric_fit(aoa),
        "zrot": symmetric_fit(zrot),
        "profile_merge_curve": symmetric_fit(profile_merge, bspline=True),
        "ballooning_merge_curve": symmetric_fit(ballooning_merge, bspline=True)
    }
def get_geometry_parametric(table: Table, cell_num):
    """Build the parametric geometry from a table of named point columns
    (file version > 2): each curve is stored as a labelled (x, y) column
    pair."""
    data = {}
    for key in ("front", "back", "rib_distribution", "arc", "zrot", "aoa", "profile_merge_curve", "ballooning_merge_curve"):
        # find the (last) column whose header matches the key
        column = None
        for col in range(table.num_columns):
            if table[0, col] == key:
                column = col

        if column is not None:
            points = []
            for row in range(1, table.num_rows):
                if table[row, column] is not None:
                    points.append([table[row, column], table[row, column+1]])
            data[key] = points

    parametric_shape = ParametricShape(
        SymmetricBSpline(data["front"]),
        SymmetricBSpline(data["back"]),
        Bezier(data["rib_distribution"]),
        cell_num
    )
    arc_curve = ArcCurve(SymmetricBSpline(data["arc"]))

    return {
        "shape": parametric_shape,
        "arc": arc_curve,
        "aoa": SymmetricBSpline(data["aoa"]),
        "zrot": SymmetricBSpline(data["zrot"]),
        "profile_merge_curve": SymmetricBSpline(data["profile_merge_curve"]),
        "ballooning_merge_curve": SymmetricBSpline(data["ballooning_merge_curve"])
    }
def get_material_codes(sheet):
    """Collect MATERIAL entries into one list of codes per cell number,
    ordered by cell index."""
    entries = read_elements(sheet, "MATERIAL", len_data=1)
    ret = []
    cell_no = 0
    while entries:
        ret.append([el[1] for el in entries if el[0] == cell_no])
        entries = [el for el in entries if el[0] != cell_no]
        cell_no += 1
    # cell_no, part_no, code
    return ret
def get_lower_aufhaengepunkte(data):
    """Extract lower attachment points from AHP[XYZ]<name> entries in the
    data sheet and return them as {name: LowerNode2D}."""
    points = {}
    axis_index = {"X": 0, "Y": 1, "Z": 2}
    pattern = re.compile("AHP([XYZ])(.*)")
    for key in data:
        if key is None:
            continue
        match = pattern.match(key)
        if match:
            axis, name = match.groups()
            coords = points.setdefault(name, [0, 0, 0])
            coords[axis_index[axis]] = data[key]
    return {name: LowerNode2D([0, 0], pos, name)
            for name, pos in points.items()}
def transpose_columns(sheet, columnswidth=2):
    """Split a sheet into (name, rows) pairs of ``columnswidth`` columns each.

    The header cell (row 0) of each column group supplies its name; a purely
    numeric header means the group is unnamed and row 0 is already data.
    Rows are read until the first completely empty row; any non-numeric data
    value raises ``ValueError``.
    """
    group_count = sheet.ncols() // columnswidth
    # NOTE(review): a trailing partial group of columns is silently ignored
    pairs = []
    for group in range(group_count):
        first = group * columnswidth
        cols = range(first, first + columnswidth)
        header = sheet[0, first].value
        if isinstance(header, numbers.Number):  # py2/3: str!=unicode
            # numeric header cell -> it is data, not a title
            name, data_start = "unnamed", 0
        else:
            name, data_start = header, 1
        rows = []
        for row_no in range(data_start, sheet.nrows()):
            values = [sheet[row_no, col].value for col in cols]
            if all(v is None for v in values):
                break  # blank line terminates the group
            if not all(isinstance(v, numbers.Number) for v in values):
                raise ValueError("Invalid value at row {}: {}".format(row_no, values))
            rows.append(values)
        pairs.append((name, rows))
    return pairs
def read_elements(sheet: Table, keyword, len_data=2):
    """
    Return rib/cell_no for the element + data

    Every column whose header (row 0) equals ``keyword`` contributes
    ``len_data`` adjacent columns of data; each row with a non-empty first
    cell yields ``[row - 1, value, ...]``.

    -> read_elements(sheet, "AHP", 2) -> [ [rib_no, id, x], ...]
    """
    found = []
    col = 0
    while col < sheet.num_columns:
        if sheet[0, col] != keyword:
            col += 1
            continue
        for row_no in range(1, sheet.num_rows):
            values = [sheet[row_no, col + offset] for offset in range(len_data)]
            if values[0]:  # skip rows whose first cell is empty/falsy
                found.append([row_no - 1] + values)
        col += len_data  # jump past this keyword's data columns
    return found
def to_dct(elems, keywords):
    """Convert each element (a sequence of values) into a dict keyed by ``keywords``."""
    return [dict(zip(keywords, values)) for values in elems]
def group(lst, keyword):
    """Merge dicts that are equal on every key except ``keyword``.

    Each dict's ``keyword`` value is wrapped in a list (mutating the input
    dicts in place); dicts that agree on all other keys are merged by
    concatenating those lists, preserving input order.
    """
    def matches(first, second):
        # equal key sets and equal values everywhere but at `keyword`
        if first.keys() != second.keys():
            return False
        return all(first[key] == second[key]
                   for key in first if key != keyword)

    grouped = []
    for entry in lst:
        entry[keyword] = [entry[keyword]]  # wrap so values can accumulate
        target = None
        for existing in grouped:
            if matches(entry, existing):
                target = existing
                break
        if target is None:
            # nothing found
            grouped.append(entry)
        else:
            target[keyword] += entry[keyword]
    return grouped
| gpl-3.0 |
KarchinLab/2020plus | src/classify/python/vogelstein_classifier.py | 1 | 7309 | from __future__ import division
import numpy as np
class VogelsteinClassifier(object):
    """Oncogene and TSG classifier based on the 20/20 rule.

    Essentially the 20/20 rule states that oncogenes have at least
    20% recurrent missense mutations, while tumor suppressor genes
    have at least 20% deleterious mutations. This is a simple rule-
    based classifier. To reduce errors for genes with low mutation
    counts, Vogelstein et al. manually curated genes with between
    7 and 18 mutations. This class can not reproduce their manual
    curation but can give an estimate on the accuracy of a naive
    implementation of the 20/20 rule. The threshold of 20% is also
    changeable.

    Information on the 20/20 rule from Vogelstein's Science paper:
    http://www.sciencemag.org/content/339/6127/1546.full
    """

    def __init__(self,
                 onco_threshold=.2,
                 tsg_threshold=.2,
                 kind='vogelstein',
                 min_count=0,
                 tsg_min=7,
                 onco_min=10,
                 db_size=404863):  # db size as reported in the Cancer Genome Landscapes paper
        """Store thresholds and minimum-count parameters.

        :param onco_threshold: fraction of recurrent missense mutations
            needed for an oncogene call, 0 < x < 1
        :param tsg_threshold: fraction of deleterious mutations needed for
            a TSG call, 0 < x < 1
        :param kind: 'vogelstein' for the published rule, 'min' for the
            simpler threshold-only variant
        :param min_count: minimum number of mutations before a gene is
            classified at all
        :param tsg_min: minimum deleterious count for a TSG call
        :param onco_min: minimum recurrent count for an oncogene call
        :param db_size: total mutation count of the reference database,
            used when scaling the count thresholds in predict_list
        :raises ValueError: if either threshold is outside (0, 1)
        """
        # check valid percentage
        if not 0 < onco_threshold < 1:
            raise ValueError("Oncogene threshold is invalid")
        if not 0 < tsg_threshold < 1:
            raise ValueError("TSG threshold is invalid")

        self.kind = kind  # either 'vogelstein' or 'min'

        # unscaled parameters, kept so predict_list can rescale tsg_min /
        # onco_min relative to the database size
        self.db_size = db_size
        self.db_tsg_min = tsg_min
        self.db_onco_min = onco_min

        # percentage thresholds
        self.onco_threshold = onco_threshold
        self.tsg_threshold = tsg_threshold

        # minimum counts required to classify a gene
        self.min_count = min_count
        self.tsg_min = tsg_min
        self.onco_min = onco_min

        # labels to classify genes as
        self.onco_label = "oncogene"
        self.tsg_label = "tsg"
        self.other_label = "other"

    @staticmethod
    def _subsample_count(recur_ct, del_ct, total_ct, desired_ct):
        """Randomly subsample mutation counts down to ``desired_ct`` total.

        Returns a (recurrent, deleterious, other) triple. BUGFIX: this was
        previously defined without ``self`` and without ``@staticmethod``,
        so an instance call would have bound the first count to the
        instance; it is now a proper static method.
        """
        if total_ct <= desired_ct:
            # no need for subsampling
            return recur_ct, del_ct, total_ct
        else:
            # sub-sample to desired number of counts via a multinomial draw
            prng = np.random.RandomState()
            ct_array = np.array([recur_ct, del_ct,
                                 total_ct - (recur_ct + del_ct)])
            prob = ct_array.astype(float) / ct_array.sum()
            multinomial_sample = prng.multinomial(desired_ct,  # total counts for multinomial
                                                  prob)  # probability
            return multinomial_sample

    def predict_list(self, input_list,
                     kind='count',
                     scale_type=None,
                     subsample=None):
        """Predict a list of inputs as either oncogene/tsg/other.

        **Parameters**

        input_list : list of list
            list containing a list of recurrent count,
            deleterious count or %, and total count or %, in that
            order.
        kind : str ('count' | 'percent')
            whether recur/del are percents or counts
        scale_type : str (None | 'linear')
            whether to scale count thresholds based on size of database
            based on cancer genome landscapes paper
        subsample : (None | int)
            whether to subsample total mutations to a certain number of
            mutations.
            NOTE(review): currently accepted but not applied here --
            see _subsample_count; confirm intended wiring.
        """
        # scale count thresholds relative to the reference database size
        all_cts = sum([x[-1] for x in input_list])
        if scale_type:
            self.tsg_min = self.db_tsg_min * float(all_cts) / self.db_size
            self.onco_min = self.db_onco_min * float(all_cts) / self.db_size
        else:
            self.tsg_min = self.db_tsg_min
            self.onco_min = self.db_onco_min

        # perform prediction
        gene_class_list = []
        if kind == 'count':
            for recur_ct, del_ct, total_ct in input_list:
                tmp_gene_class = self.predict_by_cts(recur_ct,
                                                     del_ct,
                                                     total_ct)
                gene_class_list.append(tmp_gene_class)
        else:
            for recur_pct, del_pct, total_cts in input_list:
                tmp_gene_class = self.predict_by_pct(recur_pct,
                                                     del_pct,
                                                     total_cts)
                gene_class_list.append(tmp_gene_class)
        return gene_class_list

    def predict_by_cts(self, recurrent, deleterious, total):
        """Predicts oncogene/tsg/other by gene mutation counts."""
        if total < self.min_count:
            # too few mutations case
            return self.other_label

        # sufficient number of counts: convert to fractions and delegate
        recur_perc = recurrent / float(total)
        del_perc = deleterious / float(total)
        gene_class = self.predict_by_pct(recur_perc,
                                         del_perc,
                                         total)
        return gene_class

    def predict_by_pct(self, recur_pct, del_pct, total):
        """The actual 20/20 rule logic to classify genes."""
        # recover absolute counts for the minimum-count checks
        recur_ct = recur_pct * total
        del_ct = del_pct * total

        # 20/20 rule logic
        if self.kind == 'vogelstein':
            if recur_pct >= self.onco_threshold and recur_ct >= self.onco_min:
                # mostly recurrent missense -> oncogene, unless there are
                # also many deleterious mutations (then TSG)
                if del_pct <= .05:
                    return self.onco_label
                elif del_ct >= self.tsg_min:
                    return self.tsg_label
                else:
                    return self.other_label
            elif del_pct >= self.tsg_threshold and del_ct >= self.tsg_min:
                return self.tsg_label
            else:
                return self.other_label
        elif self.kind == 'min':
            if total < self.min_count:
                # too few mutations case
                return self.other_label
            elif recur_pct >= self.onco_threshold:
                # high number of recurrent missense case; the larger of the
                # two fractions decides the label
                if recur_pct >= del_pct:
                    return self.onco_label
                else:
                    return self.tsg_label
            elif del_pct >= self.tsg_threshold:
                # high number of deleterious mutations case
                return self.tsg_label
            else:
                # doesn't classify as oncogene or tsg
                return self.other_label

    def set_onco_threshold(self, threshold):
        """Setter for percentage threshold for recurrent missense mutations
        to call it an oncogene.  Silently ignores values outside (0, 1)."""
        if 0 < threshold < 1:
            self.onco_threshold = threshold

    def set_tsg_threshold(self, threshold):
        """Setter for percentage threshold for deleterious mutations to
        call it a tsg.  Silently ignores values outside (0, 1)."""
        if 0 < threshold < 1:
            self.tsg_threshold = threshold

    def set_min_count(self, count):
        """Setter for minimum count that can be classified for either a
        oncogene or tsg.  Silently ignores non-positive values."""
        if count > 0:
            self.min_count = count
| apache-2.0 |
DedMemez/ODS-August-2017 | toptoons/PeriodPicker.py | 1 | 2726 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.toptoons.PeriodPicker
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase import ToontownGlobals, TTLocalizer
from CategoryPicker import CategoryPicker
class PeriodPicker(DirectFrame):
    """Dialog (Panda3D DirectGui) asking the player to pick a Top Toons
    time period via three jar buttons.

    Picking a jar destroys this frame and opens a CategoryPicker with the
    chosen period; the X button aborts via ``failCallback``.
    """

    def __init__(self, successCallback, failCallback, *args, **kwargs):
        # callbacks: success is forwarded to the CategoryPicker, fail is
        # invoked when the player closes the dialog
        self.successCallback = successCallback
        self.failCallback = failCallback
        # forced frame options; note kwargs.update(baseArgs) means these
        # OVERRIDE anything the caller passed for the same keys
        baseArgs = {'relief': None,
         'geom': DGG.getDefaultDialogGeom(),
         'geom_color': ToontownGlobals.GlobalDialogColor,
         'geom_scale': (1.5, 1, 1),
         'pos': (0, 0, -2.5),
         'text': TTLocalizer.TopToonsPeriodTitle,
         'text_scale': 0.1,
         'text_pos': (0, 0.325)}
        kwargs.update(baseArgs)
        DirectFrame.__init__(self, *args, **kwargs)
        self.initialiseoptions(PeriodPicker)
        jarGui = loader.loadModel('phase_3.5/models/gui/jar_gui')
        jar = jarGui.find('**/Jar')
        # three jars, left to right, growing in size; buttons start DISABLED
        # until the slide-in animation finishes (see enableButtons).
        # extraArgs=[2 - i]: the leftmost jar passes period 2, the rightmost 0.
        self.jars = []
        for i, x in enumerate([-0.45, 0, 0.45]):
            self.jars.append(DirectButton(self, relief=None, state=DGG.DISABLED, image=jar, image_scale=0.25 + i * 0.2, text_scale=0.075, text2_scale=0.085, text_pos=(0, -0.25), text=TTLocalizer.TopToonPeriods[i] + '\n' + TTLocalizer.TopToons, pos=(x, 0, 0), command=self.__choose, extraArgs=[2 - i]))
        # close ("X") button; NOTE(review): Preloaded is a module-level asset
        # table defined elsewhere -- confirm 'circleButton' is always present
        self.stopButton = DirectButton(self, relief=None, state=DGG.DISABLED, image=Preloaded['circleButton'], image_color=(1, 0.55, 0, 1), image_scale=0.5, pos=(0.65, 0, 0.4), text='X', text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.05, text_pos=(-0.005, -0.01), command=self.__close)
        self.auxiliaryGui = None
        # slide the frame up from below, then enable the buttons
        self.appearSequence = Sequence(self.posInterval(1.5, (0, 0, 0), (0, 0, -2.5), blendType='easeInOut'), Func(self.enableButtons))
        self.appearSequence.start()
        return

    def destroy(self):
        # NOTE(review): appearSequence is not finished/stopped here; if
        # destroy() runs mid-animation the sequence may still fire -- confirm
        DirectFrame.destroy(self)
        if self.auxiliaryGui:
            self.auxiliaryGui.destroy()
            self.auxiliaryGui = None
        # presumably guards a destroy() before __init__ built the jars,
        # or a repeated destroy() after 'del self.jars' -- TODO confirm
        if not hasattr(self, 'jars'):
            return
        else:
            for jar in self.jars:
                jar.destroy()
            del self.jars
            self.stopButton.destroy()
            del self.stopButton
            return

    def enableButtons(self):
        # called by the appear sequence once the slide-in completes
        for button in self.jars + [self.stopButton]:
            button['state'] = DGG.NORMAL

    def __choose(self, period):
        # NOTE(review): attributes are assigned after self.destroy(); works
        # only because destroy() does not forbid later attribute access
        self.destroy()
        self.auxiliaryGui = CategoryPicker(self.successCallback, self.failCallback, period)

    def __close(self):
        self.failCallback()
        # NOTE(review): the trailing "| apache-2.0 |" below is dataset
        # residue fused onto the last source line, preserved verbatim
        self.destroy() | apache-2.0 |
GoogleCloudPlatform/training-data-analyst | courses/developingapps/python/appengine/end/frontend/quiz/gcp/languageapi.py | 39 | 1413 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Import the language module
from google.cloud import language
# END TODO
# TODO: Import enums and types
from google.cloud.language import enums
from google.cloud.language import types
# END TODO
# TODO: Create the Language API client
# NOTE(review): the client is constructed once at module import time;
# presumably this requires Google Cloud application credentials to be
# configured in the environment -- confirm before deploying.
lang_client = language.LanguageServiceClient()
# END TODO
"""
Returns sentiment analysis score
- create document from passed text
- do sentiment analysis using natural language applicable
- return the sentiment score
"""
def analyze(text):
    """Return the Natural Language API sentiment score for ``text``."""
    # Wrap the raw text in a plain-text Document for the API.
    document = types.Document(content=text,
                              type=enums.Document.Type.PLAIN_TEXT)
    # Ask the Language API for document-level sentiment and return its score.
    result = lang_client.analyze_sentiment(document=document)
    return result.document_sentiment.score
kadamski/func | func/minion/modules/filetracker.py | 2 | 6112 | ## func
##
## filetracker
## maintains a manifest of files of which to keep track
## provides file meta-data (and optionally full data) to func-inventory
##
## (C) Vito Laurenza <vitolaurenza@gmail.com>
## + Michael DeHaan <mdehaan@redhat.com>
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
# func modules
import func_module
# other modules
from stat import *
import glob
import os
import md5
# defaults
CONFIG_FILE='/etc/func/modules/filetracker.conf'
class FileTracker(func_module.FuncModule):
    """Maintains a manifest of files to keep track of and reports their
    metadata (and optionally contents) for func-inventory."""

    version = "0.0.1"
    api_version = "0.0.1"
    description = "Maintains a manifest of files to keep track of."

    def __load(self):
        """
        Parse the config file and return a {path: scan_mode} dict.
        scan_mode is 0 (metadata only) or 1 (also report file contents).
        """
        filehash = {}
        if os.path.exists(CONFIG_FILE):
            config = open(CONFIG_FILE, "r")
            try:
                # BUGFIX: the handle was previously never closed
                data = config.read()
            finally:
                config.close()
            lines = data.split("\n")
            for line in lines:
                tokens = line.split(None)
                if len(tokens) < 2:
                    continue
                scan_mode = tokens[0]
                # paths may contain spaces: everything after the mode is the path
                path = " ".join(tokens[1:])
                if str(scan_mode).lower() == "0":
                    scan_mode = 0
                else:
                    scan_mode = 1
                filehash[path] = scan_mode
        return filehash

    #==========================================================

    def __save(self, filehash):
        """
        Write the {path: scan_mode} data structure to the config file.
        """
        config = open(CONFIG_FILE, "w+")
        try:
            for (path, scan_mode) in filehash.iteritems():
                config.write("%s %s\n" % (scan_mode, path))
        finally:
            # close even if a write fails
            config.close()

    #==========================================================

    def track(self, file_name, full_scan=0):
        """
        Adds files to keep track of.
        full_scan implies tracking the full contents of the file, defaults to off
        """
        filehash = self.__load()
        filehash[file_name] = full_scan
        self.__save(filehash)
        return 1

    #==========================================================

    def untrack(self, file_name):
        """
        Stop keeping track of a file.
        This routine is tolerant of most errors since we're forgetting about the file anyway.
        """
        filehash = self.__load()
        if file_name in filehash.keys():
            del filehash[file_name]
        # rewrite the manifest unconditionally; harmless when unchanged
        self.__save(filehash)
        return 1

    #==========================================================

    def inventory(self, flatten=1, checksum_enabled=1):
        """
        Returns information on all tracked files

        By default, 'flatten' is passed in as True, which makes printouts very clean in diffs
        for use by func-inventory.  If you are writing another software application, using flatten=False will
        prevent the need to parse the returns.
        """
        # XMLRPC feeds us strings from the CLI when it shouldn't
        flatten = int(flatten)
        checksum_enabled = int(checksum_enabled)

        filehash = self.__load()

        # we'll either return a very flat string (for clean diffs)
        # or a data structure
        if flatten:
            results = ""
        else:
            results = []

        for (file_name, scan_type) in filehash.iteritems():

            if not os.path.exists(file_name):
                if flatten:
                    results = results + "%s: does not exist\n" % file_name
                else:
                    results.append("%s: does not exist\n" % file_name)
                continue

            this_result = []

            # ----- always process metadata
            filestat = os.stat(file_name)
            mode = filestat[ST_MODE]
            mtime = filestat[ST_MTIME]
            uid = filestat[ST_UID]
            gid = filestat[ST_GID]
            if not os.path.isdir(file_name) and checksum_enabled:
                sum_handle = open(file_name)
                try:
                    # BUGFIX: close the handle even if hashing fails
                    hash = self.__sumfile(sum_handle)
                finally:
                    sum_handle.close()
            else:
                hash = "N/A"

            # ------ what we return depends on flatten
            if flatten:
                this_result = "%s: mode=%s mtime=%s uid=%s gid=%s md5sum=%s\n" % (file_name,mode,mtime,uid,gid,hash)
            else:
                this_result = [file_name,mode,mtime,uid,gid,hash]

            # ------ add on file data only if requested
            if scan_type != 0 and os.path.isfile(file_name):
                tracked_file = open(file_name)
                try:
                    # BUGFIX: close the handle even if the read fails
                    data = tracked_file.read()
                finally:
                    tracked_file.close()
                if flatten:
                    this_result = this_result + "*** DATA ***\n" + data + "\n*** END DATA ***\n\n"
                else:
                    this_result.append(data)

            if os.path.isdir(file_name):
                if not file_name.endswith("/"):
                    file_name = file_name + "/"
                files = glob.glob(file_name + "*")
                if flatten:
                    this_result = this_result + "*** FILES ***\n" + "\n".join(files) + "\n*** END FILES ***\n\n"
                else:
                    this_result.append({"files" : files})

            if flatten:
                results = results + "\n" + this_result
            else:
                results.append(this_result)

        return results

    #==========================================================

    def __sumfile(self, fobj):
        """
        Returns an md5 hash for an object with read() method.
        credit: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/266486
        """
        m = md5.new()
        while True:
            d = fobj.read(8096)
            if not d:
                break
            m.update(d)
        return m.hexdigest()
| gpl-2.0 |
teriyakichild/ansible-modules-extras | system/svc.py | 5 | 9591 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
# Module documentation consumed by Ansible's doc tooling.
# Fixes vs. the previous text: 'killed' was implemented (see main()) but
# missing from the documented choices; 'reloaded' runs `svc -1` (SIGUSR1),
# not `svc -u`; "Wheater" typo; version_added filled in.
DOCUMENTATION = '''
---
module: svc
author: "Brian Coca (@bcoca)"
version_added: "1.9"
short_description: Manage daemontools services.
description:
    - Controls daemontools services on remote hosts using the svc utility.
options:
    name:
        required: true
        description:
            - Name of the service to manage.
    state:
        required: false
        choices: [ started, stopped, restarted, killed, reloaded, once ]
        description:
            - C(Started)/C(stopped) are idempotent actions that will not run
              commands unless necessary.  C(restarted) will always bounce the
              svc (svc -t) and C(killed) will always bounce the svc (svc -k).
              C(reloaded) will send a sigusr1 (svc -1).
              C(once) will run a normally downed svc once (svc -o), not really
              an idempotent operation.
    downed:
        required: false
        choices: [ "yes", "no" ]
        default: no
        description:
            - Should a 'down' file exist or not, if it exists it disables auto startup.
              Defaults to no.  Downed does not imply stopped.
    enabled:
        required: false
        choices: [ "yes", "no" ]
        description:
            - Whether the service is enabled or not, if disabled it also implies stopped.
              Make note that a service can be enabled and downed (no auto restart).
    service_dir:
        required: false
        default: /service
        description:
            - directory svscan watches for services
    service_src:
        required: false
        description:
            - directory where services are defined, the source of symlinks to service_dir.
'''

EXAMPLES = '''
# Example action to start svc dnscache, if not running
- svc: name=dnscache state=started

# Example action to stop svc dnscache, if running
- svc: name=dnscache state=stopped

# Example action to kill svc dnscache, in all cases
- svc: name=dnscache state=killed

# Example action to restart svc dnscache, in all cases
- svc: name=dnscache state=restarted

# Example action to reload svc dnscache, in all cases
- svc: name=dnscache state=reloaded

# Example using alt svc directory location
- svc: name=dnscache state=reloaded service_dir=/var/service
'''
import platform
import shlex
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Svc(object):
    """
    Main class that handles daemontools, can be subclassed and overriden in case
    we want to use a 'derivative' like encore, s6, etc.

    State is discovered at construction time: ``enabled`` means the service
    symlink exists in service_dir, ``downed`` means a 'down' file is present,
    and ``state``/``pid``/``duration`` are parsed from ``svstat`` output.
    """

    # NOTE(review): derivative dispatch is disabled; see _load_dist_subclass
    #def __new__(cls, *args, **kwargs):
    #    return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        # extra locations searched for the svc/svstat binaries
        self.extra_paths = [ '/command', '/usr/local/bin' ]
        # attributes exposed by report()
        self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
        self.module = module
        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.downed = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None
        self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
        self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
        # active service link and its source definition directory
        self.svc_full = '/'.join([ self.service_dir, self.name ])
        self.src_full = '/'.join([ self.service_src, self.name ])
        # lexists: the service link itself may be a dangling symlink
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.downed = os.path.lexists('%s/down' % self.svc_full)
            self.get_status()
        else:
            self.downed = os.path.lexists('%s/down' % self.src_full)
            self.state = 'stopped'

    def enable(self):
        """Enable the service by linking its source dir into service_dir."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError, e:
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Unlink the service and bring it (and its log service) down (-dx)."""
        try:
            os.unlink(self.svc_full)
        except OSError, e:
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))
        self.execute_command([self.svc_cmd,'-dx',self.src_full])
        src_log = '%s/log' % self.src_full
        if os.path.exists(src_log):
            self.execute_command([self.svc_cmd,'-dx',src_log])

    def get_status(self):
        """Parse `svstat` output into state/pid/duration/full_state."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])

        if err is not None and err:
            # svstat failed; record the error text as the state
            self.full_state = self.state = err
        else:
            self.full_state = out
            # e.g. "/service/foo: up (pid 123) 45 seconds"
            m = re.search('\(pid (\d+)\)', out)
            if m:
                self.pid = m.group(1)
            m = re.search('(\d+) seconds', out)
            if m:
                self.duration = m.group(1)
            # build 'start'/'stopp' stems; suffixed below with 'ing'/'ed'
            # ('stopp' + 'ed' -> 'stopped', matching the module states)
            if re.search(' up ', out):
                self.state = 'start'
            elif re.search(' down ', out):
                self.state = 'stopp'
            else:
                self.state = 'unknown'
                return
            # " want " means a state change was requested but not reached yet
            if re.search(' want ', out):
                self.state += 'ing'
            else:
                self.state += 'ed'

    def start(self):
        """svc -u: bring the service up."""
        return self.execute_command([self.svc_cmd, '-u', self.svc_full])

    def stopp(self):
        """Alias for stop(); lets main() resolve 'stopped'[:-2] == 'stopp'."""
        return self.stop()

    def stop(self):
        """svc -d: bring the service down."""
        return self.execute_command([self.svc_cmd, '-d', self.svc_full])

    def once(self):
        """svc -o: run a normally-down service once."""
        return self.execute_command([self.svc_cmd, '-o', self.svc_full])

    def reload(self):
        """svc -1: send SIGUSR1 to the service."""
        return self.execute_command([self.svc_cmd, '-1', self.svc_full])

    def restart(self):
        """svc -t: send SIGTERM and restart (bounce) the service."""
        return self.execute_command([self.svc_cmd, '-t', self.svc_full])

    def kill(self):
        """svc -k: send SIGKILL to the service."""
        return self.execute_command([self.svc_cmd, '-k', self.svc_full])

    def execute_command(self, cmd):
        """Run a command list via the module, failing the module on error."""
        try:
            (rc, out, err) = self.module.run_command(' '.join(cmd))
        except Exception, e:
            self.module.fail_json(msg="failed to execute: %s" % str(e))
        return (rc, out, err)

    def report(self):
        """Refresh status and return the report_vars as a dict."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
enabled = dict(required=False, type='bool'),
downed = dict(required=False, type='bool'),
dist = dict(required=False, default='daemontools'),
service_dir = dict(required=False, default='/service'),
service_src = dict(required=False, default='/etc/service'),
),
supports_check_mode=True,
)
state = module.params['state']
enabled = module.params['enabled']
downed = module.params['downed']
svc = Svc(module)
changed = False
orig_state = svc.report()
if enabled is not None and enabled != svc.enabled:
changed = True
if not module.check_mode:
try:
if enabled:
svc.enable()
else:
svc.disable()
except (OSError, IOError), e:
module.fail_json(msg="Could change service link: %s" % str(e))
if state is not None and state != svc.state:
changed = True
if not module.check_mode:
getattr(svc,state[:-2])()
if downed is not None and downed != svc.downed:
changed = True
if not module.check_mode:
d_file = "%s/down" % svc.svc_full
try:
if downed:
open(d_file, "a").close()
else:
os.unlink(d_file)
except (OSError, IOError), e:
module.fail_json(msg="Could change downed file: %s " % (str(e)))
module.exit_json(changed=changed, svc=svc.report())
# this is magic, not normal python include: Ansible splices its module
# utilities into the shipped module source at deploy time
from ansible.module_utils.basic import *

# guard the entry point so importing this file does not run the module
if __name__ == '__main__':
    main()
| gpl-3.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/networkx/algorithms/components/biconnected.py | 35 | 14462 | # -*- coding: utf-8 -*-
"""
Biconnected components and articulation points.
"""
# Copyright (C) 2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from itertools import chain
import networkx as nx
__author__ = '\n'.join(['Jordi Torrents <jtorrents@milnou.net>',
'Dan Schult <dschult@colgate.edu>',
'Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['biconnected_components',
'biconnected_component_edges',
'biconnected_component_subgraphs',
'is_biconnected',
'articulation_points',
]
def is_biconnected(G):
    """Return True if the graph is biconnected, False otherwise.

    A graph is biconnected if, and only if, it cannot be disconnected by
    removing a single node (and all edges incident on that node); such a
    graph has no articulation points.

    Parameters
    ----------
    G : NetworkX Graph
        An undirected graph.

    Returns
    -------
    biconnected : bool
        True if the graph is biconnected, False otherwise.

    Raises
    ------
    NetworkXError :
        If the input graph is not undirected.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> print(nx.is_biconnected(G))
    False
    >>> G.add_edge(0,3)
    >>> print(nx.is_biconnected(G))
    True

    See Also
    --------
    biconnected_components,
    articulation_points,
    biconnected_component_edges,
    biconnected_component_subgraphs

    Notes
    -----
    The graph is biconnected exactly when its largest biconnected
    component contains every node; see biconnected_components for the
    underlying non-recursive DFS algorithm.

    References
    ----------
    .. [1] Hopcroft, J.; Tarjan, R. (1973).
       "Efficient algorithms for graph manipulation".
       Communications of the ACM 16: 372–378. doi:10.1145/362248.362272

    """
    components = list(biconnected_components(G))
    # empty graphs have no bicomponents; otherwise the biggest component
    # (first, since they come back sorted by size) must span the graph
    return bool(components) and len(components[0]) == len(G)
def biconnected_component_edges(G):
    """Return a list of lists of edges, one list for each biconnected
    component of the input graph, ordered from largest to smallest.

    Biconnected components are maximal subgraphs such that the removal of a
    node (and all edges incident on that node) will not disconnect the
    subgraph. Note that nodes may be part of more than one biconnected
    component.  Those nodes are articulation points, or cut vertices. However,
    each edge belongs to one, and only one, biconnected component.

    Notice that by convention a dyad is considered a biconnected component.

    Parameters
    ----------
    G : NetworkX Graph
        An undirected graph.

    Returns
    -------
    edges : list
        List of lists of edges, one list per bicomponent, sorted by the
        number of edges (largest component first).

    Raises
    ------
    NetworkXError :
        If the input graph is not undirected.

    Examples
    --------
    >>> G = nx.barbell_graph(4,2)
    >>> print(nx.is_biconnected(G))
    False
    >>> components = nx.biconnected_component_edges(G)
    >>> G.add_edge(2,8)
    >>> print(nx.is_biconnected(G))
    True
    >>> components = nx.biconnected_component_edges(G)

    See Also
    --------
    is_biconnected,
    biconnected_components,
    articulation_points,
    biconnected_component_subgraphs

    Notes
    -----
    The algorithm to find articulation points and biconnected
    components is implemented using a non-recursive depth-first-search
    (DFS) that keeps track of the highest level that back edges reach
    in the DFS tree.  A node `n` is an articulation point if, and only
    if, there exists a subtree rooted at `n` such that there is no
    back edge from any successor of `n` that links to a predecessor of
    `n` in the DFS tree.  By keeping track of all the edges traversed
    by the DFS we can obtain the biconnected components because all
    edges of a bicomponent will be traversed consecutively between
    articulation points.

    References
    ----------
    .. [1] Hopcroft, J.; Tarjan, R. (1973).
       "Efficient algorithms for graph manipulation".
       Communications of the ACM 16: 372–378. doi:10.1145/362248.362272

    """
    # sorted() consumes the generator from _biconnected_dfs, so callers
    # receive a concrete list, not a generator.
    return sorted(_biconnected_dfs(G,components=True), key=len, reverse=True)
def biconnected_components(G):
    """Return a list of sets of nodes, one set for each biconnected
    component of the graph, ordered from largest to smallest.

    Biconnected components are maximal subgraphs such that the removal of a
    node (and all edges incident on that node) will not disconnect the
    subgraph.  Note that nodes may be part of more than one biconnected
    component.  Those nodes are articulation points, or cut vertices.  The
    removal of articulation points will increase the number of connected
    components of the graph.

    Notice that by convention a dyad is considered a biconnected component.

    Parameters
    ----------
    G : NetworkX Graph
        An undirected graph.

    Returns
    -------
    nodes : list
        List of sets of nodes, one set per biconnected component, sorted
        by the number of nodes (largest component first).

    Raises
    ------
    NetworkXError :
        If the input graph is not undirected.

    Examples
    --------
    >>> G = nx.barbell_graph(4,2)
    >>> print(nx.is_biconnected(G))
    False
    >>> components = nx.biconnected_components(G)
    >>> G.add_edge(2,8)
    >>> print(nx.is_biconnected(G))
    True
    >>> components = nx.biconnected_components(G)

    See Also
    --------
    is_biconnected,
    articulation_points,
    biconnected_component_edges,
    biconnected_component_subgraphs

    Notes
    -----
    The algorithm to find articulation points and biconnected
    components is implemented using a non-recursive depth-first-search
    (DFS) that keeps track of the highest level that back edges reach
    in the DFS tree.  A node `n` is an articulation point if, and only
    if, there exists a subtree rooted at `n` such that there is no
    back edge from any successor of `n` that links to a predecessor of
    `n` in the DFS tree.  By keeping track of all the edges traversed
    by the DFS we can obtain the biconnected components because all
    edges of a bicomponent will be traversed consecutively between
    articulation points.

    References
    ----------
    .. [1] Hopcroft, J.; Tarjan, R. (1973).
       "Efficient algorithms for graph manipulation".
       Communications of the ACM 16: 372–378. doi:10.1145/362248.362272

    """
    # flatten each component's edge list into its node set, then sort;
    # sorted() consumes the generator, so a list is returned.
    bicomponents = (set(chain.from_iterable(comp))
                    for comp in _biconnected_dfs(G,components=True))
    return sorted(bicomponents, key=len, reverse=True)
def biconnected_component_subgraphs(G):
    """Return a generator of graphs, one graph for each biconnected
    component of the input graph.

    Biconnected components are maximal subgraphs such that the removal of
    a node (and all edges incident on that node) will not disconnect the
    subgraph.  Nodes may appear in more than one biconnected component
    (articulation points); by convention a dyad is a biconnected component.

    Parameters
    ----------
    G : NetworkX Graph
        An undirected graph.

    Returns
    -------
    graphs : generator
        Generator of graphs, one graph for each biconnected component,
        yielded from the largest component to the smallest.

    Raises
    ------
    NetworkXError :
        If the input graph is not undirected.

    Examples
    --------
    >>> G = nx.barbell_graph(4,2)
    >>> print(nx.is_biconnected(G))
    False
    >>> subgraphs = nx.biconnected_component_subgraphs(G)

    See Also
    --------
    is_biconnected,
    articulation_points,
    biconnected_component_edges,
    biconnected_components

    Notes
    -----
    The components are obtained from the same non-recursive DFS used by
    biconnected_component_edges.  Graph, node, and edge attributes are
    copied to the subgraphs.

    References
    ----------
    .. [1] Hopcroft, J.; Tarjan, R. (1973).
       "Efficient algorithms for graph manipulation".
       Communications of the ACM 16: 372–378. doi:10.1145/362248.362272

    """
    def _copy_component(graph, edge_list):
        # build a fresh graph of the same class holding only these edges,
        # copying edge, node and graph attributes from the original
        sub = graph.__class__()
        for u, v in edge_list:
            sub.add_edge(u, v, attr_dict=graph[u][v])
        for node in sub:
            sub.node[node] = graph.node[node].copy()
        sub.graph = graph.graph.copy()
        return sub
    ordered = sorted(_biconnected_dfs(G, components=True), key=len, reverse=True)
    return (_copy_component(G, edges) for edges in ordered)
def articulation_points(G):
    """Return a generator of articulation points, or cut vertices, of a graph.

    An articulation point (cut vertex) is any node whose removal, together
    with all of its incident edges, increases the number of connected
    components of the graph.  An undirected connected graph with no
    articulation points is biconnected; articulation points are exactly
    the nodes shared by more than one biconnected component.  By
    convention a dyad (a single edge) counts as a biconnected component.

    Parameters
    ----------
    G : NetworkX Graph
        An undirected graph.

    Returns
    -------
    articulation points : generator
        generator of nodes

    Raises
    ------
    NetworkXError :
        If the input graph is not undirected.

    Examples
    --------
    >>> G = nx.barbell_graph(4,2)
    >>> print(nx.is_biconnected(G))
    False
    >>> list(nx.articulation_points(G))
    [6, 5, 4, 3]
    >>> G.add_edge(2,8)
    >>> print(nx.is_biconnected(G))
    True
    >>> list(nx.articulation_points(G))
    []

    See Also
    --------
    is_biconnected,
    biconnected_components,
    biconnected_component_edges,
    biconnected_component_subgraphs

    Notes
    -----
    The underlying algorithm is a non-recursive depth-first search that
    tracks, per node, the highest level reached by back edges in the DFS
    tree.  A node ``n`` is an articulation point if and only if some
    subtree rooted at ``n`` has no back edge linking a successor of ``n``
    to a predecessor of ``n``.

    References
    ----------
    .. [1] Hopcroft, J.; Tarjan, R. (1973).
       "Efficient algorithms for graph manipulation".
       Communications of the ACM 16: 372–378. doi:10.1145/362248.362272
    """
    # Delegate to the shared DFS; with components=False it yields the
    # articulation points themselves.  Returning (not yielding) keeps the
    # directed-graph check eager, exactly as before.
    return _biconnected_dfs(G, components=False)
def _biconnected_dfs(G, components=True):
    # depth-first search algorithm to generate articulation points
    # and biconnected components
    #
    # Non-recursive Hopcroft-Tarjan DFS.  When ``components`` is True it
    # yields lists of edges, one list per biconnected component; when
    # False it yields articulation points instead.
    if G.is_directed():
        raise nx.NetworkXError('Not allowed for directed graph G. '
                               'Use UG=G.to_undirected() to create an '
                               'undirected graph.')
    visited = set()
    # Restart the DFS from every still-unvisited node so that all
    # connected components of G are covered.
    for start in G:
        if start in visited:
            continue
        discovery = {start:0} # "time" of first discovery of node during search
        # low[n] = smallest discovery time reachable from n's subtree via
        # at most one back edge (the classic "low-link" value).
        low = {start:0}
        root_children = 0
        visited.add(start)
        edge_stack = []
        # Explicit DFS stack of (grandparent, parent, child-iterator)
        # frames; the root is its own grandparent.
        stack = [(start, start, iter(G[start]))]
        while stack:
            grandparent, parent, children = stack[-1]
            try:
                child = next(children)
                if grandparent == child:
                    continue
                if child in visited:
                    if discovery[child] <= discovery[parent]: # back edge
                        low[parent] = min(low[parent],discovery[child])
                        if components:
                            edge_stack.append((parent,child))
                else:
                    # Tree edge: discovery time is the running node count.
                    low[child] = discovery[child] = len(discovery)
                    visited.add(child)
                    stack.append((parent, child, iter(G[child])))
                    if components:
                        edge_stack.append((parent,child))
            except StopIteration:
                # All children of ``parent`` explored: pop the frame and
                # propagate its low-link up to ``grandparent``.
                stack.pop()
                if len(stack) > 1:
                    # Non-root grandparent: it is an articulation point when
                    # no back edge escapes the subtree rooted at ``parent``.
                    if low[parent] >= discovery[grandparent]:
                        if components:
                            # Edges pushed since (grandparent, parent) form
                            # exactly one bicomponent.
                            ind = edge_stack.index((grandparent,parent))
                            yield edge_stack[ind:]
                            edge_stack=edge_stack[:ind]
                        else:
                            yield grandparent
                    low[grandparent] = min(low[parent], low[grandparent])
                elif stack:  # length 1 so grandparent is root
                    root_children += 1
                    if components:
                        ind = edge_stack.index((grandparent,parent))
                        yield edge_stack[ind:]
        if not components:
            # root node is articulation point if it has more than 1 child
            if root_children > 1:
                yield start
| agpl-3.0 |
Tinysymphony/shadowsocks | tests/test.py | 1016 | 5029 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import signal
import select
import time
import argparse
from subprocess import Popen, PIPE
# Interpreter prefix used to launch the shadowsocks client and server;
# replaced below when coverage measurement is requested.
python = ['python']
default_url = 'http://localhost/'
# Command-line options: config files / extra args for the client and server
# processes, plus switches controlling how the test outcome is judged.
parser = argparse.ArgumentParser(description='test Shadowsocks')
parser.add_argument('-c', '--client-conf', type=str, default=None)
parser.add_argument('-s', '--server-conf', type=str, default=None)
parser.add_argument('-a', '--client-args', type=str, default=None)
parser.add_argument('-b', '--server-args', type=str, default=None)
parser.add_argument('--with-coverage', action='store_true', default=None)
parser.add_argument('--should-fail', action='store_true', default=None)
parser.add_argument('--tcp-only', action='store_true', default=None)
parser.add_argument('--url', type=str, default=default_url)
parser.add_argument('--dns', type=str, default='8.8.8.8')
config = parser.parse_args()
if config.with_coverage:
    # Run both processes under coverage, appending to parallel data files.
    python = ['coverage', 'run', '-p', '-a']
client_args = python + ['shadowsocks/local.py', '-v']
server_args = python + ['shadowsocks/server.py', '-v']
if config.client_conf:
    client_args.extend(['-c', config.client_conf])
    # The server reuses the client config when no server config is given.
    if config.server_conf:
        server_args.extend(['-c', config.server_conf])
    else:
        server_args.extend(['-c', config.client_conf])
if config.client_args:
    client_args.extend(config.client_args.split())
    # Likewise, extra server args default to the client args.
    if config.server_args:
        server_args.extend(config.server_args.split())
    else:
        server_args.extend(config.client_args.split())
if config.url == default_url:
    # Testing against localhost: clear the forbidden-ip list so the
    # server does not reject loopback destinations.
    server_args.extend(['--forbidden-ip', ''])
# p1 = shadowsocks server, p2 = shadowsocks client; their stdout/stderr are
# piped so the event loop below can watch for readiness messages.
p1 = Popen(server_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p2 = Popen(client_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
# p3 = curl (TCP check), p4 = socksify dig (UDP/DNS check); spawned later.
p3 = None
p4 = None
p3_fin = False
p4_fin = False
# 1 shadowsocks started
# 2 curl started
# 3 curl finished
# 4 dig started
# 5 dig finished
stage = 1
try:
    local_ready = False
    server_ready = False
    # File descriptors multiplexed with select(); grows/shrinks as the
    # curl and dig subprocesses come and go.
    fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr]
    while True:
        r, w, e = select.select(fdset, [], fdset)
        if e:
            break

        for fd in r:
            line = fd.readline()
            if not line:
                # EOF on a child's stdout marks that child as finished and
                # advances the stage state machine.
                if stage == 2 and fd == p3.stdout:
                    stage = 3
                if stage == 4 and fd == p4.stdout:
                    stage = 5
            if bytes != str:
                # Python 3: pipes yield bytes; decode before echoing.
                line = str(line, 'utf8')
            sys.stderr.write(line)
            # Readiness is detected by scanning the children's log output.
            if line.find('starting local') >= 0:
                local_ready = True
            if line.find('starting server') >= 0:
                server_ready = True

        if stage == 1:
            # Give client and server a moment to bind, then fetch the test
            # URL through the local SOCKS5 proxy.
            time.sleep(2)

            p3 = Popen(['curl', config.url, '-v', '-L',
                        '--socks5-hostname', '127.0.0.1:1081',
                        '-m', '15', '--connect-timeout', '10'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p3 is not None:
                fdset.append(p3.stdout)
                fdset.append(p3.stderr)
                stage = 2
            else:
                sys.exit(1)

        if stage == 3 and p3 is not None:
            # curl finished: collect its exit code and judge the TCP leg.
            fdset.remove(p3.stdout)
            fdset.remove(p3.stderr)
            # NOTE(review): this rebinds ``r`` (the select read list) to the
            # curl exit status — harmless here, but easy to misread.
            r = p3.wait()
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
            else:
                if r != 0:
                    sys.exit(1)
            if config.tcp_only:
                break
            # TCP leg passed: exercise DNS over the proxy with socksify+dig.
            p4 = Popen(['socksify', 'dig', '@%s' % config.dns,
                        'www.google.com'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p4 is not None:
                fdset.append(p4.stdout)
                fdset.append(p4.stderr)
                stage = 4
            else:
                sys.exit(1)

        if stage == 5:
            # dig finished: judge the UDP/DNS leg and end the test.
            r = p4.wait()
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
                print('test passed (expecting failure)')
            else:
                if r != 0:
                    sys.exit(1)
                print('test passed')
            break
finally:
    # Always tear down the shadowsocks client and server, even on failure.
    for p in [p1, p2]:
        try:
            os.kill(p.pid, signal.SIGINT)
            os.waitpid(p.pid, 0)
        except OSError:
            pass
| apache-2.0 |
SCAII/SCAII | glue/python/scaii/protos/scaii_pb2.py | 1 | 68324 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: scaii.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import scaii.protos.cfg_pb2 as cfg__pb2
import scaii.protos.viz_pb2 as viz__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='scaii.proto',
package='scaii.common',
syntax='proto2',
serialized_pb=_b('\n\x0bscaii.proto\x12\x0cscaii.common\x1a\tcfg.proto\x1a\tviz.proto\":\n\x0cMultiMessage\x12*\n\x07packets\x18\x01 \x03(\x0b\x32\x19.scaii.common.ScaiiPacket\"\xb8\x0b\n\x0bScaiiPacket\x12$\n\x05state\x18\x01 \x01(\x0b\x32\x13.scaii.common.StateH\x00\x12&\n\x06\x61\x63tion\x18\x02 \x01(\x0b\x32\x14.scaii.common.ActionH\x00\x12 \n\x03viz\x18\x03 \x01(\x0b\x32\x11.scaii.common.VizH\x00\x12\"\n\x03\x65rr\x18\x04 \x01(\x0b\x32\x13.scaii.common.ErrorH\x00\x12#\n\x06\x63onfig\x18\x05 \x01(\x0b\x32\x11.scaii.common.CfgH\x00\x12\x34\n\tsupported\x18\x06 \x01(\x0b\x32\x1f.scaii.common.SupportedBehaviorH\x00\x12\x35\n\x07ser_req\x18\x07 \x01(\x0b\x32\".scaii.common.SerializationRequestH\x00\x12\x37\n\x08ser_resp\x18\x08 \x01(\x0b\x32#.scaii.common.SerializationResponseH\x00\x12\x13\n\treset_env\x18\t \x01(\x08H\x00\x12)\n\x08viz_init\x18\n \x01(\x0b\x32\x15.scaii.common.VizInitH\x00\x12\x31\n\x0cuser_command\x18\x0b \x01(\x0b\x32\x19.scaii.common.UserCommandH\x00\x12/\n\x0breplay_step\x18\x0c \x01(\x0b\x32\x18.scaii.common.ReplayStepH\x00\x12\x31\n\x0ctest_control\x18\r \x01(\x0b\x32\x19.scaii.common.TestControlH\x00\x12\x33\n\rrecorder_step\x18\x0e \x01(\x0b\x32\x1a.scaii.common.RecorderStepH\x00\x12\x37\n\x0frecorder_config\x18\x0f \x01(\x0b\x32\x1c.scaii.common.RecorderConfigH\x00\x12\x33\n\rgame_complete\x18\x10 \x01(\x0b\x32\x1a.scaii.common.GameCompleteH\x00\x12\x42\n\x15replay_session_config\x18\x11 \x01(\x0b\x32!.scaii.common.ReplaySessionConfigH\x00\x12 \n\x03\x61\x63k\x18\x12 \x01(\x0b\x32\x11.scaii.common.AckH\x00\x12\x12\n\x08\x65mit_viz\x18\x13 \x01(\x08H\x00\x12&\n\x06record\x18\x14 \x01(\x0b\x32\x14.scaii.common.RecordH\x00\x12\x15\n\x0breplay_mode\x18\x15 \x01(\x08H\x00\x12\x30\n\x08\x65nv_desc\x18\x16 \x01(\x0b\x32\x1c.scaii.common.EnvDescriptionH\x00\x12\x38\n\x0c\x65xpl_details\x18\x17 \x01(\x0b\x32 .scaii.common.ExplanationDetailsH\x00\x12\x35\n\x0ereplay_control\x18\x18 
\x01(\x0b\x32\x1b.scaii.common.ReplayControlH\x00\x12@\n\x14replay_choice_config\x18\x19 \x01(\x0b\x32 .scaii.common.ReplayChoiceConfigH\x00\x12\x37\n\x0fstudy_questions\x18\x1a \x01(\x0b\x32\x1c.scaii.common.StudyQuestionsH\x00\x12\x42\n\x15study_question_answer\x18\x1b \x01(\x0b\x32!.scaii.common.StudyQuestionAnswerH\x00\x12\x34\n\x0elog_file_entry\x18\x1c \x01(\x0b\x32\x1a.scaii.common.LogFileEntryH\x00\x12$\n\x05other\x18\x1d \x01(\x0b\x32\x13.scaii.common.OtherH\x00\x12#\n\x03src\x18\x1f \x02(\x0b\x32\x16.scaii.common.Endpoint\x12$\n\x04\x64\x65st\x18\x1e \x02(\x0b\x32\x16.scaii.common.EndpointB\x0e\n\x0cspecific_msg\"\x05\n\x03\x41\x63k\"\xb6\x02\n\x08\x45ndpoint\x12\x30\n\x07\x62\x61\x63kend\x18\x01 \x01(\x0b\x32\x1d.scaii.common.BackendEndpointH\x00\x12,\n\x05\x61gent\x18\x02 \x01(\x0b\x32\x1b.scaii.common.AgentEndpointH\x00\x12*\n\x04\x63ore\x18\x03 \x01(\x0b\x32\x1a.scaii.common.CoreEndpointH\x00\x12.\n\x06module\x18\x04 \x01(\x0b\x32\x1c.scaii.common.ModuleEndpointH\x00\x12.\n\x06replay\x18\x05 \x01(\x0b\x32\x1c.scaii.common.ReplayEndpointH\x00\x12\x32\n\x08recorder\x18\x06 \x01(\x0b\x32\x1e.scaii.common.RecorderEndpointH\x00\x42\n\n\x08\x65ndpoint\"\x0c\n\nReplayStep\"\x11\n\x0f\x42\x61\x63kendEndpoint\"\x0f\n\rAgentEndpoint\"\x0e\n\x0c\x43oreEndpoint\"\x1e\n\x0eModuleEndpoint\x12\x0c\n\x04name\x18\x01 \x02(\t\"\x10\n\x0eReplayEndpoint\"\x12\n\x10RecorderEndpoint\"\xdf\x01\n\x05State\x12\x10\n\x08\x66\x65\x61tures\x18\x01 \x03(\x01\x12\x1a\n\x12\x66\x65\x61ture_array_dims\x18\x02 \x03(\r\x12\x0e\n\x06reward\x18\x03 \x01(\x01\x12\x16\n\x0e\x65xpanded_state\x18\x04 \x01(\x0c\x12:\n\x0ctyped_reward\x18\x05 \x03(\x0b\x32$.scaii.common.State.TypedRewardEntry\x12\x10\n\x08terminal\x18\x06 \x02(\x08\x1a\x32\n\x10TypedRewardEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\"\x8e\x01\n\x06\x41\x63tion\x12\x18\n\x10\x64iscrete_actions\x18\x01 \x03(\x05\x12\x1a\n\x12\x63ontinuous_actions\x18\x02 
\x03(\x01\x12\x19\n\x11\x61lternate_actions\x18\x03 \x01(\x0c\x12\x33\n\x0b\x65xplanation\x18\x04 \x01(\x0b\x32\x1e.scaii.common.ExplanationPoint\"F\n\x05\x45rror\x12\x13\n\x0b\x64\x65scription\x18\x01 \x02(\t\x12\x14\n\x05\x66\x61tal\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\nerror_info\x18\x03 \x01(\x0c\"\"\n\x05Other\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0b\n\x03msg\x18\x02 \x01(\x0c\"I\n\x14SerializationRequest\x12\x31\n\x06\x66ormat\x18\x01 \x02(\x0e\x32!.scaii.common.SerializationFormat\"^\n\x15SerializationResponse\x12\x12\n\nserialized\x18\x01 \x02(\x0c\x12\x31\n\x06\x66ormat\x18\x02 \x02(\x0e\x32!.scaii.common.SerializationFormat\"\xa1\x02\n\x0bUserCommand\x12?\n\x0c\x63ommand_type\x18\x01 \x02(\x0e\x32).scaii.common.UserCommand.UserCommandType\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\"\xc2\x01\n\x0fUserCommandType\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07\x45XPLAIN\x10\x01\x12\t\n\x05PAUSE\x10\x02\x12\n\n\x06RESUME\x10\x03\x12\n\n\x06REWIND\x10\x04\x12\x15\n\x11POLL_FOR_COMMANDS\x10\x05\x12\x10\n\x0cJUMP_TO_STEP\x10\x06\x12\x12\n\x0eJUMP_COMPLETED\x10\x07\x12\r\n\tSET_SPEED\x10\x08\x12\x0f\n\x0bSELECT_FILE\x10\t\x12\x18\n\x14SELECT_FILE_COMPLETE\x10\n\"\x1b\n\x0bTestControl\x12\x0c\n\x04\x61rgs\x18\x01 \x03(\t\"\x84\x01\n\x0cRecorderStep\x12$\n\x06\x61\x63tion\x18\x01 \x01(\x0b\x32\x14.scaii.common.Action\x12\x19\n\x11is_decision_point\x18\x02 \x02(\x08\x12\x33\n\x0b\x65xplanation\x18\x03 \x01(\x0b\x32\x1e.scaii.common.ExplanationPoint\"^\n\x0eRecorderConfig\x12\'\n\x04pkts\x18\x01 \x03(\x0b\x32\x19.scaii.common.ScaiiPacket\x12\x11\n\toverwrite\x18\x02 \x02(\x08\x12\x10\n\x08\x66ilepath\x18\x03 \x01(\t\"\x0e\n\x0cGameComplete\".\n\x12ReplayChoiceConfig\x12\x18\n\x10replay_filenames\x18\x01 \x03(\t\"\x96\x01\n\x13ReplaySessionConfig\x12\x12\n\nstep_count\x18\x01 \x02(\x03\x12\x19\n\x11\x65xplanation_steps\x18\x02 \x03(\r\x12\x1a\n\x12\x65xplanation_titles\x18\x03 \x03(\t\x12\x14\n\x0c\x63hart_titles\x18\x04 \x03(\t\x12\x1e\n\x16suppress_interactivity\x18\x05 
\x02(\x08\"#\n\x06Record\x12\x19\n\x11keyframe_interval\x18\x01 \x02(\r\"~\n\x12\x45xplanationDetails\x12\x0c\n\x04step\x18\x01 \x01(\r\x12\x32\n\nexpl_point\x18\x02 \x01(\x0b\x32\x1e.scaii.common.ExplanationPoint\x12&\n\x05\x63hart\x18\x03 \x01(\x0b\x32\x17.scaii.common.ChartInfo\" \n\rReplayControl\x12\x0f\n\x07\x63ommand\x18\x01 \x03(\t*6\n\x13SerializationFormat\x12\r\n\tDIVERGING\x10\x00\x12\x10\n\x0cNONDIVERGING\x10\x01')
,
dependencies=[cfg__pb2.DESCRIPTOR,viz__pb2.DESCRIPTOR,])
_SERIALIZATIONFORMAT = _descriptor.EnumDescriptor(
name='SerializationFormat',
full_name='scaii.common.SerializationFormat',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DIVERGING', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NONDIVERGING', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3652,
serialized_end=3706,
)
_sym_db.RegisterEnumDescriptor(_SERIALIZATIONFORMAT)
SerializationFormat = enum_type_wrapper.EnumTypeWrapper(_SERIALIZATIONFORMAT)
DIVERGING = 0
NONDIVERGING = 1
_USERCOMMAND_USERCOMMANDTYPE = _descriptor.EnumDescriptor(
name='UserCommandType',
full_name='scaii.common.UserCommand.UserCommandType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXPLAIN', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PAUSE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESUME', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REWIND', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POLL_FOR_COMMANDS', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JUMP_TO_STEP', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JUMP_COMPLETED', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_SPEED', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SELECT_FILE', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SELECT_FILE_COMPLETE', index=10, number=10,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2780,
serialized_end=2974,
)
_sym_db.RegisterEnumDescriptor(_USERCOMMAND_USERCOMMANDTYPE)
_MULTIMESSAGE = _descriptor.Descriptor(
name='MultiMessage',
full_name='scaii.common.MultiMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packets', full_name='scaii.common.MultiMessage.packets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=109,
)
_SCAIIPACKET = _descriptor.Descriptor(
name='ScaiiPacket',
full_name='scaii.common.ScaiiPacket',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='scaii.common.ScaiiPacket.state', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='action', full_name='scaii.common.ScaiiPacket.action', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='viz', full_name='scaii.common.ScaiiPacket.viz', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='err', full_name='scaii.common.ScaiiPacket.err', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='config', full_name='scaii.common.ScaiiPacket.config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='supported', full_name='scaii.common.ScaiiPacket.supported', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ser_req', full_name='scaii.common.ScaiiPacket.ser_req', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ser_resp', full_name='scaii.common.ScaiiPacket.ser_resp', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reset_env', full_name='scaii.common.ScaiiPacket.reset_env', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='viz_init', full_name='scaii.common.ScaiiPacket.viz_init', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='user_command', full_name='scaii.common.ScaiiPacket.user_command', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='replay_step', full_name='scaii.common.ScaiiPacket.replay_step', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_control', full_name='scaii.common.ScaiiPacket.test_control', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='recorder_step', full_name='scaii.common.ScaiiPacket.recorder_step', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='recorder_config', full_name='scaii.common.ScaiiPacket.recorder_config', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='game_complete', full_name='scaii.common.ScaiiPacket.game_complete', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='replay_session_config', full_name='scaii.common.ScaiiPacket.replay_session_config', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ack', full_name='scaii.common.ScaiiPacket.ack', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='emit_viz', full_name='scaii.common.ScaiiPacket.emit_viz', index=18,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='record', full_name='scaii.common.ScaiiPacket.record', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='replay_mode', full_name='scaii.common.ScaiiPacket.replay_mode', index=20,
number=21, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='env_desc', full_name='scaii.common.ScaiiPacket.env_desc', index=21,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expl_details', full_name='scaii.common.ScaiiPacket.expl_details', index=22,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='replay_control', full_name='scaii.common.ScaiiPacket.replay_control', index=23,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='replay_choice_config', full_name='scaii.common.ScaiiPacket.replay_choice_config', index=24,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='study_questions', full_name='scaii.common.ScaiiPacket.study_questions', index=25,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='study_question_answer', full_name='scaii.common.ScaiiPacket.study_question_answer', index=26,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_file_entry', full_name='scaii.common.ScaiiPacket.log_file_entry', index=27,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='other', full_name='scaii.common.ScaiiPacket.other', index=28,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='src', full_name='scaii.common.ScaiiPacket.src', index=29,
number=31, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dest', full_name='scaii.common.ScaiiPacket.dest', index=30,
number=30, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='specific_msg', full_name='scaii.common.ScaiiPacket.specific_msg',
index=0, containing_type=None, fields=[]),
],
serialized_start=112,
serialized_end=1576,
)
_ACK = _descriptor.Descriptor(
name='Ack',
full_name='scaii.common.Ack',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1578,
serialized_end=1583,
)
_ENDPOINT = _descriptor.Descriptor(
name='Endpoint',
full_name='scaii.common.Endpoint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='backend', full_name='scaii.common.Endpoint.backend', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='agent', full_name='scaii.common.Endpoint.agent', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='core', full_name='scaii.common.Endpoint.core', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='module', full_name='scaii.common.Endpoint.module', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='replay', full_name='scaii.common.Endpoint.replay', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='recorder', full_name='scaii.common.Endpoint.recorder', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='endpoint', full_name='scaii.common.Endpoint.endpoint',
index=0, containing_type=None, fields=[]),
],
serialized_start=1586,
serialized_end=1896,
)
# NOTE(review): protoc-generated descriptor data -- do not edit by hand;
# regenerate from the .proto source instead.  serialized_start/serialized_end
# are byte offsets into this module's serialized file descriptor.
# The messages below have no fields; they act as empty marker/tag messages.
_REPLAYSTEP = _descriptor.Descriptor(
  name='ReplayStep',
  full_name='scaii.common.ReplayStep',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1898,
  serialized_end=1910,
)
_BACKENDENDPOINT = _descriptor.Descriptor(
  name='BackendEndpoint',
  full_name='scaii.common.BackendEndpoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1912,
  serialized_end=1929,
)
_AGENTENDPOINT = _descriptor.Descriptor(
  name='AgentEndpoint',
  full_name='scaii.common.AgentEndpoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1931,
  serialized_end=1946,
)
_COREENDPOINT = _descriptor.Descriptor(
  name='CoreEndpoint',
  full_name='scaii.common.CoreEndpoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1948,
  serialized_end=1962,
)
# ModuleEndpoint is the only endpoint message carrying data: a required
# string `name` (type=9 is TYPE_STRING, label=2 is LABEL_REQUIRED per the
# protobuf FieldDescriptor enums).
_MODULEENDPOINT = _descriptor.Descriptor(
  name='ModuleEndpoint',
  full_name='scaii.common.ModuleEndpoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='scaii.common.ModuleEndpoint.name', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1964,
  serialized_end=1994,
)
_REPLAYENDPOINT = _descriptor.Descriptor(
  name='ReplayEndpoint',
  full_name='scaii.common.ReplayEndpoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1996,
  serialized_end=2012,
)
_RECORDERENDPOINT = _descriptor.Descriptor(
  name='RecorderEndpoint',
  full_name='scaii.common.RecorderEndpoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2014,
  serialized_end=2032,
)
# NOTE(review): protoc-generated descriptor data -- do not edit by hand.
# TypedRewardEntry is the synthesized key/value entry message backing the
# repeated `typed_reward` field of State; its MessageOptions blob ('8\001')
# is presumably the serialized map_entry=true option -- confirm against the
# .proto if it matters.
_STATE_TYPEDREWARDENTRY = _descriptor.Descriptor(
  name='TypedRewardEntry',
  full_name='scaii.common.State.TypedRewardEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='scaii.common.State.TypedRewardEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='scaii.common.State.TypedRewardEntry.value', index=1,
      number=2, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2208,
  serialized_end=2258,
)
# State: repeated double `features` with `feature_array_dims` giving the
# array dimensions, a scalar `reward`, raw-bytes `expanded_state`, the
# typed_reward map (nested entry message above), and a required `terminal`
# flag.
_STATE = _descriptor.Descriptor(
  name='State',
  full_name='scaii.common.State',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='features', full_name='scaii.common.State.features', index=0,
      number=1, type=1, cpp_type=5, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='feature_array_dims', full_name='scaii.common.State.feature_array_dims', index=1,
      number=2, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='reward', full_name='scaii.common.State.reward', index=2,
      number=3, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='expanded_state', full_name='scaii.common.State.expanded_state', index=3,
      number=4, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='typed_reward', full_name='scaii.common.State.typed_reward', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='terminal', full_name='scaii.common.State.terminal', index=5,
      number=6, type=8, cpp_type=7, label=2,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_STATE_TYPEDREWARDENTRY, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2035,
  serialized_end=2258,
)
# NOTE(review): protoc-generated descriptor data -- do not edit by hand.
# Action: repeated int32 `discrete_actions`, repeated double
# `continuous_actions`, opaque bytes `alternate_actions`, and an optional
# `explanation` submessage (linked to viz ExplanationPoint later in this
# module's wiring section).
_ACTION = _descriptor.Descriptor(
  name='Action',
  full_name='scaii.common.Action',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='discrete_actions', full_name='scaii.common.Action.discrete_actions', index=0,
      number=1, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='continuous_actions', full_name='scaii.common.Action.continuous_actions', index=1,
      number=2, type=1, cpp_type=5, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='alternate_actions', full_name='scaii.common.Action.alternate_actions', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='explanation', full_name='scaii.common.Action.explanation', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2261,
  serialized_end=2403,
)
# Error: required `description` string, optional `fatal` bool (explicit
# default False), and opaque `error_info` bytes.
_ERROR = _descriptor.Descriptor(
  name='Error',
  full_name='scaii.common.Error',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='description', full_name='scaii.common.Error.description', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='fatal', full_name='scaii.common.Error.fatal', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='error_info', full_name='scaii.common.Error.error_info', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2405,
  serialized_end=2475,
)
# Other: free-form escape hatch -- a required `name` plus opaque `msg` bytes.
_OTHER = _descriptor.Descriptor(
  name='Other',
  full_name='scaii.common.Other',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='scaii.common.Other.name', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='msg', full_name='scaii.common.Other.msg', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2477,
  serialized_end=2511,
)
# NOTE(review): protoc-generated descriptor data -- do not edit by hand.
# SerializationRequest/SerializationResponse both carry a required `format`
# enum field (bound to _SERIALIZATIONFORMAT in the wiring section below).
_SERIALIZATIONREQUEST = _descriptor.Descriptor(
  name='SerializationRequest',
  full_name='scaii.common.SerializationRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='format', full_name='scaii.common.SerializationRequest.format', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2513,
  serialized_end=2586,
)
_SERIALIZATIONRESPONSE = _descriptor.Descriptor(
  name='SerializationResponse',
  full_name='scaii.common.SerializationResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='serialized', full_name='scaii.common.SerializationResponse.serialized', index=0,
      number=1, type=12, cpp_type=9, label=2,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='format', full_name='scaii.common.SerializationResponse.format', index=1,
      number=2, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2588,
  serialized_end=2682,
)
# UserCommand: a required `command_type` enum (nested UserCommandType enum,
# referenced via _USERCOMMAND_USERCOMMANDTYPE which is defined earlier in
# this generated module) plus repeated string `args`.
_USERCOMMAND = _descriptor.Descriptor(
  name='UserCommand',
  full_name='scaii.common.UserCommand',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='command_type', full_name='scaii.common.UserCommand.command_type', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='args', full_name='scaii.common.UserCommand.args', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _USERCOMMAND_USERCOMMANDTYPE,
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2685,
  serialized_end=2974,
)
# NOTE(review): protoc-generated descriptor data -- do not edit by hand.
# TestControl: just a repeated string `args` payload.
_TESTCONTROL = _descriptor.Descriptor(
  name='TestControl',
  full_name='scaii.common.TestControl',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='args', full_name='scaii.common.TestControl.args', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2976,
  serialized_end=3003,
)
# RecorderStep: optional `action` submessage, required `is_decision_point`
# flag, and optional `explanation` submessage (message types linked in the
# wiring section below).
_RECORDERSTEP = _descriptor.Descriptor(
  name='RecorderStep',
  full_name='scaii.common.RecorderStep',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='action', full_name='scaii.common.RecorderStep.action', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='is_decision_point', full_name='scaii.common.RecorderStep.is_decision_point', index=1,
      number=2, type=8, cpp_type=7, label=2,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='explanation', full_name='scaii.common.RecorderStep.explanation', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3006,
  serialized_end=3138,
)
# RecorderConfig: repeated `pkts` (ScaiiPacket submessages, linked later),
# required `overwrite` flag, optional output `filepath`.
_RECORDERCONFIG = _descriptor.Descriptor(
  name='RecorderConfig',
  full_name='scaii.common.RecorderConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='pkts', full_name='scaii.common.RecorderConfig.pkts', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='overwrite', full_name='scaii.common.RecorderConfig.overwrite', index=1,
      number=2, type=8, cpp_type=7, label=2,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='filepath', full_name='scaii.common.RecorderConfig.filepath', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3140,
  serialized_end=3234,
)
# NOTE(review): protoc-generated descriptor data -- do not edit by hand.
# GameComplete: empty marker message (no fields).
_GAMECOMPLETE = _descriptor.Descriptor(
  name='GameComplete',
  full_name='scaii.common.GameComplete',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3236,
  serialized_end=3250,
)
# ReplayChoiceConfig: repeated string `replay_filenames` to choose from.
_REPLAYCHOICECONFIG = _descriptor.Descriptor(
  name='ReplayChoiceConfig',
  full_name='scaii.common.ReplayChoiceConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='replay_filenames', full_name='scaii.common.ReplayChoiceConfig.replay_filenames', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3252,
  serialized_end=3298,
)
# ReplaySessionConfig: required int64 `step_count`, parallel repeated lists
# of explanation step indices/titles and chart titles, and a required
# `suppress_interactivity` flag.
_REPLAYSESSIONCONFIG = _descriptor.Descriptor(
  name='ReplaySessionConfig',
  full_name='scaii.common.ReplaySessionConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='step_count', full_name='scaii.common.ReplaySessionConfig.step_count', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='explanation_steps', full_name='scaii.common.ReplaySessionConfig.explanation_steps', index=1,
      number=2, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='explanation_titles', full_name='scaii.common.ReplaySessionConfig.explanation_titles', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='chart_titles', full_name='scaii.common.ReplaySessionConfig.chart_titles', index=3,
      number=4, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='suppress_interactivity', full_name='scaii.common.ReplaySessionConfig.suppress_interactivity', index=4,
      number=5, type=8, cpp_type=7, label=2,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3301,
  serialized_end=3451,
)
# NOTE(review): protoc-generated descriptor data -- do not edit by hand.
# Record: a single required uint32 `keyframe_interval`.
_RECORD = _descriptor.Descriptor(
  name='Record',
  full_name='scaii.common.Record',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='keyframe_interval', full_name='scaii.common.Record.keyframe_interval', index=0,
      number=1, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3453,
  serialized_end=3488,
)
# ExplanationDetails: optional `step` index plus optional `expl_point` and
# `chart` submessages (bound to viz ExplanationPoint / ChartInfo in the
# wiring section below).
_EXPLANATIONDETAILS = _descriptor.Descriptor(
  name='ExplanationDetails',
  full_name='scaii.common.ExplanationDetails',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='step', full_name='scaii.common.ExplanationDetails.step', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='expl_point', full_name='scaii.common.ExplanationDetails.expl_point', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='chart', full_name='scaii.common.ExplanationDetails.chart', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3490,
  serialized_end=3616,
)
# ReplayControl: repeated string `command` tokens.
_REPLAYCONTROL = _descriptor.Descriptor(
  name='ReplayControl',
  full_name='scaii.common.ReplayControl',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='command', full_name='scaii.common.ReplayControl.command', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3618,
  serialized_end=3650,
)
# NOTE(review): protoc-generated cross-linking section -- do not edit by hand.
# The descriptors above are declared with message_type/enum_type=None and are
# patched here once all referenced descriptors exist.  Statement order matters:
# oneof field lists are built by appending in declared field order.
_MULTIMESSAGE.fields_by_name['packets'].message_type = _SCAIIPACKET
_SCAIIPACKET.fields_by_name['state'].message_type = _STATE
_SCAIIPACKET.fields_by_name['action'].message_type = _ACTION
_SCAIIPACKET.fields_by_name['viz'].message_type = viz__pb2._VIZ
_SCAIIPACKET.fields_by_name['err'].message_type = _ERROR
_SCAIIPACKET.fields_by_name['config'].message_type = cfg__pb2._CFG
_SCAIIPACKET.fields_by_name['supported'].message_type = cfg__pb2._SUPPORTEDBEHAVIOR
_SCAIIPACKET.fields_by_name['ser_req'].message_type = _SERIALIZATIONREQUEST
_SCAIIPACKET.fields_by_name['ser_resp'].message_type = _SERIALIZATIONRESPONSE
_SCAIIPACKET.fields_by_name['viz_init'].message_type = viz__pb2._VIZINIT
_SCAIIPACKET.fields_by_name['user_command'].message_type = _USERCOMMAND
_SCAIIPACKET.fields_by_name['replay_step'].message_type = _REPLAYSTEP
_SCAIIPACKET.fields_by_name['test_control'].message_type = _TESTCONTROL
_SCAIIPACKET.fields_by_name['recorder_step'].message_type = _RECORDERSTEP
_SCAIIPACKET.fields_by_name['recorder_config'].message_type = _RECORDERCONFIG
_SCAIIPACKET.fields_by_name['game_complete'].message_type = _GAMECOMPLETE
_SCAIIPACKET.fields_by_name['replay_session_config'].message_type = _REPLAYSESSIONCONFIG
_SCAIIPACKET.fields_by_name['ack'].message_type = _ACK
_SCAIIPACKET.fields_by_name['record'].message_type = _RECORD
_SCAIIPACKET.fields_by_name['env_desc'].message_type = cfg__pb2._ENVDESCRIPTION
_SCAIIPACKET.fields_by_name['expl_details'].message_type = _EXPLANATIONDETAILS
_SCAIIPACKET.fields_by_name['replay_control'].message_type = _REPLAYCONTROL
_SCAIIPACKET.fields_by_name['replay_choice_config'].message_type = _REPLAYCHOICECONFIG
_SCAIIPACKET.fields_by_name['study_questions'].message_type = viz__pb2._STUDYQUESTIONS
_SCAIIPACKET.fields_by_name['study_question_answer'].message_type = viz__pb2._STUDYQUESTIONANSWER
_SCAIIPACKET.fields_by_name['log_file_entry'].message_type = viz__pb2._LOGFILEENTRY
_SCAIIPACKET.fields_by_name['other'].message_type = _OTHER
_SCAIIPACKET.fields_by_name['src'].message_type = _ENDPOINT
_SCAIIPACKET.fields_by_name['dest'].message_type = _ENDPOINT
# Wire every payload field of ScaiiPacket into its `specific_msg` oneof
# (fields like reset_env/emit_viz/replay_mode are declared in a part of the
# module outside this section).
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['state'])
_SCAIIPACKET.fields_by_name['state'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['action'])
_SCAIIPACKET.fields_by_name['action'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['viz'])
_SCAIIPACKET.fields_by_name['viz'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['err'])
_SCAIIPACKET.fields_by_name['err'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['config'])
_SCAIIPACKET.fields_by_name['config'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['supported'])
_SCAIIPACKET.fields_by_name['supported'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['ser_req'])
_SCAIIPACKET.fields_by_name['ser_req'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['ser_resp'])
_SCAIIPACKET.fields_by_name['ser_resp'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['reset_env'])
_SCAIIPACKET.fields_by_name['reset_env'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['viz_init'])
_SCAIIPACKET.fields_by_name['viz_init'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['user_command'])
_SCAIIPACKET.fields_by_name['user_command'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['replay_step'])
_SCAIIPACKET.fields_by_name['replay_step'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['test_control'])
_SCAIIPACKET.fields_by_name['test_control'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['recorder_step'])
_SCAIIPACKET.fields_by_name['recorder_step'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['recorder_config'])
_SCAIIPACKET.fields_by_name['recorder_config'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['game_complete'])
_SCAIIPACKET.fields_by_name['game_complete'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['replay_session_config'])
_SCAIIPACKET.fields_by_name['replay_session_config'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['ack'])
_SCAIIPACKET.fields_by_name['ack'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['emit_viz'])
_SCAIIPACKET.fields_by_name['emit_viz'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['record'])
_SCAIIPACKET.fields_by_name['record'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['replay_mode'])
_SCAIIPACKET.fields_by_name['replay_mode'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['env_desc'])
_SCAIIPACKET.fields_by_name['env_desc'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['expl_details'])
_SCAIIPACKET.fields_by_name['expl_details'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['replay_control'])
_SCAIIPACKET.fields_by_name['replay_control'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['replay_choice_config'])
_SCAIIPACKET.fields_by_name['replay_choice_config'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['study_questions'])
_SCAIIPACKET.fields_by_name['study_questions'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['study_question_answer'])
_SCAIIPACKET.fields_by_name['study_question_answer'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['log_file_entry'])
_SCAIIPACKET.fields_by_name['log_file_entry'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
_SCAIIPACKET.oneofs_by_name['specific_msg'].fields.append(
  _SCAIIPACKET.fields_by_name['other'])
_SCAIIPACKET.fields_by_name['other'].containing_oneof = _SCAIIPACKET.oneofs_by_name['specific_msg']
# Endpoint holds exactly one of the endpoint marker messages, via its
# `endpoint` oneof.
_ENDPOINT.fields_by_name['backend'].message_type = _BACKENDENDPOINT
_ENDPOINT.fields_by_name['agent'].message_type = _AGENTENDPOINT
_ENDPOINT.fields_by_name['core'].message_type = _COREENDPOINT
_ENDPOINT.fields_by_name['module'].message_type = _MODULEENDPOINT
_ENDPOINT.fields_by_name['replay'].message_type = _REPLAYENDPOINT
_ENDPOINT.fields_by_name['recorder'].message_type = _RECORDERENDPOINT
_ENDPOINT.oneofs_by_name['endpoint'].fields.append(
  _ENDPOINT.fields_by_name['backend'])
_ENDPOINT.fields_by_name['backend'].containing_oneof = _ENDPOINT.oneofs_by_name['endpoint']
_ENDPOINT.oneofs_by_name['endpoint'].fields.append(
  _ENDPOINT.fields_by_name['agent'])
_ENDPOINT.fields_by_name['agent'].containing_oneof = _ENDPOINT.oneofs_by_name['endpoint']
_ENDPOINT.oneofs_by_name['endpoint'].fields.append(
  _ENDPOINT.fields_by_name['core'])
_ENDPOINT.fields_by_name['core'].containing_oneof = _ENDPOINT.oneofs_by_name['endpoint']
_ENDPOINT.oneofs_by_name['endpoint'].fields.append(
  _ENDPOINT.fields_by_name['module'])
_ENDPOINT.fields_by_name['module'].containing_oneof = _ENDPOINT.oneofs_by_name['endpoint']
_ENDPOINT.oneofs_by_name['endpoint'].fields.append(
  _ENDPOINT.fields_by_name['replay'])
_ENDPOINT.fields_by_name['replay'].containing_oneof = _ENDPOINT.oneofs_by_name['endpoint']
_ENDPOINT.oneofs_by_name['endpoint'].fields.append(
  _ENDPOINT.fields_by_name['recorder'])
_ENDPOINT.fields_by_name['recorder'].containing_oneof = _ENDPOINT.oneofs_by_name['endpoint']
# Remaining message/enum cross-references.
_STATE_TYPEDREWARDENTRY.containing_type = _STATE
_STATE.fields_by_name['typed_reward'].message_type = _STATE_TYPEDREWARDENTRY
_ACTION.fields_by_name['explanation'].message_type = viz__pb2._EXPLANATIONPOINT
_SERIALIZATIONREQUEST.fields_by_name['format'].enum_type = _SERIALIZATIONFORMAT
_SERIALIZATIONRESPONSE.fields_by_name['format'].enum_type = _SERIALIZATIONFORMAT
_USERCOMMAND.fields_by_name['command_type'].enum_type = _USERCOMMAND_USERCOMMANDTYPE
_USERCOMMAND_USERCOMMANDTYPE.containing_type = _USERCOMMAND
_RECORDERSTEP.fields_by_name['action'].message_type = _ACTION
_RECORDERSTEP.fields_by_name['explanation'].message_type = viz__pb2._EXPLANATIONPOINT
_RECORDERCONFIG.fields_by_name['pkts'].message_type = _SCAIIPACKET
_EXPLANATIONDETAILS.fields_by_name['expl_point'].message_type = viz__pb2._EXPLANATIONPOINT
_EXPLANATIONDETAILS.fields_by_name['chart'].message_type = viz__pb2._CHARTINFO
# Register every top-level message (and the file-level enum) on the file
# descriptor, then add the file to the default symbol database.
DESCRIPTOR.message_types_by_name['MultiMessage'] = _MULTIMESSAGE
DESCRIPTOR.message_types_by_name['ScaiiPacket'] = _SCAIIPACKET
DESCRIPTOR.message_types_by_name['Ack'] = _ACK
DESCRIPTOR.message_types_by_name['Endpoint'] = _ENDPOINT
DESCRIPTOR.message_types_by_name['ReplayStep'] = _REPLAYSTEP
DESCRIPTOR.message_types_by_name['BackendEndpoint'] = _BACKENDENDPOINT
DESCRIPTOR.message_types_by_name['AgentEndpoint'] = _AGENTENDPOINT
DESCRIPTOR.message_types_by_name['CoreEndpoint'] = _COREENDPOINT
DESCRIPTOR.message_types_by_name['ModuleEndpoint'] = _MODULEENDPOINT
DESCRIPTOR.message_types_by_name['ReplayEndpoint'] = _REPLAYENDPOINT
DESCRIPTOR.message_types_by_name['RecorderEndpoint'] = _RECORDERENDPOINT
DESCRIPTOR.message_types_by_name['State'] = _STATE
DESCRIPTOR.message_types_by_name['Action'] = _ACTION
DESCRIPTOR.message_types_by_name['Error'] = _ERROR
DESCRIPTOR.message_types_by_name['Other'] = _OTHER
DESCRIPTOR.message_types_by_name['SerializationRequest'] = _SERIALIZATIONREQUEST
DESCRIPTOR.message_types_by_name['SerializationResponse'] = _SERIALIZATIONRESPONSE
DESCRIPTOR.message_types_by_name['UserCommand'] = _USERCOMMAND
DESCRIPTOR.message_types_by_name['TestControl'] = _TESTCONTROL
DESCRIPTOR.message_types_by_name['RecorderStep'] = _RECORDERSTEP
DESCRIPTOR.message_types_by_name['RecorderConfig'] = _RECORDERCONFIG
DESCRIPTOR.message_types_by_name['GameComplete'] = _GAMECOMPLETE
DESCRIPTOR.message_types_by_name['ReplayChoiceConfig'] = _REPLAYCHOICECONFIG
DESCRIPTOR.message_types_by_name['ReplaySessionConfig'] = _REPLAYSESSIONCONFIG
DESCRIPTOR.message_types_by_name['Record'] = _RECORD
DESCRIPTOR.message_types_by_name['ExplanationDetails'] = _EXPLANATIONDETAILS
DESCRIPTOR.message_types_by_name['ReplayControl'] = _REPLAYCONTROL
DESCRIPTOR.enum_types_by_name['SerializationFormat'] = _SERIALIZATIONFORMAT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# NOTE(review): protoc-generated class creation -- do not edit by hand.
# Each concrete message class is synthesized at import time from its
# descriptor via the reflection metaclass, then registered in the default
# symbol database so it can be looked up by full name.
MultiMessage = _reflection.GeneratedProtocolMessageType('MultiMessage', (_message.Message,), dict(
  DESCRIPTOR = _MULTIMESSAGE,
  __module__ = 'scaii_pb2'
  # @@protoc_insertion_point(class_scope:scaii.common.MultiMessage)
  ))
_sym_db.RegisterMessage(MultiMessage)
ScaiiPacket = _reflection.GeneratedProtocolMessageType('ScaiiPacket', (_message.Message,), dict(
  DESCRIPTOR = _SCAIIPACKET,
  __module__ = 'scaii_pb2'
  # @@protoc_insertion_point(class_scope:scaii.common.ScaiiPacket)
  ))
_sym_db.RegisterMessage(ScaiiPacket)
Ack = _reflection.GeneratedProtocolMessageType('Ack', (_message.Message,), dict(
  DESCRIPTOR = _ACK,
  __module__ = 'scaii_pb2'
  # @@protoc_insertion_point(class_scope:scaii.common.Ack)
  ))
_sym_db.RegisterMessage(Ack)
Endpoint = _reflection.GeneratedProtocolMessageType('Endpoint', (_message.Message,), dict(
  DESCRIPTOR = _ENDPOINT,
  __module__ = 'scaii_pb2'
  # @@protoc_insertion_point(class_scope:scaii.common.Endpoint)
  ))
_sym_db.RegisterMessage(Endpoint)
ReplayStep = _reflection.GeneratedProtocolMessageType('ReplayStep', (_message.Message,), dict(
  DESCRIPTOR = _REPLAYSTEP,
  __module__ = 'scaii_pb2'
  # @@protoc_insertion_point(class_scope:scaii.common.ReplayStep)
  ))
_sym_db.RegisterMessage(ReplayStep)
BackendEndpoint = _reflection.GeneratedProtocolMessageType('BackendEndpoint', (_message.Message,), dict(
  DESCRIPTOR = _BACKENDENDPOINT,
  __module__ = 'scaii_pb2'
  # @@protoc_insertion_point(class_scope:scaii.common.BackendEndpoint)
  ))
_sym_db.RegisterMessage(BackendEndpoint)
AgentEndpoint = _reflection.GeneratedProtocolMessageType('AgentEndpoint', (_message.Message,), dict(
  DESCRIPTOR = _AGENTENDPOINT,
  __module__ = 'scaii_pb2'
  # @@protoc_insertion_point(class_scope:scaii.common.AgentEndpoint)
  ))
_sym_db.RegisterMessage(AgentEndpoint)
CoreEndpoint = _reflection.GeneratedProtocolMessageType('CoreEndpoint', (_message.Message,), dict(
  DESCRIPTOR = _COREENDPOINT,
  __module__ = 'scaii_pb2'
  # @@protoc_insertion_point(class_scope:scaii.common.CoreEndpoint)
  ))
_sym_db.RegisterMessage(CoreEndpoint)
ModuleEndpoint = _reflection.GeneratedProtocolMessageType('ModuleEndpoint', (_message.Message,), dict(
  DESCRIPTOR = _MODULEENDPOINT,
  __module__ = 'scaii_pb2'
  # @@protoc_insertion_point(class_scope:scaii.common.ModuleEndpoint)
  ))
_sym_db.RegisterMessage(ModuleEndpoint)
ReplayEndpoint = _reflection.GeneratedProtocolMessageType('ReplayEndpoint', (_message.Message,), dict(
DESCRIPTOR = _REPLAYENDPOINT,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.ReplayEndpoint)
))
_sym_db.RegisterMessage(ReplayEndpoint)
RecorderEndpoint = _reflection.GeneratedProtocolMessageType('RecorderEndpoint', (_message.Message,), dict(
DESCRIPTOR = _RECORDERENDPOINT,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.RecorderEndpoint)
))
_sym_db.RegisterMessage(RecorderEndpoint)
State = _reflection.GeneratedProtocolMessageType('State', (_message.Message,), dict(
TypedRewardEntry = _reflection.GeneratedProtocolMessageType('TypedRewardEntry', (_message.Message,), dict(
DESCRIPTOR = _STATE_TYPEDREWARDENTRY,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.State.TypedRewardEntry)
))
,
DESCRIPTOR = _STATE,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.State)
))
_sym_db.RegisterMessage(State)
_sym_db.RegisterMessage(State.TypedRewardEntry)
Action = _reflection.GeneratedProtocolMessageType('Action', (_message.Message,), dict(
DESCRIPTOR = _ACTION,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.Action)
))
_sym_db.RegisterMessage(Action)
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
DESCRIPTOR = _ERROR,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.Error)
))
_sym_db.RegisterMessage(Error)
Other = _reflection.GeneratedProtocolMessageType('Other', (_message.Message,), dict(
DESCRIPTOR = _OTHER,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.Other)
))
_sym_db.RegisterMessage(Other)
SerializationRequest = _reflection.GeneratedProtocolMessageType('SerializationRequest', (_message.Message,), dict(
DESCRIPTOR = _SERIALIZATIONREQUEST,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.SerializationRequest)
))
_sym_db.RegisterMessage(SerializationRequest)
SerializationResponse = _reflection.GeneratedProtocolMessageType('SerializationResponse', (_message.Message,), dict(
DESCRIPTOR = _SERIALIZATIONRESPONSE,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.SerializationResponse)
))
_sym_db.RegisterMessage(SerializationResponse)
UserCommand = _reflection.GeneratedProtocolMessageType('UserCommand', (_message.Message,), dict(
DESCRIPTOR = _USERCOMMAND,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.UserCommand)
))
_sym_db.RegisterMessage(UserCommand)
TestControl = _reflection.GeneratedProtocolMessageType('TestControl', (_message.Message,), dict(
DESCRIPTOR = _TESTCONTROL,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.TestControl)
))
_sym_db.RegisterMessage(TestControl)
RecorderStep = _reflection.GeneratedProtocolMessageType('RecorderStep', (_message.Message,), dict(
DESCRIPTOR = _RECORDERSTEP,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.RecorderStep)
))
_sym_db.RegisterMessage(RecorderStep)
RecorderConfig = _reflection.GeneratedProtocolMessageType('RecorderConfig', (_message.Message,), dict(
DESCRIPTOR = _RECORDERCONFIG,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.RecorderConfig)
))
_sym_db.RegisterMessage(RecorderConfig)
GameComplete = _reflection.GeneratedProtocolMessageType('GameComplete', (_message.Message,), dict(
DESCRIPTOR = _GAMECOMPLETE,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.GameComplete)
))
_sym_db.RegisterMessage(GameComplete)
ReplayChoiceConfig = _reflection.GeneratedProtocolMessageType('ReplayChoiceConfig', (_message.Message,), dict(
DESCRIPTOR = _REPLAYCHOICECONFIG,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.ReplayChoiceConfig)
))
_sym_db.RegisterMessage(ReplayChoiceConfig)
ReplaySessionConfig = _reflection.GeneratedProtocolMessageType('ReplaySessionConfig', (_message.Message,), dict(
DESCRIPTOR = _REPLAYSESSIONCONFIG,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.ReplaySessionConfig)
))
_sym_db.RegisterMessage(ReplaySessionConfig)
Record = _reflection.GeneratedProtocolMessageType('Record', (_message.Message,), dict(
DESCRIPTOR = _RECORD,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.Record)
))
_sym_db.RegisterMessage(Record)
ExplanationDetails = _reflection.GeneratedProtocolMessageType('ExplanationDetails', (_message.Message,), dict(
DESCRIPTOR = _EXPLANATIONDETAILS,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.ExplanationDetails)
))
_sym_db.RegisterMessage(ExplanationDetails)
ReplayControl = _reflection.GeneratedProtocolMessageType('ReplayControl', (_message.Message,), dict(
DESCRIPTOR = _REPLAYCONTROL,
__module__ = 'scaii_pb2'
# @@protoc_insertion_point(class_scope:scaii.common.ReplayControl)
))
_sym_db.RegisterMessage(ReplayControl)
_STATE_TYPEDREWARDENTRY.has_options = True
_STATE_TYPEDREWARDENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| bsd-3-clause |
nanditav/15712-TensorFlow | tensorflow/python/tools/strip_unused_lib.py | 25 | 3759 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to remove unneeded nodes from a GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.framework import graph_util
def strip_unused(input_graph_def, input_node_names, output_node_names,
                 placeholder_type_enum):
  """Removes unused nodes from a GraphDef.

  Nodes named in `input_node_names` are swapped for typed Placeholder ops;
  everything not required to compute `output_node_names` is then pruned.

  Args:
    input_graph_def: A graph with nodes we want to prune.
    input_node_names: A list of the nodes we use as inputs.
    output_node_names: A list of the output nodes.
    placeholder_type_enum: The AttrValue enum for the placeholder data type.

  Returns:
    A GraphDef with all unnecessary ops removed.
  """
  # Replacing the designated input nodes with placeholders leaves their
  # former producers dangling, so extract_sub_graph() drops them for us.
  replaced = tf.GraphDef()
  for node in input_graph_def.node:
    if node.name not in input_node_names:
      replaced.node.extend([copy.deepcopy(node)])
      continue
    placeholder = tf.NodeDef()
    placeholder.op = "Placeholder"
    placeholder.name = node.name
    placeholder.attr["dtype"].CopyFrom(tf.AttrValue(
        type=placeholder_type_enum))
    # Preserve shape metadata when the original node carried it.
    if "_output_shapes" in node.attr:
      placeholder.attr["_output_shapes"].CopyFrom(
          node.attr["_output_shapes"])
    replaced.node.extend([placeholder])
  return graph_util.extract_sub_graph(replaced, output_node_names)
def strip_unused_from_files(input_graph, input_binary, output_graph,
                            output_binary, input_node_names, output_node_names,
                            placeholder_type_enum):
  """Removes unused nodes from a graph file and writes the pruned result.

  Reads `input_graph` (binary or text proto per `input_binary`), prunes it
  with strip_unused(), and writes the result to `output_graph` in the
  format selected by `output_binary`.  Returns -1 on argument errors.
  """
  if not tf.gfile.Exists(input_graph):
    print("Input graph file '" + input_graph + "' does not exist!")
    return -1
  if not output_node_names:
    print("You need to supply the name of a node to --output_node_names.")
    return -1
  # Load the GraphDef; text protos are decoded from UTF-8 before merging.
  input_graph_def = tf.GraphDef()
  with tf.gfile.FastGFile(input_graph, "rb" if input_binary else "r") as f:
    raw = f.read()
    if input_binary:
      input_graph_def.ParseFromString(raw)
    else:
      text_format.Merge(raw.decode("utf-8"), input_graph_def)
  output_graph_def = strip_unused(input_graph_def,
                                  input_node_names.split(","),
                                  output_node_names.split(","),
                                  placeholder_type_enum)
  # Serialize in the requested output format, then write once.
  if output_binary:
    payload = output_graph_def.SerializeToString()
    out_mode = "wb"
  else:
    payload = text_format.MessageToString(output_graph_def)
    out_mode = "w"
  with tf.gfile.GFile(output_graph, out_mode) as f:
    f.write(payload)
  print("%d ops in the final graph." % len(output_graph_def.node))
| apache-2.0 |
chintak/scikit-image | doc/examples/plot_hog.py | 2 | 4351 | """
===============================
Histogram of Oriented Gradients
===============================
The `Histogram of Oriented Gradient
<http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients>`__ (HOG) feature
descriptor [1]_ is popular for object detection.
In the following example, we compute the HOG descriptor and display
a visualisation.
Algorithm overview
------------------
Compute a Histogram of Oriented Gradients (HOG) by
1. (optional) global image normalisation
2. computing the gradient image in x and y
3. computing gradient histograms
4. normalising across blocks
5. flattening into a feature vector
The first stage applies an optional global image normalisation
equalisation that is designed to reduce the influence of illumination
effects. In practice we use gamma (power law) compression, either
computing the square root or the log of each colour channel.
Image texture strength is typically proportional to the local surface
illumination so this compression helps to reduce the effects of local
shadowing and illumination variations.
The second stage computes first order image gradients. These capture
contour, silhouette and some texture information, while providing
further resistance to illumination variations. The locally dominant
colour channel is used, which provides colour invariance to a large
extent. Variant methods may also include second order image derivatives,
which act as primitive bar detectors - a useful feature for capturing,
e.g. bar like structures in bicycles and limbs in humans.
The third stage aims to produce an encoding that is sensitive to
local image content while remaining resistant to small changes in
pose or appearance. The adopted method pools gradient orientation
information locally in the same way as the SIFT [2]_
feature. The image window is divided into small spatial regions,
called "cells". For each cell we accumulate a local 1-D histogram
of gradient or edge orientations over all the pixels in the
cell. This combined cell-level 1-D histogram forms the basic
"orientation histogram" representation. Each orientation histogram
divides the gradient angle range into a fixed number of
predetermined bins. The gradient magnitudes of the pixels in the
cell are used to vote into the orientation histogram.
The fourth stage computes normalisation, which takes local groups of
cells and contrast normalises their overall responses before passing
to next stage. Normalisation introduces better invariance to illumination,
shadowing, and edge contrast. It is performed by accumulating a measure
of local histogram "energy" over local groups of cells that we call
"blocks". The result is used to normalise each cell in the block.
Typically each individual cell is shared between several blocks, but
its normalisations are block dependent and thus different. The cell
thus appears several times in the final output vector with different
normalisations. This may seem redundant but it improves the performance.
We refer to the normalised block descriptors as Histogram of Oriented
Gradient (HOG) descriptors.
The final step collects the HOG descriptors from all blocks of a dense
overlapping grid of blocks covering the detection window into a combined
feature vector for use in the window classifier.
References
----------
.. [1] Dalal, N. and Triggs, B., "Histograms of Oriented Gradients for
Human Detection," IEEE Computer Society Conference on Computer
Vision and Pattern Recognition, 2005, San Diego, CA, USA.
.. [2] David G. Lowe, "Distinctive image features from scale-invariant
keypoints," International Journal of Computer Vision, 60, 2 (2004),
pp. 91-110.
"""
import matplotlib.pyplot as plt
from skimage.feature import hog
from skimage import data, color, exposure
# HOG works on a single channel, so convert the RGB test image to grayscale.
image = color.rgb2gray(data.lena())
# With visualise=True, hog() returns the flattened feature vector *and* a
# rendering of the dominant gradient orientation per cell.
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                    cells_per_block=(1, 1), visualise=True)
plt.figure(figsize=(8, 4))
plt.subplot(121).set_axis_off()
plt.imshow(image, cmap=plt.cm.gray)
plt.title('Input image')
# Rescale histogram for better display: the raw HOG rendering is very dark,
# so stretch the low-intensity range (0, 0.02) to full scale.
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
plt.subplot(122).set_axis_off()
plt.imshow(hog_image_rescaled, cmap=plt.cm.gray)
plt.title('Histogram of Oriented Gradients')
plt.show()
| bsd-3-clause |
CiscoSystems/quantum | neutron/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py | 4 | 20175 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the spec
import copy
import mock
from oslo.config import cfg
from webob import exc
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.extensions import loadbalancer
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class LoadBalancerTestExtensionManager(object):
    """Minimal extension manager exposing only the load-balancer resources."""

    def get_resources(self):
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            loadbalancer.RESOURCE_ATTRIBUTE_MAP)
        return loadbalancer.Loadbalancer.get_resources()

    def get_actions(self):
        # No extra actions beyond the standard CRUD resources.
        return []

    def get_request_extensions(self):
        # No request extensions are needed for these tests.
        return []
class LoadBalancerExtensionTestCase(testlib_api.WebTestCase):
fmt = 'json'
def setUp(self):
super(LoadBalancerExtensionTestCase, self).setUp()
plugin = 'neutron.extensions.loadbalancer.LoadBalancerPluginBase'
# Ensure 'stale' patched copies of the plugin are never returned
manager.NeutronManager._instance = None
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Create the default configurations
args = ['--config-file', test_api_v2.etcdir('neutron.conf.test')]
config.parse(args)
#just stubbing core plugin with LoadBalancer plugin
cfg.CONF.set_override('core_plugin', plugin)
cfg.CONF.set_override('service_plugins', [plugin])
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance.get_plugin_type.return_value = constants.LOADBALANCER
ext_mgr = LoadBalancerTestExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.api = webtest.TestApp(self.ext_mdw)
super(LoadBalancerExtensionTestCase, self).setUp()
    def tearDown(self):
        """Undo the plugin patch and drop fixtures so each test starts clean."""
        self._plugin_patcher.stop()
        # Drop references so later tests cannot reuse this app/mock pair.
        self.api = None
        self.plugin = None
        # Clear the config overrides set in setUp().
        cfg.CONF.reset()
        super(LoadBalancerExtensionTestCase, self).tearDown()
def test_vip_create(self):
vip_id = _uuid()
data = {'vip': {'name': 'vip1',
'description': 'descr_vip1',
'subnet_id': _uuid(),
'address': '127.0.0.1',
'protocol_port': 80,
'protocol': 'HTTP',
'pool_id': _uuid(),
'session_persistence': {'type': 'HTTP_COOKIE'},
'connection_limit': 100,
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['vip'])
return_value.update({'status': "ACTIVE", 'id': vip_id})
instance = self.plugin.return_value
instance.create_vip.return_value = return_value
res = self.api.post(_get_path('lb/vips', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_vip.assert_called_with(mock.ANY,
vip=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(res['vip'], return_value)
def test_vip_list(self):
vip_id = _uuid()
return_value = [{'name': 'vip1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': vip_id}]
instance = self.plugin.return_value
instance.get_vips.return_value = return_value
res = self.api.get(_get_path('lb/vips', fmt=self.fmt))
instance.get_vips.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_vip_update(self):
vip_id = _uuid()
update_data = {'vip': {'admin_state_up': False}}
return_value = {'name': 'vip1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vip_id}
instance = self.plugin.return_value
instance.update_vip.return_value = return_value
res = self.api.put(_get_path('lb/vips', id=vip_id, fmt=self.fmt),
self.serialize(update_data))
instance.update_vip.assert_called_with(mock.ANY, vip_id,
vip=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(res['vip'], return_value)
def test_vip_get(self):
vip_id = _uuid()
return_value = {'name': 'vip1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vip_id}
instance = self.plugin.return_value
instance.get_vip.return_value = return_value
res = self.api.get(_get_path('lb/vips', id=vip_id, fmt=self.fmt))
instance.get_vip.assert_called_with(mock.ANY, vip_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(res['vip'], return_value)
    def _test_entity_delete(self, entity):
        """Does the entity deletion based on naming convention.

        Issues DELETE /lb/<entity>s/<id> and verifies the plugin's
        delete_<entity> method was invoked and a 204 was returned.
        """
        entity_id = _uuid()
        # Collection URL is the singular name plus 's' (vip -> lb/vips, ...).
        res = self.api.delete(_get_path('lb/' + entity + 's', id=entity_id,
                                        fmt=self.fmt))
        # Resolve the mocked plugin method by the same naming convention.
        delete_entity = getattr(self.plugin.return_value, "delete_" + entity)
        delete_entity.assert_called_with(mock.ANY, entity_id)
        self.assertEqual(res.status_int, exc.HTTPNoContent.code)
def test_vip_delete(self):
self._test_entity_delete('vip')
def test_pool_create(self):
pool_id = _uuid()
hm_id = _uuid()
data = {'pool': {'name': 'pool1',
'description': 'descr_pool1',
'subnet_id': _uuid(),
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN',
'health_monitors': [hm_id],
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['pool'])
return_value.update({'status': "ACTIVE", 'id': pool_id})
instance = self.plugin.return_value
instance.create_pool.return_value = return_value
res = self.api.post(_get_path('lb/pools', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_pool.assert_called_with(mock.ANY,
pool=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_list(self):
pool_id = _uuid()
return_value = [{'name': 'pool1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': pool_id}]
instance = self.plugin.return_value
instance.get_pools.return_value = return_value
res = self.api.get(_get_path('lb/pools', fmt=self.fmt))
instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_pool_update(self):
pool_id = _uuid()
update_data = {'pool': {'admin_state_up': False}}
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': pool_id}
instance = self.plugin.return_value
instance.update_pool.return_value = return_value
res = self.api.put(_get_path('lb/pools', id=pool_id, fmt=self.fmt),
self.serialize(update_data))
instance.update_pool.assert_called_with(mock.ANY, pool_id,
pool=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_get(self):
pool_id = _uuid()
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': pool_id}
instance = self.plugin.return_value
instance.get_pool.return_value = return_value
res = self.api.get(_get_path('lb/pools', id=pool_id, fmt=self.fmt))
instance.get_pool.assert_called_with(mock.ANY, pool_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_delete(self):
self._test_entity_delete('pool')
def test_pool_stats(self):
pool_id = _uuid()
stats = {'stats': 'dummy'}
instance = self.plugin.return_value
instance.stats.return_value = stats
path = _get_path('lb/pools', id=pool_id,
action="stats", fmt=self.fmt)
res = self.api.get(path)
instance.stats.assert_called_with(mock.ANY, pool_id)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('stats', res)
self.assertEqual(res['stats'], stats['stats'])
def test_member_create(self):
member_id = _uuid()
data = {'member': {'pool_id': _uuid(),
'address': '127.0.0.1',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['member'])
return_value.update({'status': "ACTIVE", 'id': member_id})
instance = self.plugin.return_value
instance.create_member.return_value = return_value
res = self.api.post(_get_path('lb/members', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_member.assert_called_with(mock.ANY,
member=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_member_list(self):
member_id = _uuid()
return_value = [{'name': 'member1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': member_id}]
instance = self.plugin.return_value
instance.get_members.return_value = return_value
res = self.api.get(_get_path('lb/members', fmt=self.fmt))
instance.get_members.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_member_update(self):
member_id = _uuid()
update_data = {'member': {'admin_state_up': False}}
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': member_id}
instance = self.plugin.return_value
instance.update_member.return_value = return_value
res = self.api.put(_get_path('lb/members', id=member_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_member.assert_called_with(mock.ANY, member_id,
member=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_member_get(self):
member_id = _uuid()
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': member_id}
instance = self.plugin.return_value
instance.get_member.return_value = return_value
res = self.api.get(_get_path('lb/members', id=member_id,
fmt=self.fmt))
instance.get_member.assert_called_with(mock.ANY, member_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_member_delete(self):
self._test_entity_delete('member')
def test_health_monitor_create(self):
health_monitor_id = _uuid()
data = {'health_monitor': {'type': 'HTTP',
'delay': 2,
'timeout': 1,
'max_retries': 3,
'http_method': 'GET',
'url_path': '/path',
'expected_codes': '200-300',
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['health_monitor'])
return_value.update({'status': "ACTIVE", 'id': health_monitor_id})
instance = self.plugin.return_value
instance.create_health_monitor.return_value = return_value
res = self.api.post(_get_path('lb/health_monitors',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_health_monitor.assert_called_with(mock.ANY,
health_monitor=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_health_monitor_list(self):
health_monitor_id = _uuid()
return_value = [{'type': 'HTTP',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': health_monitor_id}]
instance = self.plugin.return_value
instance.get_health_monitors.return_value = return_value
res = self.api.get(_get_path('lb/health_monitors', fmt=self.fmt))
instance.get_health_monitors.assert_called_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_health_monitor_update(self):
health_monitor_id = _uuid()
update_data = {'health_monitor': {'admin_state_up': False}}
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': health_monitor_id}
instance = self.plugin.return_value
instance.update_health_monitor.return_value = return_value
res = self.api.put(_get_path('lb/health_monitors',
id=health_monitor_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, health_monitor=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_health_monitor_get(self):
health_monitor_id = _uuid()
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': health_monitor_id}
instance = self.plugin.return_value
instance.get_health_monitor.return_value = return_value
res = self.api.get(_get_path('lb/health_monitors',
id=health_monitor_id,
fmt=self.fmt))
instance.get_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_health_monitor_delete(self):
self._test_entity_delete('health_monitor')
def test_create_pool_health_monitor(self):
health_monitor_id = _uuid()
data = {'health_monitor': {'id': health_monitor_id,
'tenant_id': _uuid()}}
return_value = copy.copy(data['health_monitor'])
instance = self.plugin.return_value
instance.create_pool_health_monitor.return_value = return_value
res = self.api.post('/lb/pools/id1/health_monitors',
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_pool_health_monitor.assert_called_with(
mock.ANY, pool_id='id1', health_monitor=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_delete_pool_health_monitor(self):
health_monitor_id = _uuid()
res = self.api.delete('/lb/pools/id1/health_monitors/%s' %
health_monitor_id)
instance = self.plugin.return_value
instance.delete_pool_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, pool_id='id1')
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
class LoadBalancerExtensionTestCaseXML(LoadBalancerExtensionTestCase):
fmt = 'xml'
| apache-2.0 |
powellb/seapy | seapy/qserver.py | 3 | 2886 | #!/usr/bin/env python
"""
This module will execute any number of tasks using a queue of the
specified number of threads.
Define a list of qserver.task objects and then tell the server to
execute the list with a given number of threads.
**Examples**
>>> tasks = (qserver.os_task("list files","ls -1"),
>>> qserver.task("my job",my_func,arg1,arg2,arg3))
>>> qserver.execute(tasks, nthreads=2)
"""
import sys
if sys.version_info < (3, 0):
from Queue import Queue
else:
from queue import Queue
import threading
import subprocess
import sys
import datetime
from seapy.timeout import timeout, TimeoutError
class task:
    """
    A unit of work for the queue server to process.

    The base class wraps a Python callable; :class:`os_task` specialises
    it for shell command strings.

    Parameters
    ----------
    name : string
        descriptive title of the task, used for logging
    cmd : callable
        function to invoke when the task runs
    args : vary [optional]
        positional arguments passed to ``cmd``

    Returns
    -------
    none
    """

    def __init__(self, name, cmd, *args):
        self.cmd = cmd
        self.name = name
        self.args = args

    def run(self):
        """Execute the task by calling ``cmd`` with the stored arguments."""
        # Guard keeps run() a no-op when cmd is not callable (e.g. a plain
        # shell string intended for the os_task subclass).
        if callable(self.cmd):
            self.cmd(*self.args)
class os_task(task):
    """
    Subclass of task that runs a shell command instead of a Python callable.

    Parameters
    ----------
    name : string
        title of the task
    cmd : string
        shell command line to execute (run with ``shell=True``)

    Returns
    -------
    none
    """
    def run(self):
        # shell=True lets the command string carry its own arguments/pipes;
        # NOTE(review): safe only for trusted command strings — never pass
        # untrusted input here.
        subprocess.call(self.cmd, shell=True)
class process_thread(threading.Thread):
    """Worker thread that forever pulls tasks off a queue and runs them."""

    def __init__(self, queue):
        super(process_thread, self).__init__()
        self.queue = queue

    def _announce(self, verb, item):
        # Flush immediately so interleaved output from concurrent workers
        # appears as it happens.
        print(self.getName() + " " + verb + " " + item.name +
              " at " + str(datetime.datetime.now()) + "\n")
        sys.stdout.flush()

    def run(self):
        while True:
            item = self.queue.get()
            self._announce("running", item)
            item.run()
            self._announce("completed", item)
            # Let Queue.join() callers know this task is finished.
            self.queue.task_done()
def execute(tasks, nthreads=2):
    """
    Process the given tasks through a pool of daemon worker threads.

    Parameters
    ----------
    tasks : list
        list of task classes to execute in the queue server
    nthreads: int
        number of threads to use to process tasks in the queue

    Returns
    -------
    None
    """
    work_queue = Queue()
    # Daemon workers: they must not keep the interpreter alive once the
    # queue has been drained.
    for _ in range(nthreads):
        worker = process_thread(work_queue)
        worker.daemon = True
        worker.start()
    for job in tasks:
        work_queue.put(job)
    # Wait until every queued task has been marked done by a worker.
    work_queue.join()
| mit |
dgjustice/ansible | lib/ansible/modules/cloud/openstack/os_subnet.py | 14 | 13512 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_subnet
short_description: Add/Remove subnet to an OpenStack network
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove a subnet to an OpenStack network
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
required: false
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
- Required when I(state) is 'present'
required: false
name:
description:
- The name of the subnet that should be created. Although Neutron
allows for non-unique subnet names, this module enforces subnet
name uniqueness.
required: true
cidr:
description:
- The CIDR representation of the subnet that should be assigned to
the subnet. Required when I(state) is 'present' and a subnetpool
is not specified.
required: false
default: None
ip_version:
description:
- The IP version of the subnet 4 or 6
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
required: false
default: None
no_gateway_ip:
description:
- The gateway IP would not be assigned for this subnet
required: false
default: false
version_added: "2.2"
dns_nameservers:
description:
- List of DNS nameservers for this subnet.
required: false
default: None
allocation_pool_start:
description:
- From the subnet pool the starting address from which the IP should
be allocated.
required: false
default: None
allocation_pool_end:
description:
- From the subnet pool the last IP that should be assigned to the
virtual machines.
required: false
default: None
host_routes:
description:
- A list of host route dictionaries for the subnet.
required: false
default: None
ipv6_ra_mode:
description:
- IPv6 router advertisement mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
required: false
default: None
ipv6_address_mode:
description:
- IPv6 address mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
required: false
default: None
use_default_subnetpool:
description:
- Use the default subnetpool for I(ip_version) to obtain a CIDR.
required: false
default: false
project:
description:
- Project name or ID containing the subnet (name admin-only)
required: false
default: None
version_added: "2.1"
availability_zone:
description:
    - Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a new (or update an existing) subnet on the specified network
- os_subnet:
state: present
network_name: network1
name: net1subnet
cidr: 192.168.0.0/24
dns_nameservers:
- 8.8.8.7
- 8.8.8.8
host_routes:
- destination: 0.0.0.0/0
nexthop: 12.34.56.78
- destination: 192.168.0.0/24
nexthop: 192.168.0.1
# Delete a subnet
- os_subnet:
state: absent
name: net1subnet
# Create an ipv6 stateless subnet
- os_subnet:
state: present
name: intv6
network_name: internal
ip_version: 6
cidr: 2db8:1::/64
dns_nameservers:
- 2001:4860:4860::8888
- 2001:4860:4860::8844
ipv6_ra_mode: dhcpv6-stateless
ipv6_address_mode: dhcpv6-stateless
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _can_update(subnet, module, cloud):
    """Check for differences in non-updatable values"""
    # Neutron cannot change a subnet's network, IP version or IPv6 modes
    # in place; fail the module run early if the playbook asks for that.
    network_name = module.params['network_name']
    cidr = module.params['cidr']
    ip_version = int(module.params['ip_version'])
    ipv6_ra_mode = module.params['ipv6_ra_mode']
    ipv6_a_mode = module.params['ipv6_address_mode']
    if network_name:
        network = cloud.get_network(network_name)
        if network:
            netid = network['id']
        else:
            module.fail_json(msg='No network found for %s' % network_name)
        if netid != subnet['network_id']:
            module.fail_json(msg='Cannot update network_name in existing \
subnet')
    if ip_version and subnet['ip_version'] != ip_version:
        module.fail_json(msg='Cannot update ip_version in existing subnet')
    if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ipv6_ra_mode:
        module.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet')
    if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode:
        module.fail_json(msg='Cannot update ipv6_address_mode in existing \
subnet')
def _needs_update(subnet, module, cloud):
    """Check for differences in the updatable values.

    Returns True when any updatable field of the existing subnet differs
    from the requested module parameters.
    """
    # First check if we are trying to update something we're not allowed to
    _can_update(subnet, module, cloud)

    # now check for the things we are allowed to update
    enable_dhcp = module.params['enable_dhcp']
    subnet_name = module.params['name']
    pool_start = module.params['allocation_pool_start']
    pool_end = module.params['allocation_pool_end']
    gateway_ip = module.params['gateway_ip']
    no_gateway_ip = module.params['no_gateway_ip']
    dns = module.params['dns_nameservers']
    host_routes = module.params['host_routes']
    # BUG FIX: a subnet may have no allocation pools at all; the previous
    # unconditional subnet['allocation_pools'][0] raised IndexError.
    pools = subnet.get('allocation_pools') or [{}]
    curr_pool = pools[0]
    if subnet['enable_dhcp'] != enable_dhcp:
        return True
    if subnet_name and subnet['name'] != subnet_name:
        return True
    if pool_start and curr_pool.get('start') != pool_start:
        return True
    if pool_end and curr_pool.get('end') != pool_end:
        return True
    if gateway_ip and subnet['gateway_ip'] != gateway_ip:
        return True
    if dns and sorted(subnet['dns_nameservers']) != sorted(dns):
        return True
    if host_routes:
        # BUG FIX: compare host routes order-insensitively in a way that
        # works on Python 3. Sorting on dict.keys() views, or sorting a
        # list of dicts directly, raises TypeError there; sort each route
        # on its sorted (key, value) pairs instead.
        def route_key(route):
            return sorted(route.items())
        curr_hr = sorted(subnet['host_routes'], key=route_key)
        new_hr = sorted(host_routes, key=route_key)
        if curr_hr != new_hr:
            return True
    if no_gateway_ip and subnet['gateway_ip']:
        return True
    return False
def _system_state_change(module, subnet, cloud):
state = module.params['state']
if state == 'present':
if not subnet:
return True
return _needs_update(subnet, module, cloud)
if state == 'absent' and subnet:
return True
return False
def main():
    """Ansible entry point: create, update or delete an OpenStack subnet."""
    ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        network_name=dict(default=None),
        cidr=dict(default=None),
        ip_version=dict(default='4', choices=['4', '6']),
        enable_dhcp=dict(default='true', type='bool'),
        gateway_ip=dict(default=None),
        no_gateway_ip=dict(default=False, type='bool'),
        dns_nameservers=dict(default=None, type='list'),
        allocation_pool_start=dict(default=None),
        allocation_pool_end=dict(default=None),
        host_routes=dict(default=None, type='list'),
        # BUG FIX: these two previously used the unknown key 'choice=',
        # which AnsibleModule ignored, so the values were never validated
        # against ipv6_mode_choices.
        ipv6_ra_mode=dict(default=None, choices=ipv6_mode_choices),
        ipv6_address_mode=dict(default=None, choices=ipv6_mode_choices),
        use_default_subnetpool=dict(default=False, type='bool'),
        state=dict(default='present', choices=['absent', 'present']),
        project=dict(default=None)
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    state = module.params['state']
    network_name = module.params['network_name']
    cidr = module.params['cidr']
    ip_version = module.params['ip_version']
    enable_dhcp = module.params['enable_dhcp']
    subnet_name = module.params['name']
    gateway_ip = module.params['gateway_ip']
    no_gateway_ip = module.params['no_gateway_ip']
    dns = module.params['dns_nameservers']
    pool_start = module.params['allocation_pool_start']
    pool_end = module.params['allocation_pool_end']
    host_routes = module.params['host_routes']
    ipv6_ra_mode = module.params['ipv6_ra_mode']
    ipv6_a_mode = module.params['ipv6_address_mode']
    use_default_subnetpool = module.params['use_default_subnetpool']
    # pop() so 'project' is not forwarded to shade.openstack_cloud() below.
    project = module.params.pop('project')

    if (use_default_subnetpool and
            StrictVersion(shade.__version__) < StrictVersion('1.16.0')):
        module.fail_json(msg="To utilize use_default_subnetpool, the installed"
                             " version of the shade library MUST be >=1.16.0")

    # Check for required parameters when state == 'present'
    if state == 'present':
        if not module.params['network_name']:
            module.fail_json(msg='network_name required with present state')
        if not module.params['cidr'] and not use_default_subnetpool:
            module.fail_json(msg='cidr or use_default_subnetpool required '
                                 'with present state')

    # Allocation pools must be given as a complete start/end pair.
    if pool_start and pool_end:
        pool = [dict(start=pool_start, end=pool_end)]
    elif pool_start or pool_end:
        module.fail_json(msg='allocation pool requires start and end values')
    else:
        pool = None

    if no_gateway_ip and gateway_ip:
        module.fail_json(msg='no_gateway_ip is not allowed with gateway_ip')

    try:
        cloud = shade.openstack_cloud(**module.params)

        # Resolve the (admin-only) project name/ID to a tenant filter.
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None

        subnet = cloud.get_subnet(subnet_name, filters=filters)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, subnet,
                                                          cloud))

        if state == 'present':
            if not subnet:
                subnet = cloud.create_subnet(
                    network_name, cidr,
                    ip_version=ip_version,
                    enable_dhcp=enable_dhcp,
                    subnet_name=subnet_name,
                    gateway_ip=gateway_ip,
                    disable_gateway_ip=no_gateway_ip,
                    dns_nameservers=dns,
                    allocation_pools=pool,
                    host_routes=host_routes,
                    ipv6_ra_mode=ipv6_ra_mode,
                    ipv6_address_mode=ipv6_a_mode,
                    use_default_subnetpool=use_default_subnetpool,
                    tenant_id=project_id)
                changed = True
            else:
                if _needs_update(subnet, module, cloud):
                    cloud.update_subnet(subnet['id'],
                                        subnet_name=subnet_name,
                                        enable_dhcp=enable_dhcp,
                                        gateway_ip=gateway_ip,
                                        disable_gateway_ip=no_gateway_ip,
                                        dns_nameservers=dns,
                                        allocation_pools=pool,
                                        host_routes=host_routes)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed,
                             subnet=subnet,
                             id=subnet['id'])

        elif state == 'absent':
            if not subnet:
                changed = False
            else:
                changed = True
                cloud.delete_subnet(subnet_name)
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
reinout/django | tests/view_tests/tests/test_default_content_type.py | 60 | 1761 | import sys
from types import ModuleType
from django.conf import Settings
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango30Warning
class DefaultContentTypeTests(SimpleTestCase):
    """Checks for the deprecation of the DEFAULT_CONTENT_TYPE setting."""
    # Expected deprecation-warning text, shared by the tests below.
    msg = 'The DEFAULT_CONTENT_TYPE setting is deprecated.'

    @ignore_warnings(category=RemovedInDjango30Warning)
    def test_default_content_type_is_text_html(self):
        """
        Content-Type of the default error responses is text/html. Refs #20822.
        """
        # The deprecated setting must be ignored by the error views.
        with self.settings(DEFAULT_CONTENT_TYPE='text/xml'):
            response = self.client.get('/raises400/')
            self.assertEqual(response['Content-Type'], 'text/html')

            response = self.client.get('/raises403/')
            self.assertEqual(response['Content-Type'], 'text/html')

            response = self.client.get('/nonexistent_url/')
            self.assertEqual(response['Content-Type'], 'text/html')

            response = self.client.get('/server_error/')
            self.assertEqual(response['Content-Type'], 'text/html')

    def test_override_settings_warning(self):
        # Overriding the deprecated setting must itself raise the warning.
        with self.assertRaisesMessage(RemovedInDjango30Warning, self.msg):
            with self.settings(DEFAULT_CONTENT_TYPE='text/xml'):
                pass

    def test_settings_init_warning(self):
        # Build a throwaway settings module on the fly so Settings() sees
        # DEFAULT_CONTENT_TYPE at initialization time.
        settings_module = ModuleType('fake_settings_module')
        settings_module.DEFAULT_CONTENT_TYPE = 'text/xml'
        settings_module.SECRET_KEY = 'abc'
        sys.modules['fake_settings_module'] = settings_module
        try:
            with self.assertRaisesMessage(RemovedInDjango30Warning, self.msg):
                Settings('fake_settings_module')
        finally:
            # Always clean up so other tests never import the fake module.
            del sys.modules['fake_settings_module']
| bsd-3-clause |
amarzavery/AutoRest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/Lro/fixtures/acceptancetestslro/operations/lr_os_custom_header_operations.py | 14 | 15784 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class LROsCustomHeaderOperations(object):
    """LROsCustomHeaderOperations operations.

    NOTE: this class is generated by the AutoRest code generator; manual
    changes will be lost when the client is regenerated. Each public method
    implements a long-running-operation (LRO) pattern: an initial request
    followed by polling via an AzureOperationPoller.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config

    def put_async_retry_succeeded(
            self, product=None, custom_headers=None, raw=False, **operation_config):
        """x-ms-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 is
        required message header for all requests. Long running put request,
        service returns a 200 to the initial request, with an entity that
        contains ProvisioningState=’Creating’. Poll the endpoint indicated in
        the Azure-AsyncOperation header for operation status.

        :param product: Product to put
        :type product: :class:`Product
         <fixtures.acceptancetestslro.models.Product>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`Product
         <fixtures.acceptancetestslro.models.Product>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/lro/customheader/putasync/retry/succeeded'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        if product is not None:
            body_content = self._serialize.body(product, 'Product')
        else:
            body_content = None

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status link returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None
            header_dict = {}

            if response.status_code == 200:
                deserialized = self._deserialize('Product', response)
                header_dict = {
                    'Azure-AsyncOperation': 'str',
                    'Location': 'str',
                    'Retry-After': 'int',
                }

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                client_raw_response.add_headers(header_dict)
                return client_raw_response

            return deserialized

        # raw=True bypasses polling and returns the initial response as-is.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def put201_creating_succeeded200(
            self, product=None, custom_headers=None, raw=False, **operation_config):
        """x-ms-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 is
        required message header for all requests. Long running put request,
        service returns a 201 to the initial request, with an entity that
        contains ProvisioningState=’Creating’. Polls return this value until
        the last poll returns a ‘200’ with ProvisioningState=’Succeeded’.

        :param product: Product to put
        :type product: :class:`Product
         <fixtures.acceptancetestslro.models.Product>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`Product
         <fixtures.acceptancetestslro.models.Product>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/lro/customheader/put/201/creating/succeeded/200'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        if product is not None:
            body_content = self._serialize.body(product, 'Product')
        else:
            body_content = None

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status link returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('Product', response)
            if response.status_code == 201:
                deserialized = self._deserialize('Product', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True bypasses polling and returns the initial response as-is.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def post202_retry200(
            self, product=None, custom_headers=None, raw=False, **operation_config):
        """x-ms-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 is
        required message header for all requests. Long running post request,
        service returns a 202 to the initial request, with 'Location' and
        'Retry-After' headers, Polls return a 200 with a response body after
        success.

        :param product: Product to put
        :type product: :class:`Product
         <fixtures.acceptancetestslro.models.Product>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/lro/customheader/post/202/retry/200'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        if product is not None:
            body_content = self._serialize.body(product, 'Product')
        else:
            body_content = None

        # Construct and send request
        def long_running_send():
            # Initial POST that starts the long-running operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status link returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            if raw:
                client_raw_response = ClientRawResponse(None, response)
                client_raw_response.add_headers({
                    'Location': 'str',
                    'Retry-After': 'int',
                })
                return client_raw_response

        # raw=True bypasses polling and returns the initial response as-is.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def post_async_retry_succeeded(
            self, product=None, custom_headers=None, raw=False, **operation_config):
        """x-ms-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 is
        required message header for all requests. Long running post request,
        service returns a 202 to the initial request, with an entity that
        contains ProvisioningState=’Creating’. Poll the endpoint indicated in
        the Azure-AsyncOperation header for operation status.

        :param product: Product to put
        :type product: :class:`Product
         <fixtures.acceptancetestslro.models.Product>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/lro/customheader/postasync/retry/succeeded'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        if product is not None:
            body_content = self._serialize.body(product, 'Product')
        else:
            body_content = None

        # Construct and send request
        def long_running_send():
            # Initial POST that starts the long-running operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status link returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            if raw:
                client_raw_response = ClientRawResponse(None, response)
                client_raw_response.add_headers({
                    'Azure-AsyncOperation': 'str',
                    'Location': 'str',
                    'Retry-After': 'int',
                })
                return client_raw_response

        # raw=True bypasses polling and returns the initial response as-is.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
| mit |
Antiun/odoomrp-wip | mrp_production_project_estimated_cost/wizard/wiz_create_fictitious_of.py | 4 | 3345 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class WizCreateFictitiousOf(models.TransientModel):
    """Wizard that creates inactive ("fictitious") manufacturing orders for
    the selected products in order to compute their estimated production
    cost, optionally loading that cost back onto the products."""
    _name = "wiz.create.fictitious.of"

    # Scheduled date used for every generated manufacturing order.
    date_planned = fields.Datetime(
        string='Scheduled Date', required=True, default=fields.Datetime.now)
    # When set, the estimated cost is written back on each product.
    load_on_product = fields.Boolean("Load cost on product")
    project_id = fields.Many2one("project.project", string="Project")

    @api.multi
    def do_create_fictitious_of(self):
        """Create one inactive production order per selected product,
        compute its estimated cost and optionally load it on the product.

        :return: an act_window action listing the created (inactive) orders.
        """
        production_obj = self.env['mrp.production']
        product_obj = self.env['product.product']
        routing_obj = self.env['mrp.routing']
        self.ensure_one()
        active_ids = self.env.context['active_ids']
        active_model = self.env.context['active_model']
        production_list = []
        # The wizard can be launched from templates or from variants.
        if active_model == 'product.template':
            cond = [('product_tmpl_id', 'in', active_ids)]
            product_list = product_obj.search(cond)
        else:
            product_list = product_obj.browse(active_ids)
        for product in product_list:
            vals = {'product_id': product.id,
                    'product_template': product.product_tmpl_id.id,
                    'product_qty': 1,
                    'date_planned': self.date_planned,
                    'user_id': self._uid,
                    'active': False,
                    'product_uom': product.uom_id.id,
                    'project_id': self.project_id.id,
                    'analytic_account_id': (
                        self.project_id.analytic_account_id.id)
                    }
            prod_vals = production_obj.product_id_change(product.id,
                                                         1)['value']
            vals.update(prod_vals)
            if 'routing_id' in vals:
                # Recompute the onchange values with the minimum quantity
                # allowed by the routing (falling back to 1).
                routing = routing_obj.browse(vals['routing_id'])
                product_qty = production_obj._get_min_qty_for_production(
                    routing) or 1
                vals['product_qty'] = product_qty
                prod_vals = production_obj.product_id_change(
                    product.id, product_qty)['value']
                vals.update(prod_vals)
            vals['product_attributes'] = [tuple([0, 0, line]) for line in
                                          vals.get('product_attributes', [])]
            new_production = production_obj.create(vals)
            new_production.action_compute()
            new_production.calculate_production_estimated_cost()
            production_list.append(new_production.id)
        if self.load_on_product:
            for production_id in production_list:
                try:
                    production = production_obj.browse(production_id)
                    production.load_product_std_price()
                except Exception:
                    # BUG FIX: was a bare "except:", which also swallowed
                    # KeyboardInterrupt/SystemExit. Loading the cost stays
                    # best-effort: one failure must not abort the batch.
                    continue
        return {'view_type': 'form',
                'view_mode': 'tree,form',
                'res_model': 'mrp.production',
                'type': 'ir.actions.act_window',
                'domain': "[('id','in'," + str(production_list) + "), "
                          "('active','=',False)]"
                }
| agpl-3.0 |
technologiescollege/s2a_fr | s2a/Python/Lib/test/test_commands.py | 130 | 2640 | '''
Tests for commands module
Nick Mathewson
'''
import unittest
import os, tempfile, re
from test.test_support import run_unittest, reap_children, import_module, \
check_warnings
# Silence Py3k warning
commands = import_module('commands', deprecated=True)
# The module says:
# "NB This only works (and is only relevant) for UNIX."
#
# Actually, getoutput should work on any platform with an os.popen, but
# I'll take the comment as given, and skip this suite.
if os.name != 'posix':
raise unittest.SkipTest('Not posix; skipping test_commands')
class CommandTests(unittest.TestCase):
    """Tests for the deprecated Python 2 ``commands`` helper module."""

    def test_getoutput(self):
        # getoutput returns just the text; getstatusoutput a (status, text)
        # pair where status 0 means success.
        self.assertEqual(commands.getoutput('echo xyzzy'), 'xyzzy')
        self.assertEqual(commands.getstatusoutput('echo xyzzy'), (0, 'xyzzy'))

        # we use mkdtemp in the next line to create an empty directory
        # under our exclusive control; from that, we can invent a pathname
        # that we _know_ won't exist.  This is guaranteed to fail.
        dir = None
        try:
            dir = tempfile.mkdtemp()
            name = os.path.join(dir, "foo")
            status, output = commands.getstatusoutput('cat ' + name)
            self.assertNotEqual(status, 0)
        finally:
            if dir is not None:
                os.rmdir(dir)

    def test_getstatus(self):
        # This pattern should match 'ls -ld /.' on any posix
        # system, however perversely configured.  Even on systems
        # (e.g., Cygwin) where user and group names can have spaces:
        #     drwxr-xr-x   15 Administ Domain U     4096 Aug 12 12:50 /
        #     drwxr-xr-x   15 Joe User My Group     4096 Aug 12 12:50 /
        # Note that the first case above has a space in the group name
        # while the second one has a space in both names.
        # Special attributes supported:
        #   + = has ACLs
        #   @ = has Mac OS X extended attributes
        #   . = has a SELinux security context
        pat = r'''d.........   # It is a directory.
                  [.+@]?       # It may have special attributes.
                  \s+\d+       # It has some number of links.
                  [^/]*        # Skip user, group, size, and date.
                  /\.          # and end with the name of the file.
               '''

        # getstatus itself emits a DeprecationWarning; silence/verify it.
        with check_warnings((".*commands.getstatus.. is deprecated",
                             DeprecationWarning)):
            self.assertTrue(re.match(pat, commands.getstatus("/."), re.VERBOSE))
def test_main():
    # Run the suite, then reap any shell children spawned by commands.*.
    run_unittest(CommandTests)
    reap_children()
if __name__ == "__main__":
test_main()
| gpl-3.0 |
beni55/scrapy | scrapy/utils/sitemap.py | 146 | 1410 | """
Module for processing Sitemaps.
Note: The main purpose of this module is to provide support for the
SitemapSpider, its API is subject to change without notice.
"""
import lxml.etree
class Sitemap(object):
    """Class to parse Sitemap (type=urlset) and Sitemap Index
    (type=sitemapindex) files.

    Iterating over an instance yields one dict per <url>/<sitemap> entry,
    mapping child-tag names (without namespace) to their text; <link>
    elements are collected under the 'alternate' key.
    """

    def __init__(self, xmltext):
        # recover=True tolerates the malformed XML frequently found in
        # real-world sitemaps; entity resolution is disabled for safety.
        xmlp = lxml.etree.XMLParser(recover=True, remove_comments=True, resolve_entities=False)
        self._root = lxml.etree.fromstring(xmltext, parser=xmlp)
        rt = self._root.tag
        # Strip the XML namespace: '{ns}urlset' -> 'urlset'
        self.type = self._root.tag.split('}', 1)[1] if '}' in rt else rt

    def __iter__(self):
        # BUG FIX: iterate elements directly instead of calling the
        # deprecated Element.getchildren(), which has been removed from
        # recent lxml/ElementTree releases.
        for elem in self._root:
            d = {}
            for el in elem:
                tag = el.tag
                name = tag.split('}', 1)[1] if '}' in tag else tag

                if name == 'link':
                    if 'href' in el.attrib:
                        d.setdefault('alternate', []).append(el.get('href'))
                else:
                    d[name] = el.text.strip() if el.text else ''

            # Entries without a <loc> are not valid sitemap records.
            if 'loc' in d:
                yield d
def sitemap_urls_from_robots(robots_text):
    """Return an iterator over all sitemap urls contained in the given
    robots.txt file
    """
    for raw_line in robots_text.splitlines():
        # A sitemap directive may be indented; match after lstripping.
        if not raw_line.lstrip().startswith('Sitemap:'):
            continue
        # Everything after the first ':' is the URL (which itself
        # contains colons, hence partition rather than a full split).
        _, _, url_part = raw_line.partition(':')
        yield url_part.strip()
| bsd-3-clause |
xunzhang/orc | c++/libs/protobuf-2.6.0/gtest/scripts/fuse_gtest_files.py | 2577 | 8813 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
    """Verifies that the given file exists; aborts on failure.

    relative_path is the file path relative to the given directory.
    """
    if not os.path.isfile(os.path.join(directory, relative_path)):
        print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                          directory)
        print ('Please either specify a valid project root directory '
               'or omit it on the command line.')
        sys.exit(1)
def ValidateGTestRootDir(gtest_root):
    """Makes sure gtest_root points to a valid gtest root directory.

    The function aborts the program on failure.
    """
    # Both seed files must exist for the fusion to have anything to scan.
    VerifyFileExists(gtest_root, GTEST_H_SEED)
    VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
    """Verifies that the given output file path is valid.

    relative_path is relative to the output_dir directory.  Prompts the
    user (via stdin) before overwriting an existing file, and creates the
    output file's parent directories as needed.
    """
    # Makes sure the output file either doesn't exist or can be overwritten.
    output_file = os.path.join(output_dir, relative_path)
    if os.path.exists(output_file):
        # TODO(wan@google.com): The following user-interaction doesn't
        # work with automated processes.  We should provide a way for the
        # Makefile to force overwriting the files.
        print ('%s already exists in directory %s - overwrite it? (y/N) ' %
               (relative_path, output_dir))
        answer = sys.stdin.readline().strip()
        if answer not in ['y', 'Y']:
            print 'ABORTED.'
            sys.exit(1)

    # Makes sure the directory holding the output file exists; creates
    # it and all its ancestors if necessary.
    parent_directory = os.path.dirname(output_file)
    if not os.path.isdir(parent_directory):
        os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
    """Makes sure output_dir points to a valid output directory.

    The function aborts the program on failure (or if the user declines
    to overwrite an existing output file).
    """
    VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
    VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
    """Scans folder gtest_root to generate gtest/gtest.h in output_dir.

    Recursively inlines every '#include "gtest/..."' directive, writing
    all other lines through unchanged.
    """
    output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
    processed_files = sets.Set()  # Holds all gtest headers we've processed.

    def ProcessFile(gtest_header_path):
        """Processes the given gtest header file."""
        # We don't process the same header twice.
        if gtest_header_path in processed_files:
            return
        processed_files.add(gtest_header_path)

        # Reads each line in the given gtest header.
        for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
            m = INCLUDE_GTEST_FILE_REGEX.match(line)
            if m:
                # It's '#include "gtest/..."' - let's process it recursively.
                ProcessFile('include/' + m.group(1))
            else:
                # Otherwise we copy the line unchanged to the output file.
                output_file.write(line)

    ProcessFile(GTEST_H_SEED)
    output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
    """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.

    Inlines '#include "src/..."' files recursively; rewrites gtest header
    includes to the single fused gtest.h (except gtest-spi.h, which is
    inlined because gtest.h does not include it).
    """
    processed_files = sets.Set()

    def ProcessFile(gtest_source_file):
        """Processes the given gtest source file."""
        # We don't process the same #included file twice.
        if gtest_source_file in processed_files:
            return
        processed_files.add(gtest_source_file)

        # Reads each line in the given gtest source file.
        for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
            m = INCLUDE_GTEST_FILE_REGEX.match(line)
            if m:
                if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
                    # It's '#include "gtest/gtest-spi.h"'.  This file is not
                    # #included by "gtest/gtest.h", so we need to process it.
                    ProcessFile(GTEST_SPI_H_SEED)
                else:
                    # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
                    # We treat it as '#include "gtest/gtest.h"', as all other
                    # gtest headers are being fused into gtest.h and cannot be
                    # #included directly.
                    # There is no need to #include "gtest/gtest.h" more than once.
                    if not GTEST_H_SEED in processed_files:
                        processed_files.add(GTEST_H_SEED)
                        output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
            else:
                m = INCLUDE_SRC_FILE_REGEX.match(line)
                if m:
                    # It's '#include "src/foo"' - let's process it recursively.
                    ProcessFile(m.group(1))
                else:
                    output_file.write(line)

    ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
    """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
    output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
    FuseGTestAllCcToFile(gtest_root, output_file)
    output_file.close()
def FuseGTest(gtest_root, output_dir):
    """Fuses gtest.h and gtest-all.cc.

    Validates both directories up front so we fail before writing anything.
    """
    ValidateGTestRootDir(gtest_root)
    ValidateOutputDir(output_dir)
    FuseGTestH(gtest_root, output_dir)
    FuseGTestAllCc(gtest_root, output_dir)
def main():
    """Command-line entry point.

    Usage: fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
    With one argument, GTEST_ROOT_DIR defaults to DEFAULT_GTEST_ROOT_DIR.
    """
    argc = len(sys.argv)
    if argc == 2:
        # fuse_gtest_files.py OUTPUT_DIR
        FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
    elif argc == 3:
        # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
        FuseGTest(sys.argv[1], sys.argv[2])
    else:
        # Wrong arity: print the module docstring (usage text) and fail.
        print __doc__
        sys.exit(1)


if __name__ == '__main__':
    main()
| apache-2.0 |
detiber/ansible-modules-core | cloud/openstack/os_image.py | 12 | 6329 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#TODO(mordred): we need to support "location"(v1) and "locations"(v2)
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_image
short_description: Add/Delete images from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove images from the OpenStack Image Repository
options:
name:
description:
- Name that has to be given to the image
required: true
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space (in GB) required to boot this image
required: false
default: None
min_ram:
description:
- The minimum ram (in MB) required to boot this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
required: false
default: 'yes'
filename:
description:
- The path to the file which has to be uploaded
required: false
default: None
ramdisk:
description:
- The name of an existing ramdisk image that will be associated with this image
required: false
default: None
kernel:
description:
- The name of an existing kernel image that will be associated with this image
required: false
default: None
properties:
description:
- Additional properties to be associated with this image
required: false
default: {}
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
- os_image:
auth:
auth_url: http://localhost/auth/v2.0
username: admin
password: passme
project_name: admin
name: cirros
container_format: bare
disk_format: qcow2
state: present
filename: cirros-0.3.0-x86_64-disk.img
kernel: cirros-vmlinuz
ramdisk: cirros-initrd
properties:
cpu_arch: x86_64
distro: ubuntu
'''
def main():
    """Ansible module entry point.

    Ensures an OpenStack Glance image is present (uploading it from
    `filename` when missing) or absent, reporting results to Ansible via
    exit_json/fail_json.
    """
    argument_spec = openstack_full_argument_spec(
        name = dict(required=True),
        disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']),
        container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']),
        owner = dict(default=None),
        min_disk = dict(type='int', default=0),
        min_ram = dict(type='int', default=0),
        is_public = dict(default=False),
        filename = dict(default=None),
        ramdisk = dict(default=None),
        kernel = dict(default=None),
        properties = dict(default={}),
        state = dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    # shade is imported optionally at the top of the file; fail cleanly
    # instead of raising NameError later.
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        cloud = shade.openstack_cloud(**module.params)

        changed = False
        image = cloud.get_image(name_or_id=module.params['name'])

        if module.params['state'] == 'present':
            if not image:
                image = cloud.create_image(
                    name=module.params['name'],
                    filename=module.params['filename'],
                    disk_format=module.params['disk_format'],
                    container_format=module.params['container_format'],
                    wait=module.params['wait'],
                    timeout=module.params['timeout'],
                    is_public=module.params['is_public'],
                    min_disk=module.params['min_disk'],
                    min_ram=module.params['min_ram']
                )
                changed = True
                if not module.params['wait']:
                    # Async mode: return immediately; kernel/ramdisk/extra
                    # properties are not applied since the image may not be
                    # ready yet.
                    module.exit_json(changed=changed, image=image, id=image.id)

            # Apply kernel/ramdisk associations and extra properties, then
            # re-fetch so the returned facts reflect the update.
            cloud.update_image_properties(
                image=image,
                kernel=module.params['kernel'],
                ramdisk=module.params['ramdisk'],
                **module.params['properties'])
            image = cloud.get_image(name_or_id=image.id)
            module.exit_json(changed=changed, image=image, id=image.id)

        elif module.params['state'] == 'absent':
            if not image:
                # Nothing to delete: idempotent no-op.
                changed = False
            else:
                cloud.delete_image(
                    name_or_id=module.params['name'],
                    wait=module.params['wait'],
                    timeout=module.params['timeout'])
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        # Surface the cloud error message and extra data to Ansible.
        module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
| gpl-3.0 |
lexor90/node-compiler | node/tools/icu/icutrim.py | 11 | 12101 | #!/usr/bin/python
#
# Copyright (C) 2014 IBM Corporation and Others. All Rights Reserved.
#
# @author Steven R. Loomis <srl@icu-project.org>
#
# This tool slims down an ICU data (.dat) file according to a config file.
#
# See: http://bugs.icu-project.org/trac/ticket/10922
#
# Usage:
# Use "-h" to get help options.
import sys
import shutil
# for utf-8
reload(sys)
sys.setdefaultencoding("utf-8")
import optparse
import os
import json
import re
endian=sys.byteorder
parser = optparse.OptionParser(usage="usage: mkdir tmp ; %prog -D ~/Downloads/icudt53l.dat -T tmp -F trim_en.json -O icudt53l.dat" )
parser.add_option("-P","--tool-path",
action="store",
dest="toolpath",
help="set the prefix directory for ICU tools")
parser.add_option("-D","--input-file",
action="store",
dest="datfile",
help="input data file (icudt__.dat)",
) # required
parser.add_option("-F","--filter-file",
action="store",
dest="filterfile",
help="filter file (JSON format)",
) # required
parser.add_option("-T","--tmp-dir",
action="store",
dest="tmpdir",
help="working directory.",
) # required
parser.add_option("--delete-tmp",
action="count",
dest="deltmpdir",
help="delete working directory.",
default=0)
parser.add_option("-O","--outfile",
action="store",
dest="outfile",
help="outfile (NOT a full path)",
) # required
parser.add_option("-v","--verbose",
action="count",
default=0)
parser.add_option('-L',"--locales",
action="store",
dest="locales",
help="sets the 'locales.only' variable",
default=None)
parser.add_option('-e', '--endian', action='store', dest='endian', help='endian, big, little or host, your default is "%s".' % endian, default=endian, metavar='endianness')
(options, args) = parser.parse_args()
optVars = vars(options)
for opt in [ "datfile", "filterfile", "tmpdir", "outfile" ]:
if optVars[opt] is None:
print "Missing required option: %s" % opt
sys.exit(1)
if options.verbose>0:
print "Options: "+str(options)
if (os.path.isdir(options.tmpdir) and options.deltmpdir):
if options.verbose>1:
print "Deleting tmp dir %s.." % (options.tmpdir)
shutil.rmtree(options.tmpdir)
if not (os.path.isdir(options.tmpdir)):
os.mkdir(options.tmpdir)
else:
print "Please delete tmpdir %s before beginning." % options.tmpdir
sys.exit(1)
if options.endian not in ("big","little","host"):
print "Unknown endianness: %s" % options.endian
sys.exit(1)
if options.endian is "host":
options.endian = endian
if not os.path.isdir(options.tmpdir):
print "Error, tmpdir not a directory: %s" % (options.tmpdir)
sys.exit(1)
if not os.path.isfile(options.filterfile):
print "Filterfile doesn't exist: %s" % (options.filterfile)
sys.exit(1)
if not os.path.isfile(options.datfile):
print "Datfile doesn't exist: %s" % (options.datfile)
sys.exit(1)
if not options.datfile.endswith(".dat"):
print "Datfile doesn't end with .dat: %s" % (options.datfile)
sys.exit(1)
outfile = os.path.join(options.tmpdir, options.outfile)
if os.path.isfile(outfile):
print "Error, output file does exist: %s" % (outfile)
sys.exit(1)
if not options.outfile.endswith(".dat"):
print "Outfile doesn't end with .dat: %s" % (options.outfile)
sys.exit(1)
dataname=options.outfile[0:-4]
## TODO: need to improve this. Quotes, etc.
def runcmd(tool, cmd, doContinue=False):
if(options.toolpath):
cmd = os.path.join(options.toolpath, tool) + " " + cmd
else:
cmd = tool + " " + cmd
if(options.verbose>4):
print "# " + cmd
rc = os.system(cmd)
if rc is not 0 and not doContinue:
print "FAILED: %s" % cmd
sys.exit(1)
return rc
## STEP 0 - read in json config
fi= open(options.filterfile, "rb")
config=json.load(fi)
fi.close()
if (options.locales):
if not config.has_key("variables"):
config["variables"] = {}
if not config["variables"].has_key("locales"):
config["variables"]["locales"] = {}
config["variables"]["locales"]["only"] = options.locales.split(',')
if (options.verbose > 6):
print config
if(config.has_key("comment")):
print "%s: %s" % (options.filterfile, config["comment"])
## STEP 1 - copy the data file, swapping endianness
## The first letter of endian_letter will be 'b' or 'l' for big or little
endian_letter = options.endian[0]
runcmd("icupkg", "-t%s %s %s""" % (endian_letter, options.datfile, outfile))
## STEP 2 - get listing
listfile = os.path.join(options.tmpdir,"icudata.lst")
runcmd("icupkg", "-l %s > %s""" % (outfile, listfile))
fi = open(listfile, 'rb')
items = fi.readlines()
items = [items[i].strip() for i in range(len(items))]
fi.close()
itemset = set(items)
if (options.verbose>1):
print "input file: %d items" % (len(items))
# list of all trees
trees = {}
RES_INDX = "res_index.res"
remove = None
# remove - always remove these
if config.has_key("remove"):
remove = set(config["remove"])
else:
remove = set()
# keep - always keep these
if config.has_key("keep"):
keep = set(config["keep"])
else:
keep = set()
def queueForRemoval(tree):
global remove
if not config.has_key("trees"):
# no config
return
if not config["trees"].has_key(tree):
return
mytree = trees[tree]
if(options.verbose>0):
print "* %s: %d items" % (tree, len(mytree["locs"]))
# do varible substitution for this tree here
if type(config["trees"][tree]) == str or type(config["trees"][tree]) == unicode:
treeStr = config["trees"][tree]
if(options.verbose>5):
print " Substituting $%s for tree %s" % (treeStr, tree)
if(not config.has_key("variables") or not config["variables"].has_key(treeStr)):
print " ERROR: no variable: variables.%s for tree %s" % (treeStr, tree)
sys.exit(1)
config["trees"][tree] = config["variables"][treeStr]
myconfig = config["trees"][tree]
if(options.verbose>4):
print " Config: %s" % (myconfig)
# Process this tree
if(len(myconfig)==0 or len(mytree["locs"])==0):
if(options.verbose>2):
print " No processing for %s - skipping" % (tree)
else:
only = None
if myconfig.has_key("only"):
only = set(myconfig["only"])
if (len(only)==0) and (mytree["treeprefix"] != ""):
thePool = "%spool.res" % (mytree["treeprefix"])
if (thePool in itemset):
if(options.verbose>0):
print "Removing %s because tree %s is empty." % (thePool, tree)
remove.add(thePool)
else:
print "tree %s - no ONLY"
for l in range(len(mytree["locs"])):
loc = mytree["locs"][l]
if (only is not None) and not loc in only:
# REMOVE loc
toRemove = "%s%s%s" % (mytree["treeprefix"], loc, mytree["extension"])
if(options.verbose>6):
print "Queueing for removal: %s" % toRemove
remove.add(toRemove)
def addTreeByType(tree, mytree):
    """Registers a non-res_index tree (converters, translit, brk, etc.).

    Scans the package item list for entries matching the tree's prefix
    and extension, records each item's locale/base name in
    mytree["locs"], then queues removals for the tree.
    """
    if(options.verbose>1):
        print "(considering %s): %s" % (tree, mytree)
    trees[tree] = mytree
    mytree["locs"]=[]
    for i in range(len(items)):
        item = items[i]
        if item.startswith(mytree["treeprefix"]) and item.endswith(mytree["extension"]):
            # Strip the prefix and the last 4 characters to get the base
            # name.  NOTE(review): this assumes every extension passed in
            # is exactly 4 characters (".cnv", ".brk", "dict", ...).
            mytree["locs"].append(item[len(mytree["treeprefix"]):-4])
    # now, process
    queueForRemoval(tree)
addTreeByType("converters",{"treeprefix":"", "extension":".cnv"})
addTreeByType("stringprep",{"treeprefix":"", "extension":".spp"})
addTreeByType("translit",{"treeprefix":"translit/", "extension":".res"})
addTreeByType("brkfiles",{"treeprefix":"brkitr/", "extension":".brk"})
addTreeByType("brkdict",{"treeprefix":"brkitr/", "extension":"dict"})
addTreeByType("confusables",{"treeprefix":"", "extension":".cfu"})
for i in range(len(items)):
item = items[i]
if item.endswith(RES_INDX):
treeprefix = item[0:item.rindex(RES_INDX)]
tree = None
if treeprefix == "":
tree = "ROOT"
else:
tree = treeprefix[0:-1]
if(options.verbose>6):
print "procesing %s" % (tree)
trees[tree] = { "extension": ".res", "treeprefix": treeprefix, "hasIndex": True }
# read in the resource list for the tree
treelistfile = os.path.join(options.tmpdir,"%s.lst" % tree)
runcmd("iculslocs", "-i %s -N %s -T %s -l > %s" % (outfile, dataname, tree, treelistfile))
fi = open(treelistfile, 'rb')
treeitems = fi.readlines()
trees[tree]["locs"] = [treeitems[i].strip() for i in range(len(treeitems))]
fi.close()
if(not config.has_key("trees") or not config["trees"].has_key(tree)):
print " Warning: filter file %s does not mention trees.%s - will be kept as-is" % (options.filterfile, tree)
else:
queueForRemoval(tree)
def removeList(count=0):
# don't allow "keep" items to creep in here.
global remove
remove = remove - keep
if(count > 10):
print "Giving up - %dth attempt at removal." % count
sys.exit(1)
if(options.verbose>1):
print "%d items to remove - try #%d" % (len(remove),count)
if(len(remove)>0):
oldcount = len(remove)
hackerrfile=os.path.join(options.tmpdir, "REMOVE.err")
removefile = os.path.join(options.tmpdir, "REMOVE.lst")
fi = open(removefile, 'wb')
for i in remove:
print >>fi, i
fi.close()
rc = runcmd("icupkg","-r %s %s 2> %s" % (removefile,outfile,hackerrfile),True)
if rc is not 0:
if(options.verbose>5):
print "## Damage control, trying to parse stderr from icupkg.."
fi = open(hackerrfile, 'rb')
erritems = fi.readlines()
fi.close()
#Item zone/zh_Hant_TW.res depends on missing item zone/zh_Hant.res
pat = re.compile("""^Item ([^ ]+) depends on missing item ([^ ]+).*""")
for i in range(len(erritems)):
line = erritems[i].strip()
m = pat.match(line)
if m:
toDelete = m.group(1)
if(options.verbose > 5):
print "<< %s added to delete" % toDelete
remove.add(toDelete)
else:
print "ERROR: could not match errline: %s" % line
sys.exit(1)
if(options.verbose > 5):
print " now %d items to remove" % len(remove)
if(oldcount == len(remove)):
print " ERROR: could not add any mor eitems to remove. Fail."
sys.exit(1)
removeList(count+1)
# fire it up
removeList(1)
# now, fixup res_index, one at a time
for tree in trees:
# skip trees that don't have res_index
if not trees[tree].has_key("hasIndex"):
continue
treebunddir = options.tmpdir
if(trees[tree]["treeprefix"]):
treebunddir = os.path.join(treebunddir, trees[tree]["treeprefix"])
if not (os.path.isdir(treebunddir)):
os.mkdir(treebunddir)
treebundres = os.path.join(treebunddir,RES_INDX)
treebundtxt = "%s.txt" % (treebundres[0:-4])
runcmd("iculslocs", "-i %s -N %s -T %s -b %s" % (outfile, dataname, tree, treebundtxt))
runcmd("genrb","-d %s -s %s res_index.txt" % (treebunddir, treebunddir))
runcmd("icupkg","-s %s -a %s%s %s" % (options.tmpdir, trees[tree]["treeprefix"], RES_INDX, outfile))
| mit |
BoltzmannBrain/nupic.research | projects/sequence_prediction/reberGrammar/reberSequence_CompareTMvsLSTM.py | 13 | 2320 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
plt.ion()
rcParams.update({'figure.autolayout': True})
def plotResult():
    """Plot TM-vs-LSTM prediction performance on the Reber grammar task.

    Loads the .npz result files written by the TM and LSTM experiment
    scripts and saves a three-panel comparison figure (hit rate, miss
    rate, false-positive rate vs. number of training sequences) to
    result/ReberSequence_CompareTM&LSTMperformance.pdf.
    """
    resultTM = np.load('result/reberSequenceTM.npz')
    resultLSTM = np.load('result/reberSequenceLSTM.npz')

    plt.figure()
    # FIX: the original called plt.hold(True) here.  plt.hold() was
    # deprecated in matplotlib 1.5 and removed in 3.0; "hold" is the
    # default behavior, so the call is dropped rather than ported.
    plt.subplot(2,2,1)
    plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['correctRateAll'],1),'-*',label='TM')
    plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['correctRateAll'],1),'-s',label='LSTM')
    plt.legend()
    plt.xlabel(' Training Sequence Number')
    plt.ylabel(' Hit Rate (Best Match) (%)')

    plt.subplot(2,2,4)
    plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['missRateAll'],1),'-*',label='TM')
    plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['missRateAll'],1),'-*',label='LSTM')
    plt.legend()
    plt.xlabel(' Training Sequence Number')
    plt.ylabel(' Miss Rate (%)')

    plt.subplot(2,2,3)
    plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['fpRateAll'],1),'-*',label='TM')
    plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['fpRateAll'],1),'-*',label='LSTM')
    plt.legend()
    plt.xlabel(' Training Sequence Number')
    plt.ylabel(' False Positive Rate (%)')

    plt.savefig('result/ReberSequence_CompareTM&LSTMperformance.pdf')
if __name__ == "__main__":
plotResult()
| agpl-3.0 |
iTyran/CocosBuilder | CocosBuilder/libs/nodejs/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 141 | 117912 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
object as a "weak" reference: it does not "own" the PBXBuildFile, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode applciation
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier only is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
  import hashlib
  # hashlib is available (Python >= 2.5): use its SHA-1 constructor.
  _new_sha1 = hashlib.sha1
except ImportError:
  import sha
  # Pre-2.5 fallback: sha.new is the legacy SHA-1 constructor.
  _new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[^ -~]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile('^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
  """Convert Xcode-style $(VAR) references in input_string to shell ${VAR}.

  Raw strings keep the pattern and replacement free of invalid
  escape-sequence warnings on newer Python versions.
  """
  return re.sub(r'\$\((.*?)\)', r'${\1}', input_string)
class XCObject(object):
  """The abstract base of all class types used in Xcode project files.

  Class variables:
    _schema: A dictionary defining the properties of this class.  The keys to
             _schema are string property keys as used in project files.  Values
             are a list of four or five elements:
             [ is_list, property_type, is_strong, is_required, default ]
             is_list: True if the property described is a list, as opposed
                      to a single element.
             property_type: The type to use as the value of the property,
                            or if is_list is True, the type to use for each
                            element of the value's list.  property_type must
                            be an XCObject subclass, or one of the built-in
                            types str, int, or dict.
             is_strong: If property_type is an XCObject subclass, is_strong
                        is True to assert that this class "owns," or serves
                        as parent, to the property value (or, if is_list is
                        True, values).  is_strong must be False if
                        property_type is not an XCObject subclass.
             is_required: True if the property is required for the class.
                          Note that is_required being True does not preclude
                          an empty string ("", in the case of property_type
                          str) or list ([], in the case of is_list True) from
                          being set for the property.
             default: Optional.  If is_required is True, default may be set
                      to provide a default value for objects that do not supply
                      their own value.  If is_required is True and default
                      is not provided, users of the class must supply their own
                      value for the property.
             Note that although the values of the array are expressed in
             boolean terms, subclasses provide values as integers to conserve
             horizontal space.
    _should_print_single_line: False in XCObject.  Subclasses whose objects
                               should be written to the project file in the
                               alternate single-line format, such as
                               PBXFileReference and PBXBuildFile, should
                               set this to True.
    _encode_transforms: Used by _EncodeString to encode unprintable characters.
                        The index into this list is the ordinal of the
                        character to transform; each value is a string
                        used to represent the character in the output.  XCObject
                        provides an _encode_transforms list suitable for most
                        XCObject subclasses.
    _alternate_encode_transforms: Provided for subclasses that wish to use
                                  the alternate encoding rules.  Xcode seems
                                  to use these rules when printing objects in
                                  single-line format.  Subclasses that desire
                                  this behavior should set _encode_transforms
                                  to _alternate_encode_transforms.
    _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
                to construct this object's ID.  Most classes that need custom
                hashing behavior should do it by overriding Hashables,
                but in some cases an object's parent may wish to push a
                hashable value into its child, and it can do so by appending
                to _hashables.

  Attributes:
    id: The object's identifier, a 24-character uppercase hexadecimal string.
        Usually, objects being created should not set id until the entire
        project file structure is built.  At that point, UpdateIDs() should
        be called on the root object to assign deterministic values for id to
        each object in the tree.
    parent: The object's parent.  This is set by a parent XCObject when a child
            object is added to it.
    _properties: The object's property dictionary.  An object's properties are
                 described by its class' _schema variable.
  """

  _schema = {}
  _should_print_single_line = False

  # See _EncodeString.  Every character below ord(' ') defaults to a "\Uxxxx"
  # escape; a handful then get the conventional C-style escapes below.
  _encode_transforms = []
  i = 0
  while i < ord(' '):
    _encode_transforms.append('\\U%04x' % i)
    i = i + 1
  _encode_transforms[7] = '\\a'   # BEL
  _encode_transforms[8] = '\\b'   # BS
  _encode_transforms[9] = '\\t'   # HT
  _encode_transforms[10] = '\\n'  # NL
  _encode_transforms[11] = '\\v'  # VT
  _encode_transforms[12] = '\\f'  # NP
  # CR is deliberately encoded as "\n" too; see the notes in _EncodeString.
  _encode_transforms[13] = '\\n'

  # The alternate table (used for single-line objects such as
  # PBXFileReference and PBXBuildFile) passes HT, NL, and VT through raw.
  _alternate_encode_transforms = list(_encode_transforms)
  _alternate_encode_transforms[9] = chr(9)
  _alternate_encode_transforms[10] = chr(10)
  _alternate_encode_transforms[11] = chr(11)
  def __init__(self, properties=None, id=None, parent=None):
    # id is usually left None here; deterministic ids are assigned later by
    # ComputeIDs once the whole object tree has been built.
    self.id = id
    self.parent = parent
    self._properties = {}
    self._hashables = []
    # Apply schema defaults first so caller-supplied properties override them.
    self._SetDefaultsFromSchema()
    self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
  def Copy(self):
    """Make a copy of this object.

    The new object will have its own copy of lists and dicts.  Any XCObject
    objects owned by this object (marked "strong") will be copied in the
    new object, even those found in lists.  If this object has any weak
    references to other XCObjects, the same references are added to the new
    object without making a copy.
    """

    that = self.__class__(id=self.id, parent=self.parent)
    for key, value in self._properties.iteritems():
      is_strong = self._schema[key][2]

      if isinstance(value, XCObject):
        if is_strong:
          # Owned child objects are deep-copied and reparented to the copy.
          new_value = value.Copy()
          new_value.parent = that
          that._properties[key] = new_value
        else:
          # Weak references are shared, not copied.
          that._properties[key] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        # Immutable scalar values may be assigned directly.
        that._properties[key] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe to
          # call Copy.
          that._properties[key] = []
          for item in value:
            new_item = item.Copy()
            new_item.parent = that
            that._properties[key].append(new_item)
        else:
          # Weak list: copy the list itself but share its elements.
          that._properties[key] = value[:]
      elif isinstance(value, dict):
        # dicts are never strong.
        if is_strong:
          raise TypeError, 'Strong dict for key ' + key + ' in ' + \
                           self.__class__.__name__
        else:
          that._properties[key] = value.copy()
      else:
        raise TypeError, 'Unexpected type ' + value.__class__.__name__ + \
                         ' for key ' + key + ' in ' + self.__class__.__name__

    return that
  def Name(self):
    """Return the name corresponding to an object.

    Not all objects necessarily need to be nameable, and not all that do have
    a "name" property.  Override as needed.

    Raises:
      NotImplementedError: the class neither stores a "name" property nor
          requires one in its schema.
    """

    # If the schema indicates that "name" is required, try to access the
    # property even if it doesn't exist.  This will result in a KeyError
    # being raised for the property that should be present, which seems more
    # appropriate than NotImplementedError in this case.
    if 'name' in self._properties or \
       ('name' in self._schema and self._schema['name'][3]):
      return self._properties['name']

    raise NotImplementedError, \
          self.__class__.__name__ + ' must implement Name'
  def Comment(self):
    """Return a comment string for the object.

    Most objects just use their name as the comment, but PBXProject uses
    different values.

    The returned comment is not escaped and does not have any comment marker
    strings applied to it.
    """

    return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
  def HashablesForChild(self):
    # Returning None makes ComputeIDs seed children from this object's own
    # hash state; subclasses may return a separate hashable list instead.
    return None
  def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
    """Set "id" properties deterministically.

    An object's "id" property is set based on a hash of its class type and
    name, as well as the class type and name of all ancestor objects.  As
    such, it is only advisable to call ComputeIDs once an entire project file
    tree is built.

    If recursive is True, recurse into all descendant objects and update their
    hashes.

    If overwrite is True, any existing value set in the "id" property will be
    replaced.
    """

    def _HashUpdate(hash, data):
      """Update hash with data's length and contents.

      If the hash were updated only with the value of data, it would be
      possible for clowns to induce collisions by manipulating the names of
      their objects.  By adding the length, it's exceedingly less likely that
      ID collisions will be encountered, intentionally or not.
      """

      hash.update(struct.pack('>i', len(data)))
      hash.update(data)

    if seed_hash is None:
      seed_hash = _new_sha1()

    hash = seed_hash.copy()

    hashables = self.Hashables()
    assert len(hashables) > 0
    for hashable in hashables:
      _HashUpdate(hash, hashable)

    if recursive:
      # Children hash either from this object's own state or, when
      # HashablesForChild returns a list, from a fresh state seeded with it.
      hashables_for_child = self.HashablesForChild()
      if hashables_for_child is None:
        child_hash = hash
      else:
        assert len(hashables_for_child) > 0
        child_hash = seed_hash.copy()
        for hashable in hashables_for_child:
          _HashUpdate(child_hash, hashable)

      for child in self.Children():
        child.ComputeIDs(recursive, overwrite, child_hash)

    if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest
      # is 160 bits.  Instead of throwing out 64 bits of the digest, xor them
      # into the portion that gets used.
      assert hash.digest_size % 4 == 0
      digest_int_count = hash.digest_size / 4
      digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
      id_ints = [0, 0, 0]
      for index in xrange(0, digest_int_count):
        id_ints[index % 3] ^= digest_ints[index]
      self.id = '%08X%08X%08X' % tuple(id_ints)
  def EnsureNoIDCollisions(self):
    """Verifies that no two objects have the same ID.  Checks all descendants.

    Raises:
      KeyError: two descendants (including self) share an id.
    """

    # Maps each id seen so far to the first object carrying it.
    ids = {}
    descendants = self.Descendants()
    for descendant in descendants:
      if descendant.id in ids:
        other = ids[descendant.id]
        raise KeyError, \
              'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
              (descendant.id, str(descendant._properties),
               str(other._properties), self._properties['rootObject'].Name())
      ids[descendant.id] = descendant
  def Children(self):
    """Returns a list of all of this object's owned (strong) children."""

    children = []
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong) = attributes[0:3]
      if is_strong and property in self._properties:
        if not is_list:
          children.append(self._properties[property])
        else:
          # A strong list property contributes each of its elements.
          children.extend(self._properties[property])
    return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
  def _EncodeTransform(self, match):
    # This function works closely with _EncodeString.  It will only be called
    # by re.sub with match.group(0) containing a character matched by the
    # _escaped expression.
    char = match.group(0)

    # Backslashes (\) and quotation marks (") are always replaced with a
    # backslash-escaped version of the same.  Everything else gets its
    # replacement from the class' _encode_transforms array.
    if char == '\\':
      return '\\\\'
    if char == '"':
      return '\\"'
    return self._encode_transforms[ord(char)]
  def _EncodeString(self, value):
    """Encodes a string to be placed in the project file output, mimicking
    Xcode behavior.
    """

    # Use quotation marks when any character outside of the range A-Z, a-z,
    # 0-9, $ (dollar sign), . (period), and _ (underscore) is present.  Also
    # use quotation marks to represent empty strings.
    #
    # Escape " (double-quote) and \ (backslash) by preceding them with a
    # backslash.
    #
    # Some characters below the printable ASCII range are encoded specially:
    #     7 ^G BEL is encoded as "\a"
    #     8 ^H BS  is encoded as "\b"
    #    11 ^K VT  is encoded as "\v"
    #    12 ^L NP  is encoded as "\f"
    #   127 ^? DEL is passed through as-is without escaping
    #  - In PBXFileReference and PBXBuildFile objects:
    #     9 ^I HT  is passed through as-is without escaping
    #    10 ^J NL  is passed through as-is without escaping
    #    13 ^M CR  is passed through as-is without escaping
    #  - In other objects:
    #     9 ^I HT  is encoded as "\t"
    #    10 ^J NL  is encoded as "\n"
    #    13 ^M CR  is encoded as "\n" rendering it indistinguishable from
    #              10 ^J NL
    # All other nonprintable characters within the ASCII range (0 through 127
    # inclusive) are encoded as "\U001f" referring to the Unicode code point in
    # hexadecimal.  For example, character 14 (^N SO) is encoded as "\U000e".
    # Characters above the ASCII range are passed through to the output encoded
    # as UTF-8 without any escaping.  These mappings are contained in the
    # class' _encode_transforms list.

    if _unquoted.search(value) and not _quoted.search(value):
      return value

    return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
  def _XCPrintableValue(self, tabs, value, flatten_list=False):
    """Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.

    _XCPrintableValue can handle str and int values, XCObjects (which are
    made printable by returning their id property), and list and dict objects
    composed of any of the above types.  When printing a list or dict, and
    _should_print_single_line is False, the tabs parameter is used to determine
    how much to indent the lines corresponding to the items in the list or
    dict.

    If flatten_list is True, single-element lists will be transformed into
    strings.
    """

    printable = ''
    comment = None

    if self._should_print_single_line:
      sep = ' '
      element_tabs = ''
      end_tabs = ''
    else:
      sep = '\n'
      element_tabs = '\t' * (tabs + 1)
      end_tabs = '\t' * tabs

    if isinstance(value, XCObject):
      # XCObjects print as their 24-character id, annotated with a comment.
      printable += value.id
      comment = value.Comment()
    elif isinstance(value, str):
      printable += self._EncodeString(value)
    elif isinstance(value, unicode):
      # unicode strings are encoded to UTF-8 for output.
      printable += self._EncodeString(value.encode('utf-8'))
    elif isinstance(value, int):
      printable += str(value)
    elif isinstance(value, list):
      if flatten_list and len(value) <= 1:
        # A zero- or one-element list prints as a bare (possibly empty)
        # string when flatten_list is set.
        if len(value) == 0:
          printable += self._EncodeString('')
        else:
          printable += self._EncodeString(value[0])
      else:
        printable = '(' + sep
        for item in value:
          printable += element_tabs + \
                       self._XCPrintableValue(tabs + 1, item, flatten_list) + \
                       ',' + sep
        printable += end_tabs + ')'
    elif isinstance(value, dict):
      printable = '{' + sep
      # Keys are emitted in sorted order for stable output.
      for item_key, item_value in sorted(value.iteritems()):
        printable += element_tabs + \
            self._XCPrintableValue(tabs + 1, item_key, flatten_list) + \
            ' = ' + \
            self._XCPrintableValue(tabs + 1, item_value, flatten_list) + \
            ';' + sep
      printable += end_tabs + '}'
    else:
      raise TypeError, "Can't make " + value.__class__.__name__ + ' printable'

    if comment != None:
      printable += ' ' + self._EncodeComment(comment)

    return printable
  def _XCKVPrint(self, file, tabs, key, value):
    """Prints a key and value, members of an XCObject's _properties dictionary,
    to file.

    tabs is an int identifying the indentation level.  If the class'
    _should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
    """

    if self._should_print_single_line:
      printable = ''
      after_kv = ' '
    else:
      printable = '\t' * tabs
      after_kv = '\n'

    # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
    # objects without comments.  Sometimes it prints them with comments, but
    # the majority of the time, it doesn't.  To avoid unnecessary changes to
    # the project file after Xcode opens it, don't write comments for
    # remoteGlobalIDString.  This is a sucky hack and it would certainly be
    # cleaner to extend the schema to indicate whether or not a comment should
    # be printed, but since this is the only case where the problem occurs and
    # Xcode itself can't seem to make up its mind, the hack will suffice.
    #
    # Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
    if key == 'remoteGlobalIDString' and isinstance(self,
                                                    PBXContainerItemProxy):
      value_to_print = value.id
    else:
      value_to_print = value

    # PBXBuildFile's settings property is represented in the output as a dict,
    # but a hack here has it represented as a string.  Arrange to strip off the
    # quotes so that it shows up in the output as expected.
    if key == 'settings' and isinstance(self, PBXBuildFile):
      strip_value_quotes = True
    else:
      strip_value_quotes = False

    # In another one-off, let's set flatten_list on buildSettings properties
    # of XCBuildConfiguration objects, because that's how Xcode treats them.
    if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
      flatten_list = True
    else:
      flatten_list = False

    try:
      printable_key = self._XCPrintableValue(tabs, key, flatten_list)
      printable_value = self._XCPrintableValue(tabs, value_to_print,
                                               flatten_list)
      if strip_value_quotes and len(printable_value) > 1 and \
         printable_value[0] == '"' and printable_value[-1] == '"':
        printable_value = printable_value[1:-1]
      printable += printable_key + ' = ' + printable_value + ';' + after_kv
    except TypeError, e:
      # Annotate the error with the key being printed for easier debugging.
      gyp.common.ExceptionAppend(e,
                                 'while printing key "%s"' % key)
      raise

    self._XCPrint(file, 0, printable)
  def Print(self, file=sys.stdout):
    """Prints a representation of this object to file, adhering to Xcode
    output formatting.
    """

    self.VerifyHasRequiredProperties()

    if self._should_print_single_line:
      # When printing an object in a single line, Xcode doesn't put any space
      # between the beginning of a dictionary (or presumably a list) and the
      # first contained item, so you wind up with snippets like
      #   ...CDEF = {isa = PBXFileReference; fileRef = 0123...
      # If it were me, I would have put a space in there after the opening
      # curly, but I guess this is just another one of those inconsistencies
      # between how Xcode prints PBXFileReference and PBXBuildFile objects as
      # compared to other objects.  Mimic Xcode's behavior here by using an
      # empty string for sep.
      sep = ''
      end_tabs = 0
    else:
      sep = '\n'
      end_tabs = 2

    # Start the object.  For example, '\t\tPBXProject = {\n'.
    self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)

    # "isa" isn't in the _properties dictionary, it's an intrinsic property
    # of the class which the object belongs to.  Xcode always outputs "isa"
    # as the first element of an object dictionary.
    self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)

    # The remaining elements of an object dictionary are sorted alphabetically.
    for property, value in sorted(self._properties.iteritems()):
      self._XCKVPrint(file, 3, property, value)

    # End the object.
    self._XCPrint(file, end_tabs, '};\n')
  def UpdateProperties(self, properties, do_copy=False):
    """Merge the supplied properties into the _properties dictionary.

    The input properties must adhere to the class schema or a KeyError or
    TypeError exception will be raised.  If adding an object of an XCObject
    subclass and the schema indicates a strong relationship, the object's
    parent will be set to this object.

    If do_copy is True, then lists, dicts, strong-owned XCObjects, and
    strong-owned XCObjects in lists will be copied instead of having their
    references added.
    """

    if properties is None:
      return

    for property, value in properties.iteritems():
      # Make sure the property is in the schema.
      if not property in self._schema:
        raise KeyError, property + ' not in ' + self.__class__.__name__

      # Make sure the property conforms to the schema.
      (is_list, property_type, is_strong) = self._schema[property][0:3]
      if is_list:
        if value.__class__ != list:
          raise TypeError, \
                property + ' of ' + self.__class__.__name__ + \
                ' must be list, not ' + value.__class__.__name__
        for item in value:
          if not isinstance(item, property_type) and \
             not (item.__class__ == unicode and property_type == str):
            # Accept unicode where str is specified.  str is treated as
            # UTF-8-encoded.
            raise TypeError, \
                  'item of ' + property + ' of ' + self.__class__.__name__ + \
                  ' must be ' + property_type.__name__ + ', not ' + \
                  item.__class__.__name__
      elif not isinstance(value, property_type) and \
           not (value.__class__ == unicode and property_type == str):
        # Accept unicode where str is specified.  str is treated as
        # UTF-8-encoded.
        raise TypeError, \
              property + ' of ' + self.__class__.__name__ + ' must be ' + \
              property_type.__name__ + ', not ' + value.__class__.__name__

      # Checks passed, perform the assignment.
      if do_copy:
        if isinstance(value, XCObject):
          if is_strong:
            self._properties[property] = value.Copy()
          else:
            self._properties[property] = value
        elif isinstance(value, str) or isinstance(value, unicode) or \
             isinstance(value, int):
          self._properties[property] = value
        elif isinstance(value, list):
          if is_strong:
            # If is_strong is True, each element is an XCObject, so it's safe
            # to call Copy.
            self._properties[property] = []
            for item in value:
              self._properties[property].append(item.Copy())
          else:
            self._properties[property] = value[:]
        elif isinstance(value, dict):
          self._properties[property] = value.copy()
        else:
          raise TypeError, "Don't know how to copy a " + \
                           value.__class__.__name__ + ' object for ' + \
                           property + ' in ' + self.__class__.__name__
      else:
        self._properties[property] = value

      # Set up the child's back-reference to this object.  Don't use |value|
      # any more because it may not be right if do_copy is true.
      if is_strong:
        if not is_list:
          self._properties[property].parent = self
        else:
          for item in self._properties[property]:
            item.parent = self
  def HasProperty(self, key):
    """Return True if key has been set in _properties."""
    return key in self._properties

  def GetProperty(self, key):
    """Return the value of key; raises KeyError if unset."""
    return self._properties[key]

  def SetProperty(self, key, value):
    """Set key to value, subject to the class' schema validation."""
    self.UpdateProperties({key: value})

  def DelProperty(self, key):
    """Remove key if present; silently does nothing otherwise."""
    if key in self._properties:
      del self._properties[key]
  def AppendProperty(self, key, value):
    """Append value to the list property key, validating against the schema.

    Raises:
      KeyError: key is not in the schema.
      TypeError: key is not a list property, or value has the wrong type.
    """

    # TODO(mark): Support ExtendProperty too (and make this call that)?

    # Schema validation.
    if not key in self._schema:
      raise KeyError, key + ' not in ' + self.__class__.__name__

    (is_list, property_type, is_strong) = self._schema[key][0:3]
    if not is_list:
      raise TypeError, key + ' of ' + self.__class__.__name__ + ' must be list'
    if not isinstance(value, property_type):
      raise TypeError, 'item of ' + key + ' of ' + self.__class__.__name__ + \
                       ' must be ' + property_type.__name__ + ', not ' + \
                       value.__class__.__name__

    # If the property doesn't exist yet, create a new empty list to receive the
    # item.
    if not key in self._properties:
      self._properties[key] = []

    # Set up the ownership link.
    if is_strong:
      value.parent = self

    # Store the item.
    self._properties[key].append(value)
  def VerifyHasRequiredProperties(self):
    """Ensure that all properties identified as required by the schema are
    set.

    Raises:
      KeyError: a required property is missing from _properties.
    """

    # TODO(mark): A stronger verification mechanism is needed.  Some
    # subclasses need to perform validation beyond what the schema can enforce.
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong, is_required) = attributes[0:4]
      if is_required and not property in self._properties:
        raise KeyError, self.__class__.__name__ + ' requires ' + property
  def _SetDefaultsFromSchema(self):
    """Assign object default values according to the schema.  This will not
    overwrite properties that have already been set."""

    defaults = {}
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong, is_required) = attributes[0:4]
      # Only required properties with a fifth (default) schema element get a
      # default value.
      if is_required and len(attributes) >= 5 and \
         not property in self._properties:
        default = attributes[4]
        defaults[property] = default

    if len(defaults) > 0:
      # Use do_copy=True so that each new object gets its own copy of strong
      # objects, lists, and dicts.
      self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
  """Abstract base for PBXGroup and PBXFileReference.  Not represented in a
  project file."""

  # TODO(mark): Do name and path belong here?  Probably so.
  # If path is set and name is not, name may have a default value.  Name will
  # be set to the basename of path, if the basename of path is different from
  # the full value of path.  If path is already just a leaf name, name will
  # not be set.
  _schema = XCObject._schema.copy()
  _schema.update({
    # [ is_list, property_type, is_strong, is_required, (default) ]
    'comments':       [0, str, 0, 0],
    'fileEncoding':   [0, str, 0, 0],
    'includeInIndex': [0, int, 0, 0],
    'indentWidth':    [0, int, 0, 0],
    'lineEnding':     [0, int, 0, 0],
    # sourceTree is required and defaults to group-relative.
    'sourceTree':     [0, str, 0, 1, '<group>'],
    'tabWidth':       [0, int, 0, 0],
    'usesTabs':       [0, int, 0, 0],
    'wrapsLines':     [0, int, 0, 0],
  })
  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCObject.__init__(self, properties, id, parent)
    if 'path' in self._properties and not 'name' in self._properties:
      path = self._properties['path']
      name = posixpath.basename(path)
      # Default name to path's basename, but only when it adds information
      # (i.e. path is more than just a leaf name).
      if name != '' and path != name:
        self.SetProperty('name', name)

    if 'path' in self._properties and \
       (not 'sourceTree' in self._properties or \
        self._properties['sourceTree'] == '<group>'):
      # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
      # the variable out and make the path be relative to that variable by
      # assigning the variable name as the sourceTree.
      (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
      if source_tree != None:
        self._properties['sourceTree'] = source_tree
      if path != None:
        self._properties['path'] = path
      if source_tree != None and path is None and \
         not 'name' in self._properties:
        # The path was of the form "$(SDKROOT)" with no path following it.
        # This object is now relative to that variable, so it has no path
        # attribute of its own.  It does, however, keep a name.
        del self._properties['path']
        self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
  def Hashables(self):
    """Custom hashables for XCHierarchicalElements.

    XCHierarchicalElements are special.  Generally, their hashes shouldn't
    change if the paths don't change.  The normal XCObject implementation of
    Hashables adds a hashable for each object, which means that if
    the hierarchical structure changes (possibly due to changes caused when
    TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
    the hashes will change.  For example, if a project file initially contains
    a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent
    a/b.  If someone later adds a/f2 to the project file, a/b can no longer be
    collapsed, and f1 winds up with parent b and grandparent a.  That would
    be sufficient to change f1's hash.

    To counteract this problem, hashables for all XCHierarchicalElements
    except for the main group (which has neither a name nor a path) are taken
    to be just the set of path components.  Because hashables are inherited
    from parents, this provides assurance that a/b/f1 has the same set of
    hashables whether its parent is b or a/b.

    The main group is a special case.  As it is permitted to have no name or
    path, it is permitted to use the standard XCObject hash mechanism.  This
    is not considered a problem because there can be only one main group.
    """

    if self == self.PBXProjectAncestor()._properties['mainGroup']:
      # super
      return XCObject.Hashables(self)

    hashables = []

    # Put the name in first, ensuring that if TakeOverOnlyChild collapses
    # children into a top-level group like "Source", the name always goes
    # into the list of hashables without interfering with path components.
    if 'name' in self._properties:
      # Make it less likely for people to manipulate hashes by following the
      # pattern of always pushing an object type value onto the list first.
      hashables.append(self.__class__.__name__ + '.name')
      hashables.append(self._properties['name'])

    # NOTE: This still has the problem that if an absolute path is encountered,
    # including paths with a sourceTree, they'll still inherit their parents'
    # hashables, even though the paths aren't relative to their parents.  This
    # is not expected to be much of a problem in practice.
    path = self.PathFromSourceTreeAndPath()
    if path != None:
      components = path.split(posixpath.sep)
      for component in components:
        hashables.append(self.__class__.__name__ + '.path')
        hashables.append(component)

    hashables.extend(self._hashables)

    return hashables
  def Compare(self, other):
    """Three-way comparison: groups sort before files; ties break by name.

    Returns a negative, zero, or positive int in the cmp() convention.
    """

    # Allow comparison of these types.  PBXGroup has the highest sort rank;
    # PBXVariantGroup is treated as equal to PBXFileReference.
    valid_class_types = {
      PBXFileReference: 'file',
      PBXGroup:         'group',
      PBXVariantGroup:  'file',
    }
    self_type = valid_class_types[self.__class__]
    other_type = valid_class_types[other.__class__]

    if self_type == other_type:
      # If the two objects are of the same sort rank, compare their names.
      # NOTE: cmp is a Python 2 builtin.
      return cmp(self.Name(), other.Name())

    # Otherwise, sort groups before everything else.
    if self_type == 'group':
      return -1
    return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
other_in = isinstance(self, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
  """Returns a full path to self relative to the project file, or relative
  to the first absolute ("/...") or source-tree ("$(...)") anchor found
  while walking up the chain of parents."""
  path = None
  node = self
  while isinstance(node, XCHierarchicalElement):
    # Stop climbing once the accumulated path is already anchored to an
    # absolute location or an Xcode source-tree variable.
    if path is not None and (path.startswith('/') or path.startswith('$')):
      break
    segment = node.PathFromSourceTreeAndPath()
    if segment is not None:
      path = segment if path is None else posixpath.join(segment, path)
    node = node.parent
  return path
class PBXGroup(XCHierarchicalElement):
  """A folder-like container node in the Xcode project hierarchy.

  Attributes:
    _children_by_path: Maps pathnames of children of this PBXGroup to the
      actual child XCHierarchicalElement objects.
    _variant_children_by_name_and_path: Maps (name, path) tuples of
      PBXVariantGroup children to the actual child PBXVariantGroup objects.
  """

  _schema = XCHierarchicalElement._schema.copy()
  _schema.update({
    'children': [1, XCHierarchicalElement, 1, 1, []],
    'name': [0, str, 0, 0],
    'path': [0, str, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCHierarchicalElement.__init__(self, properties, id, parent)
    # Index any children supplied via properties so the path and variant
    # lookups below work immediately.
    self._children_by_path = {}
    self._variant_children_by_name_and_path = {}
    for child in self._properties.get('children', []):
      self._AddChildToDicts(child)

  def Hashables(self):
    """Returns this group's hashables plus the names of all its children."""
    # super
    hashables = XCHierarchicalElement.Hashables(self)

    # It is not sufficient to just rely on name and parent to build a unique
    # hashable: a node could have two child PBXGroups sharing a common name.
    # To add entropy the hashable is enhanced with the names of all its
    # children.
    for child in self._properties.get('children', []):
      child_name = child.Name()
      if child_name != None:
        hashables.append(child_name)
    return hashables

  def HashablesForChild(self):
    # To avoid a circular reference the hashables used to compute a child id
    # do not include the child names.
    return XCHierarchicalElement.Hashables(self)

  def _AddChildToDicts(self, child):
    """Sets up this PBXGroup object's dicts to reference the child properly.

    Raises ValueError if a child with the same path, or a PBXVariantGroup
    child with the same (name, path) key, is already registered.
    """
    child_path = child.PathFromSourceTreeAndPath()
    if child_path:
      if child_path in self._children_by_path:
        raise ValueError, 'Found multiple children with path ' + child_path
      self._children_by_path[child_path] = child

    if isinstance(child, PBXVariantGroup):
      child_name = child._properties.get('name', None)
      key = (child_name, child_path)
      if key in self._variant_children_by_name_and_path:
        raise ValueError, 'Found multiple PBXVariantGroup children with ' + \
                          'name ' + str(child_name) + ' and path ' + \
                          str(child_path)
      self._variant_children_by_name_and_path[key] = child

  def AppendChild(self, child):
    # Callers should use this instead of calling
    # AppendProperty('children', child) directly because this function
    # maintains the group's dicts.
    self.AppendProperty('children', child)
    self._AddChildToDicts(child)

  def GetChildByName(self, name):
    """Returns the first child whose Name() equals name, or None.

    This is not currently optimized with a dict as GetChildByPath is because
    it has few callers.  Most callers probably want GetChildByPath.  This
    function is only useful to get children that have names but no paths,
    which is rare.  The children of the main group ("Source", "Products",
    etc.) is pretty much the only case where this is likely to come up.
    """
    # TODO(mark): Maybe this should raise an error if more than one child is
    # present with the same name.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if child.Name() == name:
        return child

    return None

  def GetChildByPath(self, path):
    """Returns the child registered under path, or None."""
    if not path:
      return None

    if path in self._children_by_path:
      return self._children_by_path[path]

    return None

  def GetChildByRemoteObject(self, remote_object):
    # This method is a little bit esoteric.  Given a remote_object, which
    # should be a PBXFileReference in another project file, this method will
    # return this group's PBXReferenceProxy object serving as a local proxy
    # for the remote PBXFileReference.
    #
    # This function might benefit from a dict optimization as GetChildByPath
    # for some workloads, but profiling shows that it's not currently a
    # problem.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if not isinstance(child, PBXReferenceProxy):
        continue

      container_proxy = child._properties['remoteRef']
      if container_proxy._properties['remoteGlobalIDString'] == remote_object:
        return child

    return None

  def AddOrGetFileByPath(self, path, hierarchical):
    """Returns an existing or new file reference corresponding to path.

    If hierarchical is True, this method will create or use the necessary
    hierarchical group structure corresponding to path.  Otherwise, it will
    look in and create an item in the current group only.

    If an existing matching reference is found, it is returned, otherwise, a
    new one will be created, added to the correct group, and returned.

    If path identifies a directory by virtue of carrying a trailing slash,
    this method returns a PBXFileReference of "folder" type.  If path
    identifies a variant, by virtue of it identifying a file inside a
    directory with an ".lproj" extension, this method returns a
    PBXVariantGroup containing the variant named by path, and possibly other
    variants.  For all other paths, a "normal" PBXFileReference will be
    returned.
    """
    # Adding or getting a directory?  Directories end with a trailing slash.
    # Normalize first, then restore the slash so the directory test below
    # still sees it.
    is_dir = False
    if path.endswith('/'):
      is_dir = True
    path = posixpath.normpath(path)
    if is_dir:
      path = path + '/'

    # Adding or getting a variant?  Variants are files inside directories
    # with an ".lproj" extension.  Xcode uses variants for localization.  For
    # a variant path/to/Language.lproj/MainMenu.nib, put a variant group
    # named MainMenu.nib inside path/to, and give it a variant named
    # Language.  In this example, grandparent would be set to path/to and
    # parent_root would be set to Language.
    variant_name = None
    parent = posixpath.dirname(path)
    grandparent = posixpath.dirname(parent)
    parent_basename = posixpath.basename(parent)
    (parent_root, parent_ext) = posixpath.splitext(parent_basename)
    if parent_ext == '.lproj':
      variant_name = parent_root
    if grandparent == '':
      grandparent = None

    # Putting a directory inside a variant group is not currently supported.
    assert not is_dir or variant_name is None

    path_split = path.split(posixpath.sep)
    if len(path_split) == 1 or \
       ((is_dir or variant_name != None) and len(path_split) == 2) or \
       not hierarchical:
      # The PBXFileReference or PBXVariantGroup will be added to or gotten
      # from this PBXGroup, no recursion necessary.
      if variant_name is None:
        # Add or get a PBXFileReference.
        file_ref = self.GetChildByPath(path)
        if file_ref != None:
          assert file_ref.__class__ == PBXFileReference
        else:
          file_ref = PBXFileReference({'path': path})
          self.AppendChild(file_ref)
      else:
        # Add or get a PBXVariantGroup.  The variant group name is the same
        # as the basename (MainMenu.nib in the example above).  grandparent
        # specifies the path to the variant group itself, and path_split[-2:]
        # is the path of the specific variant relative to its group.
        variant_group_name = posixpath.basename(path)
        variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
            variant_group_name, grandparent)
        variant_path = posixpath.sep.join(path_split[-2:])
        variant_ref = variant_group_ref.GetChildByPath(variant_path)
        if variant_ref != None:
          assert variant_ref.__class__ == PBXFileReference
        else:
          variant_ref = PBXFileReference({'name': variant_name,
                                          'path': variant_path})
          variant_group_ref.AppendChild(variant_ref)
        # The caller is interested in the variant group, not the specific
        # variant file.
        file_ref = variant_group_ref
      return file_ref
    else:
      # Hierarchical recursion.  Add or get a PBXGroup corresponding to the
      # outermost path component, and then recurse into it, chopping off
      # that path component.
      next_dir = path_split[0]
      group_ref = self.GetChildByPath(next_dir)
      if group_ref != None:
        assert group_ref.__class__ == PBXGroup
      else:
        group_ref = PBXGroup({'path': next_dir})
        self.AppendChild(group_ref)
      return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
                                          hierarchical)

  def AddOrGetVariantGroupByNameAndPath(self, name, path):
    """Returns an existing or new PBXVariantGroup for name and path.

    If a PBXVariantGroup identified by the name and path arguments is
    already present as a child of this object, it is returned.  Otherwise, a
    new PBXVariantGroup with the correct properties is created, added as a
    child, and returned.

    This method will generally be called by AddOrGetFileByPath, which knows
    when to create a variant group based on the structure of the pathnames
    passed to it.
    """
    key = (name, path)
    if key in self._variant_children_by_name_and_path:
      variant_group_ref = self._variant_children_by_name_and_path[key]
      assert variant_group_ref.__class__ == PBXVariantGroup
      return variant_group_ref

    variant_group_properties = {'name': name}
    if path != None:
      variant_group_properties['path'] = path
    variant_group_ref = PBXVariantGroup(variant_group_properties)
    self.AppendChild(variant_group_ref)

    return variant_group_ref

  def TakeOverOnlyChild(self, recurse=False):
    """If this PBXGroup has only one child and it's also a PBXGroup, take
    it over by making all of its children this object's children.

    This function will continue to take over only children when those
    children are groups.  If there are three PBXGroups representing a, b,
    and c, with c inside b and b inside a, and a and b have no other
    children, this will result in a taking over both b and c, forming a
    PBXGroup for a/b/c.

    If recurse is True, this function will recurse into children and ask
    them to collapse themselves by taking over only children as well.
    Assuming an example hierarchy with files at a/b/c/d1, a/b/c/d2, and
    a/b/c/d3/e/f (d1, d2, and f are files, the rest are groups), recursion
    will result in a group for a/b/c containing a group for d3/e.
    """
    # At this stage, check that child class types are PBXGroup exactly,
    # instead of using isinstance.  The only subclass of PBXGroup,
    # PBXVariantGroup, should not participate in reparenting in the same
    # way: reparenting by merging different object types would be wrong.
    while len(self._properties['children']) == 1 and \
          self._properties['children'][0].__class__ == PBXGroup:
      # Loop to take over the innermost only-child group possible.

      child = self._properties['children'][0]

      # Assume the child's properties, including its children.  Save a copy
      # of this object's old properties, because they'll still be needed.
      # This object retains its existing id and parent attributes.
      old_properties = self._properties
      self._properties = child._properties
      # NOTE(review): _variant_children_by_name_and_path is not refreshed
      # here, only _children_by_path -- presumably safe because only plain
      # PBXGroup children are taken over, but worth confirming.
      self._children_by_path = child._children_by_path

      if not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>':
        # The child was relative to its parent.  Fix up the path.  Note that
        # children with a sourceTree other than "<group>" are not relative
        # to their parents, so no path fix-up is needed in that case.
        if 'path' in old_properties:
          if 'path' in self._properties:
            # Both the original parent and child have paths set.
            self._properties['path'] = posixpath.join(old_properties['path'],
                                                      self._properties['path'])
          else:
            # Only the original parent has a path, use it.
            self._properties['path'] = old_properties['path']
        if 'sourceTree' in old_properties:
          # The original parent had a sourceTree set, use it.
          self._properties['sourceTree'] = old_properties['sourceTree']

      # If the original parent had a name set, keep using it.  If the
      # original parent didn't have a name but the child did, let the
      # child's name live on.  If the name attribute seems unnecessary now,
      # get rid of it.
      if 'name' in old_properties and old_properties['name'] != None and \
         old_properties['name'] != self.Name():
        self._properties['name'] = old_properties['name']
      if 'name' in self._properties and 'path' in self._properties and \
         self._properties['name'] == self._properties['path']:
        del self._properties['name']

      # Notify all children of their new parent.
      for child in self._properties['children']:
        child.parent = self

    # If asked to recurse, recurse.
    if recurse:
      for child in self._properties['children']:
        if child.__class__ == PBXGroup:
          child.TakeOverOnlyChild(recurse)

  def SortGroup(self):
    """Sorts this group's children with Compare(), then recurses into any
    child groups."""
    # Python 2 sorted() cmp= comparator, paired with the Compare method
    # defined on XCHierarchicalElement.
    self._properties['children'] = \
        sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))

    # Recurse.
    for child in self._properties['children']:
      if isinstance(child, PBXGroup):
        child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
  """Abstract base for objects usable as the fileRef property of a
  PBXBuildFile."""

  def PathHashables(self):
    """Returns hashables identifying this object by its full path.

    A PBXBuildFile that refers to this object calls this method because the
    object's own hashables are not specific and unique enough on their own;
    the hashables of every ancestor XCHierarchicalElement are folded in,
    outermost ancestor first.
    """
    hashables = []
    node = self
    while node != None and isinstance(node, XCHierarchicalElement):
      # Prepend each level's hashables so ancestors end up before
      # descendants in the result.
      hashables = node.Hashables() + hashables
      node = node.parent
    return hashables
class XCContainerPortal(XCObject):
  # Abstract marker base for objects that can be used as the containerPortal
  # property of PBXContainerItemProxy.  Adds no behavior of its own.
  pass
class XCRemoteObject(XCObject):
  # Abstract marker base for objects that can be used as the
  # remoteGlobalIDString property of PBXContainerItemProxy.  Adds no
  # behavior of its own.
  pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
  """A reference to a file or directory on disk.

  On construction, a path ending in "/" is treated as a directory ("folder"
  type), and when no explicit or last-known file type is supplied, one is
  inferred from the path's extension.
  """
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'explicitFileType': [0, str, 0, 0],
    'lastKnownFileType': [0, str, 0, 0],
    'name': [0, str, 0, 0],
    'path': [0, str, 0, 1],
  })

  # Weird output rules for PBXFileReference.
  _should_print_single_line = True
  # super
  _encode_transforms = XCFileLikeElement._alternate_encode_transforms

  # Maps lowercased path extensions to Xcode lastKnownFileType identifiers.
  # Hoisted to a class attribute so the dict is built once instead of on
  # every instantiation.
  # TODO(mark): This is the replacement for a replacement for a quick hack.
  # It is no longer incredibly sucky, but this list needs to be extended.
  _extension_map = {
    'a': 'archive.ar',
    'app': 'wrapper.application',
    'bdic': 'file',
    'bundle': 'wrapper.cfbundle',
    'c': 'sourcecode.c.c',
    'cc': 'sourcecode.cpp.cpp',
    'cpp': 'sourcecode.cpp.cpp',
    'css': 'text.css',
    'cxx': 'sourcecode.cpp.cpp',
    'dylib': 'compiled.mach-o.dylib',
    'framework': 'wrapper.framework',
    'h': 'sourcecode.c.h',
    'hxx': 'sourcecode.cpp.h',
    'icns': 'image.icns',
    'java': 'sourcecode.java',
    'js': 'sourcecode.javascript',
    'm': 'sourcecode.c.objc',
    'mm': 'sourcecode.cpp.objcpp',
    'nib': 'wrapper.nib',
    'o': 'compiled.mach-o.objfile',
    'pdf': 'image.pdf',
    'pl': 'text.script.perl',
    'plist': 'text.plist.xml',
    'pm': 'text.script.perl',
    'png': 'image.png',
    'py': 'text.script.python',
    'r': 'sourcecode.rez',
    'rez': 'sourcecode.rez',
    's': 'sourcecode.asm',
    'strings': 'text.plist.strings',
    'ttf': 'file',
    'xcconfig': 'text.xcconfig',
    'xcdatamodel': 'wrapper.xcdatamodel',
    'xib': 'file.xib',
    'y': 'sourcecode.yacc',
  }

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCFileLikeElement.__init__(self, properties, id, parent)

    # A trailing slash marks a directory reference; strip it and remember
    # that this reference is a "folder".
    if 'path' in self._properties and self._properties['path'].endswith('/'):
      self._properties['path'] = self._properties['path'][:-1]
      is_dir = True
    else:
      is_dir = False

    # Only infer a type when the caller didn't supply one.
    if 'path' in self._properties and \
       not 'lastKnownFileType' in self._properties and \
       not 'explicitFileType' in self._properties:
      if is_dir:
        file_type = 'folder'
      else:
        basename = posixpath.basename(self._properties['path'])
        # Check the map using a lowercase extension.
        # TODO(mark): Maybe it should try with the original case first and
        # fall back to lowercase, in case there are any instances where case
        # matters.  There currently aren't.
        ext = posixpath.splitext(basename)[1]
        if ext != '':
          ext = ext[1:].lower()

        # TODO(mark): "text" is the default value, but "file" is appropriate
        # for unrecognized files not containing text.  Xcode seems to choose
        # based on content.
        file_type = self._extension_map.get(ext, 'text')

      self._properties['lastKnownFileType'] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
  """PBXVariantGroup is used by Xcode to represent localizations."""
  # No additions to the schema relative to PBXGroup.
  pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
  """A single named build configuration (such as "Debug" or "Release") and
  its associated build settings."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'baseConfigurationReference': [0, PBXFileReference, 0, 0],
    'buildSettings': [0, dict, 0, 1, {}],
    'name': [0, str, 0, 1],
  })

  def HasBuildSetting(self, key):
    """Returns whether key is present in this configuration's settings."""
    return key in self._properties['buildSettings']

  def GetBuildSetting(self, key):
    """Returns the value of the build setting key; raises KeyError when the
    setting is absent."""
    return self._properties['buildSettings'][key]

  def SetBuildSetting(self, key, value):
    """Assigns value to the build setting key."""
    # TODO(mark): If a list, copy?
    settings = self._properties['buildSettings']
    settings[key] = value

  def AppendBuildSetting(self, key, value):
    """Appends value to the list-valued build setting key, creating the list
    if the setting is not yet present."""
    settings = self._properties['buildSettings']
    settings.setdefault(key, []).append(value)

  def DelBuildSetting(self, key):
    """Removes the build setting key; a no-op when it is absent."""
    self._properties['buildSettings'].pop(key, None)

  def SetBaseConfiguration(self, value):
    """Points this configuration at a base .xcconfig file reference."""
    self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
  """The list of build configurations attached to a project or target,
  with convenience accessors that operate across all child
  XCBuildConfiguration objects at once."""

  # _configs is the default list of configurations.
  # NOTE(review): these two XCBuildConfiguration instances are shared by the
  # schema default -- presumably the schema machinery copies defaults per
  # instance; confirm before relying on it.
  _configs = [ XCBuildConfiguration({'name': 'Debug'}),
               XCBuildConfiguration({'name': 'Release'}) ]

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
    'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
    'defaultConfigurationName': [0, str, 0, 1, 'Release'],
  })

  def Name(self):
    # Synthesizes a display name from the owning object's type and name.
    return 'Build configuration list for ' + \
           self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'

  def ConfigurationNamed(self, name):
    """Convenience accessor to obtain an XCBuildConfiguration by name.

    Raises KeyError when no child configuration carries the name.
    """
    for configuration in self._properties['buildConfigurations']:
      if configuration._properties['name'] == name:
        return configuration

    raise KeyError, name

  def DefaultConfiguration(self):
    """Convenience accessor to obtain the default XCBuildConfiguration."""
    return self.ConfigurationNamed(self._properties['defaultConfigurationName'])

  def HasBuildSetting(self, key):
    """Determines the state of a build setting in all XCBuildConfiguration
    child objects.

    If all child objects have key in their build settings, and the value is
    the same in all child objects, returns 1.

    If no child objects have the key in their build settings, returns 0.

    If some, but not all, child objects have the key in their build settings,
    or if any children have different values for the key, returns -1.
    """
    has = None
    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_has = configuration.HasBuildSetting(key)
      if has is None:
        has = configuration_has
      elif has != configuration_has:
        # Present in some configurations but not others.
        return -1

      if configuration_has:
        configuration_value = configuration.GetBuildSetting(key)
        if value is None:
          value = configuration_value
        elif value != configuration_value:
          # Present everywhere seen so far, but with differing values.
          return -1

    if not has:
      return 0

    return 1

  def GetBuildSetting(self, key):
    """Gets the build setting for key.

    All child XCConfiguration objects must have the same value set for the
    setting, or a ValueError will be raised.
    """
    # TODO(mark): This is wrong for build settings that are lists.  The list
    # contents should be compared (and a list copy returned?)

    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_value = configuration.GetBuildSetting(key)
      if value is None:
        value = configuration_value
      else:
        if value != configuration_value:
          raise ValueError, 'Variant values for ' + key

    return value

  def SetBuildSetting(self, key, value):
    """Sets the build setting for key to value in all child
    XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBuildSetting(key, value)

  def AppendBuildSetting(self, key, value):
    """Appends value to the build setting for key, which is treated as a
    list, in all child XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.AppendBuildSetting(key, value)

  def DelBuildSetting(self, key):
    """Deletes the build setting key from all child XCBuildConfiguration
    objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.DelBuildSetting(key)

  def SetBaseConfiguration(self, value):
    """Sets the build configuration in all child XCBuildConfiguration
    objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
  """Wraps an XCFileLikeElement for inclusion in a build phase's file
  list."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'fileRef': [0, XCFileLikeElement, 0, 1],
    'settings': [0, str, 0, 0],  # hack, it's a dict
  })

  # Weird output rules for PBXBuildFile.
  _should_print_single_line = True
  # super
  _encode_transforms = XCObject._alternate_encode_transforms

  def Name(self):
    """Example: "main.cc in Sources"."""
    return '%s in %s' % (self._properties['fileRef'].Name(),
                         self.parent.Name())

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Name() alone is not a complete pathname, so fold in the referenced
    # file's path-derived hashables.  No two PBXBuildFiles should wind up
    # with the same set unless someone adds the same file multiple times to
    # the same target, which would be considered invalid anyway.
    hashables += self._properties['fileRef'].PathHashables()
    return hashables
class XCBuildPhase(XCObject):
  """Abstract base for build phase classes.  Not represented in a project
  file.

  Attributes:
    _files_by_path: A dict mapping each path of a child in the files list by
      path (keys) to the corresponding PBXBuildFile children (values).
    _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
      to the corresponding PBXBuildFile children (values).
  """

  # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
  # actually have a "files" list.  XCBuildPhase should not have "files" but
  # another abstract subclass of it should provide this, and concrete build
  # phase types that do have "files" lists should be derived from that new
  # abstract subclass.  XCBuildPhase should only provide buildActionMask and
  # runOnlyForDeploymentPostprocessing, and not files or the various
  # file-related methods and attributes.

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildActionMask': [0, int, 0, 1, 0x7fffffff],
    'files': [1, PBXBuildFile, 1, 1, []],
    'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCObject.__init__(self, properties, id, parent)
    # Index any files supplied via properties so path and file-reference
    # lookups work immediately.
    self._files_by_path = {}
    self._files_by_xcfilelikeelement = {}
    for pbxbuildfile in self._properties.get('files', []):
      self._AddBuildFileToDicts(pbxbuildfile)

  def FileGroup(self, path):
    # Subclasses must override this by returning a two-element tuple.  The
    # first item in the tuple should be the PBXGroup to which "path" should
    # be added, either as a child or deeper descendant.  The second item
    # should be a boolean indicating whether files should be added into
    # hierarchical groups or one single flat group.
    raise NotImplementedError, \
          self.__class__.__name__ + ' must implement FileGroup'

  def _AddPathToDict(self, pbxbuildfile, path):
    """Adds path to the dict tracking paths belonging to this build phase.

    If the path is already a member of this build phase, raises an exception.
    """
    if path in self._files_by_path:
      raise ValueError, 'Found multiple build files with path ' + path
    self._files_by_path[path] = pbxbuildfile

  def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
    """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.

    If path is specified, then it is the path that is being added to the
    phase, and pbxbuildfile must contain either a PBXFileReference directly
    referencing that path, or it must contain a PBXVariantGroup that itself
    contains a PBXFileReference referencing the path.

    If path is not specified, either the PBXFileReference's path or the
    paths of all children of the PBXVariantGroup are taken as being added to
    the phase.

    If the path is already present in the phase, raises an exception.

    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
    are already present in the phase, referenced by a different PBXBuildFile
    object, raises an exception.  This does not raise an exception when
    a PBXFileReference or PBXVariantGroup reappear and are referenced by the
    same PBXBuildFile that has already introduced them, because in the case
    of PBXVariantGroup objects, they may correspond to multiple paths that
    are not all added simultaneously.  When this situation occurs, the path
    needs to be added to _files_by_path, but nothing needs to change in
    _files_by_xcfilelikeelement, and the caller should have avoided adding
    the PBXBuildFile if it is already present in the list of children.
    """
    xcfilelikeelement = pbxbuildfile._properties['fileRef']

    paths = []
    if path != None:
      # It's best when the caller provides the path.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        paths.append(path)
      # NOTE(review): when path is provided for a plain (non-variant)
      # reference, nothing is added to _files_by_path in this branch --
      # presumably intentional, but worth confirming against callers.
    else:
      # If the caller didn't provide a path, there can be either multiple
      # paths (PBXVariantGroup) or one.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        for variant in xcfilelikeelement._properties['children']:
          paths.append(variant.FullPath())
      else:
        paths.append(xcfilelikeelement.FullPath())

    # Add the paths first, because if something's going to raise, the
    # messages provided by _AddPathToDict are more useful owing to its
    # having access to a real pathname and not just an object's Name().
    for a_path in paths:
      self._AddPathToDict(pbxbuildfile, a_path)

    # If another PBXBuildFile references this XCFileLikeElement, there's a
    # problem.
    if xcfilelikeelement in self._files_by_xcfilelikeelement and \
       self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
      raise ValueError, 'Found multiple build files for ' + \
                        xcfilelikeelement.Name()
    self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile

  def AppendBuildFile(self, pbxbuildfile, path=None):
    # Callers should use this instead of calling
    # AppendProperty('files', pbxbuildfile) directly because this function
    # maintains the object's dicts.  Better yet, callers can just call
    # AddFile with a pathname and not worry about building their own
    # PBXBuildFile objects.
    self.AppendProperty('files', pbxbuildfile)
    self._AddBuildFileToDicts(pbxbuildfile, path)

  def AddFile(self, path, settings=None):
    """Adds the file at path to this phase, creating the PBXBuildFile (and
    the group-hierarchy entry via FileGroup) as needed."""
    (file_group, hierarchical) = self.FileGroup(path)
    file_ref = file_group.AddOrGetFileByPath(path, hierarchical)

    if file_ref in self._files_by_xcfilelikeelement and \
       isinstance(file_ref, PBXVariantGroup):
      # There's already a PBXBuildFile in this phase corresponding to the
      # PBXVariantGroup.  path just provides a new variant that belongs to
      # the group.  Add the path to the dict.
      pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
      self._AddBuildFileToDicts(pbxbuildfile, path)
    else:
      # Add a new PBXBuildFile to get file_ref into the phase.
      if settings is None:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
      else:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref,
                                     'settings': settings})
      self.AppendBuildFile(pbxbuildfile, path)
class PBXHeadersBuildPhase(XCBuildPhase):
  """Build phase that copies header files."""
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Headers'

  def FileGroup(self, path):
    # Delegate group selection to the owning project's root-group routing.
    return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
  """Build phase that copies resource files."""
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Resources'

  def FileGroup(self, path):
    # Delegate group selection to the owning project's root-group routing.
    return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
  """Build phase that compiles source files."""
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Sources'

  def FileGroup(self, path):
    # Delegate group selection to the owning project's root-group routing.
    return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
  """Build phase that links against frameworks and libraries."""
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Frameworks'

  def FileGroup(self, path):
    """Routes .o files to the project's usual root groups; everything else
    goes into the flat Frameworks group."""
    ext = posixpath.splitext(path)[1]
    if ext != '':
      ext = ext[1:].lower()
    # .o files are added to Xcode Frameworks phases, but conceptually aren't
    # frameworks, they're more like sources or intermediates.  Redirect them
    # to show up in one of those other groups.
    if ext == 'o':
      return self.PBXProjectAncestor().RootGroupForPath(path)
    return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
  """Build phase that runs a user-supplied shell script."""

  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'inputPaths': [1, str, 0, 1, []],
    'name': [0, str, 0, 0],
    'outputPaths': [1, str, 0, 1, []],
    'shellPath': [0, str, 0, 1, '/bin/sh'],
    'shellScript': [0, str, 0, 1],
    'showEnvVarsInLog': [0, int, 0, 0],
  })

  def Name(self):
    """Returns the phase's explicit name, or "ShellScript" by default."""
    return self._properties.get('name', 'ShellScript')
class PBXCopyFilesBuildPhase(XCBuildPhase):
  """Build phase that copies files into a destination folder, expressed as
  a (dstSubfolderSpec, dstPath) pair."""

  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'dstPath': [0, str, 0, 1],
    'dstSubfolderSpec': [0, int, 0, 1],
    'name': [0, str, 0, 0],
  })

  # path_tree_re matches "$(DIR)/path" or just "$(DIR)".  Match group 1 is
  # "DIR", match group 3 is "path" or None.
  path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')

  # path_tree_to_subfolder maps names of Xcode variables to the associated
  # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
  path_tree_to_subfolder = {
    'BUILT_PRODUCTS_DIR': 16,  # Products Directory
    # Other types that can be chosen via the Xcode UI.
    # TODO(mark): Map Xcode variable names to these.
    # : 1,  # Wrapper
    # : 6,  # Executables: 6
    # : 7,  # Resources
    # : 15,  # Java Resources
    # : 10,  # Frameworks
    # : 11,  # Shared Frameworks
    # : 12,  # Shared Support
    # : 13,  # PlugIns
  }

  def Name(self):
    # Returns the phase's explicit name when one was set, else "CopyFiles".
    if 'name' in self._properties:
      return self._properties['name']

    return 'CopyFiles'

  def FileGroup(self, path):
    # Delegate group selection to the owning project's root-group routing.
    return self.PBXProjectAncestor().RootGroupForPath(path)

  def SetDestination(self, path):
    """Set the dstSubfolderSpec and dstPath properties from path.

    path may be specified in the same notation used for XCHierarchicalElements,
    specifically, "$(DIR)/path".

    Raises ValueError for a path that is neither absolute nor rooted in an
    Xcode variable.
    """
    path_tree_match = self.path_tree_re.search(path)
    if path_tree_match:
      # Everything else needs to be relative to an Xcode variable.
      path_tree = path_tree_match.group(1)
      relative_path = path_tree_match.group(3)

      if path_tree in self.path_tree_to_subfolder:
        subfolder = self.path_tree_to_subfolder[path_tree]
        if relative_path is None:
          # "$(DIR)" with no trailing path component.
          relative_path = ''
      else:
        # The path starts with an unrecognized Xcode variable
        # name like $(SRCROOT).  Xcode will still handle this
        # as an "absolute path" that starts with the variable.
        subfolder = 0
        relative_path = path
    elif path.startswith('/'):
      # Special case.  Absolute paths are in dstSubfolderSpec 0.
      subfolder = 0
      relative_path = path[1:]
    else:
      raise ValueError, 'Can\'t use path %s in a %s' % \
                        (path, self.__class__.__name__)

    self._properties['dstPath'] = relative_path
    self._properties['dstSubfolderSpec'] = subfolder
class PBXBuildRule(XCObject):
  """A custom rule telling Xcode how to process files of a given type."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'compilerSpec': [0, str, 0, 1],
    'filePatterns': [0, str, 0, 0],
    'fileType': [0, str, 0, 1],
    'isEditable': [0, int, 0, 1, 1],
    'outputFiles': [1, str, 0, 1, []],
    'script': [0, str, 0, 0],
  })

  def Name(self):
    """Not very inspired, but it's what Xcode uses."""
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Mix in the properties that distinguish one build rule from another.
    props = self._properties
    hashables.append(props['fileType'])
    if 'filePatterns' in props:
      hashables.append(props['filePatterns'])

    return hashables
class PBXContainerItemProxy(XCObject):
  # When referencing an item in this project file, containerPortal is the
  # PBXProject root object of this project file.  When referencing an item in
  # another project file, containerPortal is a PBXFileReference identifying
  # the other project file.
  #
  # When serving as a proxy to an XCTarget (in this project file or another),
  # proxyType is 1.  When serving as a proxy to a PBXFileReference (in another
  # project file), proxyType is 2.  Type 2 is used for references to the
  # products of the other project file's targets.
  #
  # Xcode is weird about remoteGlobalIDString.  Usually, it's printed without
  # a comment, indicating that it's tracked internally simply as a string, but
  # sometimes it's printed with a comment (usually when the object is initially
  # created), indicating that it's tracked as a project file object at least
  # sometimes.  This module always tracks it as an object, but contains a hack
  # to prevent it from printing the comment in the project file output.  See
  # _XCKVPrint.
  _schema = XCObject._schema.copy()
  _schema.update({
    'containerPortal':      [0, XCContainerPortal, 0, 1],
    'proxyType':            [0, int,               0, 1],
    'remoteGlobalIDString': [0, XCRemoteObject,    0, 1],
    'remoteInfo':           [0, str,               0, 1],
  })

  def __repr__(self):
    """Debug representation: '<PBXContainerItemProxy 'portal.gyp:info' at 0x...>'."""
    portal_name = self._properties['containerPortal'].Name()
    label = '%s.gyp:%s' % (portal_name, self._properties['remoteInfo'])
    return '<%s %r at 0x%x>' % (self.__class__.__name__, label, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    """Mix the hashables of the referenced weak objects into this object's,
    so proxies to distinct objects get distinct IDs."""
    # super
    hashables = XCObject.Hashables(self)

    for key in ('containerPortal', 'remoteGlobalIDString'):
      hashables.extend(self._properties[key].Hashables())

    return hashables
class PBXTargetDependency(XCObject):
  # The "target" property accepts an XCTarget object, and obviously not
  # NoneType.  But XCTarget is defined below, so it can't be put into the
  # schema yet.  The definition of PBXTargetDependency can't be moved below
  # XCTarget because XCTarget's own schema references PBXTargetDependency.
  # Python doesn't deal well with this circular relationship, and doesn't have
  # a real way to do forward declarations.  To work around, the type of
  # the "target" property is reset below, after XCTarget is defined.
  #
  # At least one of "name" and "target" is required.
  _schema = XCObject._schema.copy()
  _schema.update({
    'name':        [0, str,                   0, 0],
    'target':      [0, None.__class__,        0, 0],
    'targetProxy': [0, PBXContainerItemProxy, 1, 1],
  })

  def __repr__(self):
    """Debug representation labeled with the dependency's name or, failing
    that, the name of the target it points at."""
    label = self._properties.get('name') or self._properties['target'].Name()
    return '<%s %r at 0x%x>' % (self.__class__.__name__, label, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    """The strong targetProxy child is what distinguishes one dependency
    from another; fold its hashables into this object's."""
    # super
    hashables = XCObject.Hashables(self)

    hashables.extend(self._properties['targetProxy'].Hashables())

    return hashables
class PBXReferenceProxy(XCFileLikeElement):
  """A file-like stand-in for a product owned by another project file,
  reached through the remoteRef PBXContainerItemProxy (proxyType 2)."""
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'fileType':  [0, str,                   0, 1],
    'path':      [0, str,                   0, 1],
    'remoteRef': [0, PBXContainerItemProxy, 1, 1],
  })
class XCTarget(XCRemoteObject):
  # An XCTarget is really just an XCObject, the XCRemoteObject thing is just
  # to allow PBXProject to be used in the remoteGlobalIDString property of
  # PBXContainerItemProxy.
  #
  # Setting a "name" property at instantiation may also affect "productName",
  # which may in turn affect the "PRODUCT_NAME" build setting in children of
  # "buildConfigurationList".  See __init__ below.
  _schema = XCRemoteObject._schema.copy()
  _schema.update({
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'buildPhases':            [1, XCBuildPhase,        1, 1, []],
    'dependencies':           [1, PBXTargetDependency, 1, 1, []],
    'name':                   [0, str,                 0, 1],
    'productName':            [0, str,                 0, 1],
  })

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initialize the target and propagate "name" into "productName" and the
    PRODUCT_NAME build setting when those were not supplied explicitly.

    force_outdir, force_prefix and force_extension are unused here; they are
    accepted for signature compatibility with subclasses (PBXNativeTarget).
    """
    # super
    XCRemoteObject.__init__(self, properties, id, parent)

    # Set up additional defaults not expressed in the schema.  If a "name"
    # property was supplied, set "productName" if it is not present.  Also set
    # the "PRODUCT_NAME" build setting in each configuration, but only if
    # the setting is not present in any build configuration.
    if 'name' in self._properties:
      if not 'productName' in self._properties:
        self.SetProperty('productName', self._properties['name'])

    if 'productName' in self._properties:
      if 'buildConfigurationList' in self._properties:
        configs = self._properties['buildConfigurationList']
        if configs.HasBuildSetting('PRODUCT_NAME') == 0:
          configs.SetBuildSetting('PRODUCT_NAME',
                                  self._properties['productName'])

  def AddDependency(self, other):
    """Add a dependency on other, an XCTarget in this or another project file,
    wiring up the PBXContainerItemProxy/PBXTargetDependency pair Xcode needs."""
    pbxproject = self.PBXProjectAncestor()
    other_pbxproject = other.PBXProjectAncestor()
    if pbxproject == other_pbxproject:
      # Add a dependency to another target in the same project file.
      container = PBXContainerItemProxy({'containerPortal':      pbxproject,
                                         'proxyType':            1,
                                         'remoteGlobalIDString': other,
                                         'remoteInfo':           other.Name()})
      dependency = PBXTargetDependency({'target':      other,
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)
    else:
      # Add a dependency to a target in a different project file.  The portal
      # is the PBXFileReference to the other project file, and the dependency
      # carries only the target's name, not the target itself.
      other_project_ref = \
          pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
      container = PBXContainerItemProxy({
            'containerPortal':      other_project_ref,
            'proxyType':            1,
            'remoteGlobalIDString': other,
            'remoteInfo':           other.Name(),
          })
      dependency = PBXTargetDependency({'name':        other.Name(),
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)

  # Proxy all of these through to the build configuration list.
  def ConfigurationNamed(self, name):
    return self._properties['buildConfigurationList'].ConfigurationNamed(name)

  def DefaultConfiguration(self):
    return self._properties['buildConfigurationList'].DefaultConfiguration()

  def HasBuildSetting(self, key):
    return self._properties['buildConfigurationList'].HasBuildSetting(key)

  def GetBuildSetting(self, key):
    return self._properties['buildConfigurationList'].GetBuildSetting(key)

  def SetBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].SetBuildSetting(key, \
                                                                      value)

  def AppendBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
                                                                         value)

  def DelBuildSetting(self, key):
    return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property.  See PBXTargetDependency._schema
# above: the forward reference to XCTarget could not be expressed there, so it
# is patched in now that XCTarget exists.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
  # buildPhases is overridden in the schema to be able to set defaults.
  #
  # NOTE: Contrary to most objects, it is advisable to set parent when
  # constructing PBXNativeTarget.  A parent of an XCTarget must be a PBXProject
  # object.  A parent reference is required for a PBXNativeTarget during
  # construction to be able to set up the target defaults for productReference,
  # because a PBXBuildFile object must be created for the target and it must
  # be added to the PBXProject's mainGroup hierarchy.
  _schema = XCTarget._schema.copy()
  _schema.update({
    'buildPhases':      [1, XCBuildPhase,     1, 1,
                         [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
    'buildRules':       [1, PBXBuildRule,     1, 1, []],
    'productReference': [0, PBXFileReference, 0, 1],
    'productType':      [0, str,              0, 1],
  })

  # Mapping from Xcode product-types to settings.  The settings are:
  #  filetype : used for explicitFileType in the project file
  #  prefix : the prefix for the file name
  #  suffix : the suffix for the file name
  _product_filetypes = {
    'com.apple.product-type.application':     ['wrapper.application',
                                               '', '.app'],
    'com.apple.product-type.bundle':          ['wrapper.cfbundle',
                                               '', '.bundle'],
    'com.apple.product-type.framework':       ['wrapper.framework',
                                               '', '.framework'],
    'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
                                               'lib', '.dylib'],
    'com.apple.product-type.library.static':  ['archive.ar',
                                               'lib', '.a'],
    'com.apple.product-type.tool':            ['compiled.mach-o.executable',
                                               '', ''],
    'com.googlecode.gyp.xcode.bundle':        ['compiled.mach-o.dylib',
                                               '', '.so'],
  }

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initialize the target and, when possible, synthesize its
    productReference file in the project's Products group.

    force_outdir, force_prefix and force_extension override the product's
    output directory, file-name prefix and extension respectively.
    """
    # super
    XCTarget.__init__(self, properties, id, parent)

    # The productReference can only be synthesized when the product type is
    # known and a PBXProject ancestor (hence a Products group) is reachable.
    if 'productName' in self._properties and \
       'productType' in self._properties and \
       not 'productReference' in self._properties and \
       self._properties['productType'] in self._product_filetypes:
      products_group = None
      pbxproject = self.PBXProjectAncestor()
      if pbxproject != None:
        products_group = pbxproject.ProductsGroup()

      if products_group != None:
        (filetype, prefix, suffix) = \
            self._product_filetypes[self._properties['productType']]

        # Xcode does not have a distinct type for loadable modules that are
        # pure BSD targets (not in a bundle wrapper).  GYP allows such modules
        # to be specified by setting a target type to loadable_module without
        # having mac_bundle set.  These are mapped to the pseudo-product type
        # com.googlecode.gyp.xcode.bundle.
        #
        # By picking up this special type and converting it to a dynamic
        # library (com.apple.product-type.library.dynamic) with fix-ups,
        # single-file loadable modules can be produced.
        #
        # MACH_O_TYPE is changed to mh_bundle to produce the proper file type
        # (as opposed to mh_dylib).  In order for linking to succeed,
        # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
        # cleared.  They are meaningless for type mh_bundle.
        #
        # Finally, the .so extension is forcibly applied over the default
        # (.dylib), unless another forced extension is already selected.
        # .dylib is plainly wrong, and .bundle is used by loadable_modules in
        # bundle wrappers (com.apple.product-type.bundle).  .so seems an odd
        # choice because it's used as the extension on many other systems that
        # don't distinguish between linkable shared libraries and non-linkable
        # loadable modules, but there's precedent: Python loadable modules on
        # Mac OS X use an .so extension.
        if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
          self._properties['productType'] = \
              'com.apple.product-type.library.dynamic'
          self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
          self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
          self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
          if force_extension is None:
            force_extension = suffix[1:]

        if force_extension is not None:
          # If it's a wrapper (bundle), set WRAPPER_EXTENSION.
          if filetype.startswith('wrapper.'):
            self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
          else:
            # Extension override.
            suffix = '.' + force_extension
            self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)

          if filetype.startswith('compiled.mach-o.executable'):
            # Executables carry the forced extension in the product name
            # itself rather than as a separate suffix.
            product_name = self._properties['productName']
            product_name += suffix
            suffix = ''
            self.SetProperty('productName', product_name)
            self.SetBuildSetting('PRODUCT_NAME', product_name)

        # Xcode handles most prefixes based on the target type, however there
        # are exceptions.  If a "BSD Dynamic Library" target is added in the
        # Xcode UI, Xcode sets EXECUTABLE_PREFIX.  This check duplicates that
        # behavior.
        if force_prefix is not None:
          prefix = force_prefix
        if filetype.startswith('wrapper.'):
          self.SetBuildSetting('WRAPPER_PREFIX', prefix)
        else:
          self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)

        if force_outdir is not None:
          self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)

        # TODO(tvl): Remove the below hack.
        #    http://code.google.com/p/gyp/issues/detail?id=122
        # Some targets include the prefix in the target_name.  These targets
        # really should just add a product_name setting that doesn't include
        # the prefix.  For example:
        #  target_name = 'libevent', product_name = 'event'
        # This check cleans up for them.
        product_name = self._properties['productName']
        prefix_len = len(prefix)
        if prefix_len and (product_name[:prefix_len] == prefix):
          product_name = product_name[prefix_len:]
          self.SetProperty('productName', product_name)
          self.SetBuildSetting('PRODUCT_NAME', product_name)

        ref_props = {
          'explicitFileType': filetype,
          'includeInIndex':   0,
          'path':             prefix + product_name + suffix,
          'sourceTree':       'BUILT_PRODUCTS_DIR',
        }
        file_ref = PBXFileReference(ref_props)
        products_group.AppendChild(file_ref)
        self.SetProperty('productReference', file_ref)

  def GetBuildPhaseByType(self, type):
    """Return the single build phase of class `type`, or None if absent.

    Asserts if more than one phase of the requested type exists.
    """
    if not 'buildPhases' in self._properties:
      return None

    the_phase = None
    for phase in self._properties['buildPhases']:
      if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project file,
        # but phases like PBXSourcesBuildPhase may only be present singly, and
        # this function is intended as an aid to GetBuildPhaseByType.  Loop
        # over the entire list of phases and assert if more than one of the
        # desired type is found.
        assert the_phase is None
        the_phase = phase

    return the_phase

  def HeadersPhase(self):
    """Return the PBXHeadersBuildPhase, creating and inserting one if needed."""
    headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
    if headers_phase is None:
      headers_phase = PBXHeadersBuildPhase()

      # The headers phase should come before the resources, sources, and
      # frameworks phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXResourcesBuildPhase) or \
           isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, headers_phase)
      headers_phase.parent = self

    return headers_phase

  def ResourcesPhase(self):
    """Return the PBXResourcesBuildPhase, creating and inserting one if needed."""
    resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
    if resources_phase is None:
      resources_phase = PBXResourcesBuildPhase()

      # The resources phase should come before the sources and frameworks
      # phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, resources_phase)
      resources_phase.parent = self

    return resources_phase

  def SourcesPhase(self):
    """Return the PBXSourcesBuildPhase, creating one (appended) if needed."""
    sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
    if sources_phase is None:
      sources_phase = PBXSourcesBuildPhase()
      self.AppendProperty('buildPhases', sources_phase)

    return sources_phase

  def FrameworksPhase(self):
    """Return the PBXFrameworksBuildPhase, creating one (appended) if needed."""
    frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
    if frameworks_phase is None:
      frameworks_phase = PBXFrameworksBuildPhase()
      self.AppendProperty('buildPhases', frameworks_phase)

    return frameworks_phase

  def AddDependency(self, other):
    """Add a dependency on other and, when other produces a linkable product
    (static library, or non-bundle dylib/framework), also link against it by
    adding its product file to this target's frameworks phase."""
    # super
    XCTarget.AddDependency(self, other)

    static_library_type = 'com.apple.product-type.library.static'
    shared_library_type = 'com.apple.product-type.library.dynamic'
    framework_type = 'com.apple.product-type.framework'
    if isinstance(other, PBXNativeTarget) and \
       'productType' in self._properties and \
       self._properties['productType'] != static_library_type and \
       'productType' in other._properties and \
       (other._properties['productType'] == static_library_type or \
        ((other._properties['productType'] == shared_library_type or \
          other._properties['productType'] == framework_type) and \
         ((not other.HasBuildSetting('MACH_O_TYPE')) or
          other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):

      file_ref = other.GetProperty('productReference')

      pbxproject = self.PBXProjectAncestor()
      other_pbxproject = other.PBXProjectAncestor()
      if pbxproject != other_pbxproject:
        # Cross-project link: reference the product through the proxy group
        # maintained for the other project file.
        other_project_product_group = \
            pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
        file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)

      self.FrameworksPhase().AppendProperty('files',
                                            PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
  """A target that only aggregates other targets through its dependencies;
  it produces no product of its own, so plain XCTarget behavior suffices."""
  pass
class PBXProject(XCContainerPortal):
  # A PBXProject is really just an XCObject, the XCContainerPortal thing is
  # just to allow PBXProject to be used in the containerPortal property of
  # PBXContainerItemProxy.
  """

  Attributes:
    path: "sample.xcodeproj".  TODO(mark) Document me!
    _other_pbxprojects: A dictionary, keyed by other PBXProject objects.  Each
                        value is a reference to the dict in the
                        projectReferences list associated with the keyed
                        PBXProject.
  """

  _schema = XCContainerPortal._schema.copy()
  _schema.update({
    'attributes':             [0, dict,                0, 0],
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'compatibilityVersion':   [0, str,                 0, 1, 'Xcode 3.2'],
    'hasScannedForEncodings': [0, int,                 0, 1, 1],
    'mainGroup':              [0, PBXGroup,            1, 1, PBXGroup()],
    'projectDirPath':         [0, str,                 0, 1, ''],
    'projectReferences':      [1, dict,                0, 0],
    'projectRoot':            [0, str,                 0, 1, ''],
    'targets':                [1, XCTarget,            1, 1, []],
  })

  def __init__(self, properties=None, id=None, parent=None, path=None):
    # path is stored outside _properties: it names the .xcodeproj on disk and
    # is not serialized into the project file itself.
    self.path = path
    self._other_pbxprojects = {}
    # super
    return XCContainerPortal.__init__(self, properties, id, parent)

  def Name(self):
    """Return the project's name: its path basename minus any '.xcodeproj'."""
    name = self.path
    if name[-10:] == '.xcodeproj':
      name = name[:-10]
    return posixpath.basename(name)

  def Path(self):
    return self.path

  def Comment(self):
    return 'Project object'

  def Children(self):
    # super
    children = XCContainerPortal.Children(self)

    # Add children that the schema doesn't know about.  Maybe there's a more
    # elegant way around this, but this is the only case where we need to own
    # objects in a dictionary (that is itself in a list), and three lines for
    # a one-off isn't that big a deal.
    if 'projectReferences' in self._properties:
      for reference in self._properties['projectReferences']:
        children.append(reference['ProductGroup'])

    return children

  def PBXProjectAncestor(self):
    # The project is its own project ancestor; ends the parent walk.
    return self

  def _GroupByName(self, name):
    """Return the mainGroup child named `name`, creating it (and the
    mainGroup itself) if necessary."""
    if not 'mainGroup' in self._properties:
      self.SetProperty('mainGroup', PBXGroup())

    main_group = self._properties['mainGroup']
    group = main_group.GetChildByName(name)
    if group is None:
      group = PBXGroup({'name': name})
      main_group.AppendChild(group)

    return group

  # SourceGroup and ProductsGroup are created by default in Xcode's own
  # templates.
  def SourceGroup(self):
    return self._GroupByName('Source')

  def ProductsGroup(self):
    return self._GroupByName('Products')

  # IntermediatesGroup is used to collect source-like files that are generated
  # by rules or script phases and are placed in intermediate directories such
  # as DerivedSources.
  def IntermediatesGroup(self):
    return self._GroupByName('Intermediates')

  # FrameworksGroup and ProjectsGroup are top-level groups used to collect
  # frameworks and projects.
  def FrameworksGroup(self):
    return self._GroupByName('Frameworks')

  def ProjectsGroup(self):
    return self._GroupByName('Projects')

  def RootGroupForPath(self, path):
    """Returns a PBXGroup child of this object to which path should be added.

    This method is intended to choose between SourceGroup and
    IntermediatesGroup on the basis of whether path is present in a source
    directory or an intermediates directory.  For the purposes of this
    determination, any path located within a derived file directory such as
    PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
    directory.

    The returned value is a two-element tuple.  The first element is the
    PBXGroup, and the second element specifies whether that group should be
    organized hierarchically (True) or as a single flat list (False).
    """
    # TODO(mark): make this a class variable and bind to self on call?
    # Also, this list is nowhere near exhaustive.
    # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
    # gyp.generator.xcode.  There should probably be some way for that module
    # to push the names in, rather than having to hard-code them here.
    source_tree_groups = {
      'DERIVED_FILE_DIR':         (self.IntermediatesGroup, True),
      'INTERMEDIATE_DIR':         (self.IntermediatesGroup, True),
      'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
      'SHARED_INTERMEDIATE_DIR':  (self.IntermediatesGroup, True),
    }

    (source_tree, path) = SourceTreeAndPathFromPath(path)
    if source_tree != None and source_tree in source_tree_groups:
      (group_func, hierarchical) = source_tree_groups[source_tree]
      group = group_func()
      return (group, hierarchical)

    # TODO(mark): make additional choices based on file extension.

    return (self.SourceGroup(), True)

  def AddOrGetFileInRootGroup(self, path):
    """Returns a PBXFileReference corresponding to path in the correct group
    according to RootGroupForPath's heuristics.

    If an existing PBXFileReference for path exists, it will be returned.
    Otherwise, one will be created and returned.
    """
    (group, hierarchical) = self.RootGroupForPath(path)
    return group.AddOrGetFileByPath(path, hierarchical)

  def RootGroupsTakeOverOnlyChildren(self, recurse=False):
    """Calls TakeOverOnlyChild for all groups in the main group."""
    for group in self._properties['mainGroup']._properties['children']:
      if isinstance(group, PBXGroup):
        group.TakeOverOnlyChild(recurse)

  def SortGroups(self):
    """Sort the main group's children into Xcode's canonical order."""
    # Sort the children of the mainGroup (like "Source" and "Products")
    # according to their defined order.
    self._properties['mainGroup']._properties['children'] = \
        sorted(self._properties['mainGroup']._properties['children'],
               cmp=lambda x,y: x.CompareRootGroup(y))

    # Sort everything else by putting group before files, and going
    # alphabetically by name within sections of groups and files.  SortGroup
    # is recursive.
    for group in self._properties['mainGroup']._properties['children']:
      if not isinstance(group, PBXGroup):
        continue

      if group.Name() == 'Products':
        # The Products group is a special case.  Instead of sorting
        # alphabetically, sort things in the order of the targets that
        # produce the products.  To do this, just build up a new list of
        # products based on the targets.
        products = []
        for target in self._properties['targets']:
          if not isinstance(target, PBXNativeTarget):
            continue
          product = target._properties['productReference']
          # Make sure that the product is already in the products group.
          assert product in group._properties['children']
          products.append(product)

        # Make sure that this process doesn't miss anything that was already
        # in the products group.
        assert len(products) == len(group._properties['children'])
        group._properties['children'] = products
      else:
        group.SortGroup()

  def AddOrGetProjectReference(self, other_pbxproject):
    """Add a reference to another project file (via PBXProject object) to this
    one.

    Returns [ProductGroup, ProjectRef].  ProductGroup is a PBXGroup object in
    this project file that contains a PBXReferenceProxy object for each
    product of each PBXNativeTarget in the other project file.  ProjectRef is
    a PBXFileReference to the other project file.

    If this project file already references the other project file, the
    existing ProductGroup and ProjectRef are returned.  The ProductGroup will
    still be updated if necessary.
    """
    if not 'projectReferences' in self._properties:
      self._properties['projectReferences'] = []

    product_group = None
    project_ref = None

    if not other_pbxproject in self._other_pbxprojects:
      # This project file isn't yet linked to the other one.  Establish the
      # link.
      product_group = PBXGroup({'name': 'Products'})

      # ProductGroup is strong.
      product_group.parent = self

      # There's nothing unique about this PBXGroup, and if left alone, it will
      # wind up with the same set of hashables as all other PBXGroup objects
      # owned by the projectReferences list.  Add the hashables of the
      # remote PBXProject that it's related to.
      product_group._hashables.extend(other_pbxproject.Hashables())

      # The other project reports its path as relative to the same directory
      # that this project's path is relative to.  The other project's path
      # is not necessarily already relative to this project.  Figure out the
      # pathname that this project needs to use to refer to the other one.
      this_path = posixpath.dirname(self.Path())
      projectDirPath = self.GetProperty('projectDirPath')
      if projectDirPath:
        if posixpath.isabs(projectDirPath[0]):
          this_path = projectDirPath
        else:
          this_path = posixpath.join(this_path, projectDirPath)
      other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)

      # ProjectRef is weak (it's owned by the mainGroup hierarchy).
      project_ref = PBXFileReference({
            'lastKnownFileType': 'wrapper.pb-project',
            'path':              other_path,
            'sourceTree':        'SOURCE_ROOT',
          })
      self.ProjectsGroup().AppendChild(project_ref)

      ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
      self._other_pbxprojects[other_pbxproject] = ref_dict
      self.AppendProperty('projectReferences', ref_dict)

      # Xcode seems to sort this list case-insensitively
      self._properties['projectReferences'] = \
          sorted(self._properties['projectReferences'], cmp=lambda x,y:
                 cmp(x['ProjectRef'].Name().lower(),
                     y['ProjectRef'].Name().lower()))
    else:
      # The link already exists.  Pull out the relevant data.
      project_ref_dict = self._other_pbxprojects[other_pbxproject]
      product_group = project_ref_dict['ProductGroup']
      project_ref = project_ref_dict['ProjectRef']

    self._SetUpProductReferences(other_pbxproject, product_group, project_ref)

    return [product_group, project_ref]

  def _SetUpProductReferences(self, other_pbxproject, product_group,
                              project_ref):
    """Populate product_group with a PBXReferenceProxy for each native
    target product of other_pbxproject that is not yet represented."""
    # TODO(mark): This only adds references to products in other_pbxproject
    # when they don't exist in this pbxproject.  Perhaps it should also
    # remove references from this pbxproject that are no longer present in
    # other_pbxproject.  Perhaps it should update various properties if they
    # change.
    for target in other_pbxproject._properties['targets']:
      if not isinstance(target, PBXNativeTarget):
        continue

      other_fileref = target._properties['productReference']
      if product_group.GetChildByRemoteObject(other_fileref) is None:
        # Xcode sets remoteInfo to the name of the target and not the name
        # of its product, despite this proxy being a reference to the product.
        container_item = PBXContainerItemProxy({
              'containerPortal':      project_ref,
              'proxyType':            2,
              'remoteGlobalIDString': other_fileref,
              'remoteInfo':           target.Name()
            })
        # TODO(mark): Does sourceTree get copied straight over from the other
        # project?  Can the other project ever have lastKnownFileType here
        # instead of explicitFileType?  (Use it if so?)  Can path ever be
        # unset?  (I don't think so.)  Can other_fileref have name set, and
        # does it impact the PBXReferenceProxy if so?  These are the questions
        # that perhaps will be answered one day.
        reference_proxy = PBXReferenceProxy({
              'fileType':   other_fileref._properties['explicitFileType'],
              'path':       other_fileref._properties['path'],
              'sourceTree': other_fileref._properties['sourceTree'],
              'remoteRef':  container_item,
            })

        product_group.AppendChild(reference_proxy)

  def SortRemoteProductReferences(self):
    """Sort each remote ProductGroup to match the remote project's target
    order, as Xcode does."""
    # For each remote project file, sort the associated ProductGroup in the
    # same order that the targets are sorted in the remote project file.  This
    # is the sort order used by Xcode.

    def CompareProducts(x, y, remote_products):
      # x and y are PBXReferenceProxy objects.  Go through their associated
      # PBXContainerItem to get the remote PBXFileReference, which will be
      # present in the remote_products list.
      x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
      y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
      x_index = remote_products.index(x_remote)
      y_index = remote_products.index(y_remote)

      # Use the order of each remote PBXFileReference in remote_products to
      # determine the sort order.
      return cmp(x_index, y_index)

    for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
      # Build up a list of products in the remote project file, ordered the
      # same as the targets that produce them.
      remote_products = []
      for target in other_pbxproject._properties['targets']:
        if not isinstance(target, PBXNativeTarget):
          continue
        remote_products.append(target._properties['productReference'])

      # Sort the PBXReferenceProxy children according to the list of remote
      # products.
      product_group = ref_dict['ProductGroup']
      product_group._properties['children'] = sorted(
          product_group._properties['children'],
          cmp=lambda x, y: CompareProducts(x, y, remote_products))
class XCProjectFile(XCObject):
  """The top-level wrapper written to disk: archive/object versions plus the
  rootObject PBXProject.  Not a real Xcode object; it has no ID of its own."""
  _schema = XCObject._schema.copy()
  _schema.update({
    'archiveVersion': [0, int,        0, 1, 1],
    'classes':        [0, dict,       0, 1, {}],
    'objectVersion':  [0, int,        0, 1, 45],
    'rootObject':     [0, PBXProject, 1, 1],
  })

  def SetXcodeVersion(self, version):
    """Set objectVersion and the root object's compatibilityVersion from an
    Xcode version string like '3.2'.

    Raises:
      Exception: if version is not one of the supported Xcode versions.
    """
    version_to_object_version = {
      '2.4': 45,
      '3.0': 45,
      '3.1': 45,
      '3.2': 46,
    }
    if not version in version_to_object_version:
      supported_str = ', '.join(sorted(version_to_object_version.keys()))
      raise Exception(
        'Unsupported Xcode version %s (supported: %s)' %
        ( version, supported_str ) )
    compatibility_version = 'Xcode %s' % version
    self._properties['rootObject'].SetProperty('compatibilityVersion',
                                               compatibility_version)
    self.SetProperty('objectVersion', version_to_object_version[version]);

  def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
    # Although XCProjectFile is implemented here as an XCObject, it's not a
    # proper object in the Xcode sense, and it certainly doesn't have its own
    # ID.  Pass through an attempt to update IDs to the real root object.
    if recursive:
      self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)

  def Print(self, file=sys.stdout):
    """Serialize the entire project file to `file` in Xcode's plist format."""
    self.VerifyHasRequiredProperties()

    # Add the special "objects" property, which will be caught and handled
    # separately during printing.  This structure allows a fairly standard
    # loop do the normal printing.
    self._properties['objects'] = {}
    self._XCPrint(file, 0, '// !$*UTF8*$!\n')
    if self._should_print_single_line:
      self._XCPrint(file, 0, '{ ')
    else:
      self._XCPrint(file, 0, '{\n')
    for property, value in sorted(self._properties.iteritems(),
                                  cmp=lambda x, y: cmp(x, y)):
      if property == 'objects':
        self._PrintObjects(file)
      else:
        self._XCKVPrint(file, 1, property, value)
    self._XCPrint(file, 0, '}\n')
    del self._properties['objects']

  def _PrintObjects(self, file):
    """Print every descendant object, grouped into per-class sections sorted
    by class name, and by object ID within each section."""
    if self._should_print_single_line:
      self._XCPrint(file, 0, 'objects = {')
    else:
      self._XCPrint(file, 1, 'objects = {\n')

    objects_by_class = {}
    for object in self.Descendants():
      if object == self:
        continue
      class_name = object.__class__.__name__
      if not class_name in objects_by_class:
        objects_by_class[class_name] = []
      objects_by_class[class_name].append(object)

    for class_name in sorted(objects_by_class):
      self._XCPrint(file, 0, '\n')
      self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
      for object in sorted(objects_by_class[class_name],
                           cmp=lambda x, y: cmp(x.id, y.id)):
        object.Print(file)
      self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')

    if self._should_print_single_line:
      self._XCPrint(file, 0, '}; ')
    else:
      self._XCPrint(file, 1, '};\n')
| mit |
G-P-S/depot_tools | third_party/logilab/common/urllib2ext.py | 92 | 3416 | from __future__ import print_function
import logging
import urllib2
import kerberos as krb
class GssapiAuthError(Exception):
    """Raised on error during the GSSAPI/Kerberos authentication process."""
import re
# Extracts the token that follows "Negotiate" in a (possibly comma-separated)
# WWW-Authenticate challenge, matched case-insensitively.
RGX = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
def get_negociate_value(headers):
    """Return the payload of the first Negotiate challenge found in the
    www-authenticate headers, or None if no such challenge is present."""
    for challenge in headers.getheaders('www-authenticate'):
        found = RGX.search(challenge)
        if found is not None:
            return found.group(1)
    return None
class HTTPGssapiAuthHandler(urllib2.BaseHandler):
    """Negotiate HTTP authentication using context from GSSAPI"""

    handler_order = 400 # before Digest Auth

    def __init__(self):
        self._reset()

    def _reset(self):
        # Retry counter and GSSAPI security context for the current exchange.
        self._retried = 0
        self._context = None

    def clean_context(self):
        # Release the GSSAPI context if one was established.
        if self._context is not None:
            krb.authGSSClientClean(self._context)

    def http_error_401(self, req, fp, code, msg, headers):
        """Respond to a 401 by performing the GSSAPI Negotiate handshake.

        Returns the server response on success, None when no Negotiate
        challenge is offered; gives up with an HTTPError after 5 retries.
        The context is cleaned and state reset on every exit path.
        """
        try:
            if self._retried > 5:
                raise urllib2.HTTPError(req.get_full_url(), 401,
                                        "negotiate auth failed", headers, None)
            self._retried += 1
            logging.debug('gssapi handler, try %s' % self._retried)
            negotiate = get_negociate_value(headers)
            if negotiate is None:
                logging.debug('no negociate found in a www-authenticate header')
                return None
            logging.debug('HTTPGssapiAuthHandler: negotiate 1 is %r' % negotiate)
            # Step 1: feed the server's challenge into a fresh client context
            # and send back the generated token.
            result, self._context = krb.authGSSClientInit("HTTP@%s" % req.get_host())
            if result < 1:
                raise GssapiAuthError("HTTPGssapiAuthHandler: init failed with %d" % result)
            result = krb.authGSSClientStep(self._context, negotiate)
            if result < 0:
                raise GssapiAuthError("HTTPGssapiAuthHandler: step 1 failed with %d" % result)
            client_response = krb.authGSSClientResponse(self._context)
            logging.debug('HTTPGssapiAuthHandler: client response is %s...' % client_response[:10])
            req.add_unredirected_header('Authorization', "Negotiate %s" % client_response)
            server_response = self.parent.open(req)
            # Step 2: verify the server's mutual-authentication token, if any.
            negotiate = get_negociate_value(server_response.info())
            if negotiate is None:
                logging.warning('HTTPGssapiAuthHandler: failed to authenticate server')
            else:
                logging.debug('HTTPGssapiAuthHandler negotiate 2: %s' % negotiate)
                result = krb.authGSSClientStep(self._context, negotiate)
                if result < 1:
                    raise GssapiAuthError("HTTPGssapiAuthHandler: step 2 failed with %d" % result)
            return server_response
        except GssapiAuthError as exc:
            # Log and fall through (returning None lets urllib2 surface the 401).
            logging.error(repr(exc))
        finally:
            self.clean_context()
            self._reset()
if __name__ == '__main__':
    # Manual smoke test: fetch the URL given on the command line through
    # the GSSAPI handler. Python 2 only (httplib/cookielib/urllib2).
    import sys
    # debug: dump the raw HTTP conversation
    import httplib
    httplib.HTTPConnection.debuglevel = 1
    httplib.HTTPSConnection.debuglevel = 1
    # debug
    import logging
    logging.basicConfig(level=logging.DEBUG)
    # handle cookies
    import cookielib
    cj = cookielib.CookieJar()
    ch = urllib2.HTTPCookieProcessor(cj)
    # test with url sys.argv[1]
    h = HTTPGssapiAuthHandler()
    response = urllib2.build_opener(h, ch).open(sys.argv[1])
    print('\nresponse: %s\n--------------\n' % response.code, response.info())
| bsd-3-clause |
mstriemer/zamboni | mkt/api/tests/test_authentication.py | 19 | 8078 | from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from mock import Mock, patch
from multidb.pinning import this_thread_is_pinned, unpin_this_thread
from nose.tools import eq_, ok_
from rest_framework.request import Request
from mkt.access.models import Group, GroupUser
from mkt.api import authentication
from mkt.api.middleware import (APIBaseMiddleware, RestOAuthMiddleware,
RestSharedSecretMiddleware)
from mkt.api.models import Access
from mkt.api.tests.test_oauth import OAuthClient
from mkt.site.fixtures import fixture
from mkt.site.helpers import absolutify
from mkt.site.tests import TestCase
from mkt.users.models import UserProfile
class TestRestOAuthAuthentication(TestCase):
    """Exercise RestOAuthAuthentication against OAuth-signed requests."""

    fixtures = fixture('user_2519', 'group_admin', 'group_editor')

    def setUp(self):
        self.api_name = 'foo'
        self.profile = UserProfile.objects.get(pk=2519)
        # The API rejects developers who have not accepted the agreement.
        self.profile.update(read_dev_agreement=datetime.today())
        self.access = Access.objects.create(key='test_oauth_key',
                                            secret='super secret',
                                            user=self.profile)
        self.auth = authentication.RestOAuthAuthentication()
        self.middlewares = [APIBaseMiddleware, RestOAuthMiddleware]
        unpin_this_thread()

    def call(self, client=None):
        """Build an OAuth-signed POST request run through the middlewares."""
        client = client or OAuthClient(self.access)
        # Make a fake POST somewhere. We use POST in order to properly test db
        # pinning after auth.
        url = absolutify('/api/whatever')
        req = RequestFactory().post(
            url, HTTP_HOST='testserver',
            HTTP_AUTHORIZATION=client.sign('POST', url)[1]['Authorization'])
        req.user = AnonymousUser()
        for m in self.middlewares:
            m().process_request(req)
        return req

    def add_group_user(self, user, *names):
        """Add `user` to each of the named groups.

        Bug fix: the original ignored its `user` argument and always added
        self.profile; use the argument so the helper works for any user
        (all current callers pass self.profile, so behavior is unchanged
        for them).
        """
        for name in names:
            group = Group.objects.get(name=name)
            GroupUser.objects.create(user=user, group=group)

    def test_accepted(self):
        # A well-signed request authenticates as the profile, no token.
        req = Request(self.call())
        eq_(self.auth.authenticate(req), (self.profile, None))

    def test_request_token_fake(self):
        # Correct key but wrong secret: auth fails and the db thread must
        # not have been pinned to the master.
        c = Mock()
        c.key = self.access.key
        c.secret = 'mom'
        ok_(not self.auth.authenticate(
            Request(self.call(client=OAuthClient(c)))))
        ok_(not this_thread_is_pinned())

    def test_request_admin(self):
        # Admins are not allowed to authenticate over OAuth.
        self.add_group_user(self.profile, 'Admins')
        ok_(not self.auth.authenticate(Request(self.call())))

    def test_request_has_role(self):
        # Non-admin roles (e.g. reviewers) may authenticate.
        self.add_group_user(self.profile, 'App Reviewers')
        ok_(self.auth.authenticate(Request(self.call())))
class TestRestAnonymousAuthentication(TestCase):
    """RestAnonymousAuthentication should always yield an AnonymousUser."""

    def setUp(self):
        self.auth = authentication.RestAnonymousAuthentication()
        self.request = RequestFactory().post('/api/whatever')
        # Start unpinned so we can assert auth does not pin the db thread.
        unpin_this_thread()

    def test_auth(self):
        user, token = self.auth.authenticate(self.request)
        ok_(isinstance(user, AnonymousUser))
        eq_(token, None)
        # Anonymous auth must not pin this thread to the master database.
        ok_(not this_thread_is_pinned())
# SECRET_KEY is pinned so the pre-computed shared-secret strings below
# (presumably email,hmac,unique-id triples — verify against the middleware)
# keep validating.
@patch.object(settings, 'SECRET_KEY', 'gubbish')
class TestSharedSecretAuthentication(TestCase):
    """Exercise RestSharedSecretAuthentication via query-string and header."""

    fixtures = fixture('user_2519')

    def setUp(self):
        self.auth = authentication.RestSharedSecretAuthentication()
        self.profile = UserProfile.objects.get(pk=2519)
        self.profile.update(email=self.profile.email)
        self.middlewares = [APIBaseMiddleware,
                            RestSharedSecretMiddleware]
        unpin_this_thread()

    def test_session_auth_query(self):
        # Credential passed in the `_user` query parameter.
        req = RequestFactory().post(
            '/api/?_user=cfinke@m.com,56b6f1a3dd735d962c56ce7d8f46e02ec1d4748d'
            '2c00c407d75f0969d08bb9c68c31b3371aa8130317815c89e5072e31bb94b4121'
            'c5c165f3515838d4d6c60c4,165d631d3c3045458b4516242dad7ae')
        req.user = AnonymousUser()
        for m in self.middlewares:
            m().process_request(req)
        ok_(self.auth.authenticate(Request(req)))
        ok_(req.user.is_authenticated())
        eq_(self.profile.pk, req.user.pk)

    def test_failed_session_auth_query(self):
        # A malformed credential must leave the request anonymous.
        req = RequestFactory().post('/api/?_user=bogus')
        req.user = AnonymousUser()
        for m in self.middlewares:
            m().process_request(req)
        ok_(not self.auth.authenticate(Request(req)))
        ok_(not req.user.is_authenticated())

    def test_session_auth(self):
        # Same credential, but passed in the Authorization header.
        req = RequestFactory().post(
            '/api/',
            HTTP_AUTHORIZATION='mkt-shared-secret '
                               'cfinke@m.com,56b6f1a3dd735d962c56'
                               'ce7d8f46e02ec1d4748d2c00c407d75f0969d08bb'
                               '9c68c31b3371aa8130317815c89e5072e31bb94b4'
                               '121c5c165f3515838d4d6c60c4,165d631d3c3045'
                               '458b4516242dad7ae')
        req.user = AnonymousUser()
        for m in self.middlewares:
            m().process_request(req)
        ok_(self.auth.authenticate(Request(req)))
        ok_(req.user.is_authenticated())
        eq_(self.profile.pk, req.user.pk)

    def test_failed_session_auth(self):
        req = RequestFactory().post(
            '/api/',
            HTTP_AUTHORIZATION='mkt-shared-secret bogus')
        req.user = AnonymousUser()
        for m in self.middlewares:
            m().process_request(req)
        ok_(not self.auth.authenticate(Request(req)))
        ok_(not req.user.is_authenticated())

    def test_session_auth_no_post(self):
        # No credential at all (neither header nor query string).
        req = RequestFactory().post('/api/')
        req.user = AnonymousUser()
        for m in self.middlewares:
            m().process_request(req)
        ok_(not self.auth.authenticate(Request(req)))
        ok_(not req.user.is_authenticated())
@patch.object(settings, 'SECRET_KEY', 'gubbish')
class TestMultipleAuthenticationDRF(TestCase):
    """Check that chained DRF authenticators pick the right user."""

    fixtures = fixture('user_2519')

    def setUp(self):
        self.profile = UserProfile.objects.get(pk=2519)

    def test_multiple_shared_works(self):
        request = RequestFactory().post(
            '/api',
            HTTP_AUTHORIZATION='mkt-shared-secret '
                               'cfinke@m.com,56b6f1a3dd735d962c56'
                               'ce7d8f46e02ec1d4748d2c00c407d75f0969d08bb'
                               '9c68c31b3371aa8130317815c89e5072e31bb94b4'
                               '121c5c165f3515838d4d6c60c4,165d631d3c3045'
                               '458b4516242dad7ae')
        request.user = AnonymousUser()
        drf_request = Request(request)
        # Start with an AnonymousUser on the request, because that's a classic
        # situation: we already went through a middleware, it didn't find a
        # session cookie, if set request.user = AnonymousUser(), and now we
        # are going through the authentication code in the API.
        request.user = AnonymousUser()
        # Call middleware as they would normally be called.
        APIBaseMiddleware().process_request(request)
        RestSharedSecretMiddleware().process_request(request)
        RestOAuthMiddleware().process_request(request)
        drf_request.authenticators = (
            authentication.RestSharedSecretAuthentication(),
            authentication.RestOAuthAuthentication())
        # Both the DRF wrapper and the underlying Django request must agree.
        eq_(drf_request.user, self.profile)
        eq_(drf_request._request.user, self.profile)
        eq_(drf_request.user.is_authenticated(), True)
        eq_(drf_request._request.user.is_authenticated(), True)
        eq_(drf_request.user.pk, self.profile.pk)
        eq_(drf_request._request.user.pk, self.profile.pk)

    def test_multiple_fail(self):
        # No credentials at all: every authenticator falls through.
        request = RequestFactory().post('/api')
        request.user = AnonymousUser()
        drf_request = Request(request)
        request.user = AnonymousUser()
        drf_request.authenticators = (
            authentication.RestSharedSecretAuthentication(),
            authentication.RestOAuthAuthentication())
        eq_(drf_request.user.is_authenticated(), False)
        eq_(drf_request._request.user.is_authenticated(), False)
| bsd-3-clause |
frodrigo/navitia | source/jormungandr/jormungandr/modules/v1_routing/v1_routing.py | 6 | 9118 | # coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from jormungandr.interfaces.v1 import Uri
from jormungandr.interfaces.v1 import Coverage
from jormungandr.interfaces.v1 import Journeys
from jormungandr.interfaces.v1 import Schedules
from jormungandr.interfaces.v1 import Places
from jormungandr.interfaces.v1 import Ptobjects
from jormungandr.interfaces.v1 import Coord
from jormungandr.interfaces.v1 import Disruptions
from jormungandr.interfaces.v1 import Calendars
from jormungandr.interfaces.v1 import converters_collection_type
from jormungandr.interfaces.v1 import Status
from werkzeug.routing import BaseConverter, FloatConverter, PathConverter
from jormungandr.modules_loader import AModule
from resources import Index
class RegionConverter(BaseConverter):
    """URL converter matching the region part of a request path."""

    def __init__(self, *args, **kwargs):
        super(RegionConverter, self).__init__(*args, **kwargs)
        self.type_ = "string"
        # Any run of characters excluding '(', '/', ';' and ')'.
        self.regex = '[^(/;)]+'
class LonConverter(FloatConverter):
    """URL converter matching the longitude of a coordinate."""

    def __init__(self, *args, **kwargs):
        super(LonConverter, self).__init__(*args, **kwargs)
        self.type_ = "float"
        # Optionally-signed decimal number, e.g. -2.5 or 48.
        self.regex = '-?\\d+(\\.\\d+)?'
class LatConverter(FloatConverter):
    """URL converter matching the latitude of a coordinate."""

    def __init__(self, *args, **kwargs):
        super(LatConverter, self).__init__(*args, **kwargs)
        self.type_ = "float"
        # Optionally-signed decimal number, e.g. -2.5 or 48.
        self.regex = '-?\\d+(\\.\\d+)?'
class UriConverter(PathConverter):
    """URL converter for the first (path-like) part of the uri."""

    def __init__(self, *args, **kwargs):
        super(UriConverter, self).__init__(*args, **kwargs)
        self.type_ = "string"
class IdConverter(BaseConverter):
    """URL converter for the id of the object being queried."""

    def __init__(self, *args, **kwargs):
        super(IdConverter, self).__init__(*args, **kwargs)
        self.type_ = "string"
class V1Routing(AModule):
    """Module registering every route of the v1 navitia API."""

    def __init__(self, api, name):
        super(V1Routing, self).__init__(api, name,
                                        description='Current version of navitia API',
                                        status='current',
                                        index_endpoint='index')

    def setup(self):
        """Install URL converters and register all v1 endpoints.

        Registration order matters for URL matching; do not reorder
        without checking for overlapping rules.
        """
        # Custom converters used by the URL patterns below.
        self.api.app.url_map.converters['region'] = RegionConverter
        self.api.app.url_map.converters['lon'] = LonConverter
        self.api.app.url_map.converters['lat'] = LatConverter
        self.api.app.url_map.converters['uri'] = UriConverter
        self.api.app.url_map.converters['id'] = IdConverter
        self.api.app.url_map.strict_slashes = False

        # Index and technical status endpoints.
        self.module_resources_manager.register_resource(Index.Index())
        self.add_resource(Index.Index,
                          '/',
                          '',
                          endpoint='index')
        self.module_resources_manager.register_resource(Index.TechnicalStatus())
        self.add_resource(Index.TechnicalStatus,
                          '/status',
                          endpoint='technical_status')

        # URL prefixes shared by most endpoints: a coverage can be
        # addressed by region name or by a lon;lat coordinate.
        coverage = '/coverage/'
        region = coverage + '<region:region>/'
        coord = coverage + '<lon:lon>;<lat:lat>/'

        self.add_resource(Coverage.Coverage,
                          coverage,
                          region,
                          coord,
                          endpoint='coverage')

        self.add_resource(Coord.Coord,
                          '/coord/<lon:lon>;<lat:lat>',
                          endpoint='coord')

        # Generic collection endpoints (list, by id, and a redirect for
        # bare /coverage/<collection>/<id> URLs).
        collecs = converters_collection_type.collections_to_resource_type.keys()
        for collection in collecs:
            self.add_resource(getattr(Uri, collection)(True),
                              region + collection,
                              coord + collection,
                              region + '<uri:uri>/' + collection,
                              coord + '<uri:uri>/' + collection,
                              endpoint=collection + '.collection')

            self.add_resource(getattr(Uri, collection)(False),
                              region + collection + '/<id:id>',
                              coord + collection + '/<id:id>',
                              region + '<uri:uri>/' + collection + '/<id:id>',
                              coord + '<uri:uri>/' + collection + '/<id:id>',
                              endpoint=collection + '.id')

            self.add_url_rule(
                '/coverage/' + collection + '/<string:id>',
                collection + '.redirect',
                Uri.Redirect)

        # Collections that can additionally be looked up by external code,
        # without a coverage prefix.
        collecs = ["routes", "lines", "line_groups", "networks", "stop_areas", "stop_points",
                   "vehicle_journeys"]
        for collection in collecs:
            self.add_resource(getattr(Uri, collection)(True),
                              '/' + collection,
                              endpoint=collection + '.external_codes')

        # Places / pt_objects search endpoints.
        self.add_resource(Places.Places,
                          region + 'places',
                          coord + 'places',
                          '/places',
                          endpoint='places')

        self.add_resource(Ptobjects.Ptobjects,
                          region + 'pt_objects',
                          coord + 'pt_objects',
                          endpoint='pt_objects')

        self.add_resource(Places.PlaceUri,
                          region + 'places/<id:id>',
                          coord + 'places/<id:id>',
                          endpoint='place_uri')

        self.add_resource(Places.PlacesNearby,
                          region + 'places_nearby',
                          coord + 'places_nearby',
                          region + '<uri:uri>/places_nearby',
                          coord + '<uri:uri>/places_nearby',
                          endpoint='places_nearby')

        # Journey computation.
        self.add_resource(Journeys.Journeys,
                          region + '<uri:uri>/journeys',
                          coord + '<uri:uri>/journeys',
                          region + 'journeys',
                          coord + 'journeys',
                          '/journeys',
                          endpoint='journeys')

        # Schedules: route schedules, next arrivals/departures, stop schedules.
        self.add_resource(Schedules.RouteSchedules,
                          region + '<uri:uri>/route_schedules',
                          coord + '<uri:uri>/route_schedules',
                          '/route_schedules',
                          endpoint='route_schedules')

        self.add_resource(Schedules.NextArrivals,
                          region + '<uri:uri>/arrivals',
                          coord + '<uri:uri>/arrivals',
                          region + 'arrivals',
                          coord + 'arrivals',
                          endpoint='arrivals')

        self.add_resource(Schedules.NextDepartures,
                          region + '<uri:uri>/departures',
                          coord + '<uri:uri>/departures',
                          region + 'departures',
                          coord + 'departures',
                          endpoint='departures')

        self.add_resource(Schedules.StopSchedules,
                          region + '<uri:uri>/stop_schedules',
                          coord + '<uri:uri>/stop_schedules',
                          '/stop_schedules',
                          endpoint='stop_schedules')

        # Disruptions, per-region status and calendars.
        self.add_resource(Disruptions.TrafficReport,
                          region + 'traffic_reports',
                          region + '<uri:uri>/traffic_reports',
                          endpoint='traffic_reports')

        self.add_resource(Status.Status,
                          region + 'status',
                          endpoint='status')

        self.add_resource(Calendars.Calendars,
                          region + 'calendars',
                          region + '<uri:uri>/calendars',
                          region + "calendars/<id:id>",
                          endpoint="calendars")
| agpl-3.0 |
google/offline-content-packager | third_party/nkata/scripts/utils/content.py | 1 | 4034 | # Copyright 2015 The Offline Content Packager Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for bundling and getting sections.
"""
import logging
from os import listdir
from os.path import join
import click
from scripts.transformations import HtmlTransformation
from scripts.utils.fileutil import copy_files
def bundle_content_section(src_path, dst_path, section, config, online_link):
  """Bundles content.

  Calls copy_files after configuring an HtmlTransformation with the link
  color, tracking code and (scheme-normalized) online link.

  Args:
    src_path: Path to the content to be bundled
    dst_path: Path where the bundled content will be written
    section: Section to be bundled
    config: Dictionary containing configuration parameters
    online_link: URL to content online
  """
  # Initialising a list of transformations
  logging.info("Start bundling files from " + section + ".")
  transformations = []

  # Only prepend a scheme when none is present. The previous substring
  # check (`"http://" in online_link`) mangled https URLs into
  # "http://https://...", since "http://" is not a substring of "https://".
  if online_link and not online_link.startswith(("http://", "https://")):
    online_link = "http://" + online_link
  link_color = config["link_color"]
  tracking_code = config["tracking_code"]
  html_transform = HtmlTransformation(color=link_color, code=tracking_code,
                                      link=online_link)
  transformations.append(html_transform)

  paths = (src_path, dst_path)
  copy_files(paths, section, transformations)
  click.echo("\n")
  logging.info("Finish bundling files from " + section + ".")
def bundle_video_section(paths, vid, metadata, transformations, videos_src):
  """Bundle the videos of one section.

  Thin wrapper around copy_files that logs progress before and after
  the copy.

  Args:
    paths: Tuple of source and destination directories
    vid: Video section
    metadata: Video metadata
    transformations: Transformation object
    videos_src: Path to video source directory
  """
  progress_label = "bundling videos from " + vid + "."
  logging.info("Start " + progress_label)
  copy_files(paths, vid, transformations, metadata, videos_src)
  click.echo("\n")
  logging.info("Finish " + progress_label)
def get_divisions(division, ignored_sections):
  """Split `division` into kept and ignored entries.

  An entry is ignored when it, or its first path component, appears in
  `ignored_sections`. Ignored entries have the "<first ignored section>/"
  prefix stripped from them.

  Args:
    division: Iterable of division entries (e.g. "section/sub" paths)
    ignored_sections: Sections to ignore

  Returns:
    Tuple (kept, ignored): the entries to bundle, and the ignored
    entries with the first ignored section's prefix removed.
  """
  def _is_content(item):
    """True unless the item or its leading path component is ignored."""
    if item in ignored_sections:
      return False
    return item.split("/")[0] not in ignored_sections

  # Single pass instead of two comprehensions: works for one-shot
  # iterators too and evaluates the predicate once per item.
  kept = []
  ignored = []
  prefix = None
  for item in division:
    if _is_content(item):
      kept.append(item)
    else:
      if prefix is None:
        # Only reached when ignored_sections is non-empty, matching the
        # original's lazy evaluation of ignored_sections[0].
        prefix = ignored_sections[0] + "/"
      ignored.append(item.replace(prefix, ""))
  return (kept, ignored)
def get_sections(src_dir, ignored_paths):
  """List the content sections found directly under `src_dir`.

  Hidden entries (names starting with ".") and entries whose full path
  appears in `ignored_paths` are skipped.

  Args:
    src_dir: Source directory for content
    ignored_paths: Full paths of entries to be ignored

  Returns:
    List of section names to be processed
  """
  def _keep(entry):
    """True for visible entries that are not explicitly ignored."""
    if entry.startswith("."):
      return False
    return join(src_dir, entry) not in ignored_paths

  return [entry for entry in listdir(src_dir) if _keep(entry)]
| apache-2.0 |
silenceli/nova | nova/tests/functional/v3/test_security_groups.py | 10 | 6114 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.network.security_group import neutron_driver
from nova.tests.functional.v3 import test_servers
def fake_get(*args, **kwargs):
    """Stub for SecurityGroupAPI.get: ignore all arguments and return a
    canned 'default' security group dict."""
    return {
        'id': 1,
        'description': 'default',
        'name': 'default',
        'project_id': 'openstack',
        'rules': [],
    }
def fake_get_instances_security_groups_bindings(self, context, servers,
                                                detailed=False):
    """Stub binding lookup: every server id maps to one group named 'test'."""
    return {server.get('id'): [{'name': 'test'}] for server in servers}
def fake_add_to_instance(self, context, instance, security_group_name):
    """Stub: adding a group to an instance is a no-op."""


def fake_remove_from_instance(self, context, instance, security_group_name):
    """Stub: removing a group from an instance is a no-op."""


def fake_list(self, context, names=None, ids=None, project=None,
              search_opts=None):
    """Stub list: always a single canned 'default' group."""
    return [fake_get()]


def fake_get_instance_security_groups(self, context, instance_uuid,
                                      detailed=False):
    """Stub per-instance lookup: same canned group for every instance."""
    return [fake_get()]


def fake_create_security_group(self, context, name, description):
    """Stub creation: ignore the arguments, return the canned group."""
    return fake_get()
class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
    """API sample tests for os-security-groups with a stubbed neutron driver."""

    extension_name = 'os-security-groups'

    def setUp(self):
        # Force the neutron security group backend, then stub out every
        # call it would make so no real neutron service is needed.
        self.flags(security_group_api=('neutron'))
        super(SecurityGroupsJsonTest, self).setUp()
        self.stubs.Set(neutron_driver.SecurityGroupAPI, 'get', fake_get)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'get_instances_security_groups_bindings',
                       fake_get_instances_security_groups_bindings)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'add_to_instance',
                       fake_add_to_instance)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'remove_from_instance',
                       fake_remove_from_instance)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'list',
                       fake_list)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'get_instance_security_groups',
                       fake_get_instance_security_groups)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'create_security_group',
                       fake_create_security_group)

    def test_server_create(self):
        self._post_server()

    def test_server_get(self):
        uuid = self._post_server()
        response = self._do_get('servers/%s' % uuid)
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        self._verify_response('server-get-resp', subs, response, 200)

    def test_server_detail(self):
        self._post_server()
        response = self._do_get('servers/detail')
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        self._verify_response('servers-detail-resp', subs, response, 200)

    def _get_create_subs(self):
        # Template substitutions for security group creation samples.
        return {
            'group_name': 'default',
            "description": "default",
        }

    def _create_security_group(self):
        subs = self._get_create_subs()
        return self._do_post('os-security-groups',
                             'security-group-post-req', subs)

    def _add_group(self, uuid):
        # Attach the 'test' group to the given server via a server action.
        subs = {
            'group_name': 'test'
        }
        return self._do_post('servers/%s/action' % uuid,
                             'security-group-add-post-req', subs)

    def test_security_group_create(self):
        response = self._create_security_group()
        subs = self._get_create_subs()
        self._verify_response('security-groups-create-resp', subs,
                              response, 200)

    def test_security_groups_list(self):
        # Get api sample of security groups get list request.
        response = self._do_get('os-security-groups')
        subs = self._get_regexes()
        self._verify_response('security-groups-list-get-resp',
                              subs, response, 200)

    def test_security_groups_get(self):
        # Get api sample of security groups get request.
        security_group_id = '11111111-1111-1111-1111-111111111111'
        response = self._do_get('os-security-groups/%s' % security_group_id)
        subs = self._get_regexes()
        self._verify_response('security-groups-get-resp', subs, response, 200)

    def test_security_groups_list_server(self):
        # Get api sample of security groups for a specific server.
        uuid = self._post_server()
        response = self._do_get('servers/%s/os-security-groups' % uuid)
        subs = self._get_regexes()
        self._verify_response('server-security-groups-list-resp',
                              subs, response, 200)

    def test_security_groups_add(self):
        self._create_security_group()
        uuid = self._post_server()
        response = self._add_group(uuid)
        # The action returns 202 with an empty body.
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, '')

    def test_security_groups_remove(self):
        self._create_security_group()
        uuid = self._post_server()
        self._add_group(uuid)
        subs = {
            'group_name': 'test'
        }
        response = self._do_post('servers/%s/action' % uuid,
                                 'security-group-remove-post-req', subs)
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, '')
igurrutxaga/tvalacarta | python/main-classic/channels/mtv.py | 3 | 7778 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# Canal para MTV
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import urlparse,re
import urllib
from core import logger
from core import config
from core import scrapertools
from core.item import Item
DEBUG = False
CHANNELNAME = "mtv"
def isGeneric():
    """Channel capability flag queried by the tvalacarta framework."""
    return True
def mainlist(item):
    """Channel entry point: the root menu is the programme listing."""
    logger.info("tvalacarta.channels.mtv mainlist")
    root = Item(channel=CHANNELNAME, url="http://www.mtv.es/programas/ver/")
    return programas(root)
def programas(item):
    """Scrape the programme listing page and return one Item per show."""
    logger.info("tvalacarta.channels.mtv programas")
    itemlist = []
    '''
    <div class="row row140" >
    <div class="thumbcontainer thumb140">
    <a href="/programas/100-artistas-mas-sexis/" title="Los 100 artistas más sexis" class="thumblink" >
    <img class="thumbnail " src="http://mtv-es.mtvnimages.com/marquee/KYLIE_SEXY_1.jpg?width=140&quality=0.91" alt="Los 100 artistas más sexis" />
    </a>
    '''
    # Extract the shows (sample markup above).
    data = scrapertools.cachePage(item.url)
    patron = '<div class="row row140"[^<]+'
    patron += '<div class="thumbcontainer thumb140"[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
    patron += '<img class="thumbnail " src="(http://mtv-es.mtvnimages.com/.*?)\?'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        title = scrapedtitle.strip()
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        plot = ""
        url = urlparse.urljoin(item.url,scrapedurl)
        # The show page links to its episode list, e.g.:
        #http://www.mtv.es/programas/destacados/alaska-y-mario/
        #http://www.mtv.es/programas/destacados/alaska-y-mario/episodios/
        url = urlparse.urljoin(url,"episodios")
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item( channel=item.channel , title=title , action="episodios" , url=url , thumbnail=thumbnail , plot=plot , show=title , fanart=thumbnail , folder=True ) )

    # Pagination link is optional; swallow any scrape failure.
    try:
        next_page=scrapertools.get_match(data,'<a href="([^"]+)"><span class="link">Pr')
        #/videos?prog=3798&v=1&pag=2
        itemlist.append( Item( channel=item.channel , title=">> Página siguiente" , action="programas" , url=urlparse.urljoin(item.url,next_page) ) )
    except:
        pass

    return itemlist
def episodios(item):
    """Scrape a show's episode page and return one Item per episode."""
    logger.info("tvalacarta.channels.mtv episodios")
    itemlist=[]
    '''
    <div class="row row160" >
    <div class="thumbcontainer thumb160">
    <a href="/programas/geordie-shore-6-temporada/episodios/geordie-shore-6-extra/video/geordie-shore-material-extra-979143/" title="Geordie Shore promo temporada 6: ¡en Australia!" class="thumblink" >
    <img class="thumbnail " src="http://mtv-es.mtvnimages.com/img/imagenes/promo-gs-new.jpg?height=120&quality=0.91" alt="Geordie Shore promo temporada 6: ¡en Australia!" />
    <span class="video"> </span>
    </a>
    </div>
    <div class="link-block">
    <a href="/programas/geordie-shore-6-temporada/episodios/geordie-shore-6-extra/video/geordie-shore-material-extra-979143/" title="Geordie Shore promo temporada 6: ¡en Australia!" class="titlelink " >Geordie Shore promo temporada 6: ¡en Australia!</a></div>
    <p class="video-description" >Scott y Holly... en su vídeo más íntimo </p>
    <p class="morelink" ><a href="/programas/geordie-shore-6-temporada/episodios/geordie-shore-6-extra/">Descripción completa</a> </p>
    </div>
    '''
    # Extract the episodes (sample markup above); each episode links to a
    # "partes" (parts) page.
    data = scrapertools.cachePage( item.url )
    #logger.info("data="+data)
    patron = '<div\s+class="row row[^<]+'
    patron += '<div\s+class="thumbcontainer thumb[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)" class="thumblink"[^<]+'
    patron += '<img class="thumbnail " src="(http://mtv-es.mtvnimages.com/.*?)\?'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        title = scrapedtitle.strip()
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        plot = ""
        url = urlparse.urljoin(item.url,scrapedurl)
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item( channel=item.channel , title=title , action="partes" , url=url , thumbnail=thumbnail , plot=plot , show=title , fanart=thumbnail , folder=True ) )

    # Pagination link is optional; swallow any scrape failure.
    try:
        #<a href="/programas/destacados/alaska-y-mario/episodios?start_20=20"><span class="link">Próximo</span>
        next_page=scrapertools.get_match(data,'<a href="([^"]+)"><span class="link">Pr')
        #/videos?prog=3798&v=1&pag=2
        itemlist.append( Item( channel=item.channel , title=">> Página siguiente" , action="episodios" , url=urlparse.urljoin(item.url,next_page) ) )
    except:
        pass

    return itemlist
def partes(item):
    """Scrape an episode page and return one playable Item per video part."""
    logger.info("tvalacarta.channels.mtv partes")
    itemlist=[]
    '''
    <div class="row row70 " >
    <div class="thumbcontainer thumb70">
    <a href="/programas/geordie-shore-6-temporada/episodios/geordie-shore-606/video/geordie-shore-ep-606-parte-2-de-4-986977/" title="Geordie Shore Ep. 606 |Parte 2 de 4|" class="thumblink" >
    <img class="thumbnail " src="http://mtv-es.mtvnimages.com/img/imagenes/gs606b.jpg?height=53&quality=0.91" alt="Geordie Shore Ep. 606 |Parte 2 de 4|" />
    <span class="video"> </span>
    </a>
    </div>
    '''
    # Extract the parts (sample markup above), restricted to the SM4.0
    # comment-delimited region of the page.
    data = scrapertools.cachePage(item.url)
    data = scrapertools.get_match(data,'<\!-- SM4.0 -->(.*?)<\!-- SM4.0 -->')
    patron = '<div class="row row[^<]+'
    patron += '<div class="thumbcontainer thumb[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)" class="thumblink"[^<]+'
    patron += '<img class="thumbnail " src="(http://mtv-es.mtvnimages.com/.*?)\?'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        title = scrapedtitle.strip()
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        plot = ""
        url = urlparse.urljoin(item.url,scrapedurl)
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        # folder=False + server="mtv": these items are directly playable.
        itemlist.append( Item( channel=item.channel , title=title , action="play" , server="mtv" , url=url , thumbnail=thumbnail , plot=plot , show=title , fanart=thumbnail , folder=False ) )

    # Pagination link is optional; swallow any scrape failure.
    try:
        #<a href="/programas/destacados/alaska-y-mario/episodios?start_20=20"><span class="link">Próximo</span>
        next_page=scrapertools.get_match(data,'<a href="([^"]+)"><span class="link">Pr')
        #/videos?prog=3798&v=1&pag=2
        itemlist.append( Item( channel=item.channel , title=">> Página siguiente" , action="episodios" , url=urlparse.urljoin(item.url,next_page) ) )
    except:
        pass

    return itemlist
# Automatic channel check: this function must return True when the channel
# is working end to end (Python 2 print statements; live-site dependent).
def test():
    # The channel must at least list some programmes.
    items_mainlist = mainlist(Item())
    if len(items_mainlist)==0:
        print "No hay programas"
        return False

    # Walk the programmes until one of them yields videos.
    for item_programa in items_mainlist:
        print "Verificando "+item_programa.title
        items_episodios = episodios(item_programa)
        if len(items_episodios)>0:
            return True

    print "No hay videos en ningún programa"
    return False
| gpl-3.0 |
pataquets/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/revertrevision.py | 147 | 1769 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
class RevertRevision(AbstractStep):
    """Step that reverse-applies the diffs for a list of revisions.

    Expects state["revision_list"] to hold the revisions to revert; marks
    the checkout as modified afterwards so later steps know to commit.
    """
    def run(self, state):
        revisions = state["revision_list"]
        checkout = self._tool.checkout()
        checkout.apply_reverse_diffs(revisions)
        self.did_modify_checkout(state)
| bsd-3-clause |
manipopopo/tensorflow | tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 14 | 13765 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
4. train a model using an iterative gradient-based method; and
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import urllib
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
layers = tf.keras.layers
def parse(line):
  """Parse one CSV line of the colors dataset.

  Args:
    line: a scalar string tensor holding one "color_name,r,g,b" row.

  Returns:
    A (rgb, chars, length) tuple: RGB values scaled to [0, 1], a one-hot
    encoded character sequence of shape [len(name), 256], and the sequence
    length as an int64 scalar (needed by the RNN).
  """
  fields = tf.string_split([line], ",").values
  # fields[0] is the color name; fields[1:] are the r, g, b strings.
  rgb = tf.string_to_number(fields[1:], out_type=tf.float32) / 255.
  name = fields[0]
  # One-hot encode each ASCII byte of the color name.
  chars = tf.one_hot(tf.decode_raw(name, tf.uint8), depth=256)
  length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
  return rgb, chars, length
def maybe_download(filename, work_directory, source_url):
  """Download the data from source url, unless it's already here.

  Args:
    filename: string, name of the file in the directory.
    work_directory: string, path to working directory.
    source_url: url to download from if file doesn't exist.

  Returns:
    Path to resulting file.
  """
  if not tf.gfile.Exists(work_directory):
    tf.gfile.MakeDirs(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not tf.gfile.Exists(filepath):
    # Fix: with a plain `import urllib`, `urllib.request` does not exist on
    # Python 2 and is only present on Python 3 if something else happened to
    # import it. `six.moves.urllib.request` (six is imported at file level)
    # resolves to the right module on both versions.
    temp_file_name, _ = six.moves.urllib.request.urlretrieve(source_url)
    tf.gfile.Copy(temp_file_name, filepath)
    with tf.gfile.GFile(filepath) as f:
      size = f.size()
    print("Successfully downloaded", filename, size, "bytes.")
  return filepath
def load_dataset(data_dir, url, batch_size):
  """Loads the colors data at path into a PaddedDataset.

  Downloads the CSV at `url` into `data_dir` (if absent), then builds a
  tf.data pipeline of shuffled, padded batches of (rgb, chars, length).
  """
  path = maybe_download(os.path.basename(url), data_dir, url)
  dataset = tf.data.TextLineDataset(path)
  dataset = dataset.skip(1)  # drop the (color_name, r, g, b) header row
  dataset = dataset.map(parse)
  dataset = dataset.shuffle(buffer_size=10000)
  # Pad variable-length character sequences within each batch.
  dataset = dataset.padded_batch(
      batch_size, padded_shapes=([None], [None, None], []))
  return dataset
# pylint: disable=not-callable
class RNNColorbot(tf.keras.Model):
  """Multi-layer (LSTM) RNN that regresses on real-valued vector labels."""

  def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
    """Constructs an RNNColorbot.

    Args:
      rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
        the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
      label_dimension: the length of the labels on which to regress
      keep_prob: (1 - dropout probability); dropout is applied to the outputs of
        each LSTM layer
    """
    super(RNNColorbot, self).__init__(name="")
    self.label_dimension = label_dimension
    self.keep_prob = keep_prob

    # tf.contrib.checkpoint.List keeps the per-layer cells trackable so they
    # are saved/restored with the model.
    self.cells = tf.contrib.checkpoint.List(
        [tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes])
    self.relu = layers.Dense(
        label_dimension, activation=tf.nn.relu, name="relu")

  def call(self, inputs, training=False):
    """Implements the RNN logic and prediction generation.

    Args:
      inputs: A tuple (chars, sequence_length), where chars is a batch of
        one-hot encoded color names represented as a Tensor with dimensions
        [batch_size, time_steps, 256] and sequence_length holds the length
        of each character sequence (color name) as a Tensor with dimension
        [batch_size].
      training: whether the invocation is happening during training

    Returns:
      A tensor of dimension [batch_size, label_dimension] that is produced by
      passing chars through a multi-layer RNN and applying a ReLU to the final
      hidden state.
    """
    (chars, sequence_length) = inputs
    # Transpose the first and second dimensions so that chars is of shape
    # [time_steps, batch_size, dimension].
    chars = tf.transpose(chars, [1, 0, 2])
    # The outer loop cycles through the layers of the RNN; the inner loop
    # executes the time steps for a particular layer.
    batch_size = int(chars.shape[1])
    for l in range(len(self.cells)):
      cell = self.cells[l]
      outputs = []
      state = cell.zero_state(batch_size, tf.float32)
      # Unstack the inputs to obtain a list of batches, one for each time step.
      chars = tf.unstack(chars, axis=0)
      for ch in chars:
        output, state = cell(ch, state)
        outputs.append(output)
      # The outputs of this layer are the inputs of the subsequent layer.
      # NOTE: dropout is applied to the output of every layer, including the
      # last, and only while training.
      chars = tf.stack(outputs, axis=0)
      if training:
        chars = tf.nn.dropout(chars, self.keep_prob)
    # Extract the correct output (i.e., hidden state) for each example. All the
    # character sequences in this batch were padded to the same fixed length so
    # that they could be easily fed through the above RNN loop. The
    # `sequence_length` vector tells us the true lengths of the character
    # sequences, letting us obtain for each sequence the hidden state that was
    # generated by its non-padding characters.
    batch_range = [i for i in range(batch_size)]
    # indices[i] = (last real time step of sequence i, i), in the
    # time-major layout produced by the loop above.
    indices = tf.stack([sequence_length - 1, batch_range], axis=1)
    hidden_states = tf.gather_nd(chars, indices)
    return self.relu(hidden_states)
def loss(labels, predictions):
  """Computes mean squared loss between labels and predictions."""
  squared_error = tf.square(predictions - labels)
  return tf.reduce_mean(squared_error)
def test(model, eval_data):
  """Computes the average loss on eval_data, which should be a Dataset."""
  mean_loss = tfe.metrics.Mean("loss")
  for labels, chars, sequence_length in tfe.Iterator(eval_data):
    preds = model((chars, sequence_length), training=False)
    mean_loss(loss(labels, preds))
  print("eval/loss: %.6f\n" % mean_loss.result())
  # Always emit the eval loss summary (no step-based gating for eval).
  with tf.contrib.summary.always_record_summaries():
    tf.contrib.summary.scalar("loss", mean_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
  """Trains model on train_data using optimizer.

  Args:
    model: the RNNColorbot (or compatible callable keras Model) to train.
    train_data: a tf.data.Dataset of (labels, chars, sequence_length) batches.
    log_interval: print (and record summaries for) the loss every
      `log_interval` batches; falsy disables printing.
  """
  tf.train.get_or_create_global_step()

  def model_loss(labels, chars, sequence_length):
    # Zero-argument-able loss closure: optimizer.minimize re-invokes it to
    # compute gradients under eager execution.
    predictions = model((chars, sequence_length), training=True)
    loss_value = loss(labels, predictions)
    tf.contrib.summary.scalar("loss", loss_value)
    return loss_value

  for (batch, (labels, chars, sequence_length)) in enumerate(
      tfe.Iterator(train_data)):
    with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
      batch_model_loss = functools.partial(model_loss, labels, chars,
                                           sequence_length)
      optimizer.minimize(
          batch_model_loss, global_step=tf.train.get_global_step())
      if log_interval and batch % log_interval == 0:
        # NOTE: this runs a second forward pass purely to print the
        # post-update loss for the batch.
        print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
# Train/test CSVs (header row: color_name, r, g, b) hosted in the
# random-forests/tensorflow-workshop repository.
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv"
def main(_):
  """Trains the colorbot, then interactively predicts colors for typed names."""
  data_dir = os.path.join(FLAGS.dir, "data")
  train_data = load_dataset(
      data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
  eval_data = load_dataset(
      data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)

  model = RNNColorbot(
      rnn_cell_sizes=FLAGS.rnn_cell_sizes,
      label_dimension=3,  # one output per RGB channel
      keep_prob=FLAGS.keep_probability)
  optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)

  if FLAGS.no_gpu or tfe.num_gpus() <= 0:
    print(tfe.num_gpus())  # NOTE(review): stray debug print — consider removing.
    device = "/cpu:0"
  else:
    device = "/gpu:0"
  print("Using device %s." % device)

  # Separate writers so train and eval summaries land in distinct subdirs.
  log_dir = os.path.join(FLAGS.dir, "summaries")
  tf.gfile.MakeDirs(log_dir)
  train_summary_writer = tf.contrib.summary.create_file_writer(
      os.path.join(log_dir, "train"), flush_millis=10000)
  test_summary_writer = tf.contrib.summary.create_file_writer(
      os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")

  with tf.device(device):
    for epoch in range(FLAGS.num_epochs):
      start = time.time()
      with train_summary_writer.as_default():
        train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
      end = time.time()
      print("train/time for epoch #%d: %.2f" % (epoch, end - start))
      with test_summary_writer.as_default():
        test(model, eval_data)

  # Interactive loop: read color names from stdin until EOF or empty input.
  print("Colorbot is ready to generate colors!")
  while True:
    try:
      color_name = six.moves.input(
          "Give me a color name (or press enter to exit): ")
    except EOFError:
      return
    if not color_name:
      return

    _, chars, length = parse(color_name)
    with tf.device(device):
      (chars, length) = (tf.identity(chars), tf.identity(length))
      # Add a leading batch dimension of 1 for the single query.
      chars = tf.expand_dims(chars, 0)
      length = tf.expand_dims(length, 0)
      preds = tf.unstack(model((chars, length), training=False)[0])

    # Predictions cannot be negative, as they are generated by a ReLU layer;
    # they may, however, be greater than 1.
    clipped_preds = tuple(min(float(p), 1.0) for p in preds)
    rgb = tuple(int(p * 255) for p in clipped_preds)
    print("rgb:", rgb)
    data = [[clipped_preds]]  # 1x1 "image" of the predicted color
    if HAS_MATPLOTLIB:
      plt.imshow(data)
      plt.title(color_name)
      plt.show()
if __name__ == "__main__":
  # Command-line flags; unknown args are forwarded to tfe.run unchanged.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      "--dir",
      type=str,
      default="/tmp/rnn_colorbot/",
      help="Directory to download data files and save logs.")
  parser.add_argument(
      "--log_interval",
      type=int,
      default=10,
      metavar="N",
      help="Log training loss every log_interval batches.")
  parser.add_argument(
      "--num_epochs", type=int, default=20, help="Number of epochs to train.")
  parser.add_argument(
      "--rnn_cell_sizes",
      type=int,
      nargs="+",
      default=[256, 128],
      help="List of sizes for each layer of the RNN.")
  parser.add_argument(
      "--batch_size",
      type=int,
      default=64,
      help="Batch size for training and eval.")
  parser.add_argument(
      "--keep_probability",
      type=float,
      default=0.5,
      help="Keep probability for dropout between layers.")
  parser.add_argument(
      "--learning_rate",
      type=float,
      default=0.01,
      help="Learning rate to be used during training.")
  parser.add_argument(
      "--no_gpu",
      action="store_true",
      default=False,
      help="Disables GPU usage even if a GPU is available.")

  # FLAGS becomes a module-level global read by main().
  FLAGS, unparsed = parser.parse_known_args()
  tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ZLLab-Mooc/edx-platform | common/lib/xmodule/xmodule/split_test_module.py | 33 | 29834 | """
Module for running content split tests
"""
import logging
import json
from webob import Response
from uuid import uuid4
from operator import itemgetter
from xmodule.progress import Progress
from xmodule.seq_module import SequenceDescriptor
from xmodule.studio_editable import StudioEditableModule, StudioEditableDescriptor
from xmodule.x_module import XModule, module_attr, STUDENT_VIEW
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.modulestore.inheritance import UserPartitionList
from lxml import etree
from xblock.core import XBlock
from xblock.fields import Scope, Integer, String, ReferenceValueDict
from xblock.fragment import Fragment
log = logging.getLogger('edx.' + __name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
DEFAULT_GROUP_NAME = _(u'Group ID {group_id}')
class SplitTestFields(object):
    """Fields needed for split test module.

    Mixed into both SplitTestModule and SplitTestDescriptor so that the
    XBlock field definitions are declared exactly once.
    """
    has_children = True

    # All available user partitions (with value and display name). This is updated each time
    # editable_metadata_fields is called. NOTE: class-level mutable state shared
    # by all instances — build_partition_values rewrites it in place.
    user_partition_values = []
    # Default value used for user_partition_id
    no_partition_selected = {'display_name': _("Not Selected"), 'value': -1}

    @staticmethod
    def build_partition_values(all_user_partitions, selected_user_partition):
        """
        This helper method builds up the user_partition values that will
        be passed to the Studio editor.

        Only "random"-scheme partitions are offered (see
        get_split_user_partitions); a "Not Selected" entry is prepended when
        no valid partition is currently selected.
        """
        SplitTestFields.user_partition_values = []
        # Add "No selection" value if there is not a valid selected user partition.
        if not selected_user_partition:
            SplitTestFields.user_partition_values.append(SplitTestFields.no_partition_selected)
        for user_partition in get_split_user_partitions(all_user_partitions):
            SplitTestFields.user_partition_values.append(
                {"display_name": user_partition.name, "value": user_partition.id}
            )
        return SplitTestFields.user_partition_values

    display_name = String(
        display_name=_("Display Name"),
        help=_("This name is used for organizing your course content, but is not shown to students."),
        scope=Scope.settings,
        default=_("Content Experiment")
    )

    # Specified here so we can see what the value set at the course-level is.
    user_partitions = UserPartitionList(
        help=_("The list of group configurations for partitioning students in content experiments."),
        default=[],
        scope=Scope.settings
    )

    user_partition_id = Integer(
        help=_("The configuration defines how users are grouped for this content experiment. Caution: Changing the group configuration of a student-visible experiment will impact the experiment data."),
        scope=Scope.content,
        display_name=_("Group Configuration"),
        default=no_partition_selected["value"],
        values=lambda: SplitTestFields.user_partition_values  # Will be populated before the Studio editor is shown.
    )

    # group_id is an int
    # child is a serialized UsageId (aka Location). This child
    # location needs to actually match one of the children of this
    # Block. (expected invariant that we'll need to test, and handle
    # authoring tools that mess this up)
    group_id_to_child = ReferenceValueDict(
        help=_("Which child module students in a particular group_id should see"),
        scope=Scope.content
    )
def get_split_user_partitions(user_partitions):
    """
    Helper method that filters a list of user_partitions and returns just the
    ones that are suitable for the split_test module (scheme name "random").
    """
    suitable = []
    for partition in user_partitions:
        if partition.scheme.name == "random":
            suitable.append(partition)
    return suitable
@XBlock.needs('user_tags')  # pylint: disable=abstract-method
@XBlock.wants('partitions')
class SplitTestModule(SplitTestFields, XModule, StudioEditableModule):
    """
    Show the user the appropriate child. Uses the ExperimentState
    API to figure out which child to show.

    Course staff still get put in an experimental condition, but have the option
    to see the other conditions. The only thing that counts toward their
    grade/progress is the condition they are actually in.

    Technical notes:
      - There is more dark magic in this code than I'd like. The whole varying-children +
        grading interaction is a tangle between super and subclasses of descriptors and
        modules.
    """

    def __init__(self, *args, **kwargs):
        super(SplitTestModule, self).__init__(*args, **kwargs)

        # Resolve the single child for this user's experiment group up front;
        # both stay None when no group/child can be determined.
        self.child_descriptor = None
        child_descriptors = self.get_child_descriptors()
        if len(child_descriptors) >= 1:
            self.child_descriptor = child_descriptors[0]
        if self.child_descriptor is not None:
            self.child = self.system.get_module(self.child_descriptor)
        else:
            self.child = None

    def get_child_descriptor_by_location(self, location):
        """
        Look through the children and look for one with the given location.
        Returns the descriptor.
        If none match, return None
        """
        # NOTE: calling self.get_children() creates a circular reference--
        # it calls get_child_descriptors() internally, but that doesn't work until
        # we've picked a choice. Use self.descriptor.get_children() instead.
        for child in self.descriptor.get_children():
            if child.location == location:
                return child

        return None

    def get_content_titles(self):
        """
        Returns list of content titles for split_test's child.

        This overwrites the get_content_titles method included in x_module by default.

        WHY THIS OVERWRITE IS NECESSARY: If we fetch *all* of split_test's children,
        we'll end up getting all of the possible conditions users could ever see.
        Ex: If split_test shows a video to group A and HTML to group B, the
        regular get_content_titles in x_module will get the title of BOTH the video
        AND the HTML.

        We only want the content titles that should actually be displayed to the user.
        split_test's .child property contains *only* the child that should actually
        be shown to the user, so we call get_content_titles() on only that child.
        """
        return self.child.get_content_titles()

    def get_child_descriptors(self):
        """
        For grading--return just the chosen child.
        """
        group_id = self.get_group_id()
        if group_id is None:
            return []

        # group_id_to_child comes from json, so it has to have string keys
        str_group_id = str(group_id)
        if str_group_id in self.group_id_to_child:
            child_location = self.group_id_to_child[str_group_id]
            child_descriptor = self.get_child_descriptor_by_location(child_location)
        else:
            # Oops. Config error.
            log.debug("configuration error in split test module: invalid group_id %r (not one of %r). Showing error", str_group_id, self.group_id_to_child.keys())
            # BUG FIX: child_descriptor was never assigned on this branch, so
            # the `if child_descriptor is None` check below raised
            # UnboundLocalError instead of returning []. Fall through with
            # None so the error path logs and returns an empty list.
            child_descriptor = None

        if child_descriptor is None:
            # Peak confusion is great. Now that we set child_descriptor,
            # get_children() should return a list with one element--the
            # xmodule for the child
            log.debug("configuration error in split test module: no such child")
            return []

        return [child_descriptor]

    def get_group_id(self):
        """
        Returns the group ID, or None if none is available.
        """
        partitions_service = self.runtime.service(self, 'partitions')
        if not partitions_service:
            return None
        return partitions_service.get_user_group_id_for_partition(self.user_partition_id)

    @property
    def is_configured(self):
        """
        Returns true if the split_test instance is associated with a UserPartition.
        """
        return self.descriptor.is_configured

    def _staff_view(self, context):
        """
        Render the staff view for a split test module.

        Renders every child (active and inactive groups) so staff can inspect
        all experiment conditions; inactive groups are labeled "(inactive)".
        """
        fragment = Fragment()
        active_contents = []
        inactive_contents = []

        for child_location in self.children:  # pylint: disable=no-member
            child_descriptor = self.get_child_descriptor_by_location(child_location)
            child = self.system.get_module(child_descriptor)
            rendered_child = child.render(STUDENT_VIEW, context)
            fragment.add_frag_resources(rendered_child)
            group_name, updated_group_id = self.get_data_for_vertical(child)

            if updated_group_id is None:  # inactive group
                group_name = child.display_name
                updated_group_id = [g_id for g_id, loc in self.group_id_to_child.items() if loc == child_location][0]
                inactive_contents.append({
                    'group_name': _(u'{group_name} (inactive)').format(group_name=group_name),
                    'id': child.location.to_deprecated_string(),
                    'content': rendered_child.content,
                    'group_id': updated_group_id,
                })
                continue

            active_contents.append({
                'group_name': group_name,
                'id': child.location.to_deprecated_string(),
                'content': rendered_child.content,
                'group_id': updated_group_id,
            })

        # Sort active and inactive contents by group name.
        sorted_active_contents = sorted(active_contents, key=itemgetter('group_name'))
        sorted_inactive_contents = sorted(inactive_contents, key=itemgetter('group_name'))

        # Use the new template
        fragment.add_content(self.system.render_template('split_test_staff_view.html', {
            'items': sorted_active_contents + sorted_inactive_contents,
        }))
        fragment.add_css('.split-test-child { display: none; }')
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_staff.js'))
        fragment.initialize_js('ABTestSelector')
        return fragment

    def author_view(self, context):
        """
        Renders the Studio preview by rendering each child so that they can all be seen and edited.
        """
        fragment = Fragment()
        root_xblock = context.get('root_xblock')
        is_root = root_xblock and root_xblock.location == self.location
        active_groups_preview = None
        inactive_groups_preview = None

        if is_root:
            [active_children, inactive_children] = self.descriptor.active_and_inactive_children()
            active_groups_preview = self.studio_render_children(
                fragment, active_children, context
            )
            inactive_groups_preview = self.studio_render_children(
                fragment, inactive_children, context
            )

        fragment.add_content(self.system.render_template('split_test_author_view.html', {
            'split_test': self,
            'is_root': is_root,
            'is_configured': self.is_configured,
            'active_groups_preview': active_groups_preview,
            'inactive_groups_preview': inactive_groups_preview,
            'group_configuration_url': self.descriptor.group_configuration_url,
        }))
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_author_view.js'))
        fragment.initialize_js('SplitTestAuthorView')
        return fragment

    def studio_render_children(self, fragment, children, context):
        """
        Renders the specified children and returns it as an HTML string. In addition, any
        dependencies are added to the specified fragment.
        """
        html = ""
        for active_child_descriptor in children:
            active_child = self.system.get_module(active_child_descriptor)
            rendered_child = active_child.render(StudioEditableModule.get_preview_view_name(active_child), context)
            if active_child.category == 'vertical':
                group_name, group_id = self.get_data_for_vertical(active_child)
                if group_name:
                    # Swap the default auto-generated group label for the
                    # partition's real group name in the rendered HTML.
                    rendered_child.content = rendered_child.content.replace(
                        DEFAULT_GROUP_NAME.format(group_id=group_id),
                        group_name
                    )
            fragment.add_frag_resources(rendered_child)
            html = html + rendered_child.content
        return html

    def student_view(self, context):
        """
        Renders the contents of the chosen condition for students, and all the
        conditions for staff.
        """
        if self.child is None:
            # raise error instead? In fact, could complain on descriptor load...
            return Fragment(content=u"<div>Nothing here. Move along.</div>")

        if self.system.user_is_staff:
            return self._staff_view(context)
        else:
            child_fragment = self.child.render(STUDENT_VIEW, context)
            fragment = Fragment(self.system.render_template('split_test_student_view.html', {
                'child_content': child_fragment.content,
                'child_id': self.child.scope_ids.usage_id,
            }))
            fragment.add_frag_resources(child_fragment)
            fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_student.js'))
            fragment.initialize_js('SplitTestStudentView')
            return fragment

    @XBlock.handler
    def log_child_render(self, request, suffix=''):  # pylint: disable=unused-argument
        """
        Record in the tracking logs which child was rendered
        """
        # TODO: use publish instead, when publish is wired to the tracking logs
        self.system.track_function('xblock.split_test.child_render', {'child_id': self.child.scope_ids.usage_id.to_deprecated_string()})
        return Response()

    def get_icon_class(self):
        return self.child.get_icon_class() if self.child else 'other'

    def get_progress(self):
        # Aggregate progress across children (only the chosen child, per
        # get_child_descriptors). NOTE: `reduce` is the Python 2 builtin.
        children = self.get_children()
        progresses = [child.get_progress() for child in children]
        progress = reduce(Progress.add_counts, progresses, None)
        return progress

    def get_data_for_vertical(self, vertical):
        """
        Return name and id of a group corresponding to `vertical`.
        Returns (None, None) when the vertical maps to no active group.
        """
        user_partition = self.descriptor.get_selected_partition()
        if user_partition:
            for group in user_partition.groups:
                group_id = unicode(group.id)
                child_location = self.group_id_to_child.get(group_id, None)
                if child_location == vertical.location:
                    return (group.name, group.id)
        return (None, None)

    def validate(self):
        """
        Message for either error or warning validation message/s.

        Returns message and type. Priority given to error type message.
        """
        return self.descriptor.validate()
@XBlock.needs('user_tags')  # pylint: disable=abstract-method
@XBlock.wants('partitions')
@XBlock.wants('user')
class SplitTestDescriptor(SplitTestFields, SequenceDescriptor, StudioEditableDescriptor):
    # the editing interface can be the same as for sequences -- just a container
    module_class = SplitTestModule

    filename_extension = "xml"

    mako_template = "widgets/metadata-only-edit.html"

    show_in_read_only_mode = True

    # Proxies that delegate to the corresponding SplitTestModule attributes
    # at runtime.
    child_descriptor = module_attr('child_descriptor')
    log_child_render = module_attr('log_child_render')
    get_content_titles = module_attr('get_content_titles')
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('split_test')
renderable_groups = {}
# json.dumps doesn't know how to handle Location objects
for group in self.group_id_to_child:
renderable_groups[group] = self.group_id_to_child[group].to_deprecated_string()
xml_object.set('group_id_to_child', json.dumps(renderable_groups))
xml_object.set('user_partition_id', str(self.user_partition_id))
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
raw_group_id_to_child = xml_object.attrib.get('group_id_to_child', None)
user_partition_id = xml_object.attrib.get('user_partition_id', None)
try:
group_id_to_child = json.loads(raw_group_id_to_child)
except ValueError:
msg = "group_id_to_child is not valid json"
log.exception(msg)
system.error_tracker(msg)
for child in xml_object:
try:
descriptor = system.process_xml(etree.tostring(child))
children.append(descriptor.scope_ids.usage_id)
except Exception:
msg = "Unable to load child when parsing split_test module."
log.exception(msg)
system.error_tracker(msg)
return ({
'group_id_to_child': group_id_to_child,
'user_partition_id': user_partition_id
}, children)
def get_context(self):
_context = super(SplitTestDescriptor, self).get_context()
_context.update({
'selected_partition': self.get_selected_partition()
})
return _context
    def has_dynamic_children(self):
        """
        Grading needs to know that only one of the children is actually "real". This
        makes it use module.get_child_descriptors().
        """
        return True
    def editor_saved(self, user, old_metadata, old_content):
        """
        Used to create default verticals for the groups.

        Assumes that a mutable modulestore is being used.
        """
        # Any existing value of user_partition_id will be in "old_content" instead of "old_metadata"
        # because it is Scope.content.
        if 'user_partition_id' not in old_content or old_content['user_partition_id'] != self.user_partition_id:
            selected_partition = self.get_selected_partition()
            if selected_partition is not None:
                self.group_id_mapping = {}  # pylint: disable=attribute-defined-outside-init
                for group in selected_partition.groups:
                    # NOTE(review): _create_vertical_for_group is defined
                    # elsewhere in this class (not visible in this chunk).
                    self._create_vertical_for_group(group, user.id)
                # Don't need to call update_item in the modulestore because the caller of this method will do it.
        else:
            # If children referenced in group_id_to_child have been deleted, remove them from the map.
            # Deleting while iterating is safe here only because Python 2's
            # dict.items() returns a list snapshot.
            for str_group_id, usage_key in self.group_id_to_child.items():
                if usage_key not in self.children:  # pylint: disable=no-member
                    del self.group_id_to_child[str_group_id]
    @property
    def editable_metadata_fields(self):
        """Metadata fields shown in the Studio editor, plus user_partition_id."""
        # Update the list of partitions based on the currently available user_partitions.
        SplitTestFields.build_partition_values(self.user_partitions, self.get_selected_partition())

        editable_fields = super(SplitTestDescriptor, self).editable_metadata_fields

        # Explicitly add user_partition_id, which does not automatically get picked up because it is Scope.content.
        # Note that this means it will be saved by the Studio editor as "metadata", but the field will
        # still update correctly.
        editable_fields[SplitTestFields.user_partition_id.name] = self._create_metadata_editor_info(
            SplitTestFields.user_partition_id
        )
        return editable_fields
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(SplitTestDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
SplitTestDescriptor.due,
SplitTestDescriptor.user_partitions,
SplitTestDescriptor.group_id_to_child,
])
return non_editable_fields
def get_selected_partition(self):
"""
Returns the partition that this split module is currently using, or None
if the currently selected partition ID does not match any of the defined partitions.
"""
for user_partition in self.user_partitions:
if user_partition.id == self.user_partition_id:
return user_partition
return None
    def active_and_inactive_children(self):
        """
        Returns two values:
        1. The active children of this split test, in the order of the groups.
        2. The remaining (inactive) children, in the order they were added to the split test.
        """
        children = self.get_children()
        user_partition = self.get_selected_partition()
        # With no valid partition every child is inactive.
        if not user_partition:
            return [], children

        def get_child_descriptor(location):
            """
            Returns the child descriptor which matches the specified location, or None if one is not found.
            """
            for child in children:
                if child.location == location:
                    return child
            return None

        # Compute the active children in the order specified by the user partition
        active_children = []
        for group in user_partition.groups:
            group_id = unicode(group.id)
            child_location = self.group_id_to_child.get(group_id, None)
            child = get_child_descriptor(child_location)
            if child:
                active_children.append(child)

        # Compute the inactive children in the order they were added to the split test
        inactive_children = [child for child in children if child not in active_children]

        return active_children, inactive_children
@property
def is_configured(self):
"""
Returns true if the split_test instance is associated with a UserPartition.
"""
return not self.user_partition_id == SplitTestFields.no_partition_selected['value']
    def validate(self):
        """
        Validates the state of this split_test instance. This is the override of the general XBlock method,
        and it will also ask its superclass to validate.
        """
        validation = super(SplitTestDescriptor, self).validate()
        split_test_validation = self.validate_split_test()

        # A truthy StudioValidation presumably means "no problems found" —
        # confirm against StudioValidation.__bool__/empty semantics.
        if split_test_validation:
            return validation

        validation = StudioValidation.copy(validation)
        # If the block is simply unconfigured and there is exactly one message,
        # promote that message to the summary; otherwise synthesize a general
        # summary and attach all split-test messages.
        if validation and (not self.is_configured and len(split_test_validation.messages) == 1):
            validation.summary = split_test_validation.messages[0]
        else:
            validation.summary = self.general_validation_message(split_test_validation)
            validation.add_messages(split_test_validation)

        return validation
def validate_split_test(self):
"""
Returns a StudioValidation object describing the current state of the split_test_module
(not including superclass validation messages).
"""
_ = self.runtime.service(self, "i18n").ugettext
split_validation = StudioValidation(self.location)
if self.user_partition_id < 0:
split_validation.add(
StudioValidationMessage(
StudioValidationMessage.NOT_CONFIGURED,
_(u"The experiment is not associated with a group configuration."),
action_class='edit-button',
action_label=_(u"Select a Group Configuration")
)
)
else:
user_partition = self.get_selected_partition()
if not user_partition:
split_validation.add(
StudioValidationMessage(
StudioValidationMessage.ERROR,
_(u"The experiment uses a deleted group configuration. Select a valid group configuration or delete this experiment.")
)
)
else:
# If the user_partition selected is not valid for the split_test module, error.
# This can only happen via XML and import/export.
if not get_split_user_partitions([user_partition]):
split_validation.add(
StudioValidationMessage(
StudioValidationMessage.ERROR,
_(u"The experiment uses a group configuration that is not supported for experiments. "
u"Select a valid group configuration or delete this experiment.")
)
)
else:
[active_children, inactive_children] = self.active_and_inactive_children()
if len(active_children) < len(user_partition.groups):
split_validation.add(
StudioValidationMessage(
StudioValidationMessage.ERROR,
_(u"The experiment does not contain all of the groups in the configuration."),
action_runtime_event='add-missing-groups',
action_label=_(u"Add Missing Groups")
)
)
if len(inactive_children) > 0:
split_validation.add(
StudioValidationMessage(
StudioValidationMessage.WARNING,
_(u"The experiment has an inactive group. "
u"Move content into active groups, then delete the inactive group.")
)
)
return split_validation
def general_validation_message(self, validation=None):
"""
Returns just a summary message about whether or not this split_test instance has
validation issues (not including superclass validation messages). If the split_test instance
validates correctly, this method returns None.
"""
if validation is None:
validation = self.validate_split_test()
if not validation:
has_error = any(message.type == StudioValidationMessage.ERROR for message in validation.messages)
return StudioValidationMessage(
StudioValidationMessage.ERROR if has_error else StudioValidationMessage.WARNING,
_(u"This content experiment has issues that affect content visibility.")
)
return None
@XBlock.handler
def add_missing_groups(self, request, suffix=''): # pylint: disable=unused-argument
"""
Create verticals for any missing groups in the split test instance.
Called from Studio view.
"""
user_service = self.runtime.service(self, 'user')
if user_service is None:
return Response()
user_partition = self.get_selected_partition()
changed = False
for group in user_partition.groups:
str_group_id = unicode(group.id)
if str_group_id not in self.group_id_to_child:
user_id = self.runtime.service(self, 'user').get_current_user().opt_attrs['edx-platform.user_id']
self._create_vertical_for_group(group, user_id)
changed = True
if changed:
# user.id - to be fixed by Publishing team
self.system.modulestore.update_item(self, None)
return Response()
@property
def group_configuration_url(self):
assert hasattr(self.system, 'modulestore') and hasattr(self.system.modulestore, 'get_course'), \
"modulestore has to be available"
course_module = self.system.modulestore.get_course(self.location.course_key)
group_configuration_url = None
if 'split_test' in course_module.advanced_modules:
user_partition = self.get_selected_partition()
if user_partition:
group_configuration_url = "{url}#{configuration_id}".format(
url='/group_configurations/' + unicode(self.location.course_key),
configuration_id=str(user_partition.id)
)
return group_configuration_url
    def _create_vertical_for_group(self, group, user_id):
        """
        Creates a vertical to associate with the group.

        This appends the new vertical to the end of children, and updates group_id_to_child.
        A mutable modulestore is needed to call this method (will need to update after mixed
        modulestore work, currently relies on mongo's create_item method).

        Args:
            group: the partition group to create a vertical for (group.id is
                used for both the display name and the child mapping key).
            user_id: id of the acting user, passed through to the modulestore.
        """
        assert hasattr(self.system, 'modulestore') and hasattr(self.system.modulestore, 'create_item'), \
            "editor_saved should only be called when a mutable modulestore is available"
        modulestore = self.system.modulestore
        # A fresh uuid4 hex gives the new vertical a collision-free block id.
        dest_usage_key = self.location.replace(category="vertical", name=uuid4().hex)
        metadata = {'display_name': DEFAULT_GROUP_NAME.format(group_id=group.id)}
        modulestore.create_item(
            user_id,
            self.location.course_key,
            dest_usage_key.block_type,
            block_id=dest_usage_key.block_id,
            definition_data=None,
            metadata=metadata,
            runtime=self.system,
        )
        # Record the new child both in the children list and the group mapping.
        self.children.append(dest_usage_key) # pylint: disable=no-member
        self.group_id_to_child[unicode(group.id)] = dest_usage_key
| agpl-3.0 |
tinfoil/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py | 117 | 46271 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A helper class for reading in and dealing with tests expectations
for layout tests.
"""
import logging
import re
from webkitpy.layout_tests.models.test_configuration import TestConfigurationConverter
_log = logging.getLogger(__name__)
# Test expectation and modifier constants.
#
# FIXME: range() starts with 0 which makes if expectation checks harder
# as PASS is 0.
(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, SKIP, WONTFIX,
 SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16)
# FIXME: Perhaps these two routines should be part of the Port instead?
BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')
class ParseError(Exception):
    """Raised when parsing a TestExpectations file produces fatal warnings."""

    def __init__(self, warnings):
        super(ParseError, self).__init__()
        # Keep the raw warning objects so callers can inspect them.
        self.warnings = warnings

    def __str__(self):
        return '\n'.join(str(warning) for warning in self.warnings)

    def __repr__(self):
        return 'ParseError(warnings=%s)' % self.warnings
class TestExpectationParser(object):
    """Provides parsing facilities for lines in the test_expectation.txt file."""

    # Placeholder bug modifier attached to synthesized lines (e.g. entries
    # coming from a Skipped file rather than TestExpectations itself).
    DUMMY_BUG_MODIFIER = "bug_dummy"
    BUG_MODIFIER_PREFIX = 'bug'
    BUG_MODIFIER_REGEX = 'bug\d+'
    REBASELINE_MODIFIER = 'rebaseline'
    PASS_EXPECTATION = 'pass'
    SKIP_MODIFIER = 'skip'
    SLOW_MODIFIER = 'slow'
    WONTFIX_MODIFIER = 'wontfix'
    TIMEOUT_EXPECTATION = 'timeout'
    MISSING_BUG_WARNING = 'Test lacks BUG modifier.'

    def __init__(self, port, full_test_list, allow_rebaseline_modifier):
        """
        Args:
            port: Port object used for path resolution and configuration macros.
            full_test_list: list of all known test names, or None/empty to skip
                directory expansion.
            allow_rebaseline_modifier: whether REBASELINE lines are warned about.
        """
        self._port = port
        self._test_configuration_converter = TestConfigurationConverter(set(port.all_test_configurations()), port.configuration_specifier_macros())
        self._full_test_list = full_test_list
        self._allow_rebaseline_modifier = allow_rebaseline_modifier

    def parse(self, filename, expectations_string):
        """Tokenizes and parses expectations_string, returning one TestExpectationLine per input line (including blanks/comments)."""
        expectation_lines = []
        line_number = 0
        for line in expectations_string.split("\n"):
            line_number += 1
            test_expectation = self._tokenize_line(filename, line, line_number)
            self._parse_line(test_expectation)
            expectation_lines.append(test_expectation)
        return expectation_lines

    def expectation_for_skipped_test(self, test_name):
        """Synthesizes a WONTFIX/SKIP TestExpectationLine for a test named in a Skipped file."""
        if not self._port.test_exists(test_name):
            _log.warning('The following test %s from the Skipped list doesn\'t exist' % test_name)
        expectation_line = TestExpectationLine()
        expectation_line.original_string = test_name
        expectation_line.modifiers = [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER]
        # FIXME: It's not clear what the expectations for a skipped test should be; the expectations
        # might be different for different entries in a Skipped file, or from the command line, or from
        # only running parts of the tests. It's also not clear if it matters much.
        expectation_line.modifiers.append(TestExpectationParser.WONTFIX_MODIFIER)
        expectation_line.name = test_name
        # FIXME: we should pass in a more descriptive string here.
        expectation_line.filename = '<Skipped file>'
        expectation_line.line_number = 0
        expectation_line.expectations = [TestExpectationParser.PASS_EXPECTATION]
        self._parse_line(expectation_line)
        return expectation_line

    def _parse_line(self, expectation_line):
        """Resolves the line's test path, matching tests, modifiers, and expectations in place."""
        if not expectation_line.name:
            return
        if not self._check_test_exists(expectation_line):
            return
        expectation_line.is_file = self._port.test_isfile(expectation_line.name)
        if expectation_line.is_file:
            expectation_line.path = expectation_line.name
        else:
            # Directory entries get a normalized path so prefix matching works.
            expectation_line.path = self._port.normalize_test_name(expectation_line.name)
        self._collect_matching_tests(expectation_line)
        self._parse_modifiers(expectation_line)
        self._parse_expectations(expectation_line)

    def _parse_modifiers(self, expectation_line):
        """Classifies the line's raw modifiers into known modifiers, bug ids, and configuration specifiers, recording warnings."""
        has_wontfix = False
        has_bugid = False
        parsed_specifiers = set()
        modifiers = [modifier.lower() for modifier in expectation_line.modifiers]
        expectations = [expectation.lower() for expectation in expectation_line.expectations]
        if self.SLOW_MODIFIER in modifiers and self.TIMEOUT_EXPECTATION in expectations:
            expectation_line.warnings.append('A test can not be both SLOW and TIMEOUT. If it times out indefinitely, then it should be just TIMEOUT.')
        for modifier in modifiers:
            if modifier in TestExpectations.MODIFIERS:
                expectation_line.parsed_modifiers.append(modifier)
                if modifier == self.WONTFIX_MODIFIER:
                    has_wontfix = True
            elif modifier.startswith(self.BUG_MODIFIER_PREFIX):
                has_bugid = True
                # Purely-numeric BUG\d+ ids are ambiguous across trackers and rejected.
                if re.match(self.BUG_MODIFIER_REGEX, modifier):
                    expectation_line.warnings.append('BUG\d+ is not allowed, must be one of BUGCR\d+, BUGWK\d+, BUGV8_\d+, or a non-numeric bug identifier.')
                else:
                    expectation_line.parsed_bug_modifiers.append(modifier)
            else:
                # Anything else is a configuration specifier (platform, build type, ...).
                parsed_specifiers.add(modifier)
        if not expectation_line.parsed_bug_modifiers and not has_wontfix and not has_bugid and self._port.warn_if_bug_missing_in_test_expectations():
            expectation_line.warnings.append(self.MISSING_BUG_WARNING)
        if self._allow_rebaseline_modifier and self.REBASELINE_MODIFIER in modifiers:
            expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
        expectation_line.matching_configurations = self._test_configuration_converter.to_config_set(parsed_specifiers, expectation_line.warnings)

    def _parse_expectations(self, expectation_line):
        """Converts the line's expectation strings to their enum values, warning on unknown ones."""
        result = set()
        for part in expectation_line.expectations:
            expectation = TestExpectations.expectation_from_string(part)
            if expectation is None:  # Careful, PASS is currently 0.
                expectation_line.warnings.append('Unsupported expectation: %s' % part)
                continue
            result.add(expectation)
        expectation_line.parsed_expectations = result

    def _check_test_exists(self, expectation_line):
        """Returns whether the line's named test (or its -disabled variant) exists, warning if not."""
        # WebKit's way of skipping tests is to add a -disabled suffix.
        # So we should consider the path existing if the path or the
        # -disabled version exists.
        if not self._port.test_exists(expectation_line.name) and not self._port.test_exists(expectation_line.name + '-disabled'):
            # Log a warning here since you hit this case any
            # time you update TestExpectations without syncing
            # the LayoutTests directory
            expectation_line.warnings.append('Path does not exist.')
            return False
        return True

    def _collect_matching_tests(self, expectation_line):
        """Convert the test specification to an absolute, normalized
        path and make sure directories end with the OS path separator."""
        # FIXME: full_test_list can quickly contain a big amount of
        # elements. We should consider at some point to use a more
        # efficient structure instead of a list. Maybe a dictionary of
        # lists to represent the tree of tests, leaves being test
        # files and nodes being categories.
        if not self._full_test_list:
            expectation_line.matching_tests = [expectation_line.path]
            return
        if not expectation_line.is_file:
            # this is a test category, return all the tests of the category.
            expectation_line.matching_tests = [test for test in self._full_test_list if test.startswith(expectation_line.path)]
            return
        # this is a test file, do a quick check if it's in the
        # full test suite.
        if expectation_line.path in self._full_test_list:
            expectation_line.matching_tests.append(expectation_line.path)

    # FIXME: Update the original modifiers and remove this once the old syntax is gone.
    # Map of configuration tokens as written in the new syntax ("Mac") to the
    # old-style uppercase modifiers ("MAC"), plus the inverted lookup.
    _configuration_tokens_list = [
        'Mac', 'SnowLeopard', 'Lion', 'MountainLion',
        'Win', 'XP', 'Vista', 'Win7',
        'Linux',
        'Android',
        'Release',
        'Debug',
    ]
    _configuration_tokens = dict((token, token.upper()) for token in _configuration_tokens_list)
    _inverted_configuration_tokens = dict((value, name) for name, value in _configuration_tokens.iteritems())

    # FIXME: Update the original modifiers list and remove this once the old syntax is gone.
    _expectation_tokens = {
        'Crash': 'CRASH',
        'Failure': 'FAIL',
        'ImageOnlyFailure': 'IMAGE',
        'Missing': 'MISSING',
        'Pass': 'PASS',
        'Rebaseline': 'REBASELINE',
        'Skip': 'SKIP',
        'Slow': 'SLOW',
        'Timeout': 'TIMEOUT',
        'WontFix': 'WONTFIX',
    }
    # The old TEXT/IMAGE+TEXT/AUDIO expectations all serialize back to 'Failure'.
    _inverted_expectation_tokens = dict([(value, name) for name, value in _expectation_tokens.iteritems()] +
                                        [('TEXT', 'Failure'), ('IMAGE+TEXT', 'Failure'), ('AUDIO', 'Failure')])

    # FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
    @classmethod
    def _tokenize_line(cls, filename, expectation_string, line_number):
        """Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance using the old format.

        The new format for a test expectation line is:
          [[bugs] [ "[" <configuration modifiers> "]" <name> [ "[" <expectations> "]" ["#" <comment>]

        Any errant whitespace is not preserved.
        """
        expectation_line = TestExpectationLine()
        expectation_line.original_string = expectation_string
        expectation_line.filename = filename
        expectation_line.line_number = line_number
        # Split off an optional trailing "#" comment before tokenizing.
        comment_index = expectation_string.find("#")
        if comment_index == -1:
            comment_index = len(expectation_string)
        else:
            expectation_line.comment = expectation_string[comment_index + 1:]
        remaining_string = re.sub(r"\s+", " ", expectation_string[:comment_index].strip())
        if len(remaining_string) == 0:
            return expectation_line
        # special-case parsing this so that we fail immediately instead of treating this as a test name
        if remaining_string.startswith('//'):
            expectation_line.warnings = ['use "#" instead of "//" for comments']
            return expectation_line
        bugs = []
        modifiers = []
        name = None
        expectations = []
        warnings = []
        WEBKIT_BUG_PREFIX = 'webkit.org/b/'
        tokens = remaining_string.split()
        # Small state machine: start -> configuration -> name -> name_found
        # -> expectations -> done. Bug tokens are only legal in 'start'.
        state = 'start'
        for token in tokens:
            if token.startswith(WEBKIT_BUG_PREFIX) or token.startswith('Bug('):
                if state != 'start':
                    warnings.append('"%s" is not at the start of the line.' % token)
                    break
                if token.startswith(WEBKIT_BUG_PREFIX):
                    bugs.append(token.replace(WEBKIT_BUG_PREFIX, 'BUGWK'))
                else:
                    match = re.match('Bug\((\w+)\)$', token)
                    if not match:
                        warnings.append('unrecognized bug identifier "%s"' % token)
                        break
                    else:
                        bugs.append('BUG' + match.group(1).upper())
            elif token.startswith('BUG'):
                warnings.append('unrecognized old-style bug identifier "%s"' % token)
                break
            elif token == '[':
                if state == 'start':
                    state = 'configuration'
                elif state == 'name_found':
                    state = 'expectations'
                else:
                    warnings.append('unexpected "["')
                    break
            elif token == ']':
                if state == 'configuration':
                    state = 'name'
                elif state == 'expectations':
                    state = 'done'
                else:
                    warnings.append('unexpected "]"')
                    break
            elif token in ('//', ':', '='):
                warnings.append('"%s" is not legal in the new TestExpectations syntax.' % token)
                break
            elif state == 'configuration':
                modifiers.append(cls._configuration_tokens.get(token, token))
            elif state == 'expectations':
                # Rebaseline/Skip/Slow/WontFix are stored as modifiers internally.
                if token in ('Rebaseline', 'Skip', 'Slow', 'WontFix'):
                    modifiers.append(token.upper())
                elif token not in cls._expectation_tokens:
                    warnings.append('Unrecognized expectation "%s"' % token)
                else:
                    expectations.append(cls._expectation_tokens.get(token, token))
            elif state == 'name_found':
                warnings.append('expecting "[", "#", or end of line instead of "%s"' % token)
                break
            else:
                name = token
                state = 'name_found'
        if not warnings:
            if not name:
                warnings.append('Did not find a test name.')
            elif state not in ('name_found', 'done'):
                warnings.append('Missing a "]"')
        # A bare WontFix implies Skip.
        if 'WONTFIX' in modifiers and 'SKIP' not in modifiers and not expectations:
            modifiers.append('SKIP')
        if 'SKIP' in modifiers and expectations:
            # FIXME: This is really a semantic warning and shouldn't be here. Remove when we drop the old syntax.
            warnings.append('A test marked Skip must not have other expectations.')
        elif not expectations:
            # A line with no expectations defaults to a skipped PASS.
            if 'SKIP' not in modifiers and 'REBASELINE' not in modifiers and 'SLOW' not in modifiers:
                modifiers.append('SKIP')
            expectations = ['PASS']
        # FIXME: expectation line should just store bugs and modifiers separately.
        expectation_line.modifiers = bugs + modifiers
        expectation_line.expectations = expectations
        expectation_line.name = name
        expectation_line.warnings = warnings
        return expectation_line

    @classmethod
    def _split_space_separated(cls, space_separated_string):
        """Splits a space-separated string into an array."""
        return [part.strip() for part in space_separated_string.strip().split(' ')]
class TestExpectationLine(object):
    """Represents a line in test expectations file."""

    def __init__(self):
        """Initializes a blank-line equivalent of an expectation."""
        self.original_string = None
        self.filename = None  # this is the path to the expectations file for this line
        self.line_number = None
        self.name = None  # this is the path in the line itself
        self.path = None  # this is the normpath of self.name
        self.modifiers = []
        self.parsed_modifiers = []
        self.parsed_bug_modifiers = []
        self.matching_configurations = set()
        self.expectations = []
        self.parsed_expectations = set()
        self.comment = None
        self.matching_tests = []
        self.warnings = []

    def is_invalid(self):
        """Returns True if the line has warnings other than a lone missing-bug warning."""
        return self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING]

    def is_flaky(self):
        """A line with more than one parsed expectation describes a flaky test."""
        return len(self.parsed_expectations) > 1

    @staticmethod
    def create_passing_expectation(test):
        """Builds a synthetic line that simply expects `test` to PASS."""
        expectation_line = TestExpectationLine()
        expectation_line.name = test
        expectation_line.path = test
        expectation_line.parsed_expectations = set([PASS])
        expectation_line.expectations = set(['PASS'])
        expectation_line.matching_tests = [test]
        return expectation_line

    def to_string(self, test_configuration_converter, include_modifiers=True, include_expectations=True, include_comment=True):
        """Serializes the line back to new-format text; invalid lines round-trip their original string."""
        parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
        if self.is_invalid():
            return self.original_string or ''
        if self.name is None:
            return '' if self.comment is None else "#%s" % self.comment
        if test_configuration_converter and self.parsed_bug_modifiers:
            # One output line per specifier set produced by the converter.
            specifiers_list = test_configuration_converter.to_specifiers_list(self.matching_configurations)
            result = []
            for specifiers in specifiers_list:
                # FIXME: this is silly that we join the modifiers and then immediately split them.
                modifiers = self._serialize_parsed_modifiers(test_configuration_converter, specifiers).split()
                expectations = self._serialize_parsed_expectations(parsed_expectation_to_string).split()
                result.append(self._format_line(modifiers, self.name, expectations, self.comment))
            return "\n".join(result) if result else None
        return self._format_line(self.modifiers, self.name, self.expectations, self.comment,
                                 include_modifiers, include_expectations, include_comment)

    def to_csv(self):
        """Returns 'name,modifiers,expectations' for spreadsheet-style output."""
        # Note that this doesn't include the comments.
        return '%s,%s,%s' % (self.name, ' '.join(self.modifiers), ' '.join(self.expectations))

    def _serialize_parsed_expectations(self, parsed_expectation_to_string):
        """Serializes parsed expectations in canonical EXPECTATION_ORDER."""
        result = []
        for index in TestExpectations.EXPECTATION_ORDER:
            if index in self.parsed_expectations:
                result.append(parsed_expectation_to_string[index])
        return ' '.join(result)

    def _serialize_parsed_modifiers(self, test_configuration_converter, specifiers):
        """Serializes bug modifiers, then plain modifiers, then sorted specifiers."""
        result = []
        if self.parsed_bug_modifiers:
            result.extend(sorted(self.parsed_bug_modifiers))
        result.extend(sorted(self.parsed_modifiers))
        result.extend(test_configuration_converter.specifier_sorter().sort_specifiers(specifiers))
        return ' '.join(result)

    @staticmethod
    def _format_line(modifiers, name, expectations, comment, include_modifiers=True, include_expectations=True, include_comment=True):
        """Assembles one new-format line: bugs, [ modifiers ], name, [ expectations ], # comment."""
        bugs = []
        new_modifiers = []
        new_expectations = []
        for modifier in modifiers:
            modifier = modifier.upper()
            # Translate old-style BUG* modifiers back to URL/Bug() forms.
            if modifier.startswith('BUGWK'):
                bugs.append('webkit.org/b/' + modifier.replace('BUGWK', ''))
            elif modifier.startswith('BUGCR'):
                bugs.append('crbug.com/' + modifier.replace('BUGCR', ''))
            elif modifier.startswith('BUG'):
                # FIXME: we should preserve case once we can drop the old syntax.
                bugs.append('Bug(' + modifier[3:].lower() + ')')
            elif modifier in ('SLOW', 'SKIP', 'REBASELINE', 'WONTFIX'):
                # These modifiers are written in the expectations bracket.
                new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(modifier))
            else:
                new_modifiers.append(TestExpectationParser._inverted_configuration_tokens.get(modifier, modifier))
        for expectation in expectations:
            expectation = expectation.upper()
            new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(expectation, expectation))
        result = ''
        if include_modifiers and (bugs or new_modifiers):
            if bugs:
                result += ' '.join(bugs) + ' '
            if new_modifiers:
                result += '[ %s ] ' % ' '.join(new_modifiers)
        result += name
        # A plain "Skip Pass" pair is the default and needs no bracket.
        if include_expectations and new_expectations and set(new_expectations) != set(['Skip', 'Pass']):
            result += ' [ %s ]' % ' '.join(sorted(set(new_expectations)))
        if include_comment and comment is not None:
            result += " #%s" % comment
        return result
# FIXME: Refactor API to be a proper CRUD.
class TestExpectationsModel(object):
    """Represents relational store of all expectations and provides CRUD semantics to manage it."""

    def __init__(self, shorten_filename=None):
        """
        Args:
            shorten_filename: optional callable used to abbreviate filenames in
                warning messages; defaults to the identity function.
        """
        # Maps a test to its list of expectations.
        self._test_to_expectations = {}
        # Maps a test to list of its modifiers (string values)
        self._test_to_modifiers = {}
        # Maps a test to a TestExpectationLine instance.
        self._test_to_expectation_line = {}
        # Inverted indexes: enum value -> set of test names.
        self._modifier_to_tests = self._dict_of_sets(TestExpectations.MODIFIERS)
        self._expectation_to_tests = self._dict_of_sets(TestExpectations.EXPECTATIONS)
        self._timeline_to_tests = self._dict_of_sets(TestExpectations.TIMELINES)
        self._result_type_to_tests = self._dict_of_sets(TestExpectations.RESULT_TYPES)
        self._shorten_filename = shorten_filename or (lambda x: x)

    def _dict_of_sets(self, strings_to_constants):
        """Takes a dict of strings->constants and returns a dict mapping
        each constant to an empty set."""
        d = {}
        for c in strings_to_constants.values():
            d[c] = set()
        return d

    def get_test_set(self, modifier, expectation=None, include_skips=True):
        """Returns tests carrying `modifier` (optionally intersected with `expectation`), optionally minus skipped tests."""
        if expectation is None:
            tests = self._modifier_to_tests[modifier]
        else:
            tests = (self._expectation_to_tests[expectation] &
                     self._modifier_to_tests[modifier])
        if not include_skips:
            tests = tests - self.get_test_set(SKIP, expectation)
        return tests

    def get_test_set_for_keyword(self, keyword):
        """Returns the set of tests associated with a keyword, whether it names an expectation, a modifier, or a raw modifier string."""
        # FIXME: get_test_set() is an awkward public interface because it requires
        # callers to know the difference between modifiers and expectations. We
        # should replace that with this where possible.
        expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
        if expectation_enum is not None:
            return self._expectation_to_tests[expectation_enum]
        modifier_enum = TestExpectations.MODIFIERS.get(keyword.lower(), None)
        if modifier_enum is not None:
            return self._modifier_to_tests[modifier_enum]
        # We must not have an index on this modifier.
        matching_tests = set()
        for test, modifiers in self._test_to_modifiers.iteritems():
            if keyword.lower() in modifiers:
                matching_tests.add(test)
        return matching_tests

    def get_tests_with_result_type(self, result_type):
        """Returns the set of tests whose result type (PASS/FAIL/SKIP/FLAKY) matches."""
        return self._result_type_to_tests[result_type]

    def get_tests_with_timeline(self, timeline):
        """Returns the set of tests on the given timeline (NOW/WONTFIX)."""
        return self._timeline_to_tests[timeline]

    def get_modifiers(self, test):
        """This returns modifiers for the given test (the modifiers plus the BUGXXXX identifier). This is used by the LTTF dashboard."""
        return self._test_to_modifiers[test]

    def has_modifier(self, test, modifier):
        """Returns whether `test` carries the given modifier enum."""
        return test in self._modifier_to_tests[modifier]

    def has_keyword(self, test, keyword):
        """Returns whether `keyword` appears among the test's expectations or modifiers."""
        return (keyword.upper() in self.get_expectations_string(test) or
                keyword.lower() in self.get_modifiers(test))

    def has_test(self, test):
        """Returns whether any expectation line has been recorded for `test`."""
        return test in self._test_to_expectation_line

    def get_expectation_line(self, test):
        """Returns the TestExpectationLine recorded for `test`, or None."""
        return self._test_to_expectation_line.get(test)

    def get_expectations(self, test):
        """Returns the parsed expectation set for `test` (raises KeyError if absent)."""
        return self._test_to_expectations[test]

    def get_expectations_string(self, test):
        """Returns the expectations for the given test as an uppercase string.
        If there are no expectations for the test, then "PASS" is returned."""
        expectations = self.get_expectations(test)
        retval = []
        for expectation in expectations:
            retval.append(self.expectation_to_string(expectation))
        return " ".join(retval)

    def expectation_to_string(self, expectation):
        """Return the uppercased string equivalent of a given expectation."""
        for item in TestExpectations.EXPECTATIONS.items():
            if item[1] == expectation:
                return item[0].upper()
        raise ValueError(expectation)

    def add_expectation_line(self, expectation_line, in_skipped=False):
        """Records an expectation line (if valid), replacing any less-specific prior entry for each matching test."""
        if expectation_line.is_invalid():
            return
        for test in expectation_line.matching_tests:
            # Skipped-file entries never displace existing, more specific lines.
            if not in_skipped and self._already_seen_better_match(test, expectation_line):
                continue
            self._clear_expectations_for_test(test)
            self._test_to_expectation_line[test] = expectation_line
            self._add_test(test, expectation_line)

    def _add_test(self, test, expectation_line):
        """Sets the expected state for a given test.

        This routine assumes the test has not been added before. If it has,
        use _clear_expectations_for_test() to reset the state prior to
        calling this."""
        self._test_to_expectations[test] = expectation_line.parsed_expectations
        for expectation in expectation_line.parsed_expectations:
            self._expectation_to_tests[expectation].add(test)
        self._test_to_modifiers[test] = expectation_line.modifiers
        for modifier in expectation_line.parsed_modifiers:
            mod_value = TestExpectations.MODIFIERS[modifier]
            self._modifier_to_tests[mod_value].add(test)
        # Timeline: WONTFIX entries live on their own timeline; all else is NOW.
        if TestExpectationParser.WONTFIX_MODIFIER in expectation_line.parsed_modifiers:
            self._timeline_to_tests[WONTFIX].add(test)
        else:
            self._timeline_to_tests[NOW].add(test)
        # Result type: SKIP beats PASS beats FLAKY beats generic FAIL.
        if TestExpectationParser.SKIP_MODIFIER in expectation_line.parsed_modifiers:
            self._result_type_to_tests[SKIP].add(test)
        elif expectation_line.parsed_expectations == set([PASS]):
            self._result_type_to_tests[PASS].add(test)
        elif expectation_line.is_flaky():
            self._result_type_to_tests[FLAKY].add(test)
        else:
            # FIXME: What is this?
            self._result_type_to_tests[FAIL].add(test)

    def _clear_expectations_for_test(self, test):
        """Remove preexisting expectations for this test.
        This happens if we are seeing a more precise path
        than a previous listing.
        """
        if self.has_test(test):
            self._test_to_expectations.pop(test, '')
            self._remove_from_sets(test, self._expectation_to_tests)
            self._remove_from_sets(test, self._modifier_to_tests)
            self._remove_from_sets(test, self._timeline_to_tests)
            self._remove_from_sets(test, self._result_type_to_tests)

    def _remove_from_sets(self, test, dict_of_sets_of_tests):
        """Removes the given test from the sets in the dictionary.

        Args:
          test: test to look for
          dict: dict of sets of files"""
        for set_of_tests in dict_of_sets_of_tests.itervalues():
            if test in set_of_tests:
                set_of_tests.remove(test)

    def _already_seen_better_match(self, test, expectation_line):
        """Returns whether we've seen a better match already in the file.

        Returns True if we've already seen a expectation_line.name that matches more of the test
            than this path does
        """
        # FIXME: See comment below about matching test configs and specificity.
        if not self.has_test(test):
            # We've never seen this test before.
            return False
        prev_expectation_line = self._test_to_expectation_line[test]
        if prev_expectation_line.filename != expectation_line.filename:
            # We've moved on to a new expectation file, which overrides older ones.
            return False
        if len(prev_expectation_line.path) > len(expectation_line.path):
            # The previous path matched more of the test.
            return True
        if len(prev_expectation_line.path) < len(expectation_line.path):
            # This path matches more of the test.
            return False
        # At this point we know we have seen a previous exact match on this
        # base path, so we need to check the two sets of modifiers.
        # FIXME: This code was originally designed to allow lines that matched
        # more modifiers to override lines that matched fewer modifiers.
        # However, we currently view these as errors.
        #
        # To use the "more modifiers wins" policy, change the errors for overrides
        # to be warnings and return False".
        if prev_expectation_line.matching_configurations == expectation_line.matching_configurations:
            expectation_line.warnings.append('Duplicate or ambiguous entry lines %s:%d and %s:%d.' % (
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_number,
                self._shorten_filename(expectation_line.filename), expectation_line.line_number))
            return True
        if prev_expectation_line.matching_configurations >= expectation_line.matching_configurations:
            expectation_line.warnings.append('More specific entry for %s on line %s:%d overrides line %s:%d.' % (expectation_line.name,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_number,
                self._shorten_filename(expectation_line.filename), expectation_line.line_number))
            # FIXME: return False if we want more specific to win.
            return True
        if prev_expectation_line.matching_configurations <= expectation_line.matching_configurations:
            expectation_line.warnings.append('More specific entry for %s on line %s:%d overrides line %s:%d.' % (expectation_line.name,
                self._shorten_filename(expectation_line.filename), expectation_line.line_number,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_number))
            return True
        if prev_expectation_line.matching_configurations & expectation_line.matching_configurations:
            expectation_line.warnings.append('Entries for %s on lines %s:%d and %s:%d match overlapping sets of configurations.' % (expectation_line.name,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_number,
                self._shorten_filename(expectation_line.filename), expectation_line.line_number))
            return True
        # Configuration sets are disjoint, then.
        return False
class TestExpectations(object):
    """Test expectations consist of lines with specifications of what
    to expect from layout test cases. The test cases can be directories
    in which case the expectations apply to all test cases in that
    directory and any subdirectory. The format is along the lines of:

      LayoutTests/fast/js/fixme.js [ Failure ]
      LayoutTests/fast/js/flaky.js [ Failure Pass ]
      LayoutTests/fast/js/crash.js [ Crash Failure Pass Timeout ]
      ...

    To add modifiers:
      LayoutTests/fast/js/no-good.js
      [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Timeout ]
      [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
      [ Linux Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
      [ Linux Win ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]

    Skip: Doesn't run the test.
    Slow: The test takes a long time to run, but does not timeout indefinitely.
    WontFix: For tests that we never intend to pass on a given platform (treated like Skip).

    Notes:
      -A test cannot be both SLOW and TIMEOUT
      -A test can be included twice, but not via the same path.
      -If a test is included twice, then the more precise path wins.
      -CRASH tests cannot be WONTFIX
    """

    # FIXME: Update to new syntax once the old format is no longer supported.
    # Maps the lowercase keyword written in an expectations file to its
    # expectation constant (PASS, FAIL, ... are module-level constants).
    EXPECTATIONS = {'pass': PASS,
                    'audio': AUDIO,
                    'fail': FAIL,
                    'image': IMAGE,
                    'image+text': IMAGE_PLUS_TEXT,
                    'text': TEXT,
                    'timeout': TIMEOUT,
                    'crash': CRASH,
                    'missing': MISSING,
                    'skip': SKIP}

    # Human-readable descriptions used when summarizing results.
    # (aggregated by category, pass/fail/skip, type)
    EXPECTATION_DESCRIPTIONS = {SKIP: 'skipped',
                                PASS: 'passes',
                                FAIL: 'failures',
                                IMAGE: 'image-only failures',
                                TEXT: 'text-only failures',
                                IMAGE_PLUS_TEXT: 'image and text failures',
                                AUDIO: 'audio failures',
                                CRASH: 'crashes',
                                TIMEOUT: 'timeouts',
                                MISSING: 'missing results'}

    EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, FAIL, IMAGE, SKIP)

    BUILD_TYPES = ('debug', 'release')

    # Maps modifier keywords (as defined by TestExpectationParser) to constants.
    MODIFIERS = {TestExpectationParser.SKIP_MODIFIER: SKIP,
                 TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
                 TestExpectationParser.SLOW_MODIFIER: SLOW,
                 TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
                 'none': NONE}

    TIMELINES = {TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
                 'now': NOW}

    RESULT_TYPES = {'skip': SKIP,
                    'pass': PASS,
                    'fail': FAIL,
                    'flaky': FLAKY}

    @classmethod
    def expectation_from_string(cls, string):
        """Map a single expectation keyword (e.g. 'pass') to its constant.

        Returns None for unrecognized keywords."""
        assert(' ' not in string)  # This only handles one expectation at a time.
        return cls.EXPECTATIONS.get(string.lower())

    @staticmethod
    def result_was_expected(result, expected_results, test_needs_rebaselining, test_is_skipped):
        """Returns whether we got a result we were expecting.
        Args:
            result: actual result of a test execution
            expected_results: set of results listed in test_expectations
            test_needs_rebaselining: whether test was marked as REBASELINE
            test_is_skipped: whether test was marked as SKIP"""
        if result in expected_results:
            return True
        # Any concrete text/image/audio failure satisfies a generic FAIL.
        if result in (TEXT, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
            return True
        if result == MISSING and test_needs_rebaselining:
            return True
        if result == SKIP and test_is_skipped:
            return True
        return False

    @staticmethod
    def remove_pixel_failures(expected_results):
        """Returns a copy of the expected results for a test, except that we
        drop any pixel failures and return the remaining expectations. For example,
        if we're not running pixel tests, then tests expected to fail as IMAGE
        will PASS."""
        expected_results = expected_results.copy()
        if IMAGE in expected_results:
            expected_results.remove(IMAGE)
            expected_results.add(PASS)
        return expected_results

    @staticmethod
    def has_pixel_failures(actual_results):
        """Returns True if any result in the set implies an image comparison failure."""
        return IMAGE in actual_results or FAIL in actual_results

    @staticmethod
    def suffixes_for_expectations(expectations):
        """Returns the set of baseline-file suffixes implied by the expectations."""
        suffixes = set()
        if IMAGE in expectations:
            suffixes.add('png')
        if FAIL in expectations:
            # A generic FAIL may need text, image and audio baselines.
            suffixes.add('txt')
            suffixes.add('png')
            suffixes.add('wav')
        return set(suffixes)

    # FIXME: This constructor does too much work. We should move the actual parsing of
    # the expectations into separate routines so that linting and handling overrides
    # can be controlled separately, and the constructor can be more of a no-op.
    def __init__(self, port, tests=None, include_generic=True, include_overrides=True, expectations_to_lint=None):
        self._full_test_list = tests
        self._test_config = port.test_configuration()
        # Lint mode is implied by passing an explicit expectations mapping.
        self._is_lint_mode = expectations_to_lint is not None
        self._model = TestExpectationsModel(self._shorten_filename)
        self._parser = TestExpectationParser(port, tests, self._is_lint_mode)
        self._port = port
        self._skipped_tests_warnings = []  # NOTE(review): not read anywhere in this class — confirm before removing.
        self._expectations = []

        # NOTE: indexing .keys()/.values() is a Python 2 idiom and presumes
        # expectations_dict preserves insertion order (e.g. an OrderedDict) —
        # the generic file, port file and overrides are consumed positionally.
        expectations_dict = expectations_to_lint or port.expectations_dict()
        expectations_dict_index = 0

        # Populate generic expectations (if enabled by include_generic).
        if port.path_to_generic_test_expectations_file() in expectations_dict:
            if include_generic:
                expectations = self._parser.parse(expectations_dict.keys()[expectations_dict_index], expectations_dict.values()[expectations_dict_index])
                self._add_expectations(expectations)
                self._expectations += expectations
            # Skip past the generic entry even when it was not included.
            expectations_dict_index += 1

        # Populate default port expectations (always enabled).
        if len(expectations_dict) > expectations_dict_index:
            expectations = self._parser.parse(expectations_dict.keys()[expectations_dict_index], expectations_dict.values()[expectations_dict_index])
            self._add_expectations(expectations)
            self._expectations += expectations
            expectations_dict_index += 1

        # Populate override expectations (if enabled by include_overrides).
        while len(expectations_dict) > expectations_dict_index and include_overrides:
            expectations = self._parser.parse(expectations_dict.keys()[expectations_dict_index], expectations_dict.values()[expectations_dict_index])
            self._add_expectations(expectations)
            self._expectations += expectations
            expectations_dict_index += 1

        # FIXME: move ignore_tests into port.skipped_layout_tests()
        self.add_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))

        self._has_warnings = False
        self._report_warnings()
        self._process_tests_without_expectations()

    # TODO(ojan): Allow for removing skipped tests when getting the list of
    # tests to run, but not when getting metrics.
    def model(self):
        """Returns the underlying TestExpectationsModel."""
        return self._model

    def get_rebaselining_failures(self):
        """Returns the set of tests carrying the REBASELINE modifier."""
        return self._model.get_test_set(REBASELINE)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_expectations(self, test):
        return self._model.get_expectations(test)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def has_modifier(self, test, modifier):
        return self._model.has_modifier(test, modifier)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_tests_with_result_type(self, result_type):
        return self._model.get_tests_with_result_type(result_type)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_test_set(self, modifier, expectation=None, include_skips=True):
        return self._model.get_test_set(modifier, expectation, include_skips)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_modifiers(self, test):
        return self._model.get_modifiers(test)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_tests_with_timeline(self, timeline):
        return self._model.get_tests_with_timeline(timeline)

    def get_expectations_string(self, test):
        return self._model.get_expectations_string(test)

    def expectation_to_string(self, expectation):
        return self._model.expectation_to_string(expectation)

    def matches_an_expected_result(self, test, result, pixel_tests_are_enabled):
        """Returns whether `result` is acceptable for `test`, honoring the
        pixel-test setting and the REBASELINE/SKIP modifiers."""
        expected_results = self._model.get_expectations(test)
        if not pixel_tests_are_enabled:
            expected_results = self.remove_pixel_failures(expected_results)
        return self.result_was_expected(result,
                                        expected_results,
                                        self.is_rebaselining(test),
                                        self._model.has_modifier(test, SKIP))

    def is_rebaselining(self, test):
        return self._model.has_modifier(test, REBASELINE)

    def _shorten_filename(self, filename):
        """Makes `filename` relative to the WebKit checkout when possible."""
        if filename.startswith(self._port.path_from_webkit_base()):
            return self._port.host.filesystem.relpath(filename, self._port.path_from_webkit_base())
        return filename

    def _report_warnings(self):
        """Gathers the warnings attached to every parsed expectation line;
        raises ParseError in lint mode, otherwise logs them."""
        warnings = []
        for expectation in self._expectations:
            for warning in expectation.warnings:
                warnings.append('%s:%d %s %s' % (self._shorten_filename(expectation.filename), expectation.line_number,
                                warning, expectation.name if expectation.expectations else expectation.original_string))

        if warnings:
            self._has_warnings = True
            if self._is_lint_mode:
                raise ParseError(warnings)
            _log.warning('--lint-test-files warnings:')
            for warning in warnings:
                _log.warning(warning)
            _log.warning('')

    def _process_tests_without_expectations(self):
        # Tests absent from every expectations file implicitly expect PASS.
        if self._full_test_list:
            for test in self._full_test_list:
                if not self._model.has_test(test):
                    self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test))

    def has_warnings(self):
        return self._has_warnings

    def remove_configuration_from_test(self, test, test_configuration):
        """Removes `test_configuration` from every non-flaky FAIL/IMAGE
        expectation for `test`, dropping lines left with no configurations,
        and returns the re-serialized expectations text."""
        expectations_to_remove = []
        modified_expectations = []

        for expectation in self._expectations:
            if expectation.name != test or expectation.is_flaky() or not expectation.parsed_expectations:
                continue
            # .next() is the Python 2 iterator protocol (next(it) in Python 3);
            # this peeks at the single parsed expectation of a non-flaky line.
            if iter(expectation.parsed_expectations).next() not in (FAIL, IMAGE):
                continue
            if test_configuration not in expectation.matching_configurations:
                continue

            expectation.matching_configurations.remove(test_configuration)
            if expectation.matching_configurations:
                modified_expectations.append(expectation)
            else:
                expectations_to_remove.append(expectation)

        for expectation in expectations_to_remove:
            self._expectations.remove(expectation)

        return self.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)

    def remove_rebaselined_tests(self, except_these_tests, filename):
        """Returns a copy of the expectations in the file with the tests removed."""
        def without_rebaseline_modifier(expectation):
            # Keep only lines from `filename`; among those, drop any line that
            # is valid, names a test in except_these_tests and carries the
            # 'rebaseline' modifier.
            return (expectation.filename == filename and
                    not (not expectation.is_invalid() and
                         expectation.name in except_these_tests and
                         'rebaseline' in expectation.parsed_modifiers))

        # Python 2: filter() returns a list here.
        return self.list_to_string(filter(without_rebaseline_modifier, self._expectations), reconstitute_only_these=[])

    def _add_expectations(self, expectation_list):
        """Feeds parsed lines into the model, skipping lines whose
        configurations don't match the current one (except in lint mode,
        where every line is validated)."""
        for expectation_line in expectation_list:
            if not expectation_line.expectations:
                continue
            if self._is_lint_mode or self._test_config in expectation_line.matching_configurations:
                self._model.add_expectation_line(expectation_line)

    def add_skipped_tests(self, tests_to_skip):
        """Marks `tests_to_skip` as skipped in the model and warns about
        tests that also have an explicit expectation line."""
        if not tests_to_skip:
            return
        for test in self._expectations:
            if test.name and test.name in tests_to_skip:
                test.warnings.append('%s:%d %s is also in a Skipped file.' % (test.filename, test.line_number, test.name))

        for test_name in tests_to_skip:
            expectation_line = self._parser.expectation_for_skipped_test(test_name)
            self._model.add_expectation_line(expectation_line, in_skipped=True)

    @staticmethod
    def list_to_string(expectation_lines, test_configuration_converter=None, reconstitute_only_these=None):
        """Serializes expectation lines back to expectations-file text,
        joining the surviving lines with newlines."""
        def serialize(expectation_line):
            # If reconstitute_only_these is an empty list, we want to return original_string.
            # So we need to compare reconstitute_only_these to None, not just check if it's falsey.
            if reconstitute_only_these is None or expectation_line in reconstitute_only_these:
                return expectation_line.to_string(test_configuration_converter)
            return expectation_line.original_string

        def nones_out(expectation_line):
            return expectation_line is not None

        return "\n".join(filter(nones_out, map(serialize, expectation_lines)))
| bsd-3-clause |
PeterPetrik/QGIS | python/plugins/processing/tests/GdalAlgorithmsVectorTest.py | 17 | 44507 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GdalAlgorithmVectorTest.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Matthias Kuhn
Email : matthias@opengis.ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Matthias Kuhn'
import nose2
import os
import shutil
import tempfile
from qgis.core import (QgsProcessingContext,
QgsProcessingFeedback,
QgsCoordinateReferenceSystem,
QgsRectangle)
from qgis.testing import (start_app,
unittest)
import AlgorithmsTestBase
from processing.algs.gdal.ogr2ogr import ogr2ogr
from processing.algs.gdal.ogrinfo import ogrinfo
from processing.algs.gdal.Buffer import Buffer
from processing.algs.gdal.Dissolve import Dissolve
from processing.algs.gdal.OffsetCurve import OffsetCurve
from processing.algs.gdal.OgrToPostGis import OgrToPostGis
from processing.algs.gdal.OneSideBuffer import OneSideBuffer
from processing.algs.gdal.PointsAlongLines import PointsAlongLines
# Directory holding the fixture layers (polys.gml etc.) used by these tests.
testDataPath = os.path.join(os.path.dirname(__file__), 'testdata')
class TestGdalVectorAlgorithms(unittest.TestCase, AlgorithmsTestBase.AlgorithmsTest):
    @classmethod
    def setUpClass(cls):
        """Boot a QGIS application and register the Processing providers
        once for the whole test class."""
        start_app()
        from processing.core.Processing import Processing
        Processing.initialize()
        # Directories appended here are removed again in tearDownClass().
        cls.cleanup_paths = []
@classmethod
def tearDownClass(cls):
for path in cls.cleanup_paths:
shutil.rmtree(path)
    def test_definition_file(self):
        """Name of the YAML file that drives the data-driven algorithm tests."""
        return 'gdal_algorithm_vector_tests.yaml'
def testOgr2Ogr(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'polys.gml')
alg = ogr2ogr()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['ogr2ogr',
'-f "ESRI Shapefile" ' + outdir + '/check.shp ' +
source + ' polys2'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.kml'}, context, feedback),
['ogr2ogr',
'-f "LIBKML" ' + outdir + '/check.kml ' +
source + ' polys2'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/my out/check.kml'}, context, feedback),
['ogr2ogr',
'-f "LIBKML" "' + outdir + '/my out/check.kml" ' +
source + ' polys2'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.gpkg'}, context, feedback),
['ogr2ogr',
'-f "GPKG" ' + outdir + '/check.gpkg ' +
source + ' polys2'])
def testOgrInfo(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'polys.gml')
alg = ogrinfo()
alg.initAlgorithm()
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SUMMARY_ONLY': True,
'NO_METADATA': False}, context, feedback),
['ogrinfo',
'-al -so ' +
source + ' polys2'])
source = os.path.join(testDataPath, 'filename with spaces.gml')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SUMMARY_ONLY': True,
'NO_METADATA': False}, context, feedback),
['ogrinfo',
'-al -so "' +
source + '" filename_with_spaces'])
source = os.path.join(testDataPath, 'filename with spaces.gml')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SUMMARY_ONLY': False,
'NO_METADATA': False}, context, feedback),
['ogrinfo',
'-al "' +
source + '" filename_with_spaces'])
source = os.path.join(testDataPath, 'filename with spaces.gml')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SUMMARY_ONLY': True,
'NO_METADATA': True}, context, feedback),
['ogrinfo',
'-al -so -nomd "' +
source + '" filename_with_spaces'])
    def testBuffer(self):
        """Check the ogr2ogr/SQLite SQL command the Buffer algorithm builds for
        plain buffering, dissolve, custom geometry column, explode-collections
        and a grouping field containing a space."""
        context = QgsProcessingContext()
        feedback = QgsProcessingFeedback()
        source = os.path.join(testDataPath, 'polys.gml')
        source_with_space = os.path.join(testDataPath, 'filename with spaces.gml')  # NOTE(review): unused here — confirm before removing.
        alg = Buffer()
        alg.initAlgorithm()

        with tempfile.TemporaryDirectory() as outdir:
            # Plain buffer with the default geometry column.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'DISTANCE': 5,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-dialect sqlite -sql "SELECT ST_Buffer(geometry, 5.0) AS geometry,* FROM """polys2"""" ' +
                 '-f "ESRI Shapefile"'])

            # DISSOLVE wraps the buffer in ST_Union().
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'DISTANCE': 5,
                                        'DISSOLVE': True,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-dialect sqlite -sql "SELECT ST_Union(ST_Buffer(geometry, 5.0)) AS geometry,* FROM """polys2"""" ' +
                 '-f "ESRI Shapefile"'])

            # Custom GEOMETRY column name is used in both the call and the alias.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'DISTANCE': 1,
                                        'DISSOLVE': True,
                                        'EXPLODE_COLLECTIONS': False,
                                        'GEOMETRY': 'geom',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-dialect sqlite -sql "SELECT ST_Union(ST_Buffer(geom, 1.0)) AS geom,* FROM """polys2"""" ' +
                 '-f "ESRI Shapefile"'])

            # EXPLODE_COLLECTIONS adds the -explodecollections switch.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'DISTANCE': 5,
                                        'EXPLODE_COLLECTIONS': True,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-dialect sqlite -sql "SELECT ST_Buffer(geometry, 5.0) AS geometry,* FROM """polys2"""" ' +
                 '-explodecollections -f "ESRI Shapefile"'])

            # A FIELD implies dissolving per group; the field name (with space)
            # is identifier-quoted in the GROUP BY clause.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'DISTANCE': 5,
                                        'FIELD': 'total population',
                                        'EXPLODE_COLLECTIONS': True,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-dialect sqlite -sql "SELECT ST_Union(ST_Buffer(geometry, 5.0)) AS geometry,* FROM """polys2""" GROUP BY """total population"""" ' +
                 '-explodecollections -f "ESRI Shapefile"'])
    def testDissolve(self):
        """Check the ogr2ogr/SQLite SQL command the Dissolve algorithm builds
        for grouping fields, custom geometry columns, feature counting,
        area/perimeter computation, per-group statistics and extra options."""
        context = QgsProcessingContext()
        feedback = QgsProcessingFeedback()
        source = os.path.join(testDataPath, 'polys.gml')
        source_with_space = os.path.join(testDataPath, 'filename with spaces.gml')
        alg = Dissolve()
        alg.initAlgorithm()

        with tempfile.TemporaryDirectory() as outdir:
            # No FIELD: dissolve everything into a single ST_Union().
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry FROM """polys2"""" ' +
                 '-f "ESRI Shapefile"'])

            # FIELD adds the column to the SELECT list and a GROUP BY clause.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""" FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            # A field name containing a space is identifier-quoted the same way.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'total population',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """total population""" FROM """polys2""" ' +
                 'GROUP BY """total population"""" -f "ESRI Shapefile"'])

            # An input path with spaces is double-quoted; the layer name
            # derived from it has underscores instead of spaces.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source_with_space,
                                        'FIELD': 'my_field',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 '"' + source_with_space + '" ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""" FROM """filename_with_spaces""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            # Custom GEOMETRY column name.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'GEOMETRY': 'the_geom',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(the_geom) AS the_geom, """my_field""" FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            # KEEP_ATTRIBUTES=False does not change the command with a FIELD...
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'KEEP_ATTRIBUTES': False,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""" FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            # ...nor without one.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'KEEP_ATTRIBUTES': False,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry FROM """polys2"""" ' +
                 '-f "ESRI Shapefile"'])

            # EXPLODE_COLLECTIONS adds the -explodecollections switch.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'EXPLODE_COLLECTIONS': True,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""" FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -explodecollections -f "ESRI Shapefile"'])

            # COUNT_FEATURES adds a COUNT() over the geometry column.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'COUNT_FEATURES': True,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""", COUNT(geometry) AS count FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'COUNT_FEATURES': True,
                                        'GEOMETRY': 'the_geom',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(the_geom) AS the_geom, """my_field""", COUNT(the_geom) AS count FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            # COMPUTE_AREA adds area and perimeter columns.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'COMPUTE_AREA': True,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""", SUM(ST_Area(geometry)) AS area, ' +
                 'ST_Perimeter(ST_Union(geometry)) AS perimeter FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'COMPUTE_AREA': True,
                                        'GEOMETRY': 'the_geom',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(the_geom) AS the_geom, """my_field""", SUM(ST_Area(the_geom)) AS area, ' +
                 'ST_Perimeter(ST_Union(the_geom)) AS perimeter FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            # COMPUTE_STATISTICS plus STATISTICS_ATTRIBUTE adds sum/min/max/avg
            # over the chosen attribute.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'COMPUTE_STATISTICS': True,
                                        'STATISTICS_ATTRIBUTE': 'my_val',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""", ' +
                 'SUM("""my_val""") AS sum, MIN("""my_val""") AS min, MAX("""my_val""") AS max, AVG("""my_val""") AS avg FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'test field',
                                        'COMPUTE_STATISTICS': True,
                                        'STATISTICS_ATTRIBUTE': 'total population',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """test field""", ' +
                 'SUM("""total population""") AS sum, MIN("""total population""") AS min, MAX("""total population""") AS max, ' +
                 'AVG("""total population""") AS avg FROM """polys2""" ' +
                 'GROUP BY """test field"""" -f "ESRI Shapefile"'])

            # compute stats without stats attribute, and vice versa (should be ignored)
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'COMPUTE_STATISTICS': True,
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""" FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'STATISTICS_ATTRIBUTE': 'my_val',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""" FROM """polys2""" ' +
                 'GROUP BY """my_field"""" -f "ESRI Shapefile"'])

            # Free-form OPTIONS are appended (quoted) before the format switch.
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'my_field',
                                        'OPTIONS': 'my opts',
                                        'OUTPUT': outdir + '/check.shp'}, context, feedback),
                ['ogr2ogr',
                 outdir + '/check.shp ' +
                 source + ' ' +
                 '-nlt PROMOTE_TO_MULTI -dialect sqlite -sql "SELECT ST_Union(geometry) AS geometry, """my_field""" FROM """polys2""" ' +
                 'GROUP BY """my_field"""" "my opts" -f "ESRI Shapefile"'])
def testOgr2PostGis(self):
    """Verify the exact ogr2ogr command line built by OgrToPostGis for each
    algorithm parameter (connection settings, key/geometry options, filters,
    layer-creation options and SRS handling)."""
    context = QgsProcessingContext()
    feedback = QgsProcessingFeedback()
    source = os.path.join(testDataPath, 'polys.gml')
    source_with_space = os.path.join(testDataPath, 'filename with spaces.gml')

    alg = OgrToPostGis()
    alg.initAlgorithm()

    # Defaults: localhost:5432, schema "public", table name derived from the input.
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    # Paths containing spaces must be quoted and the table name laundered.
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source_with_space}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 "' + source_with_space + '" filename_with_spaces '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.filename_with_spaces -nlt PROMOTE_TO_MULTI'])

    # Connection parameters are injected into the PG: connection string.
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'HOST': 'google.com'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=google.com port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'PORT': 3333}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=3333 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'USER': 'kevin_bacon'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public user=kevin_bacon" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'DBNAME': 'secret_stuff'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 dbname=secret_stuff active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'PASSWORD': 'passw0rd'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 password=passw0rd active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    # Target schema/table control both active_schema and the -nln layer name.
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'SCHEMA': 'desktop'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=desktop" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln desktop.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'TABLE': 'out_table'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.out_table -nlt PROMOTE_TO_MULTI'])

    # Primary-key handling: empty PK drops -lco FID; PK overrides PRIMARY_KEY.
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'PK': ''}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'PK': 'new_fid'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=new_fid -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'PK': '',
                                'PRIMARY_KEY': 'objectid'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=objectid -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'PK': 'new_id',
                                'PRIMARY_KEY': 'objectid'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=new_id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    # Geometry column name and dimension options.
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'GEOCOLUMN': 'my_geom'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=my_geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'DIM': 1}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=3 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    # Geometry processing and feature filtering switches.
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'SIMPLIFY': 5}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -simplify 5 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'SEGMENTIZE': 4}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -segmentize 4 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'SPAT': QgsRectangle(1, 2, 3, 4)}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -spat 1.0 2.0 3.0 4.0 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'FIELDS': ['f1', 'f2']}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 -select "f1,f2" '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'WHERE': '0=1'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -where "0=1" -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'GT': 2}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -gt 2 -nlt PROMOTE_TO_MULTI'])

    # Boolean flags toggling write-mode and layer-creation options.
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'OVERWRITE': False}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'APPEND': True}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-append -overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'ADDFIELDS': True}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-addfields -overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'LAUNDER': True}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-lco LAUNDER=NO -overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'INDEX': True}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-lco SPATIAL_INDEX=OFF -overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'SKIPFAILURES': True}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -skipfailures -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'PROMOTETOMULTI': False}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'PRECISION': False}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI -lco PRECISION=NO'])

    # Free-form extra options and shapefile encoding config.
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'OPTIONS': 'blah'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI blah'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'SHAPE_ENCODING': 'blah'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES --config SHAPE_ENCODING blah -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    # Explicit output geometry type (GTYPE index 4 -> LINESTRING).
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'GTYPE': 4}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -nlt LINESTRING -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -nlt PROMOTE_TO_MULTI'])

    # SRS parameters accept authority strings, QgsCoordinateReferenceSystem
    # objects, and custom proj4 strings (resolved back to an EPSG code).
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'A_SRS': 'EPSG:3111'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -a_srs EPSG:3111 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'A_SRS': QgsCoordinateReferenceSystem('EPSG:3111')}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -a_srs EPSG:3111 -nlt PROMOTE_TO_MULTI'])

    custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'A_SRS': custom_crs}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -a_srs EPSG:20936 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'T_SRS': 'EPSG:3111'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -t_srs EPSG:3111 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'T_SRS': QgsCoordinateReferenceSystem('EPSG:3111')}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -t_srs EPSG:3111 -nlt PROMOTE_TO_MULTI'])

    custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'T_SRS': custom_crs}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -t_srs EPSG:20936 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'S_SRS': 'EPSG:3111'}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -s_srs EPSG:3111 -nlt PROMOTE_TO_MULTI'])

    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'S_SRS': QgsCoordinateReferenceSystem('EPSG:3111')}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -s_srs EPSG:3111 -nlt PROMOTE_TO_MULTI'])

    custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
    self.assertEqual(
        alg.getConsoleCommands({'INPUT': source,
                                'S_SRS': custom_crs}, context, feedback),
        ['ogr2ogr',
         '-progress --config PG_USE_COPY YES -f PostgreSQL "PG:host=localhost port=5432 active_schema=public" '
         '-lco DIM=2 ' + source + ' polys2 '
         '-overwrite -lco GEOMETRY_NAME=geom -lco FID=id -nln public.polys2 -s_srs EPSG:20936 -nlt PROMOTE_TO_MULTI'])
def testOffsetCurve(self):
    """Verify the ogr2ogr command line built by OffsetCurve (SQLite
    ST_OffsetCurve SQL dialect query)."""
    context = QgsProcessingContext()
    feedback = QgsProcessingFeedback()
    source = os.path.join(testDataPath, 'polys.gml')
    source_with_space = os.path.join(testDataPath, 'filename with spaces.gml')

    alg = OffsetCurve()
    alg.initAlgorithm()

    with tempfile.TemporaryDirectory() as outdir:
        # Distance is interpolated into the ST_OffsetCurve() call as a float.
        self.assertEqual(
            alg.getConsoleCommands({'INPUT': source,
                                    'DISTANCE': 5,
                                    'OUTPUT': outdir + '/check.shp'}, context, feedback),
            ['ogr2ogr',
             outdir + '/check.shp ' +
             source + ' ' +
             '-dialect sqlite -sql "SELECT ST_OffsetCurve(geometry, 5.0) AS geometry,* FROM """polys2"""" ' +
             '-f "ESRI Shapefile"'])
def testOneSidedBuffer(self):
    """Verify the ogr2ogr command lines built by OneSideBuffer (SQLite
    ST_SingleSidedBuffer query, optionally dissolved/exploded/grouped)."""
    context = QgsProcessingContext()
    feedback = QgsProcessingFeedback()
    source = os.path.join(testDataPath, 'polys.gml')
    source_with_space = os.path.join(testDataPath, 'filename with spaces.gml')

    alg = OneSideBuffer()
    alg.initAlgorithm()

    with tempfile.TemporaryDirectory() as outdir:
        # Plain single-sided buffer of the requested distance.
        self.assertEqual(
            alg.getConsoleCommands({'INPUT': source,
                                    'DISTANCE': 5,
                                    'OUTPUT': outdir + '/check.shp'}, context, feedback),
            ['ogr2ogr',
             outdir + '/check.shp ' +
             source + ' ' +
             '-dialect sqlite -sql "SELECT ST_SingleSidedBuffer(geometry, 5.0, 0) AS geometry,* FROM """polys2"""" ' +
             '-f "ESRI Shapefile"'])

        # DISSOLVE wraps the buffer in ST_Union().
        self.assertEqual(
            alg.getConsoleCommands({'INPUT': source,
                                    'DISTANCE': 5,
                                    'DISSOLVE': True,
                                    'OUTPUT': outdir + '/check.shp'}, context, feedback),
            ['ogr2ogr',
             outdir + '/check.shp ' +
             source + ' ' +
             '-dialect sqlite -sql "SELECT ST_Union(ST_SingleSidedBuffer(geometry, 5.0, 0)) AS geometry,* FROM """polys2"""" ' +
             '-f "ESRI Shapefile"'])

        # EXPLODE_COLLECTIONS adds the -explodecollections switch.
        self.assertEqual(
            alg.getConsoleCommands({'INPUT': source,
                                    'DISTANCE': 5,
                                    'EXPLODE_COLLECTIONS': True,
                                    'OUTPUT': outdir + '/check.shp'}, context, feedback),
            ['ogr2ogr',
             outdir + '/check.shp ' +
             source + ' ' +
             '-dialect sqlite -sql "SELECT ST_SingleSidedBuffer(geometry, 5.0, 0) AS geometry,* FROM """polys2"""" ' +
             '-explodecollections -f "ESRI Shapefile"'])

        # FIELD groups (and unions) buffers per attribute value.
        self.assertEqual(
            alg.getConsoleCommands({'INPUT': source,
                                    'DISTANCE': 5,
                                    'FIELD': 'total population',
                                    'OUTPUT': outdir + '/check.shp'}, context, feedback),
            ['ogr2ogr',
             outdir + '/check.shp ' +
             source + ' ' +
             '-dialect sqlite -sql "SELECT ST_Union(ST_SingleSidedBuffer(geometry, 5.0, 0)) AS geometry,* ' +
             'FROM """polys2""" GROUP BY """total population"""" -f "ESRI Shapefile"'])
def testPointsAlongLines(self):
    """Verify the ogr2ogr command line built by PointsAlongLines (SQLite
    ST_Line_Interpolate_Point query)."""
    context = QgsProcessingContext()
    feedback = QgsProcessingFeedback()
    source = os.path.join(testDataPath, 'polys.gml')
    source_with_space = os.path.join(testDataPath, 'filename with spaces.gml')

    alg = PointsAlongLines()
    alg.initAlgorithm()

    with tempfile.TemporaryDirectory() as outdir:
        # DISTANCE is the relative position (0..1) along each line.
        self.assertEqual(
            alg.getConsoleCommands({'INPUT': source,
                                    'DISTANCE': 0.2,
                                    'OUTPUT': outdir + '/check.shp'}, context, feedback),
            ['ogr2ogr',
             outdir + '/check.shp ' +
             source + ' ' +
             '-dialect sqlite -sql "SELECT ST_Line_Interpolate_Point(geometry, 0.2) AS geometry,* FROM """polys2"""" ' +
             '-f "ESRI Shapefile"'])
# Run the test suite via nose2 when this module is executed directly.
if __name__ == '__main__':
    nose2.main()
| gpl-2.0 |
drankye/SSM | smart-zeppelin/travis_check.py | 30 | 4073 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script checks build status of given pullrequest identified by author and commit hash.
#
# usage)
# python travis_check.py [author] [commit hash] [check interval (optional)]
#
# example)
# # full hash
# python travis_check.py Leemoonsoo 1f2549a38f440ebfbfe2d32a041684e3e39b496c
#
# # with short hash
# python travis_check.py Leemoonsoo 1f2549a
#
# # with custom check interval
# python travis_check.py Leemoonsoo 1f2549a 5,60,60
import os, sys, getopt, traceback, json, requests, time
# Required positional arguments: GitHub author and (possibly short) commit hash.
author = sys.argv[1]
commit = sys.argv[2]

# Polling back-off schedule in seconds; can be overridden by a third
# comma-separated argument (e.g. "5,60,60").
check = [5, 60, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 600, 600, 600, 600, 600, 600]
if len(sys.argv) > 3:
    # A list comprehension instead of map(): on Python 3, map() returns a
    # one-shot lazy iterator (no len(), empty on reuse), while a list keeps
    # the schedule a concrete, reusable sequence as the default above is.
    check = [int(x) for x in sys.argv[3].split(",")]


def info(msg):
    """Print *msg* prefixed with a timestamp and flush stdout immediately."""
    print("[" + time.strftime("%Y-%m-%d %H:%M:%S") + "] " + msg)
    sys.stdout.flush()


info("Author: " + author + ", commit: " + commit)
def getBuildStatus(author, commit):
    """Look up the Travis build for *commit* in *author*'s zeppelin repo.

    Queries the latest 25 builds, matches the (possibly abbreviated) commit
    hash, and returns the full build detail dict, or None if no build for
    that commit is found.
    """
    travisApi = "https://api.travis-ci.org/"

    # Latest builds for the repository (Travis returns up to 25).
    listing = requests.get(url=travisApi + "/repos/" + author + "/zeppelin/builds")
    builds = json.loads(listing.text)
    if len(builds) == 0:
        return None

    for entry in builds:
        # Prefix match so short hashes work too.
        if entry["commit"][:len(commit)] == commit:
            detail = requests.get(
                url=travisApi + "/repos/" + author + "/zeppelin/builds/" + str(entry["id"]))
            return json.loads(detail.text)

    return None
def status(index, msg, jobId):
    """Format one job line: a padded '[n] message' label plus the job URL."""
    label = "[" + str(index + 1) + "] " + msg
    url = "https://travis-ci.org/" + author + "/zeppelin/jobs/" + str(jobId)
    return '{:20}'.format(label) + url
def printBuildStatus(build):
    """Print one status line per job in *build* and summarize progress.

    Returns a (failure, running) tuple: the number of jobs that errored or
    finished without a result, and the number still queued or running.
    """
    failure = 0
    running = 0

    for index, job in enumerate(build["matrix"]):
        result = job["result"]
        jobId = job["id"]
        # Use 'is (not) None' rather than '== None' for identity checks.
        if job["started_at"] is None and result is None:
            # Queued: not picked up by a worker yet.
            print(status(index, "Not started", jobId))
            running += 1
        elif job["started_at"] is not None and job["finished_at"] is None:
            print(status(index, "Running ...", jobId))
            running += 1
        elif job["started_at"] is not None and job["finished_at"] is not None:
            if result is None:
                # Finished but no result reported -> treat as a failure.
                print(status(index, "Not completed", jobId))
                failure += 1
            elif result == 0:
                print(status(index, "OK", jobId))
            else:
                print(status(index, "Error " + str(result), jobId))
                failure += 1
        else:
            # started_at is None but finished_at is set: shouldn't happen.
            print(status(index, "Unknown state", jobId))
            failure += 1

    return failure, running
# Poll Travis according to the back-off schedule until the build completes,
# a job fails, or the schedule is exhausted (timeout).
for sleep in check:
    info("--------------------------------")
    time.sleep(sleep);
    info("Get build status ...")
    build = getBuildStatus(author, commit)
    if build == None:
        # No build found for this commit: exit code 2 distinguishes
        # "build missing" from "build failed" (1).
        info("Can't find build for commit " + commit + " from " + author)
        sys.exit(2)

    print("Build https://travis-ci.org/" + author + "/zeppelin/builds/" + str(build["id"]))
    failure, running = printBuildStatus(build)

    print(str(failure) + " job(s) failed, " + str(running) + " job(s) running/pending")

    # Fail fast as soon as any job has failed.
    if failure != 0:
        sys.exit(1)

    # All jobs finished successfully.
    if failure == 0 and running == 0:
        info("CI Green!")
        sys.exit(0)

# Schedule exhausted without a final result.
info("Timeout")
sys.exit(1)
| apache-2.0 |
elena/django | tests/unmanaged_models/tests.py | 133 | 2135 | from django.db import connection
from django.test import TestCase
from .models import A01, A02, B01, B02, C01, C02, Managed1, Unmanaged2
class SimpleTests(TestCase):

    def test_simple(self):
        """
        The main test is that all of the models can be created without any
        database errors. A few simple insertion and lookup checks show that
        the second set of models refers to the same tables as the first set.
        """
        # Insert some data into one set of models.
        a = A01.objects.create(f_a="foo", f_b=42)
        B01.objects.create(fk_a=a, f_a="fred", f_b=1729)
        c = C01.objects.create(f_a="barney", f_b=1)
        c.mm_a.set([a])

        # ... and pull it out via the other set.
        a2 = A02.objects.all()[0]
        self.assertIsInstance(a2, A02)
        self.assertEqual(a2.f_a, "foo")

        b2 = B02.objects.all()[0]
        self.assertIsInstance(b2, B02)
        self.assertEqual(b2.f_a, "fred")

        # Foreign key relations resolve to the parallel model class.
        self.assertIsInstance(b2.fk_a, A02)
        self.assertEqual(b2.fk_a.f_a, "foo")

        self.assertEqual(list(C02.objects.filter(f_a=None)), [])

        # The m2m rows written through C01 are visible through C02.
        resp = list(C02.objects.filter(mm_a=a.id))
        self.assertEqual(len(resp), 1)

        self.assertIsInstance(resp[0], C02)
        self.assertEqual(resp[0].f_a, 'barney')
class ManyToManyUnmanagedTests(TestCase):

    def test_many_to_many_between_unmanaged(self):
        """
        The intermediary table between two unmanaged models should not be created.
        """
        # Derive the through-table name from the unmanaged model's m2m field.
        table = Unmanaged2._meta.get_field('mm').m2m_db_table()
        tables = connection.introspection.table_names()
        self.assertNotIn(table, tables, "Table '%s' should not exist, but it does." % table)

    def test_many_to_many_between_unmanaged_and_managed(self):
        """
        An intermediary table between a managed and an unmanaged model should be created.
        """
        table = Managed1._meta.get_field('mm').m2m_db_table()
        tables = connection.introspection.table_names()
        self.assertIn(table, tables, "Table '%s' does not exist." % table)
| bsd-3-clause |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/protobuf/python/google/protobuf/internal/message_test.py | 224 | 22295 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import copy
import math
import operator
import pickle
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf import message
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
  """Return True if *val* is a NaN (works on Pythons without math.isnan)."""
  # IEEE 754: NaN is the only value that compares unequal to itself.
  return not (val == val)
def isinf(val):
  """Return True if *val* is +/- infinity (works without math.isinf)."""
  # Infinity times zero yields NaN; any finite value times zero does not.
  # The val == val term excludes NaN itself.
  product = val * 0
  return val == val and product != product
def IsPosInf(val):
  """Return True if *val* is positive infinity."""
  return (val > 0) and isinf(val)
def IsNegInf(val):
  """Return True if *val* is negative infinity."""
  return (val < 0) and isinf(val)
class MessageTest(unittest.TestCase):
def testGoldenMessage(self):
  """Golden wire data round-trips through TestAllTypes unchanged."""
  raw = test_util.GoldenFile('golden_message').read()
  parsed = unittest_pb2.TestAllTypes()
  parsed.ParseFromString(raw)
  test_util.ExpectAllFieldsSet(self, parsed)
  self.assertEqual(raw, parsed.SerializeToString())
  # A deep copy must serialize to the same bytes as the original.
  duplicate = copy.deepcopy(parsed)
  self.assertEqual(raw, duplicate.SerializeToString())
def testGoldenExtensions(self):
  """Golden wire data round-trips through TestAllExtensions unchanged."""
  golden_data = test_util.GoldenFile('golden_message').read()
  golden_message = unittest_pb2.TestAllExtensions()
  golden_message.ParseFromString(golden_data)
  all_set = unittest_pb2.TestAllExtensions()
  test_util.SetAllExtensions(all_set)
  # assertEqual, not the deprecated assertEquals alias.
  self.assertEqual(all_set, golden_message)
  self.assertEqual(golden_data, golden_message.SerializeToString())
  # A deep copy must serialize to the same bytes as the original.
  golden_copy = copy.deepcopy(golden_message)
  self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedMessage(self):
  """Golden packed-field wire data round-trips through TestPackedTypes."""
  golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
  golden_message = unittest_pb2.TestPackedTypes()
  golden_message.ParseFromString(golden_data)
  all_set = unittest_pb2.TestPackedTypes()
  test_util.SetAllPackedFields(all_set)
  # assertEqual, not the deprecated assertEquals alias.
  self.assertEqual(all_set, golden_message)
  self.assertEqual(golden_data, all_set.SerializeToString())
  # A deep copy must serialize to the same bytes as the original.
  golden_copy = copy.deepcopy(golden_message)
  self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedExtensions(self):
  """Golden packed-field wire data round-trips through TestPackedExtensions."""
  golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
  golden_message = unittest_pb2.TestPackedExtensions()
  golden_message.ParseFromString(golden_data)
  all_set = unittest_pb2.TestPackedExtensions()
  test_util.SetAllPackedExtensions(all_set)
  # assertEqual, not the deprecated assertEquals alias.
  self.assertEqual(all_set, golden_message)
  self.assertEqual(golden_data, all_set.SerializeToString())
  # A deep copy must serialize to the same bytes as the original.
  golden_copy = copy.deepcopy(golden_message)
  self.assertEqual(golden_data, golden_copy.SerializeToString())
def testPickleSupport(self):
  """A fully-populated message survives a pickle round trip."""
  golden_data = test_util.GoldenFile('golden_message').read()
  golden_message = unittest_pb2.TestAllTypes()
  golden_message.ParseFromString(golden_data)
  pickled_message = pickle.dumps(golden_message)
  unpickled_message = pickle.loads(pickled_message)
  # assertEqual, not the deprecated assertEquals alias.
  self.assertEqual(unpickled_message, golden_message)
def testPickleIncompleteProto(self):
  """A message missing required fields can be pickled but not serialized."""
  golden_message = unittest_pb2.TestRequired(a=1)
  pickled_message = pickle.dumps(golden_message)
  unpickled_message = pickle.loads(pickled_message)
  # assertEqual, not the deprecated assertEquals alias.
  self.assertEqual(unpickled_message, golden_message)
  self.assertEqual(unpickled_message.a, 1)
  # This is still an incomplete proto - so serializing should fail.
  self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)
def testPositiveInfinity(self):
  """+inf float/double fields parse and re-serialize byte-for-byte."""
  # Hand-built wire bytes: optional_float, optional_double, and one
  # repeated_float / repeated_double entry, all set to +infinity.
  golden_data = ('\x5D\x00\x00\x80\x7F'
                 '\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
                 '\xCD\x02\x00\x00\x80\x7F'
                 '\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
  golden_message = unittest_pb2.TestAllTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(IsPosInf(golden_message.optional_float))
  self.assertTrue(IsPosInf(golden_message.optional_double))
  self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
  self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
  self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinity(self):
  """-inf float/double fields parse and re-serialize byte-for-byte."""
  # Same field layout as testPositiveInfinity, but with -infinity payloads.
  golden_data = ('\x5D\x00\x00\x80\xFF'
                 '\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
                 '\xCD\x02\x00\x00\x80\xFF'
                 '\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
  golden_message = unittest_pb2.TestAllTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(IsNegInf(golden_message.optional_float))
  self.assertTrue(IsNegInf(golden_message.optional_double))
  self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
  self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
  self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumber(self):
  """NaN float/double fields parse correctly and survive a round trip."""
  # Same field layout as testPositiveInfinity, but with NaN payloads.
  golden_data = ('\x5D\x00\x00\xC0\x7F'
                 '\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
                 '\xCD\x02\x00\x00\xC0\x7F'
                 '\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
  golden_message = unittest_pb2.TestAllTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(isnan(golden_message.optional_float))
  self.assertTrue(isnan(golden_message.optional_double))
  self.assertTrue(isnan(golden_message.repeated_float[0]))
  self.assertTrue(isnan(golden_message.repeated_double[0]))

  # The protocol buffer may serialize to any one of multiple different
  # representations of a NaN.  Rather than verify a specific representation,
  # verify the serialized string can be converted into a correctly
  # behaving protocol buffer.
  serialized = golden_message.SerializeToString()
  # NOTE: this local deliberately shadows the module-level
  # google.protobuf.message import for the rest of this method.
  message = unittest_pb2.TestAllTypes()
  message.ParseFromString(serialized)
  self.assertTrue(isnan(message.optional_float))
  self.assertTrue(isnan(message.optional_double))
  self.assertTrue(isnan(message.repeated_float[0]))
  self.assertTrue(isnan(message.repeated_double[0]))
def testPositiveInfinityPacked(self):
  """+inf values in packed float/double fields round-trip byte-for-byte."""
  # Wire bytes: packed_float and packed_double each holding one +infinity.
  golden_data = ('\xA2\x06\x04\x00\x00\x80\x7F'
                 '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
  golden_message = unittest_pb2.TestPackedTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(IsPosInf(golden_message.packed_float[0]))
  self.assertTrue(IsPosInf(golden_message.packed_double[0]))
  self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\xFF'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.packed_float[0]))
self.assertTrue(IsNegInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumberPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\xC0\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.packed_float[0]))
self.assertTrue(isnan(golden_message.packed_double[0]))
serialized = golden_message.SerializeToString()
message = unittest_pb2.TestPackedTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.packed_float[0]))
self.assertTrue(isnan(message.packed_double[0]))
def testExtremeFloatValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 127)
message.optional_float = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
message.optional_float = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_float = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
message.optional_float = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -127)
message.optional_float = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
message.optional_float = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_float = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
message.optional_float = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
def testExtremeDoubleValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 1023)
message.optional_double = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
message.optional_double = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_double = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
message.optional_double = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -1023)
message.optional_double = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
message.optional_double = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_double = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
message.optional_double = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testSortingRepeatedScalarFieldsDefaultComparator(self):
"""Check some different types with the default comparator."""
message = unittest_pb2.TestAllTypes()
# TODO(mattp): would testing more scalar types strengthen test?
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_int32.append(2)
message.repeated_int32.sort()
self.assertEqual(message.repeated_int32[0], 1)
self.assertEqual(message.repeated_int32[1], 2)
self.assertEqual(message.repeated_int32[2], 3)
message.repeated_float.append(1.1)
message.repeated_float.append(1.3)
message.repeated_float.append(1.2)
message.repeated_float.sort()
self.assertAlmostEqual(message.repeated_float[0], 1.1)
self.assertAlmostEqual(message.repeated_float[1], 1.2)
self.assertAlmostEqual(message.repeated_float[2], 1.3)
message.repeated_string.append('a')
message.repeated_string.append('c')
message.repeated_string.append('b')
message.repeated_string.sort()
self.assertEqual(message.repeated_string[0], 'a')
self.assertEqual(message.repeated_string[1], 'b')
self.assertEqual(message.repeated_string[2], 'c')
message.repeated_bytes.append('a')
message.repeated_bytes.append('c')
message.repeated_bytes.append('b')
message.repeated_bytes.sort()
self.assertEqual(message.repeated_bytes[0], 'a')
self.assertEqual(message.repeated_bytes[1], 'b')
self.assertEqual(message.repeated_bytes[2], 'c')
  def testSortingRepeatedScalarFieldsCustomComparator(self):
    """Check some different types with custom comparator."""
    message = unittest_pb2.TestAllTypes()
    # Python 2 cmp-style comparator: order by absolute value, so the
    # list sorts to -1, -2, -3.
    message.repeated_int32.append(-3)
    message.repeated_int32.append(-2)
    message.repeated_int32.append(-1)
    message.repeated_int32.sort(lambda x,y: cmp(abs(x), abs(y)))
    self.assertEqual(message.repeated_int32[0], -1)
    self.assertEqual(message.repeated_int32[1], -2)
    self.assertEqual(message.repeated_int32[2], -3)
    # Order strings by length instead of lexicographically.
    message.repeated_string.append('aaa')
    message.repeated_string.append('bb')
    message.repeated_string.append('c')
    message.repeated_string.sort(lambda x,y: cmp(len(x), len(y)))
    self.assertEqual(message.repeated_string[0], 'c')
    self.assertEqual(message.repeated_string[1], 'bb')
    self.assertEqual(message.repeated_string[2], 'aaa')
  def testSortingRepeatedCompositeFieldsCustomComparator(self):
    """Check passing a custom comparator to sort a repeated composite field."""
    message = unittest_pb2.TestAllTypes()
    message.repeated_nested_message.add().bb = 1
    message.repeated_nested_message.add().bb = 3
    message.repeated_nested_message.add().bb = 2
    message.repeated_nested_message.add().bb = 6
    message.repeated_nested_message.add().bb = 5
    message.repeated_nested_message.add().bb = 4
    # Python 2 cmp-style comparator ordering the submessages by 'bb'.
    message.repeated_nested_message.sort(lambda x,y: cmp(x.bb, y.bb))
    self.assertEqual(message.repeated_nested_message[0].bb, 1)
    self.assertEqual(message.repeated_nested_message[1].bb, 2)
    self.assertEqual(message.repeated_nested_message[2].bb, 3)
    self.assertEqual(message.repeated_nested_message[3].bb, 4)
    self.assertEqual(message.repeated_nested_message[4].bb, 5)
    self.assertEqual(message.repeated_nested_message[5].bb, 6)
  def testRepeatedCompositeFieldSortArguments(self):
    """Check sorting a repeated composite field using list.sort() arguments."""
    message = unittest_pb2.TestAllTypes()
    # get_bb / cmp_bb extract and compare the 'bb' subfield, for key= and
    # cmp=-style sorting respectively.
    get_bb = operator.attrgetter('bb')
    cmp_bb = lambda a, b: cmp(a.bb, b.bb)
    message.repeated_nested_message.add().bb = 1
    message.repeated_nested_message.add().bb = 3
    message.repeated_nested_message.add().bb = 2
    message.repeated_nested_message.add().bb = 6
    message.repeated_nested_message.add().bb = 5
    message.repeated_nested_message.add().bb = 4
    message.repeated_nested_message.sort(key=get_bb)
    self.assertEqual([k.bb for k in message.repeated_nested_message],
                     [1, 2, 3, 4, 5, 6])
    message.repeated_nested_message.sort(key=get_bb, reverse=True)
    self.assertEqual([k.bb for k in message.repeated_nested_message],
                     [6, 5, 4, 3, 2, 1])
    # sort_function= appears to be the container's own alias for cmp= —
    # TODO confirm against the RepeatedCompositeFieldContainer API.
    message.repeated_nested_message.sort(sort_function=cmp_bb)
    self.assertEqual([k.bb for k in message.repeated_nested_message],
                     [1, 2, 3, 4, 5, 6])
    message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
    self.assertEqual([k.bb for k in message.repeated_nested_message],
                     [6, 5, 4, 3, 2, 1])
  def testRepeatedScalarFieldSortArguments(self):
    """Check sorting a scalar field using list.sort() arguments."""
    message = unittest_pb2.TestAllTypes()
    # Python 2 cmp-style comparator ordering ints by absolute value.
    abs_cmp = lambda a, b: cmp(abs(a), abs(b))
    message.repeated_int32.append(-3)
    message.repeated_int32.append(-2)
    message.repeated_int32.append(-1)
    message.repeated_int32.sort(key=abs)
    self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
    message.repeated_int32.sort(key=abs, reverse=True)
    self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
    # sort_function= / cmp= exercise the container's comparator spellings —
    # presumably legacy aliases; confirm against the container API.
    message.repeated_int32.sort(sort_function=abs_cmp)
    self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
    message.repeated_int32.sort(cmp=abs_cmp, reverse=True)
    self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
    # Same exercise for strings, ordered by length.
    len_cmp = lambda a, b: cmp(len(a), len(b))
    message.repeated_string.append('aaa')
    message.repeated_string.append('bb')
    message.repeated_string.append('c')
    message.repeated_string.sort(key=len)
    self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
    message.repeated_string.sort(key=len, reverse=True)
    self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
    message.repeated_string.sort(sort_function=len_cmp)
    self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
    message.repeated_string.sort(cmp=len_cmp, reverse=True)
    self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
  def testParsingMerge(self):
    """Check the merge behavior when a required or optional field appears
    multiple times in the input."""
    # Three partial messages whose fields, merged in order, should yield
    # merged_message below.
    messages = [
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes() ]
    messages[0].optional_int32 = 1
    messages[1].optional_int64 = 2
    messages[2].optional_int32 = 3
    messages[2].optional_string = 'hello'
    merged_message = unittest_pb2.TestAllTypes()
    merged_message.optional_int32 = 3
    merged_message.optional_int64 = 2
    merged_message.optional_string = 'hello'
    # The generator serializes each submessage several times so the same
    # field number occurs repeatedly in the wire data fed to TestParsingMerge.
    generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
    generator.field1.extend(messages)
    generator.field2.extend(messages)
    generator.field3.extend(messages)
    generator.ext1.extend(messages)
    generator.ext2.extend(messages)
    generator.group1.add().field1.MergeFrom(messages[0])
    generator.group1.add().field1.MergeFrom(messages[1])
    generator.group1.add().field1.MergeFrom(messages[2])
    generator.group2.add().field1.MergeFrom(messages[0])
    generator.group2.add().field1.MergeFrom(messages[1])
    generator.group2.add().field1.MergeFrom(messages[2])
    data = generator.SerializeToString()
    parsing_merge = unittest_pb2.TestParsingMerge()
    parsing_merge.ParseFromString(data)
    # Required and optional fields should be merged.
    self.assertEqual(parsing_merge.required_all_types, merged_message)
    self.assertEqual(parsing_merge.optional_all_types, merged_message)
    self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
                     merged_message)
    self.assertEqual(parsing_merge.Extensions[
                     unittest_pb2.TestParsingMerge.optional_ext],
                     merged_message)
    # Repeated fields should not be merged.
    self.assertEqual(len(parsing_merge.repeated_all_types), 3)
    self.assertEqual(len(parsing_merge.repeatedgroup), 3)
    self.assertEqual(len(parsing_merge.Extensions[
                     unittest_pb2.TestParsingMerge.repeated_ext]), 3)
def testSortEmptyRepeatedCompositeContainer(self):
"""Exercise a scenario that has led to segfaults in the past.
"""
m = unittest_pb2.TestAllTypes()
m.repeated_nested_message.sort()
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| mit |
cjaymes/pyscap | src/scap/model/oval_5/sc/unix/SymlinkItemElement.py | 1 | 1133 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.sc.ItemType import ItemType
logger = logging.getLogger(__name__)
class SymlinkItemElement(ItemType):
    """OVAL 5 system-characteristics item describing a Unix symbolic link.

    MODEL_MAP declares the XML binding: both child elements are required
    exactly once (min 1, max 1) and map to EntityItemType values.
    """
    MODEL_MAP = {
        'elements': [
            {'tag_name': 'filepath', 'min': 1, 'class': 'scap.model.oval_5.sc.EntityItemType', 'max': 1},
            {'tag_name': 'canonical_path', 'min': 1, 'class': 'scap.model.oval_5.sc.EntityItemType', 'max': 1},
        ],
        'attributes': {
        },
    }
| gpl-3.0 |
mancoast/CPythonPyc_test | cpython/212_test_getopt.py | 15 | 4035 | # test_getopt.py
# David Goodger <dgoodger@bigfoot.com> 2000-08-19
import getopt
from getopt import GetoptError
from test_support import verify, verbose
def expectException(teststr, expected, failure=AssertionError):
    """Executes a statement passed in teststr, and raises an exception
    (failure) if the expected exception is *not* raised."""
    # Python 2 `exec` statement: the snippet runs in this frame, so names
    # such as `getopt` and `GetoptError` resolve via the module globals.
    # Only the expected exception class is swallowed; anything else
    # propagates and fails the test run.
    try:
        exec teststr
    except expected:
        pass
    else:
        raise failure
# Exercise getopt's internal helpers (short_has_arg, long_has_args,
# do_shorts, do_longs) first, then the public getopt() entry point.
if verbose:
    print 'Running tests on getopt.short_has_arg'
verify(getopt.short_has_arg('a', 'a:'))
verify(not getopt.short_has_arg('a', 'a'))
expectException("tmp = getopt.short_has_arg('a', 'b')", GetoptError)
expectException("tmp = getopt.short_has_arg('a', '')", GetoptError)
if verbose:
    print 'Running tests on getopt.long_has_args'
has_arg, option = getopt.long_has_args('abc', ['abc='])
verify(has_arg)
verify(option == 'abc')
has_arg, option = getopt.long_has_args('abc', ['abc'])
verify(not has_arg)
verify(option == 'abc')
# Unique prefix of a long option is accepted and expanded.
has_arg, option = getopt.long_has_args('abc', ['abcd'])
verify(not has_arg)
verify(option == 'abcd')
expectException("has_arg, option = getopt.long_has_args('abc', ['def'])",
                GetoptError)
expectException("has_arg, option = getopt.long_has_args('abc', [])",
                GetoptError)
# Ambiguous prefix must raise.
expectException("has_arg, option = " + \
                "getopt.long_has_args('abc', ['abcd','abcde'])",
                GetoptError)
if verbose:
    print 'Running tests on getopt.do_shorts'
opts, args = getopt.do_shorts([], 'a', 'a', [])
verify(opts == [('-a', '')])
verify(args == [])
opts, args = getopt.do_shorts([], 'a1', 'a:', [])
verify(opts == [('-a', '1')])
verify(args == [])
#opts, args = getopt.do_shorts([], 'a=1', 'a:', [])
#verify(opts == [('-a', '1')])
#verify(args == [])
opts, args = getopt.do_shorts([], 'a', 'a:', ['1'])
verify(opts == [('-a', '1')])
verify(args == [])
opts, args = getopt.do_shorts([], 'a', 'a:', ['1', '2'])
verify(opts == [('-a', '1')])
verify(args == ['2'])
expectException("opts, args = getopt.do_shorts([], 'a1', 'a', [])",
                GetoptError)
expectException("opts, args = getopt.do_shorts([], 'a', 'a:', [])",
                GetoptError)
if verbose:
    print 'Running tests on getopt.do_longs'
opts, args = getopt.do_longs([], 'abc', ['abc'], [])
verify(opts == [('--abc', '')])
verify(args == [])
opts, args = getopt.do_longs([], 'abc=1', ['abc='], [])
verify(opts == [('--abc', '1')])
verify(args == [])
opts, args = getopt.do_longs([], 'abc=1', ['abcd='], [])
verify(opts == [('--abcd', '1')])
verify(args == [])
opts, args = getopt.do_longs([], 'abc', ['ab', 'abc', 'abcd'], [])
verify(opts == [('--abc', '')])
verify(args == [])
# Much like the preceding, except with a non-alpha character ("-") in
# option name that precedes "="; failed in
# http://sourceforge.net/bugs/?func=detailbug&bug_id=126863&group_id=5470
opts, args = getopt.do_longs([], 'foo=42', ['foo-bar', 'foo=',], [])
verify(opts == [('--foo', '42')])
verify(args == [])
expectException("opts, args = getopt.do_longs([], 'abc=1', ['abc'], [])",
                GetoptError)
expectException("opts, args = getopt.do_longs([], 'abc', ['abc='], [])",
                GetoptError)
# note: the empty string between '-a' and '--beta' is significant:
# it simulates an empty string option argument ('-a ""') on the command line.
cmdline = ['-a', '1', '-b', '--alpha=2', '--beta', '-a', '3', '-a', '',
           '--beta', 'arg1', 'arg2']
if verbose:
    print 'Running tests on getopt.getopt'
opts, args = getopt.getopt(cmdline, 'a:b', ['alpha=', 'beta'])
verify(opts == [('-a', '1'), ('-b', ''), ('--alpha', '2'), ('--beta', ''),
                ('-a', '3'), ('-a', ''), ('--beta', '')] )
# Note ambiguity of ('-b', '') and ('-a', '') above. This must be
# accounted for in the code that calls getopt().
verify(args == ['arg1', 'arg2'])
expectException(
    "opts, args = getopt.getopt(cmdline, 'a:b', ['alpha', 'beta'])",
    GetoptError)
if verbose:
    print "Module getopt: tests completed successfully."
| gpl-3.0 |
achals/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_config.py | 166 | 19617 | import py, pytest
import _pytest._code
from _pytest.config import getcfg, get_common_ancestor, determine_setup
from _pytest.main import EXIT_NOTESTSCOLLECTED
class TestParseIni:
    """Discovery and parsing of pytest's ini-style config files
    (setup.cfg / tox.ini / pytest.ini)."""
    def test_getcfg_and_config(self, testdir, tmpdir):
        sub = tmpdir.mkdir("sub")
        sub.chdir()
        tmpdir.join("setup.cfg").write(_pytest._code.Source("""
            [pytest]
            name = value
        """))
        rootdir, inifile, cfg = getcfg([sub], ["setup.cfg"])
        assert cfg['name'] == "value"
        config = testdir.parseconfigure(sub)
        assert config.inicfg['name'] == 'value'
    def test_getcfg_empty_path(self, tmpdir):
        # Must not blow up on an empty-string path argument.
        getcfg([''], ['setup.cfg']) #happens on py.test ""
    def test_append_parse_args(self, testdir, tmpdir, monkeypatch):
        # Options from PYTEST_ADDOPTS and ini addopts are both applied.
        monkeypatch.setenv('PYTEST_ADDOPTS', '--color no -rs --tb="short"')
        tmpdir.join("setup.cfg").write(_pytest._code.Source("""
            [pytest]
            addopts = --verbose
        """))
        config = testdir.parseconfig(tmpdir)
        assert config.option.color == 'no'
        assert config.option.reportchars == 's'
        assert config.option.tbstyle == 'short'
        assert config.option.verbose
        #config = testdir.Config()
        #args = [tmpdir,]
        #config._preparse(args, addopts=False)
        #assert len(args) == 1
    def test_tox_ini_wrong_version(self, testdir):
        # A minversion newer than the running pytest aborts with an error.
        testdir.makefile('.ini', tox="""
            [pytest]
            minversion=9.0
        """)
        result = testdir.runpytest()
        assert result.ret != 0
        result.stderr.fnmatch_lines([
            "*tox.ini:2*requires*9.0*actual*"
        ])
    @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
    def test_ini_names(self, testdir, name):
        # All three supported file names are recognized.
        testdir.tmpdir.join(name).write(py.std.textwrap.dedent("""
            [pytest]
            minversion = 1.0
        """))
        config = testdir.parseconfig()
        assert config.getini("minversion") == "1.0"
    def test_toxini_before_lower_pytestini(self, testdir):
        # A tox.ini closer to the args wins over a pytest.ini higher up.
        sub = testdir.tmpdir.mkdir("sub")
        sub.join("tox.ini").write(py.std.textwrap.dedent("""
            [pytest]
            minversion = 2.0
        """))
        testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent("""
            [pytest]
            minversion = 1.5
        """))
        config = testdir.parseconfigure(sub)
        assert config.getini("minversion") == "2.0"
    @pytest.mark.xfail(reason="probably not needed")
    def test_confcutdir(self, testdir):
        sub = testdir.mkdir("sub")
        sub.chdir()
        testdir.makeini("""
            [pytest]
            addopts = --qwe
        """)
        result = testdir.inline_run("--confcutdir=.")
        assert result.ret == 0
class TestConfigCmdlineParsing:
    """Command-line parsing behavior of the Config object."""
    def test_parsing_again_fails(self, testdir):
        # Config.parse() is one-shot; a second call must raise.
        config = testdir.parseconfig()
        pytest.raises(AssertionError, lambda: config.parse([]))
    def test_explicitly_specified_config_file_is_loaded(self, testdir):
        # -c <file> overrides the normally-discovered ini file.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("custom", "")
        """)
        testdir.makeini("""
            [pytest]
            custom = 0
        """)
        testdir.makefile(".cfg", custom = """
            [pytest]
            custom = 1
        """)
        config = testdir.parseconfig("-c", "custom.cfg")
        assert config.getini("custom") == "1"
class TestConfigAPI:
    """Public Config API: trace, getoption/getvalue(orskip), addini and
    the typed ini-value accessors."""
    def test_config_trace(self, testdir):
        config = testdir.parseconfig()
        l = []
        config.trace.root.setwriter(l.append)
        config.trace("hello")
        assert len(l) == 1
        assert l[0] == "hello [config]\n"
    def test_config_getoption(self, testdir):
        # dest name, long flag and short flag all resolve to the same value.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addoption("--hello", "-X", dest="hello")
        """)
        config = testdir.parseconfig("--hello=this")
        for x in ("hello", "--hello", "-X"):
            assert config.getoption(x) == "this"
        pytest.raises(ValueError, "config.getoption('qweqwe')")
    @pytest.mark.skipif('sys.version_info[:2] not in [(2, 6), (2, 7)]')
    def test_config_getoption_unicode(self, testdir):
        testdir.makeconftest("""
            from __future__ import unicode_literals
            def pytest_addoption(parser):
                parser.addoption('--hello', type='string')
        """)
        config = testdir.parseconfig('--hello=this')
        assert config.getoption('hello') == 'this'
    def test_config_getvalueorskip(self, testdir):
        # Missing value -> skip exception; present value -> returned as-is.
        config = testdir.parseconfig()
        pytest.raises(pytest.skip.Exception,
            "config.getvalueorskip('hello')")
        verbose = config.getvalueorskip("verbose")
        assert verbose == config.option.verbose
    def test_config_getvalueorskip_None(self, testdir):
        # A declared option whose value is None also triggers a skip.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addoption("--hello")
        """)
        config = testdir.parseconfig()
        with pytest.raises(pytest.skip.Exception):
            config.getvalueorskip('hello')
    def test_getoption(self, testdir):
        config = testdir.parseconfig()
        with pytest.raises(ValueError):
            config.getvalue('x')
        # getoption() honors an explicit default for unknown names.
        assert config.getoption("x", 1) == 1
    def test_getconftest_pathlist(self, testdir, tmpdir):
        somepath = tmpdir.join("x", "y", "z")
        p = tmpdir.join("conftest.py")
        p.write("pathlist = ['.', %r]" % str(somepath))
        config = testdir.parseconfigure(p)
        assert config._getconftest_pathlist('notexist', path=tmpdir) is None
        pl = config._getconftest_pathlist('pathlist', path=tmpdir)
        print(pl)
        assert len(pl) == 2
        # Relative entries are resolved against the conftest's directory.
        assert pl[0] == tmpdir
        assert pl[1] == somepath
    def test_addini(self, testdir):
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("myname", "my new ini value")
        """)
        testdir.makeini("""
            [pytest]
            myname=hello
        """)
        config = testdir.parseconfig()
        val = config.getini("myname")
        assert val == "hello"
        pytest.raises(ValueError, config.getini, 'other')
    def test_addini_pathlist(self, testdir):
        # type="pathlist" values resolve relative to the ini file location.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("paths", "my new ini value", type="pathlist")
                parser.addini("abc", "abc value")
        """)
        p = testdir.makeini("""
            [pytest]
            paths=hello world/sub.py
        """)
        config = testdir.parseconfig()
        l = config.getini("paths")
        assert len(l) == 2
        assert l[0] == p.dirpath('hello')
        assert l[1] == p.dirpath('world/sub.py')
        pytest.raises(ValueError, config.getini, 'other')
    def test_addini_args(self, testdir):
        # type="args" splits shell-style, honoring quoted segments.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("args", "new args", type="args")
                parser.addini("a2", "", "args", default="1 2 3".split())
        """)
        testdir.makeini("""
            [pytest]
            args=123 "123 hello" "this"
        """)
        config = testdir.parseconfig()
        l = config.getini("args")
        assert len(l) == 3
        assert l == ["123", "123 hello", "this"]
        l = config.getini("a2")
        assert l == list("123")
    def test_addini_linelist(self, testdir):
        # type="linelist" yields one entry per non-empty line.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("xy", "", type="linelist")
                parser.addini("a2", "", "linelist")
        """)
        testdir.makeini("""
            [pytest]
            xy= 123 345
                second line
        """)
        config = testdir.parseconfig()
        l = config.getini("xy")
        assert len(l) == 2
        assert l == ["123 345", "second line"]
        l = config.getini("a2")
        assert l == []
    @pytest.mark.parametrize('str_val, bool_val',
        [('True', True), ('no', False), ('no-ini', True)])
    def test_addini_bool(self, testdir, str_val, bool_val):
        # 'no-ini' means no ini file is written, so the default (True) applies.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("strip", "", type="bool", default=True)
        """)
        if str_val != 'no-ini':
            testdir.makeini("""
                [pytest]
                strip=%s
            """ % str_val)
        config = testdir.parseconfig()
        assert config.getini("strip") is bool_val
    def test_addinivalue_line_existing(self, testdir):
        # addinivalue_line() appends to an existing linelist value.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("xy", "", type="linelist")
        """)
        testdir.makeini("""
            [pytest]
            xy= 123
        """)
        config = testdir.parseconfig()
        l = config.getini("xy")
        assert len(l) == 1
        assert l == ["123"]
        config.addinivalue_line("xy", "456")
        l = config.getini("xy")
        assert len(l) == 2
        assert l == ["123", "456"]
    def test_addinivalue_line_new(self, testdir):
        # addinivalue_line() also works when the value was previously empty.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("xy", "", type="linelist")
        """)
        config = testdir.parseconfig()
        assert not config.getini("xy")
        config.addinivalue_line("xy", "456")
        l = config.getini("xy")
        assert len(l) == 1
        assert l == ["456"]
        config.addinivalue_line("xy", "123")
        l = config.getini("xy")
        assert len(l) == 2
        assert l == ["456", "123"]
class TestConfigFromdictargs:
    """Config.fromdictargs(): building a Config from an option dict plus
    argument list (used e.g. by programmatic invocations)."""
    def test_basic_behavior(self):
        from _pytest.config import Config
        option_dict = {
            'verbose': 444,
            'foo': 'bar',
            'capture': 'no',
        }
        args = ['a', 'b']
        config = Config.fromdictargs(option_dict, args)
        # A fromdictargs-built config is already parsed; parsing again fails.
        with pytest.raises(AssertionError):
            config.parse(['should refuse to parse again'])
        assert config.option.verbose == 444
        assert config.option.foo == 'bar'
        assert config.option.capture == 'no'
        assert config.args == args
    def test_origargs(self):
        """Show that fromdictargs can handle args in their "orig" format"""
        from _pytest.config import Config
        option_dict = {}
        args = ['-vvvv', '-s', 'a', 'b']
        config = Config.fromdictargs(option_dict, args)
        # Options are parsed out of args; only positional args remain.
        assert config.args == ['a', 'b']
        assert config._origargs == args
        assert config.option.verbose == 4
        assert config.option.capture == 'no'
    def test_inifilename(self, tmpdir):
        tmpdir.join("foo/bar.ini").ensure().write(_pytest._code.Source("""
            [pytest]
            name = value
        """))
        from _pytest.config import Config
        inifile = '../../foo/bar.ini'
        option_dict = {
            'inifilename': inifile,
            'capture': 'no',
        }
        cwd = tmpdir.join('a/b')
        # A pytest.ini in the cwd that must NOT win over the explicit one.
        cwd.join('pytest.ini').ensure().write(_pytest._code.Source("""
            [pytest]
            name = wrong-value
            should_not_be_set = true
        """))
        with cwd.ensure(dir=True).as_cwd():
            config = Config.fromdictargs(option_dict, ())
        assert config.args == [str(cwd)]
        assert config.option.inifilename == inifile
        assert config.option.capture == 'no'
        # this indicates this is the file used for getting configuration values
        assert config.inifile == inifile
        assert config.inicfg.get('name') == 'value'
        assert config.inicfg.get('should_not_be_set') is None
def test_options_on_small_file_do_not_blow_up(testdir):
    # Run the same two-failure module under a variety of reporting options;
    # none of them should change the outcome counts or crash.
    def runfiletest(opts):
        reprec = testdir.inline_run(*opts)
        passed, skipped, failed = reprec.countoutcomes()
        assert failed == 2
        assert skipped == passed == 0
    path = testdir.makepyfile("""
        def test_f1(): assert 0
        def test_f2(): assert 0
    """)
    for opts in ([], ['-l'], ['-s'], ['--tb=no'], ['--tb=short'],
                 ['--tb=long'], ['--fulltrace'], ['--nomagic'],
                 ['--traceconfig'], ['-v'], ['-v', '-v']):
        runfiletest(opts + [path])
def test_preparse_ordering_with_setuptools(testdir, monkeypatch):
    # A plugin requested via conftest pytest_plugins / PYTEST_PLUGINS must
    # be loadable from a (faked) setuptools "pytest11" entry point.
    pkg_resources = pytest.importorskip("pkg_resources")
    def my_iter(name):
        assert name == "pytest11"
        class EntryPoint:
            name = "mytestplugin"
            class dist:
                pass
            def load(self):
                class PseudoPlugin:
                    x = 42
                return PseudoPlugin()
        return iter([EntryPoint()])
    monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
    testdir.makeconftest("""
        pytest_plugins = "mytestplugin",
    """)
    monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin")
    config = testdir.parseconfig()
    plugin = config.pluginmanager.getplugin("mytestplugin")
    assert plugin.x == 42
def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch):
    # "-p no:<name>" during preparse must prevent the entry point from
    # ever being load()ed (load() would assert if called).
    pkg_resources = pytest.importorskip("pkg_resources")
    def my_iter(name):
        assert name == "pytest11"
        class EntryPoint:
            name = "mytestplugin"
            def load(self):
                assert 0, "should not arrive here"
        return iter([EntryPoint()])
    monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
    config = testdir.parseconfig("-p", "no:mytestplugin")
    plugin = config.pluginmanager.getplugin("mytestplugin")
    assert plugin is None
def test_cmdline_processargs_simple(testdir):
    # A pytest_cmdline_preparse hook may append arguments before parsing;
    # appending "-h" should produce the help output.
    testdir.makeconftest("""
        def pytest_cmdline_preparse(args):
            args.append("-h")
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*pytest*",
        "*-h*",
    ])
def test_invalid_options_show_extra_information(testdir):
    """display extra information when pytest exits due to unrecognized
    options in the command-line"""
    testdir.makeini("""
        [pytest]
        addopts = --invalid-option
    """)
    result = testdir.runpytest()
    # The error output must name the offending option as well as the
    # inifile and rootdir that were in effect.
    result.stderr.fnmatch_lines([
        "*error: unrecognized arguments: --invalid-option*",
        "* inifile: %s*" % testdir.tmpdir.join('tox.ini'),
        "* rootdir: %s*" % testdir.tmpdir,
    ])
@pytest.mark.parametrize('args', [
    ['dir1', 'dir2', '-v'],
    ['dir1', '-v', 'dir2'],
    ['dir2', '-v', 'dir1'],
    ['-v', 'dir2', 'dir1'],
])
def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args):
    """
    Consider all arguments in the command-line for rootdir and inifile
    discovery, even if they happen to occur after an option. #949
    """
    # replace "dir1" and "dir2" from "args" into their real directory
    root = testdir.tmpdir.mkdir('myroot')
    d1 = root.mkdir('dir1')
    d2 = root.mkdir('dir2')
    for i, arg in enumerate(args):
        if arg == 'dir1':
            args[i] = d1
        elif arg == 'dir2':
            args[i] = d2
    result = testdir.runpytest(*args)
    # Whatever the option position, rootdir must resolve to myroot.
    result.stdout.fnmatch_lines(['*rootdir: *myroot, inifile: '])
@pytest.mark.skipif("sys.platform == 'win32'")
def test_toolongargs_issue224(testdir):
    # A very long -m expression must not crash option handling (issue #224);
    # it simply collects no tests.
    result = testdir.runpytest("-m", "hello" * 500)
    assert result.ret == EXIT_NOTESTSCOLLECTED
def test_notify_exception(testdir, capfd):
    config = testdir.parseconfig()
    excinfo = pytest.raises(ValueError, "raise ValueError(1)")
    # With no plugin handling the error, the report goes to stderr.
    config.notify_exception(excinfo)
    out, err = capfd.readouterr()
    assert "ValueError" in err
    class A:
        def pytest_internalerror(self, excrepr):
            return True
    # Once a plugin returns True from pytest_internalerror, nothing is printed.
    config.pluginmanager.register(A())
    config.notify_exception(excinfo)
    out, err = capfd.readouterr()
    assert not err
def test_load_initial_conftest_last_ordering(testdir):
    from _pytest.config import get_config
    pm = get_config().pluginmanager
    class My:
        def pytest_load_initial_conftests(self):
            pass
    m = My()
    pm.register(m)
    hc = pm.hook.pytest_load_initial_conftests
    l = hc._nonwrappers + hc._wrappers
    # The tail of the hook's caller list must be: _pytest.config's
    # implementation, then the freshly registered user plugin, then
    # _pytest.capture's.
    assert l[-1].function.__module__ == "_pytest.capture"
    assert l[-2].function == m.pytest_load_initial_conftests
    assert l[-3].function.__module__ == "_pytest.config"
class TestWarning:
    """config.warn() / node.warn() plumbing through pytest_logwarning and
    the terminal warning summary."""
    def test_warn_config(self, testdir):
        # config.warn() during configure triggers pytest_logwarning with the
        # same code/message.
        testdir.makeconftest("""
            l = []
            def pytest_configure(config):
                config.warn("C1", "hello")
            def pytest_logwarning(code, message):
                if message == "hello" and code == "C1":
                    l.append(1)
        """)
        testdir.makepyfile("""
            def test_proper(pytestconfig):
                import conftest
                assert conftest.l == [1]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
    def test_warn_on_test_item_from_request(self, testdir):
        # node.warn() is counted in the outcome summary; the message text is
        # only shown when -rw requests the warning summary.
        testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def fix(request):
                request.node.warn("T1", "hello")
            def test_hello(fix):
                pass
        """)
        result = testdir.runpytest()
        assert result.parseoutcomes()["pytest-warnings"] > 0
        assert "hello" not in result.stdout.str()
        result = testdir.runpytest("-rw")
        result.stdout.fnmatch_lines("""
            ===*pytest-warning summary*===
            *WT1*test_warn_on_test_item*:5*hello*
        """)
class TestRootdir:
    """rootdir / inifile discovery via get_common_ancestor and determine_setup."""

    def test_simple_noini(self, tmpdir):
        # Without an ini file, rootdir is the common ancestor of the args
        # (or the cwd when no args are given).
        assert get_common_ancestor([tmpdir]) == tmpdir
        assert get_common_ancestor([tmpdir.mkdir("a"), tmpdir]) == tmpdir
        assert get_common_ancestor([tmpdir, tmpdir.join("a")]) == tmpdir
        with tmpdir.as_cwd():
            assert get_common_ancestor([]) == tmpdir

    @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
    def test_with_ini(self, tmpdir, name):
        inifile = tmpdir.join(name)
        inifile.write("[pytest]\n")
        a = tmpdir.mkdir("a")
        b = a.mkdir("b")
        for args in ([tmpdir], [a], [b]):
            rootdir, found_inifile, inicfg = determine_setup(None, args)
            assert rootdir == tmpdir
            # BUG FIX: the original rebound ``inifile`` in the unpacking and
            # then asserted ``inifile == inifile`` (vacuously true); compare
            # against the ini file created above instead.
            assert found_inifile == inifile
        rootdir, found_inifile, inicfg = determine_setup(None, [b, a])
        assert rootdir == tmpdir
        assert found_inifile == inifile

    @pytest.mark.parametrize("name", "setup.cfg tox.ini".split())
    def test_pytestini_overides_empty_other(self, tmpdir, name):
        # pytest.ini at the top wins over an empty setup.cfg/tox.ini in a
        # subdirectory.  (Same vacuous-assertion fix as test_with_ini.)
        inifile = tmpdir.ensure("pytest.ini")
        a = tmpdir.mkdir("a")
        a.ensure(name)
        rootdir, found_inifile, inicfg = determine_setup(None, [a])
        assert rootdir == tmpdir
        assert found_inifile == inifile

    def test_setuppy_fallback(self, tmpdir):
        # With no [pytest] ini section anywhere, a setup.py marks the rootdir.
        a = tmpdir.mkdir("a")
        a.ensure("setup.cfg")
        tmpdir.ensure("setup.py")
        rootdir, inifile, inicfg = determine_setup(None, [a])
        assert rootdir == tmpdir
        assert inifile is None
        assert inicfg == {}

    def test_nothing(self, tmpdir):
        # No ini file and no setup.py: rootdir falls back to the argument dir.
        rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
        assert rootdir == tmpdir
        assert inifile is None
        assert inicfg == {}

    def test_with_specific_inifile(self, tmpdir):
        # An explicitly passed inifile anchors rootdir at its directory.
        inifile = tmpdir.ensure("pytest.ini")
        rootdir, found_inifile, inicfg = determine_setup(inifile, [tmpdir])
        assert rootdir == tmpdir
| mpl-2.0 |
import os
import sys
import logging
import traceback
import configparser
import time
import subprocess
import pefile
import peutils
import hashlib
import xlsxwriter
import chardet
from collections import defaultdict
from Singleton import *
from ExcelInfo import *
from HashInfo import *
class CPeid( metaclass = Singleton ) :
    """Singleton wrapper around a peutils PEiD signature database."""

    def __init__( self , aPatternFile ) :
        """Load signature definitions from aPatternFile (UTF-8 text, userdb format)."""
        with open( aPatternFile , "rt" , encoding = "utf8" ) as dbFile :
            self._sig = peutils.SignatureDatabase( data = dbFile.read() )

    def Match( self , aPe , aEpOnly = True , aSectionStartOnly = False ) :
        """Return the first signature match for aPe (entry point only by default)."""
        return self._sig.match( aPe , aEpOnly , aSectionStartOnly )

    def MatchAll( self , aPe , aEpOnly = True , aSectionStartOnly = False ) :
        """Return every signature match for aPe."""
        return self._sig.match_all( aPe , aEpOnly , aSectionStartOnly )
def GetFileHashes( aFilePath , aHasherNames , aBlocksize = 64 * 1024 ):
    """Hash the file at aFilePath with every algorithm named in aHasherNames.

    The file is read once in aBlocksize-byte chunks and each chunk is fed to
    all hashers, so large files are hashed without loading them into memory.
    Returns the hex digests in the same order as aHasherNames.
    """
    hashers = [ hashlib.new( name ) for name in aHasherNames ]
    with open( aFilePath , "rb" ) as inFile :
        while True :
            chunk = inFile.read( aBlocksize )
            if not chunk :
                break
            for hasher in hashers :
                hasher.update( chunk )
    return [ hasher.hexdigest() for hasher in hashers ]
def GetPeid( aPatternFile , aPe ) :
    """Return a flat list of all PEiD signature names matching aPe.

    aPatternFile is the userdb.txt signature file; aPe is a pefile.PE object.
    """
    matches = CPeid( aPatternFile ).MatchAll( aPe )
    if matches is None :
        return []
    # MatchAll yields one list of names per match; flatten them.
    return [ name for group in matches for name in group ]
def GetCompileTime( aPe ) :
    """Return the PE header TimeDateStamp as a 'YYYY-MM-DD HH:MM:SS' UTC string."""
    utcTime = time.gmtime( aPe.FILE_HEADER.TimeDateStamp )
    return time.strftime( "%Y-%m-%d %H:%M:%S" , utcTime )
def GetPdbStrings( aPe ) :
    """Extract PDB path strings from the PE debug directory of aPe.

    Supports both PDB 2.0 (NB10, path at offset 0x10) and PDB 7.0 (RSDS,
    path at offset 0x18) CodeView records; see
    http://www.debuginfo.com/articles/debuginfomatch.html#debuginfoinpe
    and http://www.godevtool.com/Other/pdb.htm for the record layouts.
    Returns a sorted list of "(<encoding>) <path>" strings.  Any failure is
    logged and a partial (possibly empty) result is returned.
    """
    lsPdbs = []
    MAX_PATH = 260
    try :
        if hasattr( aPe , "DIRECTORY_ENTRY_DEBUG" ) :
            for dbg in aPe.DIRECTORY_ENTRY_DEBUG :
                if dbg.struct.Type != 2 :   # only IMAGE_DEBUG_TYPE_CODEVIEW entries
                    continue
                if dbg.struct.AddressOfRawData == 0 :   # PDB 2.0
                    dbgRva = aPe.get_rva_from_offset( dbg.struct.PointerToRawData )
                    rawPdb = aPe.get_data( dbgRva + 0x10 , MAX_PATH )
                else :   # PDB 7.0
                    rawPdb = aPe.get_data( dbg.struct.AddressOfRawData + 0x18 , MAX_PATH )
                # The path is NUL-terminated inside the fixed-size buffer.
                nulPos = rawPdb.find( b"\x00" )
                if nulPos != -1 :
                    rawPdb = rawPdb[ : nulPos ]
                dictEncoding = chardet.detect( rawPdb )
                # chardet may return {"encoding": None}; decoding with None
                # would raise TypeError, so treat it as unknown as well.
                if dictEncoding and dictEncoding["encoding"] :
                    encName = dictEncoding["encoding"]
                    lsPdbs.append( "({}) {}".format( encName , rawPdb.decode(encName) ) )
                else :
                    # Bug fix: the old literal "({Unknown}) {}" contained the
                    # named replacement field {Unknown} and raised
                    # KeyError: 'Unknown' whenever this branch was taken.
                    lsPdbs.append( "(Unknown) {}".format( rawPdb ) )
    except Exception :
        logging.exception( "GetPdbStrings() failed" )
    return sorted( lsPdbs )
def GetExportFuncs( aPe ) :
    """Return the sorted names of symbols exported by aPe.

    pefile reports names as bytes, and None for symbols exported by ordinal
    only.  Nameless exports are skipped: the old code collected them and
    then crashed in sorted(), since Python 3 cannot order None against bytes.
    Returns [] when the PE has no export directory.
    """
    if not hasattr( aPe , "DIRECTORY_ENTRY_EXPORT" ) :
        return []
    return sorted( exp.name for exp in aPe.DIRECTORY_ENTRY_EXPORT.symbols
                   if exp.name is not None )
def HandleBasicInfo( aFilePaths , aConfig , aExcel , aExcelFmts , aMainDir ) :
    """Print and (optionally) record basic PE information for each file.

    For every path in aFilePaths this prints file hashes, PEiD matches,
    import hash, compile time, PDB strings and exported functions (each
    switchable via the [Features] section of aConfig).  When
    [General]WriteExcel is enabled the same data is mirrored into the
    "BasicInfo" worksheet of the xlsxwriter workbook aExcel using the
    pre-created formats in aExcelFmts.  aMainDir is the program root used
    to locate _Tools/userdb.txt.  Non-PE and unreadable files are logged
    and skipped; hashes are still computed for non-PE files.
    """
    #Get config
    bWriteExcel = ( False != aConfig.getboolean( "General" , "WriteExcel" ) )

    #Set interesting fields information
    SHEET_NAME = "BasicInfo"
    sheetInfo = CExcelSheetInfo( SHEET_NAME )
    sheetInfo.AddColumn( "FileName" , CExcelColumnInfo( 0 , "FileName" , 20 , aExcelFmts["WrapVcenter"] ) )
    sheetInfo.AddColumn( "BasicHash" , CExcelColumnInfo( 1 , "BasicHash" , 46 , aExcelFmts["WrapTop"] ) )
    sheetInfo.AddColumn( "PEID" , CExcelColumnInfo( 2 , "PEID" , 32 , aExcelFmts["WrapTop"] ) )
    sheetInfo.AddColumn( "ImpHash" , CExcelColumnInfo( 3 , "ImpHash" , 32 , aExcelFmts["Top"] ) )
    sheetInfo.AddColumn( "CompileTime" , CExcelColumnInfo( 4 , "CompileTime" , 18 , aExcelFmts["Top"] ) )
    sheetInfo.AddColumn( "PDB" , CExcelColumnInfo( 5 , "PDB" , 90 , aExcelFmts["WrapTop"] ) )
    sheetInfo.AddColumn( "ExportFunc" , CExcelColumnInfo( 6 , "ExportFunc" , 90 , aExcelFmts["WrapTop"] ) )

    sheet = None
    if bWriteExcel :
        #Initialize sheet by sheetInfo, reusing an existing sheet with the same name
        for sheet in aExcel.worksheets() :
            if sheet.get_name() == SHEET_NAME :
                break
        if sheet == None or sheet.get_name() != SHEET_NAME :
            sheet = aExcel.add_worksheet( SHEET_NAME )
        #Set column layout in excel
        for info in sheetInfo.GetColumns().values() :
            sheet.set_column( "{}:{}".format(info.strColId,info.strColId) , info.nColWidth , info.strColFormat )

    #Start to get file information
    uCount = 0
    for strFilePath in aFilePaths :
        try :
            #Write default value for all fields.
            #Bug fix: the old code wrote to "sheet" unconditionally here and
            #raised NameError when WriteExcel was disabled (sheet is only
            #created inside the bWriteExcel branch above).
            if bWriteExcel :
                for info in sheetInfo.GetColumns().values() :
                    sheet.write( uCount + 1 , info.nColIndex , "<NULL>" )

            #Name
            print( "{}:".format( os.path.basename(strFilePath) ) )
            if bWriteExcel :
                sheet.write( uCount + 1 , sheetInfo.GetColumn("FileName").nColIndex , "{}{}({})".format(os.path.basename(strFilePath),os.linesep,strFilePath) )

            #Basic hash
            if ( False != aConfig.getboolean( "Features" , "BasicHash" ) ) :
                lsHasherNames = [ "md5" , "sha1" , "sha256" ] #Case-sensitive
                lsHashes = GetFileHashes( strFilePath , lsHasherNames )
                CHashes().Add( CHashItem( aMd5 = lsHashes[lsHasherNames.index("md5")] , aSha1 = lsHashes[lsHasherNames.index("sha1")] , aSha256 = lsHashes[lsHasherNames.index("sha256")] ) )
                strTmpHash = ""
                for strHasherName , strHash in zip(lsHasherNames , lsHashes) :
                    strHasherNameDisplay = strHasherName.upper() if strHasherName.islower() or strHasherName.isupper() else strHasherName
                    print( " {:16}{}".format( strHasherNameDisplay , strHash ) )
                    if bWriteExcel :
                        if 0 < len(strTmpHash) :
                            strTmpHash += os.linesep
                        strTmpHash += "{}={}".format(strHasherNameDisplay,strHash)
                if bWriteExcel :
                    sheet.write( uCount + 1 , sheetInfo.GetColumn("BasicHash").nColIndex , strTmpHash )

            #Put pe initialization here so hash calculation above still runs
            #even when the file is not a valid PE (PEFormatError is raised here)
            pe = pefile.PE( strFilePath )

            if ( False != aConfig.getboolean( "Features" , "PEID" ) ) :
                #os.path.join instead of the old hard-coded "\\" separators
                #so the path also works on non-Windows platforms
                lsPeid = GetPeid( os.path.join( aMainDir , "_Tools" , "userdb.txt" ) , pe )
                print( " {:16}{}".format( "PEID" , lsPeid ) )
                if bWriteExcel :
                    sheet.write( uCount + 1 , sheetInfo.GetColumn("PEID").nColIndex , os.linesep.join(lsPeid) )

            if ( False != aConfig.getboolean( "Features" , "ImpHash" ) ) :
                strImpHash = pe.get_imphash()
                print( " {:16}{}".format( "ImpHash" , strImpHash ) )
                if bWriteExcel :
                    sheet.write( uCount + 1 , sheetInfo.GetColumn("ImpHash").nColIndex , strImpHash )

            if ( False != aConfig.getboolean( "Features" , "CompileTime" ) ) :
                strCompileTime = GetCompileTime( pe )
                print( " {:16}{}".format( "CompileTime" , strCompileTime ) )
                if bWriteExcel :
                    sheet.write( uCount + 1 , sheetInfo.GetColumn("CompileTime").nColIndex , strCompileTime )

            if ( False != aConfig.getboolean( "Features" , "PDB" ) ) :
                lsPdb = GetPdbStrings( pe )
                print( " {:16}{}".format( "PDB" , lsPdb ) )
                if bWriteExcel :
                    sheet.write( uCount + 1 , sheetInfo.GetColumn("PDB").nColIndex , os.linesep.join(lsPdb) )

            if ( False != aConfig.getboolean( "Features" , "ExportFunc" ) ) :
                lsExportFuncs = GetExportFuncs( pe )
                print( " {:16}{}".format( "Export" , lsExportFuncs ) )
                if bWriteExcel :
                    sheet.write( uCount + 1 , sheetInfo.GetColumn("ExportFunc").nColIndex , os.linesep.join(lsExportFuncs) )
        except pefile.PEFormatError :
            logging.warning( "{} is not a valid PE file".format(strFilePath) )
        except PermissionError :
            logging.warning( "{} can not be opened".format(strFilePath) )

        print( "\n" )
        uCount = uCount + 1

    #Make an excel table so one can find correlations easily
    if bWriteExcel :
        lsColumns = []
        for i in range ( 0 , len(sheetInfo.GetColumns()) ) :
            lsColumns.append( { "header" : sheetInfo.GetColNameByIndex(i) } )
        sheet.add_table( "A1:{}{}".format(chr( ord('A')+len(sheetInfo.GetColumns())-1 ) , uCount+1) ,
                         { "header_row" : True , "columns" : lsColumns }
                       )
        sheet.freeze_panes( 1 , 1 )
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.