| code (string, lengths 3–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
import os
import numpy as np
from pkg_resources import resource_filename
from digicampipe.io.event_stream import event_stream
from digicampipe.calib.trigger import fill_trigger_input_7, fill_trigger_patch,\
fill_digicam_baseline
example_file_path = resource_filename(
'digicampipe',
os.path.join(
'tests',
'resources',
'SST1M_01_20180918_261.fits.fz'
)
)
def test_trigger_input_7():
events = event_stream([example_file_path])
events = fill_trigger_input_7(events)
for event in events:
tel = event.r0.tels_with_data[0]
assert np.all(np.isfinite(event.r0.tel[tel].trigger_input_7))
def test_compute_trigger_input_7():
events = event_stream([example_file_path])
events = fill_digicam_baseline(events)
events = fill_trigger_patch(events)
events = fill_trigger_input_7(events)
for event in events:
tel = event.r0.tels_with_data[0]
assert np.all(np.isfinite(event.r0.tel[tel].trigger_input_7))
def test_compare_trigger_input_7():
events_digi = event_stream([example_file_path], disable_bar=True)
events_digi = fill_trigger_input_7(events_digi)
events_comp = event_stream([example_file_path], disable_bar=True)
events_comp = fill_digicam_baseline(events_comp)
events_comp = fill_trigger_patch(events_comp)
events_comp = fill_trigger_input_7(events_comp)
for event_digi, event_comp in zip(events_digi, events_comp):
tel = event_digi.r0.tels_with_data[0]
ti7_digi = event_digi.r0.tel[tel].trigger_input_7
ti7_comp = event_comp.r0.tel[tel].trigger_input_7
abs_diff = np.abs(ti7_digi - ti7_comp)
sum_ti7 = ti7_digi + ti7_comp
assert np.mean(abs_diff) < 3
assert np.nanmean(abs_diff/sum_ti7) < .05
if __name__ == '__main__':
test_compare_trigger_input_7()
|
calispac/digicampipe
|
digicampipe/tests/test_trigger.py
|
Python
|
gpl-3.0
| 1,846
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hassourceofbase import HasSourceOfBase
#-------------------------------------------------------------------------
#
# HasSourceOf
#
#-------------------------------------------------------------------------
class HasSourceOf(HasSourceOfBase):
"""Rule that checks family that have a particular source."""
labels = [ _('Source ID:') ]
name = _('Families with the <source>')
category = _('Citation/source filters')
description = _('Matches families who have a particular source')
|
Forage/Gramps
|
gramps/gen/filters/rules/family/_hassourceof.py
|
Python
|
gpl-2.0
| 1,812
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
AIM scripts library:
"""
import importlib
import os
# Define all the submodules we have
rootpath = os.path.dirname(__file__)
print("Reading pages from %s" % rootpath)
# Import each submodule into a hash called 'handlers'
handlers = {}
def loadPage(path):
for el in os.listdir(path):
filepath = os.path.join(path, el)
if el.find("__") == -1:
if os.path.isdir(filepath):
loadPage(filepath)
else:
p = filepath.replace(rootpath, "")[1:].replace('/', '.')[:-3]
xp = p.replace('.', '/')
print("Loading endpoint pages.%s as %s" % (p, xp))
handlers[xp] = importlib.import_module("pages.%s" % p)
loadPage(rootpath)
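# Example of the resulting mapping (hypothetical layout, for illustration only):
# a file pages/api/stats.py under this directory would be imported as
# pages.api.stats and exposed here as handlers['api/stats'].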
|
sebbASF/infrastructure-puppet
|
modules/aim_server/files/pages/__init__.py
|
Python
|
apache-2.0
| 1,524
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import six
from neutronclient.common import exceptions
from neutronclient.v2_0 import client as neutronclient
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.neutron import firewall
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests import utils
firewall_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test neutron firewall resource",
"Parameters" : {},
"Resources" : {
"firewall": {
"Type": "OS::Neutron::Firewall",
"Properties": {
"name": "test-firewall",
"firewall_policy_id": "policy-id",
"admin_state_up": True,
}
}
}
}
'''
firewall_policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test neutron firewall policy resource",
"Parameters" : {},
"Resources" : {
"firewall_policy": {
"Type": "OS::Neutron::FirewallPolicy",
"Properties": {
"name": "test-firewall-policy",
"shared": True,
"audited": True,
"firewall_rules": ['rule-id-1', 'rule-id-2'],
}
}
}
}
'''
firewall_rule_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test neutron firewall rule resource",
"Parameters" : {},
"Resources" : {
"firewall_rule": {
"Type": "OS::Neutron::FirewallRule",
"Properties": {
"name": "test-firewall-rule",
"shared": True,
"protocol": "tcp",
"action": "allow",
"enabled": True,
"ip_version": "4",
}
}
}
}
'''
class FirewallTest(HeatTestCase):
def setUp(self):
super(FirewallTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_firewall')
self.m.StubOutWithMock(neutronclient.Client, 'delete_firewall')
self.m.StubOutWithMock(neutronclient.Client, 'show_firewall')
self.m.StubOutWithMock(neutronclient.Client, 'update_firewall')
self.stub_keystoneclient()
def create_firewall(self):
neutronclient.Client.create_firewall({
'firewall': {
'name': 'test-firewall', 'admin_state_up': True,
'firewall_policy_id': 'policy-id'}}
).AndReturn({'firewall': {'id': '5678'}})
snippet = template_format.parse(firewall_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
return firewall.Firewall(
'firewall', resource_defns['firewall'], stack)
def test_create(self):
rsrc = self.create_firewall()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_failed(self):
neutronclient.Client.create_firewall({
'firewall': {
'name': 'test-firewall', 'admin_state_up': True,
'firewall_policy_id': 'policy-id'}}
).AndRaise(exceptions.NeutronClientException())
self.m.ReplayAll()
snippet = template_format.parse(firewall_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
rsrc = firewall.Firewall(
'firewall', resource_defns['firewall'], stack)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
neutronclient.Client.delete_firewall('5678')
neutronclient.Client.show_firewall('5678').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_firewall()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_already_gone(self):
neutronclient.Client.delete_firewall('5678').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_firewall()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_failed(self):
neutronclient.Client.delete_firewall('5678').AndRaise(
exceptions.NeutronClientException(status_code=400))
rsrc = self.create_firewall()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_firewall()
neutronclient.Client.show_firewall('5678').MultipleTimes(
).AndReturn(
{'firewall': {'admin_state_up': True,
'firewall_policy_id': 'policy-id'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertIs(True, rsrc.FnGetAtt('admin_state_up'))
self.assertEqual('policy-id', rsrc.FnGetAtt('firewall_policy_id'))
self.m.VerifyAll()
def test_attribute_failed(self):
rsrc = self.create_firewall()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'subnet_id')
self.assertEqual(
'The Referenced Attribute (firewall subnet_id) is '
'incorrect.', six.text_type(error))
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_firewall()
neutronclient.Client.update_firewall(
'5678', {'firewall': {'admin_state_up': False}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['admin_state_up'] = False
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
class FirewallPolicyTest(HeatTestCase):
def setUp(self):
super(FirewallPolicyTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_firewall_policy')
self.m.StubOutWithMock(neutronclient.Client, 'delete_firewall_policy')
self.m.StubOutWithMock(neutronclient.Client, 'show_firewall_policy')
self.m.StubOutWithMock(neutronclient.Client, 'update_firewall_policy')
self.stub_keystoneclient()
def create_firewall_policy(self):
neutronclient.Client.create_firewall_policy({
'firewall_policy': {
'name': 'test-firewall-policy', 'shared': True,
'audited': True, 'firewall_rules': ['rule-id-1', 'rule-id-2']}}
).AndReturn({'firewall_policy': {'id': '5678'}})
snippet = template_format.parse(firewall_policy_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
return firewall.FirewallPolicy(
'firewall_policy', resource_defns['firewall_policy'], stack)
def test_create(self):
rsrc = self.create_firewall_policy()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_failed(self):
neutronclient.Client.create_firewall_policy({
'firewall_policy': {
'name': 'test-firewall-policy', 'shared': True,
'audited': True, 'firewall_rules': ['rule-id-1', 'rule-id-2']}}
).AndRaise(exceptions.NeutronClientException())
self.m.ReplayAll()
snippet = template_format.parse(firewall_policy_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
rsrc = firewall.FirewallPolicy(
'firewall_policy', resource_defns['firewall_policy'], stack)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
neutronclient.Client.delete_firewall_policy('5678')
neutronclient.Client.show_firewall_policy('5678').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_firewall_policy()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_already_gone(self):
neutronclient.Client.delete_firewall_policy('5678').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_firewall_policy()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_failed(self):
neutronclient.Client.delete_firewall_policy('5678').AndRaise(
exceptions.NeutronClientException(status_code=400))
rsrc = self.create_firewall_policy()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_firewall_policy()
neutronclient.Client.show_firewall_policy('5678').MultipleTimes(
).AndReturn(
{'firewall_policy': {'audited': True, 'shared': True}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertIs(True, rsrc.FnGetAtt('audited'))
self.assertIs(True, rsrc.FnGetAtt('shared'))
self.m.VerifyAll()
def test_attribute_failed(self):
rsrc = self.create_firewall_policy()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'subnet_id')
self.assertEqual(
'The Referenced Attribute (firewall_policy subnet_id) is '
'incorrect.', six.text_type(error))
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_firewall_policy()
neutronclient.Client.update_firewall_policy(
'5678', {'firewall_policy': {'firewall_rules': ['3', '4']}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['firewall_rules'] = ['3', '4']
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
class FirewallRuleTest(HeatTestCase):
def setUp(self):
super(FirewallRuleTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_firewall_rule')
self.m.StubOutWithMock(neutronclient.Client, 'delete_firewall_rule')
self.m.StubOutWithMock(neutronclient.Client, 'show_firewall_rule')
self.m.StubOutWithMock(neutronclient.Client, 'update_firewall_rule')
self.stub_keystoneclient()
def create_firewall_rule(self):
neutronclient.Client.create_firewall_rule({
'firewall_rule': {
'name': 'test-firewall-rule', 'shared': True,
'action': 'allow', 'protocol': 'tcp', 'enabled': True,
'ip_version': "4"}}
).AndReturn({'firewall_rule': {'id': '5678'}})
snippet = template_format.parse(firewall_rule_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
return firewall.FirewallRule(
'firewall_rule', resource_defns['firewall_rule'], stack)
def test_create(self):
rsrc = self.create_firewall_rule()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_failed(self):
neutronclient.Client.create_firewall_rule({
'firewall_rule': {
'name': 'test-firewall-rule', 'shared': True,
'action': 'allow', 'protocol': 'tcp', 'enabled': True,
'ip_version': "4"}}
).AndRaise(exceptions.NeutronClientException())
self.m.ReplayAll()
snippet = template_format.parse(firewall_rule_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
rsrc = firewall.FirewallRule(
'firewall_rule', resource_defns['firewall_rule'], stack)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
neutronclient.Client.delete_firewall_rule('5678')
neutronclient.Client.show_firewall_rule('5678').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_firewall_rule()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_already_gone(self):
neutronclient.Client.delete_firewall_rule('5678').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_firewall_rule()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_failed(self):
neutronclient.Client.delete_firewall_rule('5678').AndRaise(
exceptions.NeutronClientException(status_code=400))
rsrc = self.create_firewall_rule()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_firewall_rule()
neutronclient.Client.show_firewall_rule('5678').MultipleTimes(
).AndReturn(
{'firewall_rule': {'protocol': 'tcp', 'shared': True}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual('tcp', rsrc.FnGetAtt('protocol'))
self.assertIs(True, rsrc.FnGetAtt('shared'))
self.m.VerifyAll()
def test_attribute_failed(self):
rsrc = self.create_firewall_rule()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'subnet_id')
self.assertEqual(
'The Referenced Attribute (firewall_rule subnet_id) is '
'incorrect.', six.text_type(error))
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_firewall_rule()
neutronclient.Client.update_firewall_rule(
'5678', {'firewall_rule': {'protocol': 'icmp'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['protocol'] = 'icmp'
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
|
redhat-openstack/heat
|
heat/tests/test_neutron_firewall.py
|
Python
|
apache-2.0
| 17,615
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
from flexget.plugins.modify.torrent import TorrentFilename
from flexget.utils import bittorrent
log = logging.getLogger('torrent_scrub')
class TorrentScrub(object):
""" Scrubs torrents from unwanted keys.
Example:
tasks:
rutorrent-fast-resume-infected-task:
torrent_scrub: resume
"""
# Scrub at high level, but BELOW "torrent"
SCRUB_PRIO = TorrentFilename.TORRENT_PRIO - 10
# Scrubbing modes
SCRUB_MODES = ("off", "on", "all", "resume", "rtorrent",)
# Keys of rTorrent / ruTorrent session data
RT_KEYS = ("libtorrent_resume", "log_callback", "err_callback", "rtorrent")
schema = {
'oneOf': [
{'type': 'boolean'},
{'type': 'string', 'enum': list(SCRUB_MODES)},
{'type': 'array', 'items': {'type': 'string'}} # list of keys to scrub
]
}
@plugin.priority(SCRUB_PRIO)
def on_task_modify(self, task, config):
""" Scrub items that are torrents, if they're affected.
"""
if isinstance(config, list):
mode = "fields"
else:
mode = str(config).lower()
if mode in ("off", "false"):
log.debug("Plugin configured, but disabled")
return
for entry in task.entries:
# Skip non-torrents
if "torrent" not in entry:
continue
# Scrub keys as configured
modified = set()
metainfo = entry["torrent"].content
infohash = entry["torrent"].info_hash
if mode in ("on", "all", "true"):
modified = bittorrent.clean_meta(metainfo, including_info=(mode == "all"), logger=log.debug)
elif mode in ("resume", "rtorrent"):
if mode == "resume":
self.RT_KEYS = self.RT_KEYS[:1]
for key in self.RT_KEYS:
if key in metainfo:
log.debug("Removing key '%s'..." % (key,))
del metainfo[key]
modified.add(key)
elif mode == "fields":
# Scrub all configured fields
for key in config:
fieldname = key # store for logging
key = bittorrent.Torrent.KEY_TYPE(key)
field = metainfo
while field and '.' in key:
name, key = key.split('.', 1)
try:
field = field[name]
except KeyError:
# Key not found in this entry
field = None
log.trace((key, field))
if field and key in field:
log.debug("Removing key '%s'..." % (fieldname,))
del field[key]
modified.add(fieldname)
else:
raise ValueError("INTERNAL ERROR: Unknown mode %r" % mode)
# Commit any changes back into entry
if modified:
entry["torrent"].content = metainfo
entry["torrent"].modified = True
log.info((("Key %s was" if len(modified) == 1 else "Keys %s were") +
" scrubbed from torrent '%s'!") % (", ".join(sorted(modified)), entry['title']))
new_infohash = entry["torrent"].info_hash
if infohash != new_infohash:
log.warning("Info hash changed from #%s to #%s in '%s'" %
(infohash, new_infohash, entry['filename']))
@event('plugin.register')
def register_plugin():
plugin.register(TorrentScrub, groups=["torrent"], api_ver=2)
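# Per the schema above, `torrent_scrub` also accepts a boolean, one of the
# SCRUB_MODES strings, or a list of metainfo keys to remove, e.g.
# ['comment', 'info.source'] (hypothetical keys), which is handled by the
# "fields" mode in on_task_modify().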
|
drwyrm/Flexget
|
flexget/plugins/modify/torrent_scrub.py
|
Python
|
mit
| 4,010
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergl"
_path_str = "scattergl.line"
_valid_props = {"color", "dash", "shape", "width"}
# color
# -----
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dash
# ----
@property
def dash(self):
"""
Sets the style of the lines.
The 'dash' property is an enumeration that may be specified as:
- One of the following enumeration values:
['solid', 'dot', 'dash', 'longdash', 'dashdot',
'longdashdot']
Returns
-------
Any
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
# shape
# -----
@property
def shape(self):
"""
Determines the line shape. The values correspond to step-wise
line shapes.
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['linear', 'hv', 'vh', 'hvh', 'vhv']
Returns
-------
Any
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the style of the lines.
shape
Determines the line shape. The values correspond to
step-wise line shapes.
width
Sets the line width (in px).
"""
def __init__(
self, arg=None, color=None, dash=None, shape=None, width=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergl.Line`
color
Sets the line color.
dash
Sets the style of the lines.
shape
Determines the line shape. The values correspond to
step-wise line shapes.
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergl.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("dash", None)
_v = dash if dash is not None else _v
if _v is not None:
self["dash"] = _v
_v = arg.pop("shape", None)
_v = shape if shape is not None else _v
if _v is not None:
self["shape"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
plotly/python-api
|
packages/python/plotly/plotly/graph_objs/scattergl/_line.py
|
Python
|
mit
| 7,223
|
#!/usr/bin/python
import csv
import datetime
class Portfolio:
def __init__(self):
self.buy_trns = [] # List of all buy trans(date,stock,trn_type, qty, price)
self.sell_trns = [] # List of all sell trans(date,stock,trn_type, qty, price)
'''
    Function to read the transaction file and create lists of tuples
containing the buys and sells separately. Returns data in the format
(transaction_date,stock,transaction_type,quantity); returns 0 in case
of error, 1 in case of successful read
'''
def read_transactions(self, transaction_file = None):
try:
f = open(transaction_file, 'rb')
except IOError:
print 'Error: Could not open file'
return 0
csv_read = csv.reader(f)
len = 1
for row in csv_read:
if len != 1: #avoid reading header columns
date_string = row[0].split('-')
trn_date = datetime.date(int(date_string[0]),
int(date_string[1]),int(date_string[2]))
if row[2] == 'B':
self.buy_trns.append((trn_date,row[1],row[2],row[3], row[4]))
elif row[2] == 'S':
self.sell_trns.append((trn_date,row[1],row[2],row[3], row[4]))
len = len + 1
return 1
'''
Function to calculate the capital gains and generate data in tabular form
'''
def get_capgain(self, stock_code):
# do a sort of both buy and sell data before taking up this
# separate code required here
        # return False only if no sale matches the given stock code
        matched = False
        for sale in self.sell_trns:
            if sale[1] == stock_code:
                matched = True
                sale_date = sale[0]
                sale_quantity = sale[3]
                sale_price = sale[4]
                print sale_date, sale_quantity, sale_price
        return matched
def display_buys(self):
for row in self.buy_trns:
print row
def display_sells(self):
for row in self.sell_trns:
print row
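# A minimal usage sketch (assumptions: Python 2, plus a CSV file named
# 'transactions.csv' with a header row and columns in the order
# date,stock,type,quantity,price that read_transactions() expects).
if __name__ == '__main__':
    portfolio = Portfolio()
    if portfolio.read_transactions('transactions.csv'):
        portfolio.display_buys()
        portfolio.display_sells()
        portfolio.get_capgain('ACME')  # 'ACME' is a hypothetical stock code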
|
pradeepbp/stocker2
|
capgain.py
|
Python
|
gpl-3.0
| 2,113
|
# -*- coding: utf-8 -*-
'''
salt.utils.master
-----------------
Utilities that can only be used on a salt master.
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
import multiprocessing
import signal
import tempfile
from threading import Thread, Event
# Import salt libs
import salt.log
import salt.client
import salt.pillar
import salt.utils
import salt.utils.atomicfile
import salt.utils.minions
import salt.payload
from salt.exceptions import SaltException
import salt.config
from salt.utils.cache import CacheCli as cache_cli
# Import third party libs
import salt.ext.six as six
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
log = logging.getLogger(__name__)
class MasterPillarUtil(object):
'''
Helper utility for easy access to targeted minion grain and
pillar data, either from cached data on the master or retrieved
on demand, or (by default) both.
The minion pillar data returned in get_minion_pillar() is
compiled directly from salt.pillar.Pillar on the master to
avoid any possible 'pillar poisoning' from a compromised or
untrusted minion.
** However, the minion grains are still possibly entirely
supplied by the minion. **
Example use case:
        For runner modules that need access to minion pillar data,
MasterPillarUtil.get_minion_pillar should be used instead
of getting the pillar data by executing the "pillar" module
on the minions:
# my_runner.py
tgt = 'web*'
pillar_util = salt.utils.master.MasterPillarUtil(tgt, expr_form='glob', opts=__opts__)
pillar_data = pillar_util.get_minion_pillar()
'''
def __init__(self,
tgt='',
expr_form='glob',
saltenv=None,
use_cached_grains=True,
use_cached_pillar=True,
grains_fallback=True,
pillar_fallback=True,
opts=None,
env=None):
if env is not None:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
log.debug('New instance of {0} created.'.format(
self.__class__.__name__))
if opts is None:
log.error('{0}: Missing master opts init arg.'.format(
self.__class__.__name__))
raise SaltException('{0}: Missing master opts init arg.'.format(
self.__class__.__name__))
else:
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.tgt = tgt
self.expr_form = expr_form
self.saltenv = saltenv
self.use_cached_grains = use_cached_grains
self.use_cached_pillar = use_cached_pillar
self.grains_fallback = grains_fallback
self.pillar_fallback = pillar_fallback
log.debug(
'Init settings: tgt: {0!r}, expr_form: {1!r}, saltenv: {2!r}, '
'use_cached_grains: {3}, use_cached_pillar: {4}, '
'grains_fallback: {5}, pillar_fallback: {6}'.format(
tgt, expr_form, saltenv, use_cached_grains, use_cached_pillar,
grains_fallback, pillar_fallback
)
)
def _get_cached_mine_data(self, *minion_ids):
# Return one dict with the cached mine data of the targeted minions
mine_data = dict([(minion_id, {}) for minion_id in minion_ids])
if (not self.opts.get('minion_data_cache', False)
and not self.opts.get('enforce_mine_cache', False)):
            log.debug('Skipping cached mine data because minion_data_cache '
                      'and enforce_mine_cache are both disabled.')
return mine_data
mdir = os.path.join(self.opts['cachedir'], 'minions')
try:
for minion_id in minion_ids:
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
path = os.path.join(mdir, minion_id, 'mine.p')
if os.path.isfile(path):
with salt.utils.fopen(path, 'rb') as fp_:
mdata = self.serial.loads(fp_.read())
if isinstance(mdata, dict):
mine_data[minion_id] = mdata
except (OSError, IOError):
return mine_data
return mine_data
def _get_cached_minion_data(self, *minion_ids):
# Return two separate dicts of cached grains and pillar data of the
# minions
grains = dict([(minion_id, {}) for minion_id in minion_ids])
pillars = grains.copy()
if not self.opts.get('minion_data_cache', False):
log.debug('Skipping cached data because minion_data_cache is not '
'enabled.')
return grains, pillars
mdir = os.path.join(self.opts['cachedir'], 'minions')
try:
for minion_id in minion_ids:
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
path = os.path.join(mdir, minion_id, 'data.p')
if os.path.isfile(path):
with salt.utils.fopen(path, 'rb') as fp_:
mdata = self.serial.loads(fp_.read())
if mdata.get('grains', False):
grains[minion_id] = mdata['grains']
if mdata.get('pillar', False):
pillars[minion_id] = mdata['pillar']
except (OSError, IOError):
return grains, pillars
return grains, pillars
def _get_live_minion_grains(self, minion_ids):
# Returns a dict of grains fetched directly from the minions
log.debug('Getting live grains for minions: "{0}"'.format(minion_ids))
client = salt.client.get_local_client(self.opts['conf_file'])
ret = client.cmd(
','.join(minion_ids),
'grains.items',
timeout=self.opts['timeout'],
expr_form='list')
return ret
def _get_live_minion_pillar(self, minion_id=None, minion_grains=None):
# Returns a dict of pillar data for one minion
if minion_id is None:
return {}
if not minion_grains:
log.warn(
'Cannot get pillar data for {0}: no grains supplied.'.format(
minion_id
)
)
return {}
log.debug('Getting live pillar for {0}'.format(minion_id))
pillar = salt.pillar.Pillar(
self.opts,
minion_grains,
minion_id,
self.saltenv,
self.opts['ext_pillar'])
log.debug('Compiling pillar for {0}'.format(minion_id))
ret = pillar.compile_pillar()
return ret
def _get_minion_grains(self, *minion_ids, **kwargs):
# Get the minion grains either from cache or from a direct query
# on the minion. By default try to use cached grains first, then
# fall back to querying the minion directly.
ret = {}
cached_grains = kwargs.get('cached_grains', {})
cret = {}
lret = {}
if self.use_cached_grains:
cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_grains) if mcache])
missed_minions = [minion_id for minion_id in minion_ids if minion_id not in cret]
log.debug('Missed cached minion grains for: {0}'.format(missed_minions))
if self.grains_fallback:
lret = self._get_live_minion_grains(missed_minions)
ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
else:
lret = self._get_live_minion_grains(minion_ids)
missed_minions = [minion_id for minion_id in minion_ids if minion_id not in lret]
log.debug('Missed live minion grains for: {0}'.format(missed_minions))
if self.grains_fallback:
cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_grains) if mcache])
ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
return ret
def _get_minion_pillar(self, *minion_ids, **kwargs):
# Get the minion pillar either from cache or from a direct query
        # on the minion. By default try to use the cached pillar first, then
# fall back to rendering pillar on demand with the supplied grains.
ret = {}
grains = kwargs.get('grains', {})
cached_pillar = kwargs.get('cached_pillar', {})
cret = {}
lret = {}
if self.use_cached_pillar:
cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_pillar) if mcache])
missed_minions = [minion_id for minion_id in minion_ids if minion_id not in cret]
log.debug('Missed cached minion pillars for: {0}'.format(missed_minions))
if self.pillar_fallback:
lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in missed_minions])
ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
else:
lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in minion_ids])
missed_minions = [minion_id for minion_id in minion_ids if minion_id not in lret]
log.debug('Missed live minion pillars for: {0}'.format(missed_minions))
if self.pillar_fallback:
cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_pillar) if mcache])
ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
return ret
def _tgt_to_list(self):
# Return a list of minion ids that match the target and expr_form
minion_ids = []
ckminions = salt.utils.minions.CkMinions(self.opts)
minion_ids = ckminions.check_minions(self.tgt, self.expr_form)
if len(minion_ids) == 0:
log.debug('No minions matched for tgt="{0}" and expr_form="{1}"'.format(self.tgt, self.expr_form))
return {}
log.debug('Matching minions for tgt="{0}" and expr_form="{1}": {2}'.format(self.tgt, self.expr_form, minion_ids))
return minion_ids
def get_minion_pillar(self):
'''
Get pillar data for the targeted minions, either by fetching the
cached minion data on the master, or by compiling the minion's
pillar data on the master.
        For runner modules that need access to minion pillar data, this
function should be used instead of getting the pillar data by
executing the pillar module on the minions.
By default, this function tries hard to get the pillar data:
- Try to get the cached minion grains and pillar if the
master has minion_data_cache: True
- If the pillar data for the minion is cached, use it.
- If there is no cached grains/pillar data for a minion,
then try to get the minion grains directly from the minion.
- Use the minion grains to compile the pillar directly from the
master using salt.pillar.Pillar
'''
minion_pillars = {}
minion_grains = {}
minion_ids = self._tgt_to_list()
if any(arg for arg in [self.use_cached_grains, self.use_cached_pillar, self.grains_fallback, self.pillar_fallback]):
log.debug('Getting cached minion data')
cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
else:
cached_minion_grains = {}
cached_minion_pillars = {}
log.debug('Getting minion grain data for: {0}'.format(minion_ids))
minion_grains = self._get_minion_grains(
*minion_ids,
cached_grains=cached_minion_grains)
log.debug('Getting minion pillar data for: {0}'.format(minion_ids))
minion_pillars = self._get_minion_pillar(
*minion_ids,
grains=minion_grains,
cached_pillar=cached_minion_pillars)
return minion_pillars
def get_minion_grains(self):
'''
Get grains data for the targeted minions, either by fetching the
cached minion data on the master, or by fetching the grains
directly on the minion.
By default, this function tries hard to get the pillar data:
- Try to get the cached minion grains if the master
has minion_data_cache: True
- If the grains data for the minion is cached, use it.
- If there is no cached grains data for a minion,
then try to get the minion grains directly from the minion.
'''
minion_grains = {}
minion_ids = self._tgt_to_list()
if any(arg for arg in [self.use_cached_grains, self.grains_fallback]):
log.debug('Getting cached minion data.')
cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
else:
cached_minion_grains = {}
log.debug('Getting minion grain data for: {0}'.format(minion_ids))
minion_grains = self._get_minion_grains(
*minion_ids,
cached_grains=cached_minion_grains)
return minion_grains
def get_cached_mine_data(self):
'''
Get cached mine data for the targeted minions.
'''
mine_data = {}
minion_ids = self._tgt_to_list()
log.debug('Getting cached mine data for: {0}'.format(minion_ids))
mine_data = self._get_cached_mine_data(*minion_ids)
return mine_data
def clear_cached_minion_data(self,
clear_pillar=False,
clear_grains=False,
clear_mine=False,
clear_mine_func=None):
'''
Clear the cached data/files for the targeted minions.
'''
clear_what = []
if clear_pillar:
clear_what.append('pillar')
if clear_grains:
clear_what.append('grains')
if clear_mine:
clear_what.append('mine')
if clear_mine_func is not None:
clear_what.append('mine_func: {0!r}'.format(clear_mine_func))
if not len(clear_what):
log.debug('No cached data types specified for clearing.')
return False
minion_ids = self._tgt_to_list()
log.debug('Clearing cached {0} data for: {1}'.format(
', '.join(clear_what),
minion_ids))
if clear_pillar == clear_grains:
# clear_pillar and clear_grains are both True or both False.
# This means we don't deal with pillar/grains caches at all.
grains = {}
pillars = {}
else:
# Unless both clear_pillar and clear_grains are True, we need
# to read in the pillar/grains data since they are both stored
# in the same file, 'data.p'
grains, pillars = self._get_cached_minion_data(*minion_ids)
try:
for minion_id in minion_ids:
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
cdir = os.path.join(self.opts['cachedir'], 'minions', minion_id)
if not os.path.isdir(cdir):
# Cache dir for this minion does not exist. Nothing to do.
continue
data_file = os.path.join(cdir, 'data.p')
mine_file = os.path.join(cdir, 'mine.p')
minion_pillar = pillars.pop(minion_id, False)
minion_grains = grains.pop(minion_id, False)
if ((clear_pillar and clear_grains) or
(clear_pillar and not minion_grains) or
(clear_grains and not minion_pillar)):
# Not saving pillar or grains, so just delete the cache file
os.remove(os.path.join(data_file))
elif clear_pillar and minion_grains:
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
os.close(tmpfh)
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
fp_.write(self.serial.dumps({'grains': minion_grains}))
salt.utils.atomicfile.atomic_rename(tmpfname, data_file)
elif clear_grains and minion_pillar:
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
os.close(tmpfh)
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
fp_.write(self.serial.dumps({'pillar': minion_pillar}))
salt.utils.atomicfile.atomic_rename(tmpfname, data_file)
if clear_mine:
# Delete the whole mine file
os.remove(os.path.join(mine_file))
elif clear_mine_func is not None:
# Delete a specific function from the mine file
with salt.utils.fopen(mine_file, 'rb') as fp_:
mine_data = self.serial.loads(fp_.read())
if isinstance(mine_data, dict):
if mine_data.pop(clear_mine_func, False):
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
os.close(tmpfh)
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
fp_.write(self.serial.dumps(mine_data))
salt.utils.atomicfile.atomic_rename(
tmpfname,
mine_file)
except (OSError, IOError):
return True
return True
class CacheTimer(Thread):
'''
    A basic timer class that fires timer-events every second.
This is used for cleanup by the ConnectedCache()
'''
def __init__(self, opts, event):
Thread.__init__(self)
self.opts = opts
self.stopped = event
self.daemon = True
self.serial = salt.payload.Serial(opts.get('serial', ''))
self.timer_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')
def run(self):
'''
main loop that fires the event every second
'''
context = zmq.Context()
# the socket for outgoing timer events
socket = context.socket(zmq.PUB)
socket.setsockopt(zmq.LINGER, 100)
socket.bind('ipc://' + self.timer_sock)
count = 0
log.debug('ConCache-Timer started')
while not self.stopped.wait(1):
socket.send(self.serial.dumps(count))
count += 1
if count >= 60:
count = 0
class CacheWorker(multiprocessing.Process):
'''
Worker for ConnectedCache which runs in its
own process to prevent blocking of ConnectedCache
main-loop when refreshing minion-list
'''
def __init__(self, opts):
'''
Sets up the zmq-connection to the ConCache
'''
super(CacheWorker, self).__init__()
self.opts = opts
def run(self):
'''
Gather currently connected minions and update the cache
'''
new_mins = list(salt.utils.minions.CkMinions(self.opts).connected_ids())
cc = cache_cli(self.opts)
cc.get_cached()
cc.put_cache([new_mins])
log.debug('ConCache CacheWorker update finished')
class ConnectedCache(multiprocessing.Process):
'''
Provides access to all minions ids that the master has
successfully authenticated. The cache is cleaned up regularly by
comparing it to the IPs that have open connections to
the master publisher port.
'''
def __init__(self, opts):
'''
starts the timer and inits the cache itself
'''
super(ConnectedCache, self).__init__()
log.debug('ConCache initializing...')
# the possible settings for the cache
self.opts = opts
# the actual cached minion ids
self.minions = []
self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc')
self.update_sock = os.path.join(self.opts['sock_dir'], 'con_upd.ipc')
self.upd_t_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')
self.cleanup()
# the timer provides 1-second intervals to the loop in run()
# to make the cache system most responsive, we do not use a loop-
# delay which makes it hard to get 1-second intervals without a timer
self.timer_stop = Event()
self.timer = CacheTimer(self.opts, self.timer_stop)
self.timer.start()
self.running = True
def signal_handler(self, sig, frame):
'''
handle signals and shutdown
'''
self.stop()
def cleanup(self):
'''
remove sockets on shutdown
'''
log.debug('ConCache cleaning up')
if os.path.exists(self.cache_sock):
os.remove(self.cache_sock)
if os.path.exists(self.update_sock):
os.remove(self.update_sock)
if os.path.exists(self.upd_t_sock):
os.remove(self.upd_t_sock)
def secure(self):
'''
secure the sockets for root-only access
'''
log.debug('ConCache securing sockets')
if os.path.exists(self.cache_sock):
os.chmod(self.cache_sock, 0o600)
if os.path.exists(self.update_sock):
os.chmod(self.update_sock, 0o600)
if os.path.exists(self.upd_t_sock):
os.chmod(self.upd_t_sock, 0o600)
def stop(self):
'''
shutdown cache process
'''
# avoid getting called twice
self.cleanup()
if self.running:
self.running = False
self.timer_stop.set()
self.timer.join()
def run(self):
'''
Main loop of the ConCache, starts updates in intervals and
answers requests from the MWorkers
'''
context = zmq.Context()
# the socket for incoming cache requests
creq_in = context.socket(zmq.REP)
creq_in.setsockopt(zmq.LINGER, 100)
creq_in.bind('ipc://' + self.cache_sock)
# the socket for incoming cache-updates from workers
cupd_in = context.socket(zmq.SUB)
cupd_in.setsockopt(zmq.SUBSCRIBE, '')
cupd_in.setsockopt(zmq.LINGER, 100)
cupd_in.bind('ipc://' + self.update_sock)
# the socket for the timer-event
timer_in = context.socket(zmq.SUB)
timer_in.setsockopt(zmq.SUBSCRIBE, '')
timer_in.setsockopt(zmq.LINGER, 100)
timer_in.connect('ipc://' + self.upd_t_sock)
poller = zmq.Poller()
poller.register(creq_in, zmq.POLLIN)
poller.register(cupd_in, zmq.POLLIN)
poller.register(timer_in, zmq.POLLIN)
# our serializer
serial = salt.payload.Serial(self.opts.get('serial', ''))
# register a signal handler
signal.signal(signal.SIGINT, self.signal_handler)
# secure the sockets from the world
self.secure()
log.info('ConCache started')
while self.running:
# we check for new events with the poller
try:
socks = dict(poller.poll(1))
except KeyboardInterrupt:
self.stop()
except zmq.ZMQError as zmq_err:
log.error('ConCache ZeroMQ-Error occurred')
log.exception(zmq_err)
self.stop()
# check for next cache-request
if socks.get(creq_in) == zmq.POLLIN:
msg = serial.loads(creq_in.recv())
log.debug('ConCache Received request: {0}'.format(msg))
                # requests for the minion list are sent as strings
if isinstance(msg, str):
if msg == 'minions':
# Send reply back to client
reply = serial.dumps(self.minions)
creq_in.send(reply)
# check for next cache-update from workers
if socks.get(cupd_in) == zmq.POLLIN:
new_c_data = serial.loads(cupd_in.recv())
# tell the worker to exit
#cupd_in.send(serial.dumps('ACK'))
# check if the returned data is usable
if not isinstance(new_c_data, list):
log.error('ConCache Worker returned unusable result')
del new_c_data
continue
# the cache will receive lists of minions
                # 1. if the list only has 1 item, it's from an MWorker, so we append it
                # 2. if the list contains another list, it's from a CacheWorker and
# the currently cached minions are replaced with that list
# 3. anything else is considered malformed
try:
if len(new_c_data) == 0:
log.debug('ConCache Got empty update from worker')
continue
data = new_c_data[0]
if isinstance(data, str):
if data not in self.minions:
log.debug('ConCache Adding minion {0} to cache'.format(new_c_data[0]))
self.minions.append(data)
elif isinstance(data, list):
log.debug('ConCache Replacing minion list from worker')
self.minions = data
except IndexError:
log.debug('ConCache Got malformed result dict from worker')
del new_c_data
log.info('ConCache {0} entries in cache'.format(len(self.minions)))
# check for next timer-event to start new jobs
if socks.get(timer_in) == zmq.POLLIN:
sec_event = serial.loads(timer_in.recv())
# update the list every 30 seconds
if int(sec_event % 30) == 0:
cw = CacheWorker(self.opts)
cw.start()
self.stop()
creq_in.close()
cupd_in.close()
timer_in.close()
context.term()
log.debug('ConCache Shutting down')
def ping_all_connected_minions(opts):
client = salt.client.LocalClient()
ckminions = salt.utils.minions.CkMinions(opts)
client.cmd(list(ckminions.connected_ids()), 'test.ping', expr_form='list')
# test code for the ConCache class
if __name__ == '__main__':
opts = salt.config.master_config('/etc/salt/master')
conc = ConnectedCache(opts)
conc.start()
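# A minimal runner-style sketch (assumption: run on a salt master where the
# master opts dict is available, e.g. __opts__ inside a custom runner module),
# using the MasterPillarUtil helper defined above.
def example_minion_data(opts, tgt='web*'):
    util = MasterPillarUtil(tgt, expr_form='glob', opts=opts)
    # cached grains/pillar are preferred, with live fallback (see class docstring)
    return {
        'grains': util.get_minion_grains(),
        'pillar': util.get_minion_pillar(),
    }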
|
smallyear/linuxLearn
|
salt/salt/utils/master.py
|
Python
|
apache-2.0
| 27,724
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from exporters import Exporter
from os.path import splitext, basename
class GccArm(Exporter):
NAME = 'GccArm'
TOOLCHAIN = 'GCC_ARM'
TARGETS = [
'LPC1768',
'LPC1549',
'KL05Z',
'KL25Z',
'KL43Z',
'KL46Z',
'K64F',
'K22F',
'K20D50M',
'LPC4088',
'LPC4088_DM',
'LPC4330_M4',
'LPC11U24',
'LPC1114',
'LPC11U35_401',
'LPC11U35_501',
'LPC11U37H_401',
'LPC810',
'LPC812',
'LPC824',
'SSCI824',
'STM32F407',
'DISCO_F100RB',
'DISCO_F051R8',
'DISCO_F407VG',
'DISCO_F303VC',
'DISCO_F746NG',
'DISCO_L476VG',
'UBLOX_C027',
'ARCH_PRO',
'NRF51822',
'HRM1017',
'RBLAB_NRF51822',
'LPC2368',
'LPC2460',
'LPCCAPPUCCINO',
'ARCH_BLE',
'MTS_GAMBIT',
'ARCH_MAX',
'NUCLEO_F401RE',
'NUCLEO_F411RE',
'NUCLEO_F446RE',
'ARCH_MAX',
'DISCO_F429ZI',
'NUCLEO_F030R8',
'NUCLEO_F070RB',
'NUCLEO_F072RB',
'NUCLEO_F091RC',
'NUCLEO_F103RB',
'NUCLEO_F302R8',
'NUCLEO_F303RE',
'NUCLEO_F334R8',
'DISCO_L053C8',
'NUCLEO_L053R8',
'NUCLEO_L073RZ',
'NUCLEO_L476RG',
'DISCO_F334C8',
'MAX32600MBED',
'MAXWSNENV',
'MTS_MDOT_F405RG',
'MTS_MDOT_F411RE',
'NUCLEO_L152RE',
'NRF51_DK',
'NRF51_DONGLE',
'SEEED_TINY_BLE',
'DISCO_F401VC',
'DELTA_DFCM_NNN40',
'RZ_A1H',
'MOTE_L152RC',
'EFM32WG_STK3800',
'EFM32LG_STK3600',
'EFM32GG_STK3700',
'EFM32ZG_STK3200',
'EFM32HG_STK3400',
'NZ32SC151',
'SAMR21G18A',
'TEENSY3_1',
'SAMD21J18A',
]
DOT_IN_RELATIVE_PATH = True
def generate(self):
# "make" wants Unix paths
self.resources.win_to_unix()
to_be_compiled = []
for r_type in ['s_sources', 'c_sources', 'cpp_sources']:
r = getattr(self.resources, r_type)
if r:
for source in r:
base, ext = splitext(source)
to_be_compiled.append(base + '.o')
libraries = []
for lib in self.resources.libraries:
l, _ = splitext(basename(lib))
libraries.append(l[3:])
ctx = {
'name': self.program_name,
'to_be_compiled': to_be_compiled,
'object_files': self.resources.objects,
'include_paths': self.resources.inc_dirs,
'library_paths': self.resources.lib_dirs,
'linker_script': self.resources.linker_script,
'libraries': libraries,
'symbols': self.get_symbols(),
'cpu_flags': self.toolchain.cpu
}
self.gen_file('gcc_arm_%s.tmpl' % self.target.lower(), ctx, 'Makefile')
|
fpiot/mbed-ats
|
workspace_tools/export/gccarm.py
|
Python
|
apache-2.0
| 3,603
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Intel Corp.
#
"""
Tests to test the interface for resource_control.
"""
import unittest
from ..resource_control import ResourceControl
class TestResourceControl(unittest.TestCase):
"""class to run the UT for the resource_control interface."""
def test_all_tests(self):
"""All tests."""
interface = ResourceControl()
interface.remove_nodes_from_resource_pool("localhost")
interface.add_nodes_to_resource_pool("localhost")
interface.check_nodes_state("localhost")
interface.check_resource_manager_running()
|
intel-ctrlsys/actsys
|
actsys/control/resource/tests/test_resource_control.py
|
Python
|
apache-2.0
| 610
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListFeatures
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_ListFeatures_async]
from google.cloud import aiplatform_v1
async def sample_list_features():
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListFeaturesRequest(
parent="parent_value",
)
# Make the request
    page_result = await client.list_features(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ListFeatures_async]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_features_async.py
|
Python
|
apache-2.0
| 1,555
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import range
import hashlib
import json
import time
import unittest
from mock import patch
from hashlib import md5
from swift.common import swob, utils
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.middleware import slo
from swift.common.swob import Request, Response, HTTPException
from swift.common.utils import quote, closing_if_possible
from test.unit.common.middleware.helpers import FakeSwift
test_xml_data = '''<?xml version="1.0" encoding="UTF-8"?>
<static_large_object>
<object_segment>
<path>/cont/object</path>
<etag>etagoftheobjectsegment</etag>
<size_bytes>100</size_bytes>
</object_segment>
</static_large_object>
'''
test_json_data = json.dumps([{'path': '/cont/object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100}])
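# Minimal WSGI start_response stub for tests that don't inspect the
# status or headers it receives.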
def fake_start_response(*args, **kwargs):
pass
def md5hex(s):
return hashlib.md5(s).hexdigest()
class SloTestCase(unittest.TestCase):
def setUp(self):
self.app = FakeSwift()
self.slo = slo.filter_factory({})(self.app)
self.slo.min_segment_size = 1
self.slo.logger = self.app.logger
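    # Run a request through the given WSGI app and return (status, headers,
    # body); with expect_exception=True, also return any exception raised
    # while iterating the response body.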
def call_app(self, req, app=None, expect_exception=False):
if app is None:
app = self.app
req.headers.setdefault("User-Agent", "Mozzarella Foxfire")
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
body = ''
caught_exc = None
try:
# appease the close-checker
with closing_if_possible(body_iter):
for chunk in body_iter:
body += chunk
except Exception as exc:
if expect_exception:
caught_exc = exc
else:
raise
if expect_exception:
return status[0], headers[0], body, caught_exc
else:
return status[0], headers[0], body
def call_slo(self, req, **kwargs):
return self.call_app(req, app=self.slo, **kwargs)
class TestSloMiddleware(SloTestCase):
def setUp(self):
super(TestSloMiddleware, self).setUp()
self.app.register(
'GET', '/', swob.HTTPOk, {}, 'passed')
self.app.register(
'PUT', '/', swob.HTTPOk, {}, 'passed')
def test_handle_multipart_no_obj(self):
req = Request.blank('/')
resp_iter = self.slo(req.environ, fake_start_response)
self.assertEqual(self.app.calls, [('GET', '/')])
self.assertEqual(''.join(resp_iter), 'passed')
def test_slo_header_assigned(self):
req = Request.blank(
'/v1/a/c/o', headers={'x-static-large-object': "true"},
environ={'REQUEST_METHOD': 'PUT'})
resp = ''.join(self.slo(req.environ, fake_start_response))
self.assertTrue(
resp.startswith('X-Static-Large-Object is a reserved header'))
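    # Helper: feed an invalid manifest to parse_and_validate_input and
    # return the body of the resulting 400 response.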
def _put_bogus_slo(self, manifest_text,
manifest_path='/v1/a/c/the-manifest',
min_segment_size=1):
with self.assertRaises(HTTPException) as catcher:
slo.parse_and_validate_input(manifest_text, manifest_path,
min_segment_size)
self.assertEqual(400, catcher.exception.status_int)
return catcher.exception.body
def _put_slo(self, manifest_text, manifest_path='/v1/a/c/the-manifest',
min_segment_size=1):
return slo.parse_and_validate_input(manifest_text, manifest_path,
min_segment_size)
def test_bogus_input(self):
self.assertEqual('Manifest must be valid JSON.\n',
self._put_bogus_slo('some non json'))
self.assertEqual('Manifest must be a list.\n',
self._put_bogus_slo('{}'))
self.assertEqual('Index 0: not a JSON object\n',
self._put_bogus_slo('["zombocom"]'))
def test_bogus_input_bad_keys(self):
self.assertEqual(
"Index 0: extraneous keys \"baz\", \"foo\"\n",
self._put_bogus_slo(json.dumps(
[{'path': '/cont/object', 'etag': 'etagoftheobjectsegment',
'size_bytes': 100,
'foo': 'bar', 'baz': 'quux'}])))
def test_bogus_input_ranges(self):
self.assertEqual(
"Index 0: invalid range\n",
self._put_bogus_slo(json.dumps(
[{'path': '/cont/object', 'etag': 'blah',
'size_bytes': 100, 'range': 'non-range value'}])))
self.assertEqual(
"Index 0: multiple ranges (only one allowed)\n",
self._put_bogus_slo(json.dumps(
[{'path': '/cont/object', 'etag': 'blah',
'size_bytes': 100, 'range': '1-20,30-40'}])))
def test_bogus_input_unsatisfiable_range(self):
self.assertEqual(
"Index 0: unsatisfiable range\n",
self._put_bogus_slo(json.dumps(
[{'path': '/cont/object', 'etag': 'blah',
'size_bytes': 100, 'range': '8888-9999'}])))
# since size is optional, we have to be able to defer this check
segs = self._put_slo(json.dumps(
[{'path': '/cont/object', 'etag': 'blah',
'size_bytes': None, 'range': '8888-9999'}]))
self.assertEqual(1, len(segs))
def test_bogus_input_path(self):
self.assertEqual(
"Index 0: path does not refer to an object. Path must be of the "
"form /container/object.\n"
"Index 1: path does not refer to an object. Path must be of the "
"form /container/object.\n",
self._put_bogus_slo(json.dumps(
[{'path': '/cont', 'etag': 'etagoftheobjectsegment',
'size_bytes': 100},
{'path': '/c-trailing-slash/', 'etag': 'e',
'size_bytes': 100},
{'path': '/con/obj', 'etag': 'e',
'size_bytes': 100},
{'path': '/con/obj-trailing-slash/', 'etag': 'e',
'size_bytes': 100},
{'path': '/con/obj/with/slashes', 'etag': 'e',
'size_bytes': 100}])))
def test_bogus_input_multiple(self):
self.assertEqual(
"Index 0: invalid range\nIndex 1: not a JSON object\n",
self._put_bogus_slo(json.dumps(
[{'path': '/cont/object', 'etag': 'etagoftheobjectsegment',
'size_bytes': 100, 'range': 'non-range value'},
None])))
def test_bogus_input_size_bytes(self):
self.assertEqual(
"Index 0: invalid size_bytes\n",
self._put_bogus_slo(json.dumps(
[{'path': '/cont/object', 'etag': 'blah', 'size_bytes': "fht"},
{'path': '/cont/object', 'etag': 'blah', 'size_bytes': None},
{'path': '/cont/object', 'etag': 'blah', 'size_bytes': 100}],
)))
self.assertEqual(
"Index 0: invalid size_bytes\n",
self._put_bogus_slo(json.dumps(
[{'path': '/cont/object', 'etag': 'blah', 'size_bytes': []}],
)))
def test_bogus_input_self_referential(self):
self.assertEqual(
"Index 0: manifest must not include itself as a segment\n",
self._put_bogus_slo(json.dumps(
[{'path': '/c/the-manifest', 'etag': 'gate',
'size_bytes': 100, 'range': 'non-range value'}])))
def test_bogus_input_self_referential_non_ascii(self):
self.assertEqual(
"Index 0: manifest must not include itself as a segment\n",
self._put_bogus_slo(
json.dumps([{'path': u'/c/あ_1',
'etag': 'a', 'size_bytes': 1}]),
manifest_path=quote(u'/v1/a/c/あ_1')))
def test_bogus_input_self_referential_last_segment(self):
test_json_data = json.dumps([
{'path': '/c/seg_1', 'etag': 'a', 'size_bytes': 1},
{'path': '/c/seg_2', 'etag': 'a', 'size_bytes': 1},
{'path': '/c/seg_3', 'etag': 'a', 'size_bytes': 1},
{'path': '/c/the-manifest', 'etag': 'a', 'size_bytes': 1},
])
self.assertEqual(
"Index 3: manifest must not include itself as a segment\n",
self._put_bogus_slo(
test_json_data,
manifest_path=quote('/v1/a/c/the-manifest')))
def test_bogus_input_undersize_segment(self):
self.assertEqual(
"Index 1: too small; each segment, except the last, "
"must be at least 1000 bytes.\n"
"Index 2: too small; each segment, except the last, "
"must be at least 1000 bytes.\n",
self._put_bogus_slo(
json.dumps([
{'path': u'/c/s1', 'etag': 'a', 'size_bytes': 1000},
{'path': u'/c/s2', 'etag': 'b', 'size_bytes': 999},
{'path': u'/c/s3', 'etag': 'c', 'size_bytes': 998},
# No error for this one since size_bytes is unspecified
{'path': u'/c/s4', 'etag': 'd', 'size_bytes': None},
{'path': u'/c/s5', 'etag': 'e', 'size_bytes': 996}]),
min_segment_size=1000))
def test_valid_input(self):
data = json.dumps(
[{'path': '/cont/object', 'etag': 'etagoftheobjectsegment',
'size_bytes': 100}])
self.assertEqual(
'/cont/object',
slo.parse_and_validate_input(data, '/v1/a/cont/man', 1)[0]['path'])
data = json.dumps(
[{'path': '/cont/object', 'etag': 'etagoftheobjectsegment',
'size_bytes': 100, 'range': '0-40'}])
parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man', 1)
self.assertEqual('/cont/object', parsed[0]['path'])
self.assertEqual([(0, 40)], parsed[0]['range'].ranges)
data = json.dumps(
[{'path': '/cont/object', 'etag': 'etagoftheobjectsegment',
'size_bytes': None, 'range': '0-40'}])
parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man', 1)
self.assertEqual('/cont/object', parsed[0]['path'])
self.assertEqual(None, parsed[0]['size_bytes'])
self.assertEqual([(0, 40)], parsed[0]['range'].ranges)
class TestSloPutManifest(SloTestCase):
def setUp(self):
super(TestSloPutManifest, self).setUp()
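        # Register canned backend responses: each (method, path) below is
        # served by FakeSwift with the given status, headers, and body.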
self.app.register(
'GET', '/', swob.HTTPOk, {}, 'passed')
self.app.register(
'PUT', '/', swob.HTTPOk, {}, 'passed')
self.app.register(
'HEAD', '/v1/AUTH_test/cont/object',
swob.HTTPOk,
{'Content-Length': '100', 'Etag': 'etagoftheobjectsegment'},
None)
self.app.register(
'HEAD', '/v1/AUTH_test/cont/object2',
swob.HTTPOk,
{'Content-Length': '100', 'Etag': 'etagoftheobjectsegment'},
None)
self.app.register(
'HEAD', '/v1/AUTH_test/cont/object\xe2\x99\xa1',
swob.HTTPOk,
{'Content-Length': '100', 'Etag': 'etagoftheobjectsegment'},
None)
self.app.register(
'HEAD', '/v1/AUTH_test/cont/small_object',
swob.HTTPOk,
{'Content-Length': '10', 'Etag': 'etagoftheobjectsegment'},
None)
self.app.register(
'HEAD', u'/v1/AUTH_test/cont/あ_1',
swob.HTTPOk,
{'Content-Length': '1', 'Etag': 'a'},
None)
self.app.register(
'PUT', '/v1/AUTH_test/c/man', swob.HTTPCreated, {}, None)
self.app.register(
'DELETE', '/v1/AUTH_test/c/man', swob.HTTPNoContent, {}, None)
self.app.register(
'HEAD', '/v1/AUTH_test/checktest/a_1',
swob.HTTPOk,
{'Content-Length': '1', 'Etag': 'a'},
None)
self.app.register(
'HEAD', '/v1/AUTH_test/checktest/badreq',
swob.HTTPBadRequest, {}, None)
self.app.register(
'HEAD', '/v1/AUTH_test/checktest/b_2',
swob.HTTPOk,
{'Content-Length': '2', 'Etag': 'b',
'Last-Modified': 'Fri, 01 Feb 2012 20:38:36 GMT'},
None)
self.app.register(
'GET', '/v1/AUTH_test/checktest/slob',
swob.HTTPOk,
{'X-Static-Large-Object': 'true', 'Etag': 'slob-etag'},
None)
self.app.register(
'PUT', '/v1/AUTH_test/checktest/man_3', swob.HTTPCreated, {}, None)
def test_put_manifest_too_quick_fail(self):
req = Request.blank('/v1/a/c/o')
req.content_length = self.slo.max_manifest_size + 1
try:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 413)
with patch.object(self.slo, 'max_manifest_segments', 0):
req = Request.blank('/v1/a/c/o', body=test_json_data)
e = None
try:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 413)
with patch.object(self.slo, 'min_segment_size', 1000):
test_json_data_2obj = json.dumps(
[{'path': '/cont/small_object1',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10},
{'path': '/cont/small_object2',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10}])
req = Request.blank('/v1/a/c/o', body=test_json_data_2obj)
try:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 400)
req = Request.blank('/v1/a/c/o', headers={'X-Copy-From': 'lala'})
try:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 405)
# ignores requests to /
req = Request.blank(
'/?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=test_json_data)
self.assertEqual(
list(self.slo.handle_multipart_put(req, fake_start_response)),
['passed'])
def test_handle_multipart_put_success(self):
req = Request.blank(
'/v1/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
body=test_json_data)
self.assertTrue('X-Static-Large-Object' not in req.headers)
def my_fake_start_response(*args, **kwargs):
gen_etag = '"' + md5('etagoftheobjectsegment').hexdigest() + '"'
self.assertTrue(('Etag', gen_etag) in args[1])
self.slo(req.environ, my_fake_start_response)
self.assertTrue('X-Static-Large-Object' in req.headers)
def test_handle_multipart_put_success_allow_small_last_segment(self):
with patch.object(self.slo, 'min_segment_size', 50):
test_json_data = json.dumps([{'path': '/cont/object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100},
{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10}])
req = Request.blank(
'/v1/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
body=test_json_data)
self.assertTrue('X-Static-Large-Object' not in req.headers)
self.slo(req.environ, fake_start_response)
self.assertTrue('X-Static-Large-Object' in req.headers)
def test_handle_multipart_put_success_allow_only_one_small_segment(self):
with patch.object(self.slo, 'min_segment_size', 50):
test_json_data = json.dumps([{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10}])
req = Request.blank(
'/v1/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
body=test_json_data)
self.assertTrue('X-Static-Large-Object' not in req.headers)
self.slo(req.environ, fake_start_response)
self.assertTrue('X-Static-Large-Object' in req.headers)
def test_handle_multipart_put_disallow_small_first_segment(self):
with patch.object(self.slo, 'min_segment_size', 50):
test_json_data = json.dumps([{'path': '/cont/object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10},
{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100}])
req = Request.blank('/v1/a/c/o', body=test_json_data)
try:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 400)
def test_handle_multipart_put_success_unicode(self):
test_json_data = json.dumps([{'path': u'/cont/object\u2661',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100}])
req = Request.blank(
'/v1/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
body=test_json_data)
self.assertTrue('X-Static-Large-Object' not in req.headers)
self.slo(req.environ, fake_start_response)
self.assertTrue('X-Static-Large-Object' in req.headers)
        self.assertEqual(
            '/v1/AUTH_test/cont/object\xe2\x99\xa1',
            self.app.calls[0][1].split('?')[0])
def test_handle_multipart_put_no_xml(self):
req = Request.blank(
'/test_good/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
body=test_xml_data)
no_xml = self.slo(req.environ, fake_start_response)
self.assertEqual(no_xml, ['Manifest must be valid JSON.\n'])
def test_handle_multipart_put_bad_data(self):
bad_data = json.dumps([{'path': '/cont/object',
'etag': 'etagoftheobj',
'size_bytes': 'lala'}])
req = Request.blank(
'/test_good/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=bad_data)
self.assertRaises(HTTPException, self.slo.handle_multipart_put, req,
fake_start_response)
for bad_data in [
json.dumps([{'path': '/cont', 'etag': 'etagoftheobj',
'size_bytes': 100}]),
json.dumps('asdf'), json.dumps(None), json.dumps(5),
'not json', '1234', None, '', json.dumps({'path': None}),
json.dumps([{'path': '/cont/object', 'etag': None,
'size_bytes': 12}]),
json.dumps([{'path': '/cont/object', 'etag': 'asdf',
'size_bytes': 'sd'}]),
json.dumps([{'path': 12, 'etag': 'etagoftheobj',
'size_bytes': 100}]),
json.dumps([{'path': u'/cont/object\u2661',
'etag': 'etagoftheobj', 'size_bytes': 100}]),
json.dumps([{'path': 12, 'size_bytes': 100}]),
json.dumps([{'path': 12, 'size_bytes': 100}]),
json.dumps([{'path': '/c/o', 'etag': 123, 'size_bytes': 100}]),
json.dumps([{'path': None, 'etag': 'etagoftheobj',
'size_bytes': 100}])]:
req = Request.blank(
'/v1/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=bad_data)
self.assertRaises(HTTPException, self.slo.handle_multipart_put,
req, fake_start_response)
def test_handle_multipart_put_check_data(self):
good_data = json.dumps(
[{'path': '/checktest/a_1', 'etag': 'a', 'size_bytes': '1'},
{'path': '/checktest/b_2', 'etag': 'b', 'size_bytes': '2'}])
req = Request.blank(
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=good_data)
status, headers, body = self.call_slo(req)
self.assertEqual(self.app.call_count, 3)
# go behind SLO's back and see what actually got stored
req = Request.blank(
# this string looks weird, but it's just an artifact
# of FakeSwift
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_app(req)
headers = dict(headers)
manifest_data = json.loads(body)
self.assertTrue(headers['Content-Type'].endswith(';swift_bytes=3'))
self.assertEqual(len(manifest_data), 2)
self.assertEqual(manifest_data[0]['hash'], 'a')
self.assertEqual(manifest_data[0]['bytes'], 1)
self.assertTrue(
not manifest_data[0]['last_modified'].startswith('2012'))
self.assertTrue(manifest_data[1]['last_modified'].startswith('2012'))
def test_handle_multipart_put_check_data_bad(self):
bad_data = json.dumps(
[{'path': '/checktest/a_1', 'etag': 'a', 'size_bytes': '2'},
{'path': '/checktest/badreq', 'etag': 'a', 'size_bytes': '1'},
{'path': '/checktest/b_2', 'etag': 'not-b', 'size_bytes': '2'},
{'path': '/checktest/slob', 'etag': 'not-slob',
'size_bytes': '2'}])
req = Request.blank(
'/v1/AUTH_test/checktest/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Accept': 'application/json'},
body=bad_data)
status, headers, body = self.call_slo(req)
self.assertEqual(self.app.call_count, 5)
errors = json.loads(body)['Errors']
self.assertEqual(len(errors), 5)
self.assertEqual(errors[0][0], '/checktest/a_1')
self.assertEqual(errors[0][1], 'Size Mismatch')
self.assertEqual(errors[1][0], '/checktest/badreq')
self.assertEqual(errors[1][1], '400 Bad Request')
self.assertEqual(errors[2][0], '/checktest/b_2')
self.assertEqual(errors[2][1], 'Etag Mismatch')
self.assertEqual(errors[3][0], '/checktest/slob')
self.assertEqual(errors[3][1], 'Size Mismatch')
self.assertEqual(errors[4][0], '/checktest/slob')
self.assertEqual(errors[4][1], 'Etag Mismatch')
def test_handle_multipart_put_skip_size_check(self):
good_data = json.dumps(
[{'path': '/checktest/a_1', 'etag': 'a', 'size_bytes': None},
{'path': '/checktest/b_2', 'etag': 'b', 'size_bytes': None}])
req = Request.blank(
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=good_data)
status, headers, body = self.call_slo(req)
self.assertEqual(self.app.call_count, 3)
# Check that we still populated the manifest properly from our HEADs
req = Request.blank(
# this string looks weird, but it's just an artifact
# of FakeSwift
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_app(req)
manifest_data = json.loads(body)
self.assertEqual(1, manifest_data[0]['bytes'])
self.assertEqual(2, manifest_data[1]['bytes'])
def test_handle_multipart_put_skip_size_check_still_uses_min_size(self):
with patch.object(self.slo, 'min_segment_size', 50):
test_json_data = json.dumps([{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': None},
{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100}])
req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data)
with self.assertRaises(HTTPException) as cm:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(cm.exception.status_int, 400)
def test_handle_multipart_put_skip_size_check_no_early_bailout(self):
with patch.object(self.slo, 'min_segment_size', 50):
# The first is too small (it's 10 bytes but min size is 50), and
# the second has a bad etag. Make sure both errors show up in
# the response.
test_json_data = json.dumps([{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': None},
{'path': '/cont/object2',
'etag': 'wrong wrong wrong',
'size_bytes': 100}])
req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data)
with self.assertRaises(HTTPException) as cm:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(cm.exception.status_int, 400)
self.assertIn('at least 50 bytes', cm.exception.body)
self.assertIn('Etag Mismatch', cm.exception.body)
def test_handle_multipart_put_skip_etag_check(self):
good_data = json.dumps(
[{'path': '/checktest/a_1', 'etag': None, 'size_bytes': 1},
{'path': '/checktest/b_2', 'etag': None, 'size_bytes': 2}])
req = Request.blank(
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=good_data)
status, headers, body = self.call_slo(req)
self.assertEqual(self.app.call_count, 3)
# Check that we still populated the manifest properly from our HEADs
req = Request.blank(
# this string looks weird, but it's just an artifact
# of FakeSwift
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_app(req)
manifest_data = json.loads(body)
self.assertEqual('a', manifest_data[0]['hash'])
self.assertEqual('b', manifest_data[1]['hash'])
def test_handle_unsatisfiable_ranges(self):
bad_data = json.dumps(
[{'path': '/checktest/a_1', 'etag': None,
'size_bytes': None, 'range': '1-'}])
req = Request.blank(
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=bad_data)
with self.assertRaises(HTTPException) as catcher:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(400, catcher.exception.status_int)
self.assertIn("Unsatisfiable Range", catcher.exception.body)
def test_handle_single_ranges(self):
good_data = json.dumps(
[{'path': '/checktest/a_1', 'etag': None,
'size_bytes': None, 'range': '0-0'},
{'path': '/checktest/b_2', 'etag': None,
'size_bytes': 2, 'range': '-1'},
{'path': '/checktest/b_2', 'etag': None,
'size_bytes': 2, 'range': '0-0'},
{'path': '/cont/object', 'etag': None,
'size_bytes': None, 'range': '10-40'}])
req = Request.blank(
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=good_data)
status, headers, body = self.call_slo(req)
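        # The manifest Etag is the md5 of the concatenated segment etags,
        # each followed by ':<start>-<end>;' when only part of that segment
        # is used ('a' covers its whole 1-byte segment, so no range suffix).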
expected_etag = '"%s"' % md5('ab:1-1;b:0-0;etagoftheobjectsegment:'
'10-40;').hexdigest()
self.assertEqual(expected_etag, dict(headers)['Etag'])
self.assertEqual([
('HEAD', '/v1/AUTH_test/checktest/a_1'),
('HEAD', '/v1/AUTH_test/checktest/b_2'), # Only once!
('HEAD', '/v1/AUTH_test/cont/object'),
('PUT', '/v1/AUTH_test/checktest/man_3?multipart-manifest=put'),
], self.app.calls)
# Check that we still populated the manifest properly from our HEADs
req = Request.blank(
# this string looks weird, but it's just an artifact
# of FakeSwift
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_app(req)
manifest_data = json.loads(body)
self.assertEqual('a', manifest_data[0]['hash'])
self.assertNotIn('range', manifest_data[0])
self.assertNotIn('segment_bytes', manifest_data[0])
self.assertEqual('b', manifest_data[1]['hash'])
self.assertEqual('1-1', manifest_data[1]['range'])
self.assertEqual('b', manifest_data[2]['hash'])
self.assertEqual('0-0', manifest_data[2]['range'])
self.assertEqual('etagoftheobjectsegment', manifest_data[3]['hash'])
self.assertEqual('10-40', manifest_data[3]['range'])
class TestSloDeleteManifest(SloTestCase):
def setUp(self):
super(TestSloDeleteManifest, self).setUp()
_submanifest_data = json.dumps(
[{'name': '/deltest/b_2', 'hash': 'a', 'bytes': '1'},
{'name': '/deltest/c_3', 'hash': 'b', 'bytes': '2'}])
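        # Manifests and segments used by the DELETE tests; entries with
        # 'sub_slo': True point at manifests that are themselves SLOs.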
self.app.register(
'GET', '/v1/AUTH_test/deltest/man_404',
swob.HTTPNotFound, {}, None)
self.app.register(
'GET', '/v1/AUTH_test/deltest/man',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/deltest/gone', 'hash': 'a', 'bytes': '1'},
{'name': '/deltest/b_2', 'hash': 'b', 'bytes': '2'}]))
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/man',
swob.HTTPNoContent, {}, None)
self.app.register(
'GET', '/v1/AUTH_test/deltest/man-all-there',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/deltest/b_2', 'hash': 'a', 'bytes': '1'},
{'name': '/deltest/c_3', 'hash': 'b', 'bytes': '2'}]))
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/man-all-there',
swob.HTTPNoContent, {}, None)
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/gone',
swob.HTTPNotFound, {}, None)
self.app.register(
'GET', '/v1/AUTH_test/deltest/a_1',
swob.HTTPOk, {'Content-Length': '1'}, 'a')
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/a_1',
swob.HTTPNoContent, {}, None)
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/b_2',
swob.HTTPNoContent, {}, None)
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/c_3',
swob.HTTPNoContent, {}, None)
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/d_3',
swob.HTTPNoContent, {}, None)
self.app.register(
'GET', '/v1/AUTH_test/deltest/manifest-with-submanifest',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/deltest/a_1',
'hash': 'a', 'bytes': '1'},
{'name': '/deltest/submanifest', 'sub_slo': True,
'hash': 'submanifest-etag',
'bytes': len(_submanifest_data)},
{'name': '/deltest/d_3',
'hash': 'd', 'bytes': '3'}]))
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/manifest-with-submanifest',
swob.HTTPNoContent, {}, None)
self.app.register(
'GET', '/v1/AUTH_test/deltest/submanifest',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
_submanifest_data)
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/submanifest',
swob.HTTPNoContent, {}, None)
self.app.register(
'GET', '/v1/AUTH_test/deltest/manifest-missing-submanifest',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/deltest/a_1', 'hash': 'a', 'bytes': '1'},
{'name': '/deltest/missing-submanifest',
'hash': 'a', 'bytes': '2', 'sub_slo': True},
{'name': '/deltest/d_3', 'hash': 'd', 'bytes': '3'}]))
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/manifest-missing-submanifest',
swob.HTTPNoContent, {}, None)
self.app.register(
'GET', '/v1/AUTH_test/deltest/missing-submanifest',
swob.HTTPNotFound, {}, None)
self.app.register(
'GET', '/v1/AUTH_test/deltest/manifest-badjson',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
"[not {json (at ++++all")
self.app.register(
'GET', '/v1/AUTH_test/deltest/manifest-with-unauth-segment',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/deltest/a_1', 'hash': 'a', 'bytes': '1'},
{'name': '/deltest-unauth/q_17',
'hash': '11', 'bytes': '17'}]))
self.app.register(
'DELETE', '/v1/AUTH_test/deltest/manifest-with-unauth-segment',
swob.HTTPNoContent, {}, None)
self.app.register(
'DELETE', '/v1/AUTH_test/deltest-unauth/q_17',
swob.HTTPUnauthorized, {}, None)
def test_handle_multipart_delete_man(self):
req = Request.blank(
'/v1/AUTH_test/deltest/man',
environ={'REQUEST_METHOD': 'DELETE'})
self.slo(req.environ, fake_start_response)
self.assertEqual(self.app.call_count, 1)
def test_handle_multipart_delete_bad_utf8(self):
req = Request.blank(
'/v1/AUTH_test/deltest/man\xff\xfe?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK')
resp_data = json.loads(body)
self.assertEqual(resp_data['Response Status'],
'412 Precondition Failed')
def test_handle_multipart_delete_whole_404(self):
req = Request.blank(
'/v1/AUTH_test/deltest/man_404?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
resp_data = json.loads(body)
self.assertEqual(
self.app.calls,
[('GET',
'/v1/AUTH_test/deltest/man_404?multipart-manifest=get')])
self.assertEqual(resp_data['Response Status'], '200 OK')
self.assertEqual(resp_data['Response Body'], '')
self.assertEqual(resp_data['Number Deleted'], 0)
self.assertEqual(resp_data['Number Not Found'], 1)
self.assertEqual(resp_data['Errors'], [])
def test_handle_multipart_delete_segment_404(self):
req = Request.blank(
'/v1/AUTH_test/deltest/man?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
resp_data = json.loads(body)
self.assertEqual(
self.app.calls,
[('GET',
'/v1/AUTH_test/deltest/man?multipart-manifest=get'),
('DELETE',
'/v1/AUTH_test/deltest/gone?multipart-manifest=delete'),
('DELETE',
'/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'),
('DELETE',
'/v1/AUTH_test/deltest/man?multipart-manifest=delete')])
self.assertEqual(resp_data['Response Status'], '200 OK')
self.assertEqual(resp_data['Number Deleted'], 2)
self.assertEqual(resp_data['Number Not Found'], 1)
def test_handle_multipart_delete_whole(self):
req = Request.blank(
'/v1/AUTH_test/deltest/man-all-there?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE'})
self.call_slo(req)
self.assertEqual(
self.app.calls,
[('GET',
'/v1/AUTH_test/deltest/man-all-there?multipart-manifest=get'),
('DELETE', '/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'),
('DELETE', '/v1/AUTH_test/deltest/c_3?multipart-manifest=delete'),
('DELETE', ('/v1/AUTH_test/deltest/' +
'man-all-there?multipart-manifest=delete'))])
def test_handle_multipart_delete_nested(self):
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-with-submanifest?' +
'multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE'})
self.call_slo(req)
self.assertEqual(
set(self.app.calls),
set([('GET', '/v1/AUTH_test/deltest/' +
'manifest-with-submanifest?multipart-manifest=get'),
('GET', '/v1/AUTH_test/deltest/' +
'submanifest?multipart-manifest=get'),
('DELETE',
'/v1/AUTH_test/deltest/a_1?multipart-manifest=delete'),
('DELETE',
'/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'),
('DELETE',
'/v1/AUTH_test/deltest/c_3?multipart-manifest=delete'),
('DELETE',
'/v1/AUTH_test/deltest/' +
'submanifest?multipart-manifest=delete'),
('DELETE',
'/v1/AUTH_test/deltest/d_3?multipart-manifest=delete'),
('DELETE',
'/v1/AUTH_test/deltest/' +
'manifest-with-submanifest?multipart-manifest=delete')]))
def test_handle_multipart_delete_nested_too_many_segments(self):
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-with-submanifest?' +
'multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
with patch.object(slo, 'MAX_BUFFERED_SLO_SEGMENTS', 1):
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK')
resp_data = json.loads(body)
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(resp_data['Response Body'],
'Too many buffered slo segments to delete.')
def test_handle_multipart_delete_nested_404(self):
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-missing-submanifest' +
'?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
resp_data = json.loads(body)
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/deltest/' +
'manifest-missing-submanifest?multipart-manifest=get'),
('DELETE', '/v1/AUTH_test/deltest/a_1?multipart-manifest=delete'),
('GET', '/v1/AUTH_test/deltest/' +
'missing-submanifest?multipart-manifest=get'),
('DELETE', '/v1/AUTH_test/deltest/d_3?multipart-manifest=delete'),
('DELETE', '/v1/AUTH_test/deltest/' +
'manifest-missing-submanifest?multipart-manifest=delete')])
self.assertEqual(resp_data['Response Status'], '200 OK')
self.assertEqual(resp_data['Response Body'], '')
self.assertEqual(resp_data['Number Deleted'], 3)
self.assertEqual(resp_data['Number Not Found'], 1)
self.assertEqual(resp_data['Errors'], [])
def test_handle_multipart_delete_nested_401(self):
self.app.register(
'GET', '/v1/AUTH_test/deltest/submanifest',
swob.HTTPUnauthorized, {}, None)
req = Request.blank(
('/v1/AUTH_test/deltest/manifest-with-submanifest' +
'?multipart-manifest=delete'),
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK')
resp_data = json.loads(body)
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(resp_data['Errors'],
[['/deltest/submanifest', '401 Unauthorized']])
def test_handle_multipart_delete_nested_500(self):
self.app.register(
'GET', '/v1/AUTH_test/deltest/submanifest',
swob.HTTPServerError, {}, None)
req = Request.blank(
('/v1/AUTH_test/deltest/manifest-with-submanifest' +
'?multipart-manifest=delete'),
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK')
resp_data = json.loads(body)
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(resp_data['Errors'],
[['/deltest/submanifest',
'Unable to load SLO manifest or segment.']])
def test_handle_multipart_delete_not_a_manifest(self):
req = Request.blank(
'/v1/AUTH_test/deltest/a_1?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
resp_data = json.loads(body)
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/deltest/a_1?multipart-manifest=get')])
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(resp_data['Response Body'], '')
self.assertEqual(resp_data['Number Deleted'], 0)
self.assertEqual(resp_data['Number Not Found'], 0)
self.assertEqual(resp_data['Errors'],
[['/deltest/a_1', 'Not an SLO manifest']])
def test_handle_multipart_delete_bad_json(self):
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-badjson?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
resp_data = json.loads(body)
self.assertEqual(self.app.calls,
[('GET', '/v1/AUTH_test/deltest/' +
'manifest-badjson?multipart-manifest=get')])
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(resp_data['Response Body'], '')
self.assertEqual(resp_data['Number Deleted'], 0)
self.assertEqual(resp_data['Number Not Found'], 0)
self.assertEqual(resp_data['Errors'],
[['/deltest/manifest-badjson',
'Unable to load SLO manifest']])
def test_handle_multipart_delete_401(self):
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-with-unauth-segment' +
'?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
resp_data = json.loads(body)
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/deltest/' +
'manifest-with-unauth-segment?multipart-manifest=get'),
('DELETE', '/v1/AUTH_test/deltest/a_1?multipart-manifest=delete'),
('DELETE', '/v1/AUTH_test/deltest-unauth/' +
'q_17?multipart-manifest=delete'),
('DELETE', '/v1/AUTH_test/deltest/' +
'manifest-with-unauth-segment?multipart-manifest=delete')])
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(resp_data['Response Body'], '')
self.assertEqual(resp_data['Number Deleted'], 2)
self.assertEqual(resp_data['Number Not Found'], 0)
self.assertEqual(resp_data['Errors'],
[['/deltest-unauth/q_17', '401 Unauthorized']])
def test_handle_multipart_delete_client_content_type(self):
req = Request.blank(
'/v1/AUTH_test/deltest/man-all-there?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE', 'CONTENT_TYPE': 'foo/bar'},
headers={'Accept': 'application/json'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK')
resp_data = json.loads(body)
self.assertEqual(resp_data["Number Deleted"], 3)
self.assertEqual(
self.app.calls,
[('GET',
'/v1/AUTH_test/deltest/man-all-there?multipart-manifest=get'),
('DELETE', '/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'),
('DELETE', '/v1/AUTH_test/deltest/c_3?multipart-manifest=delete'),
('DELETE', ('/v1/AUTH_test/deltest/' +
'man-all-there?multipart-manifest=delete'))])
class TestSloHeadManifest(SloTestCase):
def setUp(self):
super(TestSloHeadManifest, self).setUp()
self._manifest_json = json.dumps([
{'name': '/gettest/seg01',
'bytes': '100',
'hash': 'seg01-hash',
'content_type': 'text/plain',
'last_modified': '2013-11-19T11:33:45.137446'},
{'name': '/gettest/seg02',
'bytes': '200',
'hash': 'seg02-hash',
'content_type': 'text/plain',
'last_modified': '2013-11-19T11:33:45.137447'}])
self.app.register(
'GET', '/v1/AUTH_test/headtest/man',
swob.HTTPOk, {'Content-Length': str(len(self._manifest_json)),
'X-Static-Large-Object': 'true',
'Etag': md5(self._manifest_json).hexdigest()},
self._manifest_json)
def test_etag_is_hash_of_segment_etags(self):
req = Request.blank(
'/v1/AUTH_test/headtest/man',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers.get('Etag', '').strip("'\""),
md5("seg01-hashseg02-hash").hexdigest())
self.assertEqual(body, '') # it's a HEAD request, after all
def test_etag_matching(self):
etag = md5("seg01-hashseg02-hash").hexdigest()
req = Request.blank(
'/v1/AUTH_test/headtest/man',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match': etag})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '304 Not Modified')
class TestSloGetManifest(SloTestCase):
def setUp(self):
super(TestSloGetManifest, self).setUp()
# some plain old objects
self.app.register(
'GET', '/v1/AUTH_test/gettest/a_5',
swob.HTTPOk, {'Content-Length': '5',
'Etag': md5hex('a' * 5)},
'a' * 5)
self.app.register(
'GET', '/v1/AUTH_test/gettest/b_10',
swob.HTTPOk, {'Content-Length': '10',
'Etag': md5hex('b' * 10)},
'b' * 10)
self.app.register(
'GET', '/v1/AUTH_test/gettest/c_15',
swob.HTTPOk, {'Content-Length': '15',
'Etag': md5hex('c' * 15)},
'c' * 15)
self.app.register(
'GET', '/v1/AUTH_test/gettest/d_20',
swob.HTTPOk, {'Content-Length': '20',
'Etag': md5hex('d' * 20)},
'd' * 20)
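        # manifest-bc: a plain two-segment manifest (b_10 + c_15, 25 bytes)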
_bc_manifest_json = json.dumps(
[{'name': '/gettest/b_10', 'hash': md5hex('b' * 10), 'bytes': '10',
'content_type': 'text/plain'},
{'name': '/gettest/c_15', 'hash': md5hex('c' * 15), 'bytes': '15',
'content_type': 'text/plain'}])
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-bc',
swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=25',
'X-Static-Large-Object': 'true',
'X-Object-Meta-Plant': 'Ficus',
'Etag': md5hex(_bc_manifest_json)},
_bc_manifest_json)
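        # manifest-abcd: a_5, then manifest-bc as a sub-SLO, then d_20
        # (50 bytes in total)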
_abcd_manifest_json = json.dumps(
[{'name': '/gettest/a_5', 'hash': md5hex("a" * 5),
'content_type': 'text/plain', 'bytes': '5'},
{'name': '/gettest/manifest-bc', 'sub_slo': True,
'content_type': 'application/json;swift_bytes=25',
'hash': md5hex(md5hex("b" * 10) + md5hex("c" * 15)),
'bytes': len(_bc_manifest_json)},
{'name': '/gettest/d_20', 'hash': md5hex("d" * 20),
'content_type': 'text/plain', 'bytes': '20'}])
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-abcd',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': md5(_abcd_manifest_json).hexdigest()},
_abcd_manifest_json)
self.manifest_abcd_etag = md5hex(
md5hex("a" * 5) + md5hex(md5hex("b" * 10) + md5hex("c" * 15)) +
md5hex("d" * 20))
_bc_ranges_manifest_json = json.dumps(
[{'name': '/gettest/b_10', 'hash': md5hex('b' * 10),
'content_type': 'text/plain', 'bytes': '10',
'range': '4-7'},
{'name': '/gettest/b_10', 'hash': md5hex('b' * 10),
'content_type': 'text/plain', 'bytes': '10',
'range': '2-5'},
{'name': '/gettest/c_15', 'hash': md5hex('c' * 15),
'content_type': 'text/plain', 'bytes': '15',
'range': '0-3'},
{'name': '/gettest/c_15', 'hash': md5hex('c' * 15),
'content_type': 'text/plain', 'bytes': '15',
'range': '11-14'}])
self.bc_ranges_etag = md5hex(_bc_ranges_manifest_json)
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-bc-ranges',
swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=16',
'X-Static-Large-Object': 'true',
'X-Object-Meta-Plant': 'Ficus',
'Etag': self.bc_ranges_etag},
_bc_ranges_manifest_json)
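        # manifest-abcd-ranges: sub-ranges of a_5, of manifest-bc-ranges
        # (as a sub-SLO), and of d_20 (32 bytes in total)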
_abcd_ranges_manifest_json = json.dumps(
[{'name': '/gettest/a_5', 'hash': md5hex("a" * 5),
'content_type': 'text/plain', 'bytes': '5',
'range': '0-3'},
{'name': '/gettest/a_5', 'hash': md5hex("a" * 5),
'content_type': 'text/plain', 'bytes': '5',
'range': '1-4'},
{'name': '/gettest/manifest-bc-ranges', 'sub_slo': True,
'content_type': 'application/json;swift_bytes=16',
'hash': self.bc_ranges_etag,
'bytes': len(_bc_ranges_manifest_json),
'range': '8-15'},
{'name': '/gettest/manifest-bc-ranges', 'sub_slo': True,
'content_type': 'application/json;swift_bytes=16',
'hash': self.bc_ranges_etag,
'bytes': len(_bc_ranges_manifest_json),
'range': '0-7'},
{'name': '/gettest/d_20', 'hash': md5hex("d" * 20),
'content_type': 'text/plain', 'bytes': '20',
'range': '0-3'},
{'name': '/gettest/d_20', 'hash': md5hex("d" * 20),
'content_type': 'text/plain', 'bytes': '20',
'range': '8-11'}])
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-abcd-ranges',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': md5hex(_abcd_ranges_manifest_json)},
_abcd_ranges_manifest_json)
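        # manifest-abcd-subranges: five ranges into manifest-abcd-ranges,
        # which is itself a ranged SLO (17 bytes in total)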
_abcd_subranges_manifest_json = json.dumps(
[{'name': '/gettest/manifest-abcd-ranges', 'sub_slo': True,
'hash': md5hex("a" * 8),
'content_type': 'text/plain', 'bytes': '32',
'range': '6-10'},
{'name': '/gettest/manifest-abcd-ranges', 'sub_slo': True,
'hash': md5hex("a" * 8),
'content_type': 'text/plain', 'bytes': '32',
'range': '31-31'},
{'name': '/gettest/manifest-abcd-ranges', 'sub_slo': True,
'hash': md5hex("a" * 8),
'content_type': 'text/plain', 'bytes': '32',
'range': '14-18'},
{'name': '/gettest/manifest-abcd-ranges', 'sub_slo': True,
'hash': md5hex("a" * 8),
'content_type': 'text/plain', 'bytes': '32',
'range': '0-0'},
{'name': '/gettest/manifest-abcd-ranges', 'sub_slo': True,
'hash': md5hex("a" * 8),
'content_type': 'text/plain', 'bytes': '32',
'range': '22-26'}])
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-abcd-subranges',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': md5hex(_abcd_subranges_manifest_json)},
_abcd_subranges_manifest_json)
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-badjson',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'X-Object-Meta-Fish': 'Bass'},
"[not {json (at ++++all")
def tearDown(self):
self.assertEqual(self.app.unclosed_requests, {})
def test_get_manifest_passthrough(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-bc?multipart-manifest=get',
environ={'REQUEST_METHOD': 'GET',
'HTTP_ACCEPT': 'application/json'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK')
self.assertTrue(
('Content-Type', 'application/json; charset=utf-8') in headers,
headers)
try:
resp_data = json.loads(body)
except ValueError:
self.fail("Invalid JSON in manifest GET: %r" % body)
self.assertEqual(
resp_data,
[{'hash': md5hex('b' * 10), 'bytes': '10', 'name': '/gettest/b_10',
'content_type': 'text/plain'},
{'hash': md5hex('c' * 15), 'bytes': '15', 'name': '/gettest/c_15',
'content_type': 'text/plain'}],
body)
def test_get_nonmanifest_passthrough(self):
req = Request.blank(
'/v1/AUTH_test/gettest/a_5',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, 'aaaaa')
def test_get_manifest(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-bc',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
manifest_etag = md5hex(md5hex("b" * 10) + md5hex("c" * 15))
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '25')
self.assertEqual(headers['Etag'], '"%s"' % manifest_etag)
self.assertEqual(headers['X-Object-Meta-Plant'], 'Ficus')
self.assertEqual(body, 'bbbbbbbbbbccccccccccccccc')
for _, _, hdrs in self.app.calls_with_headers[1:]:
ua = hdrs.get("User-Agent", "")
self.assertTrue("SLO MultipartGET" in ua)
self.assertFalse("SLO MultipartGET SLO MultipartGET" in ua)
# the first request goes through unaltered
first_ua = self.app.calls_with_headers[0][2].get("User-Agent")
self.assertFalse(
"SLO MultipartGET" in first_ua)
def test_if_none_match_matches(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': self.manifest_abcd_etag})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '304 Not Modified')
self.assertEqual(headers['Content-Length'], '0')
self.assertEqual(body, '')
def test_if_none_match_does_not_match(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': "not-%s" % self.manifest_abcd_etag})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(
body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd')
def test_if_match_matches(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': self.manifest_abcd_etag})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(
body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd')
def test_if_match_does_not_match(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': "not-%s" % self.manifest_abcd_etag})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(headers['Content-Length'], '0')
self.assertEqual(body, '')
def test_if_match_matches_and_range(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': self.manifest_abcd_etag,
'Range': 'bytes=3-6'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '4')
self.assertEqual(body, 'aabb')
def test_get_manifest_with_submanifest(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '50')
self.assertEqual(headers['Etag'], '"%s"' % self.manifest_abcd_etag)
self.assertEqual(
body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd')
def test_range_get_manifest(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=3-17'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '15')
self.assertTrue('Etag' not in headers)
self.assertEqual(body, 'aabbbbbbbbbbccc')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get')])
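        # Range bytes=3-17 of the 50-byte object maps to the tail of a_5
        # (bytes=3-), all of b_10, and bytes 0-2 of c_15.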
ranges = [c[2].get('Range') for c in self.app.calls_with_headers]
self.assertEqual(ranges, [
'bytes=3-17',
None,
None,
'bytes=3-',
None,
'bytes=0-2'])
# we set swift.source for everything but the first request
self.assertIsNone(self.app.swift_sources[0])
self.assertEqual(self.app.swift_sources[1:],
['SLO'] * (len(self.app.swift_sources) - 1))
def test_range_get_includes_whole_manifest(self):
# If the first range GET results in retrieval of the entire manifest
# body (which we can detect by looking at Content-Range), then we
# should not go make a second, non-ranged request just to retrieve the
# same bytes again.
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-999999999'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(
body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/d_20?multipart-manifest=get')])
def test_range_get_beyond_manifest(self):
big = 'e' * 1024 * 1024
big_etag = md5hex(big)
self.app.register(
'GET', '/v1/AUTH_test/gettest/big_seg',
swob.HTTPOk, {'Content-Type': 'application/foo',
'Etag': big_etag}, big)
big_manifest = json.dumps(
[{'name': '/gettest/big_seg', 'hash': big_etag,
'bytes': 1024 * 1024, 'content_type': 'application/foo'}])
self.app.register(
'GET', '/v1/AUTH_test/gettest/big_manifest',
swob.HTTPOk, {'Content-Type': 'application/octet-stream',
'X-Static-Large-Object': 'true',
'Etag': md5(big_manifest).hexdigest()},
big_manifest)
req = Request.blank(
'/v1/AUTH_test/gettest/big_manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=100000-199999'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
count_e = sum(1 if x == 'e' else 0 for x in body)
self.assertEqual(count_e, 100000)
self.assertEqual(len(body) - count_e, 0)
self.assertEqual(
self.app.calls, [
# has Range header, gets 416
('GET', '/v1/AUTH_test/gettest/big_manifest'),
# retry the first one
('GET', '/v1/AUTH_test/gettest/big_manifest'),
('GET',
'/v1/AUTH_test/gettest/big_seg?multipart-manifest=get')])
def test_range_get_bogus_content_range(self):
# Just a little paranoia; Swift currently sends back valid
# Content-Range headers, but if somehow someone sneaks an invalid one
# in there, we'll ignore it.
def content_range_breaker_factory(app):
def content_range_breaker(env, start_response):
req = swob.Request(env)
resp = req.get_response(app)
resp.headers['Content-Range'] = 'triscuits'
return resp(env, start_response)
return content_range_breaker
self.slo = slo.filter_factory({})(
content_range_breaker_factory(self.app))
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-999999999'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(
body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/d_20?multipart-manifest=get')])
def test_range_get_manifest_on_segment_boundaries(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=5-29'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '25')
self.assertTrue('Etag' not in headers)
self.assertEqual(body, 'bbbbbbbbbbccccccccccccccc')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get')])
headers = [c[2] for c in self.app.calls_with_headers]
self.assertEqual(headers[0].get('Range'), 'bytes=5-29')
self.assertEqual(headers[1].get('Range'), None)
self.assertEqual(headers[2].get('Range'), None)
self.assertEqual(headers[3].get('Range'), None)
self.assertEqual(headers[4].get('Range'), None)
def test_range_get_manifest_first_byte(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-0'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(body, 'a')
# Make sure we don't get any objects we don't need, including
# submanifests.
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get')])
def test_range_get_manifest_sub_slo(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=25-30'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '6')
self.assertEqual(body, 'cccccd')
# Make sure we don't get any objects we don't need, including
# submanifests.
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/d_20?multipart-manifest=get')])
def test_range_get_manifest_overlapping_end(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=45-55'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '5')
self.assertEqual(body, 'ddddd')
def test_range_get_manifest_unsatisfiable(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=100-200'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '416 Requested Range Not Satisfiable')
def test_multi_range_get_manifest(self):
# SLO doesn't support multi-range GETs. The way that you express
# "unsupported" in HTTP is to return a 200 and the whole entity.
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-0,2-2'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '50')
self.assertEqual(
body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd')
def test_get_segment_with_non_ascii_name(self):
segment_body = u"a møøse once bit my sister".encode("utf-8")
self.app.register(
'GET', u'/v1/AUTH_test/ünicode/öbject-segment'.encode('utf-8'),
swob.HTTPOk, {'Content-Length': str(len(segment_body)),
'Etag': md5hex(segment_body)},
segment_body)
manifest_json = json.dumps([{'name': u'/ünicode/öbject-segment',
'hash': md5hex(segment_body),
'content_type': 'text/plain',
'bytes': len(segment_body)}])
self.app.register(
'GET', u'/v1/AUTH_test/ünicode/manifest'.encode('utf-8'),
swob.HTTPOk, {'Content-Type': 'application/json',
'Content-Length': str(len(manifest_json)),
'X-Static-Large-Object': 'true'},
manifest_json)
req = Request.blank(
'/v1/AUTH_test/ünicode/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(body, segment_body)
def test_get_range_manifest(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd-ranges',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '32')
self.assertEqual(headers['Content-Type'], 'application/json')
self.assertEqual(body, 'aaaaaaaaccccccccbbbbbbbbdddddddd')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd-ranges'),
('GET', '/v1/AUTH_test/gettest/manifest-bc-ranges'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/d_20?multipart-manifest=get')])
ranges = [c[2].get('Range') for c in self.app.calls_with_headers]
self.assertEqual(ranges, [
None,
None,
'bytes=0-3,1-',
'bytes=0-3,11-',
'bytes=4-7,2-5',
'bytes=0-3,8-11'])
# we set swift.source for everything but the first request
self.assertIsNone(self.app.swift_sources[0])
self.assertEqual(self.app.swift_sources[1:],
['SLO'] * (len(self.app.swift_sources) - 1))
self.assertEqual(md5hex(''.join([
md5hex('a' * 5), ':0-3;',
md5hex('a' * 5), ':1-4;',
self.bc_ranges_etag, ':8-15;',
self.bc_ranges_etag, ':0-7;',
md5hex('d' * 20), ':0-3;',
md5hex('d' * 20), ':8-11;',
])), headers['Etag'].strip('"'))
def test_get_subrange_manifest(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd-subranges',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '17')
self.assertEqual(headers['Content-Type'], 'application/json')
self.assertEqual(body, 'aacccdccbbbabbddd')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd-subranges'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd-ranges'),
('GET', '/v1/AUTH_test/gettest/manifest-bc-ranges'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/manifest-bc-ranges'),
('GET', '/v1/AUTH_test/gettest/d_20?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/manifest-bc-ranges'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/d_20?multipart-manifest=get')])
ranges = [c[2].get('Range') for c in self.app.calls_with_headers]
self.assertEqual(ranges, [
None,
None,
None,
'bytes=3-',
'bytes=0-2',
None,
'bytes=11-11',
'bytes=13-',
'bytes=4-6',
None,
'bytes=0-0',
'bytes=4-5',
'bytes=0-2'])
# we set swift.source for everything but the first request
self.assertIsNone(self.app.swift_sources[0])
self.assertEqual(self.app.swift_sources[1:],
['SLO'] * (len(self.app.swift_sources) - 1))
def test_range_get_range_manifest(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd-ranges',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=7-26'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '20')
self.assertEqual(headers['Content-Type'], 'application/json')
self.assertNotIn('Etag', headers)
self.assertEqual(body, 'accccccccbbbbbbbbddd')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd-ranges'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd-ranges'),
('GET', '/v1/AUTH_test/gettest/manifest-bc-ranges'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/d_20?multipart-manifest=get')])
ranges = [c[2].get('Range') for c in self.app.calls_with_headers]
self.assertEqual(ranges, [
'bytes=7-26',
None,
None,
'bytes=4-',
'bytes=0-3,11-',
'bytes=4-7,2-5',
'bytes=0-2'])
# we set swift.source for everything but the first request
self.assertIsNone(self.app.swift_sources[0])
self.assertEqual(self.app.swift_sources[1:],
['SLO'] * (len(self.app.swift_sources) - 1))
def test_range_get_subrange_manifest(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd-subranges',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=4-12'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '9')
self.assertEqual(headers['Content-Type'], 'application/json')
self.assertEqual(body, 'cdccbbbab')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd-subranges'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd-subranges'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd-ranges'),
('GET', '/v1/AUTH_test/gettest/manifest-bc-ranges'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/manifest-bc-ranges'),
('GET', '/v1/AUTH_test/gettest/d_20?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/manifest-bc-ranges'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get')])
ranges = [c[2].get('Range') for c in self.app.calls_with_headers]
self.assertEqual(ranges, [
'bytes=4-12',
None,
None,
None,
'bytes=2-2',
None,
'bytes=11-11',
'bytes=13-',
'bytes=4-6',
None,
'bytes=0-0',
'bytes=4-4'])
# we set swift.source for everything but the first request
self.assertIsNone(self.app.swift_sources[0])
self.assertEqual(self.app.swift_sources[1:],
['SLO'] * (len(self.app.swift_sources) - 1))
def test_range_get_includes_whole_range_manifest(self):
# If the first range GET results in retrieval of the entire manifest
# body (which we can detect by looking at Content-Range), then we
# should not go make a second, non-ranged request just to retrieve the
# same bytes again.
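        # The backend 206 for this huge range carries a Content-Range that
        # spans the entire manifest body, which is how the middleware knows a
        # second, un-ranged manifest GET would be redundant.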
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd-ranges',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-999999999'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content')
self.assertEqual(headers['Content-Length'], '32')
self.assertEqual(headers['Content-Type'], 'application/json')
self.assertEqual(body, 'aaaaaaaaccccccccbbbbbbbbdddddddd')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/gettest/manifest-abcd-ranges'),
('GET', '/v1/AUTH_test/gettest/manifest-bc-ranges'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/d_20?multipart-manifest=get')])
ranges = [c[2].get('Range') for c in self.app.calls_with_headers]
self.assertEqual(ranges, [
'bytes=0-999999999',
None,
'bytes=0-3,1-',
'bytes=0-3,11-',
'bytes=4-7,2-5',
'bytes=0-3,8-11'])
# we set swift.source for everything but the first request
self.assertIsNone(self.app.swift_sources[0])
self.assertEqual(self.app.swift_sources[1:],
['SLO'] * (len(self.app.swift_sources) - 1))
def test_multi_range_get_range_manifest(self):
# SLO doesn't support multi-range GETs. The way that you express
# "unsupported" in HTTP is to return a 200 and the whole entity.
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd-ranges',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-0,2-2'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Type'], 'application/json')
self.assertEqual(body, 'aaaaaaaaccccccccbbbbbbbbdddddddd')
self.assertNotIn('Transfer-Encoding', headers)
self.assertNotIn('Content-Range', headers)
self.assertEqual(headers['Content-Length'], '32')
def test_get_bogus_manifest(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-badjson',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '0')
self.assertEqual(headers['X-Object-Meta-Fish'], 'Bass')
self.assertEqual(body, '')
def test_head_manifest_is_efficient(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_slo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '50')
self.assertEqual(headers['Etag'], '"%s"' % self.manifest_abcd_etag)
self.assertEqual(body, '')
# Note the lack of recursive descent into manifest-bc. We know the
# content-length from the outer manifest, so there's no need for any
# submanifest fetching here, but a naïve implementation might do it
# anyway.
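        # (The follow-up GET of the same manifest is expected here: the HEAD
        # response carries no body, and SLO presumably still needs the
        # manifest JSON itself to build the validated response headers.)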
self.assertEqual(self.app.calls, [
('HEAD', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-abcd')])
def test_recursion_limit(self):
# man1 points to obj1 and man2, man2 points to obj2 and man3...
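        # Build the chain back-to-front: first register the leaf objects and a
        # provisional manifest for every man<i>, then (below) re-register
        # man19..man1 so that each man<i> references obj<i> plus man<i+1>.
        # The recursion limit should trip long before the dangling obj20
        # reference could ever be fetched.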
for i in range(20):
self.app.register('GET', '/v1/AUTH_test/gettest/obj%d' % i,
swob.HTTPOk, {'Content-Type': 'text/plain',
'Etag': md5hex('body%02d' % i)},
'body%02d' % i)
manifest_json = json.dumps([{'name': '/gettest/obj20',
'hash': md5hex('body20'),
'content_type': 'text/plain',
'bytes': '6'}])
self.app.register(
'GET', '/v1/AUTH_test/gettest/man%d' % i,
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': 'man%d' % i},
manifest_json)
for i in range(19, 0, -1):
manifest_data = [
{'name': '/gettest/obj%d' % i,
'hash': md5hex('body%02d' % i),
'bytes': '6',
'content_type': 'text/plain'},
{'name': '/gettest/man%d' % (i + 1),
'hash': 'man%d' % (i + 1),
'sub_slo': True,
'bytes': len(manifest_json),
'content_type':
'application/json;swift_bytes=%d' % ((21 - i) * 6)}]
manifest_json = json.dumps(manifest_data)
self.app.register(
'GET', '/v1/AUTH_test/gettest/man%d' % i,
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': 'man%d' % i},
manifest_json)
req = Request.blank(
'/v1/AUTH_test/gettest/man1',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_slo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
self.assertIsInstance(exc, ListingIterError)
# we don't know at header-sending time that things are going to go
# wrong, so we end up with a 200 and a truncated body
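        # (the only hint a client gets is that fewer bytes arrive than the
        # advertised Content-Length)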
self.assertEqual(status, '200 OK')
self.assertEqual(body, ('body01body02body03body04body05' +
'body06body07body08body09body10'))
# make sure we didn't keep asking for segments
self.assertEqual(self.app.call_count, 20)
def test_sub_slo_recursion(self):
# man1 points to man2 and obj1, man2 points to man3 and obj2...
for i in range(11):
self.app.register('GET', '/v1/AUTH_test/gettest/obj%d' % i,
swob.HTTPOk, {'Content-Type': 'text/plain',
'Content-Length': '6',
'Etag': md5hex('body%02d' % i)},
'body%02d' % i)
manifest_json = json.dumps([{'name': '/gettest/obj%d' % i,
'hash': md5hex('body%2d' % i),
'content_type': 'text/plain',
'bytes': '6'}])
self.app.register(
'GET', '/v1/AUTH_test/gettest/man%d' % i,
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': 'man%d' % i},
manifest_json)
self.app.register(
'HEAD', '/v1/AUTH_test/gettest/obj%d' % i,
swob.HTTPOk, {'Content-Length': '6',
'Etag': md5hex('body%2d' % i)},
None)
for i in range(9, 0, -1):
manifest_data = [
{'name': '/gettest/man%d' % (i + 1),
'hash': 'man%d' % (i + 1),
'sub_slo': True,
'bytes': len(manifest_json),
'content_type':
'application/json;swift_bytes=%d' % ((10 - i) * 6)},
{'name': '/gettest/obj%d' % i,
'hash': md5hex('body%02d' % i),
'bytes': '6',
'content_type': 'text/plain'}]
manifest_json = json.dumps(manifest_data)
self.app.register(
'GET', '/v1/AUTH_test/gettest/man%d' % i,
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': 'man%d' % i},
manifest_json)
req = Request.blank(
'/v1/AUTH_test/gettest/man1',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, ('body10body09body08body07body06' +
'body05body04body03body02body01'))
self.assertEqual(self.app.call_count, 20)
def test_sub_slo_recursion_limit(self):
# man1 points to man2 and obj1, man2 points to man3 and obj2...
for i in range(12):
self.app.register('GET', '/v1/AUTH_test/gettest/obj%d' % i,
swob.HTTPOk,
{'Content-Type': 'text/plain',
'Content-Length': '6',
'Etag': md5hex('body%02d' % i)}, 'body%02d' % i)
manifest_json = json.dumps([{'name': '/gettest/obj%d' % i,
'hash': md5hex('body%2d' % i),
'content_type': 'text/plain',
'bytes': '6'}])
self.app.register(
'GET', '/v1/AUTH_test/gettest/man%d' % i,
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': 'man%d' % i},
manifest_json)
self.app.register(
'HEAD', '/v1/AUTH_test/gettest/obj%d' % i,
swob.HTTPOk, {'Content-Length': '6',
'Etag': md5hex('body%2d' % i)},
None)
for i in range(11, 0, -1):
manifest_data = [
{'name': '/gettest/man%d' % (i + 1),
'hash': 'man%d' % (i + 1),
'sub_slo': True,
'bytes': len(manifest_json),
'content_type':
'application/json;swift_bytes=%d' % ((12 - i) * 6)},
{'name': '/gettest/obj%d' % i,
'hash': md5hex('body%02d' % i),
'bytes': '6',
'content_type': 'text/plain'}]
manifest_json = json.dumps(manifest_data)
self.app.register('GET', '/v1/AUTH_test/gettest/man%d' % i,
swob.HTTPOk,
{'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': 'man%d' % i},
manifest_json)
req = Request.blank(
'/v1/AUTH_test/gettest/man1',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '409 Conflict')
self.assertEqual(self.app.call_count, 10)
error_lines = self.slo.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
self.assertTrue(error_lines[0].startswith(
'ERROR: An error occurred while retrieving segments'))
def test_get_with_if_modified_since(self):
# It's important not to pass the If-[Un]Modified-Since header to the
# proxy for segment or submanifest GET requests, as it may result in
# 304 Not Modified responses, and those don't contain any useful data.
req = swob.Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': 'Wed, 12 Feb 2014 22:24:52 GMT',
'If-Unmodified-Since': 'Thu, 13 Feb 2014 23:25:53 GMT'})
status, headers, body, exc = self.call_slo(req, expect_exception=True)
for _, _, hdrs in self.app.calls_with_headers[1:]:
self.assertFalse('If-Modified-Since' in hdrs)
self.assertFalse('If-Unmodified-Since' in hdrs)
def test_error_fetching_segment(self):
self.app.register('GET', '/v1/AUTH_test/gettest/c_15',
swob.HTTPUnauthorized, {}, None)
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_slo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
self.assertIsInstance(exc, SegmentError)
self.assertEqual(status, '200 OK')
self.assertEqual(self.app.calls, [
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
# This one has the error, and so is the last one we fetch.
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get')])
def test_error_fetching_submanifest(self):
self.app.register('GET', '/v1/AUTH_test/gettest/manifest-bc',
swob.HTTPUnauthorized, {}, None)
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_slo(req, expect_exception=True)
self.assertIsInstance(exc, ListingIterError)
self.assertEqual("200 OK", status)
self.assertEqual("aaaaa", body)
self.assertEqual(self.app.calls, [
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
# This one has the error, and so is the last one we fetch.
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
# But we were looking ahead to see if we could combine ranges,
# so we still get the first segment out
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get')])
def test_error_fetching_first_segment_submanifest(self):
# This differs from the normal submanifest error because this one
# happens before we've actually sent any response body.
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-a',
swob.HTTPForbidden, {}, None)
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-manifest-a',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/gettest/manifest-a', 'sub_slo': True,
'content_type': 'application/json;swift_bytes=5',
'hash': 'manifest-a',
'bytes': '12345'}]))
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-manifest-a',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
self.assertEqual('409 Conflict', status)
error_lines = self.slo.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
self.assertTrue(error_lines[0].startswith(
'ERROR: An error occurred while retrieving segments'))
def test_invalid_json_submanifest(self):
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-bc',
swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=25',
'X-Static-Large-Object': 'true',
'X-Object-Meta-Plant': 'Ficus'},
"[this {isn't (JSON")
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_slo(req, expect_exception=True)
self.assertIsInstance(exc, ListingIterError)
self.assertEqual('200 OK', status)
self.assertEqual(body, 'aaaaa')
def test_mismatched_etag(self):
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-a-b-badetag-c',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/gettest/a_5', 'hash': md5hex('a' * 5),
'content_type': 'text/plain', 'bytes': '5'},
{'name': '/gettest/b_10', 'hash': 'wrong!',
'content_type': 'text/plain', 'bytes': '10'},
{'name': '/gettest/c_15', 'hash': md5hex('c' * 15),
'content_type': 'text/plain', 'bytes': '15'}]))
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-a-b-badetag-c',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_slo(req, expect_exception=True)
self.assertIsInstance(exc, SegmentError)
self.assertEqual('200 OK', status)
self.assertEqual(body, 'aaaaa')
def test_mismatched_size(self):
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-a-b-badsize-c',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/gettest/a_5', 'hash': md5hex('a' * 5),
'content_type': 'text/plain', 'bytes': '5'},
{'name': '/gettest/b_10', 'hash': md5hex('b' * 10),
'content_type': 'text/plain', 'bytes': '999999'},
{'name': '/gettest/c_15', 'hash': md5hex('c' * 15),
'content_type': 'text/plain', 'bytes': '15'}]))
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-a-b-badsize-c',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_slo(req, expect_exception=True)
self.assertIsInstance(exc, SegmentError)
self.assertEqual('200 OK', status)
self.assertEqual(body, 'aaaaa')
def test_first_segment_mismatched_etag(self):
self.app.register('GET', '/v1/AUTH_test/gettest/manifest-badetag',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/gettest/a_5',
'hash': 'wrong!',
'content_type': 'text/plain',
'bytes': '5'}]))
req = Request.blank('/v1/AUTH_test/gettest/manifest-badetag',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
self.assertEqual('409 Conflict', status)
error_lines = self.slo.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
self.assertTrue(error_lines[0].startswith(
'ERROR: An error occurred while retrieving segments'))
def test_first_segment_mismatched_size(self):
self.app.register('GET', '/v1/AUTH_test/gettest/manifest-badsize',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/gettest/a_5',
'hash': md5hex('a' * 5),
'content_type': 'text/plain',
'bytes': '999999'}]))
req = Request.blank('/v1/AUTH_test/gettest/manifest-badsize',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
self.assertEqual('409 Conflict', status)
error_lines = self.slo.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
self.assertTrue(error_lines[0].startswith(
'ERROR: An error occurred while retrieving segments'))
def test_download_takes_too_long(self):
the_time = [time.time()]
def mock_time():
return the_time[0]
# this is just a convenient place to hang a time jump; there's nothing
# special about the choice of is_success().
def mock_is_success(status_int):
the_time[0] += 7 * 3600
return status_int // 100 == 2
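        # Each mocked is_success() call jumps the clock forward 7 hours, so
        # after a handful of segment fetches the elapsed time blows past the
        # middleware's maximum allowed GET time (86400 seconds by default, if
        # memory serves) and the download is cut off with a SegmentError.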
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'})
with patch.object(slo, 'is_success', mock_is_success), \
patch('swift.common.request_helpers.time.time',
mock_time), \
patch('swift.common.request_helpers.is_success',
mock_is_success):
status, headers, body, exc = self.call_slo(
req, expect_exception=True)
self.assertIsInstance(exc, SegmentError)
self.assertEqual(status, '200 OK')
self.assertEqual(self.app.calls, [
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
('GET', '/v1/AUTH_test/gettest/a_5?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/b_10?multipart-manifest=get'),
('GET', '/v1/AUTH_test/gettest/c_15?multipart-manifest=get')])
def test_first_segment_not_exists(self):
self.app.register('GET', '/v1/AUTH_test/gettest/not_exists_obj',
swob.HTTPNotFound, {}, None)
self.app.register('GET', '/v1/AUTH_test/gettest/manifest-not-exists',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/gettest/not_exists_obj',
'hash': md5hex('not_exists_obj'),
'content_type': 'text/plain',
'bytes': '%d' % len('not_exists_obj')
}]))
req = Request.blank('/v1/AUTH_test/gettest/manifest-not-exists',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_slo(req)
self.assertEqual('409 Conflict', status)
error_lines = self.slo.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
self.assertTrue(error_lines[0].startswith(
'ERROR: An error occurred while retrieving segments'))
class TestSloBulkLogger(unittest.TestCase):
def test_reused_logger(self):
slo_mware = slo.filter_factory({})('fake app')
self.assertTrue(slo_mware.logger is slo_mware.bulk_deleter.logger)
class TestSloCopyHook(SloTestCase):
def setUp(self):
super(TestSloCopyHook, self).setUp()
self.app.register(
'GET', '/v1/AUTH_test/c/o', swob.HTTPOk,
{'Content-Length': '3', 'Etag': md5hex("obj")}, "obj")
self.app.register(
'GET', '/v1/AUTH_test/c/man',
swob.HTTPOk, {'Content-Type': 'application/json',
'X-Static-Large-Object': 'true'},
json.dumps([{'name': '/c/o', 'hash': md5hex("obj"),
'bytes': '3'}]))
self.app.register(
'COPY', '/v1/AUTH_test/c/o', swob.HTTPCreated, {})
copy_hook = [None]
        # slip this wrapper in between SLO and the fake app so we can pull the
        # copy hook out of the WSGI environment
def extract_copy_hook(env, sr):
if env['REQUEST_METHOD'] == 'COPY':
copy_hook[0] = env['swift.copy_hook']
return self.app(env, sr)
self.slo = slo.filter_factory({})(extract_copy_hook)
req = Request.blank('/v1/AUTH_test/c/o',
environ={'REQUEST_METHOD': 'COPY'})
self.slo(req.environ, fake_start_response)
self.copy_hook = copy_hook[0]
self.assertTrue(self.copy_hook is not None) # sanity check
def test_copy_hook_passthrough(self):
source_req = Request.blank(
'/v1/AUTH_test/c/o',
environ={'REQUEST_METHOD': 'GET'})
sink_req = Request.blank(
'/v1/AUTH_test/c/o',
environ={'REQUEST_METHOD': 'PUT'})
# no X-Static-Large-Object header, so do nothing
source_resp = Response(request=source_req, status=200)
modified_resp = self.copy_hook(source_req, source_resp, sink_req)
self.assertTrue(modified_resp is source_resp)
def test_copy_hook_manifest(self):
source_req = Request.blank(
'/v1/AUTH_test/c/o',
environ={'REQUEST_METHOD': 'GET'})
sink_req = Request.blank(
'/v1/AUTH_test/c/o',
environ={'REQUEST_METHOD': 'PUT'})
source_resp = Response(request=source_req, status=200,
headers={"X-Static-Large-Object": "true"},
app_iter=[json.dumps([{'name': '/c/o',
'hash': md5hex("obj"),
'bytes': '3'}])])
modified_resp = self.copy_hook(source_req, source_resp, sink_req)
self.assertTrue(modified_resp is not source_resp)
self.assertEqual(modified_resp.etag, md5hex(md5hex("obj")))
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_registered_defaults(self):
mware = slo.filter_factory({})('have to pass in an app')
swift_info = utils.get_swift_info()
self.assertTrue('slo' in swift_info)
self.assertEqual(swift_info['slo'].get('max_manifest_segments'),
mware.max_manifest_segments)
self.assertEqual(swift_info['slo'].get('min_segment_size'),
mware.min_segment_size)
self.assertEqual(swift_info['slo'].get('max_manifest_size'),
mware.max_manifest_size)
if __name__ == '__main__':
unittest.main()
|
daasbank/swift
|
test/unit/common/middleware/test_slo.py
|
Python
|
apache-2.0
| 107,787
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1ReplicaSetStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, available_replicas=None, conditions=None, fully_labeled_replicas=None, observed_generation=None, ready_replicas=None, replicas=None):
"""
V1beta1ReplicaSetStatus - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'available_replicas': 'int',
'conditions': 'list[V1beta1ReplicaSetCondition]',
'fully_labeled_replicas': 'int',
'observed_generation': 'int',
'ready_replicas': 'int',
'replicas': 'int'
}
self.attribute_map = {
'available_replicas': 'availableReplicas',
'conditions': 'conditions',
'fully_labeled_replicas': 'fullyLabeledReplicas',
'observed_generation': 'observedGeneration',
'ready_replicas': 'readyReplicas',
'replicas': 'replicas'
}
self._available_replicas = available_replicas
self._conditions = conditions
self._fully_labeled_replicas = fully_labeled_replicas
self._observed_generation = observed_generation
self._ready_replicas = ready_replicas
self._replicas = replicas
@property
def available_replicas(self):
"""
Gets the available_replicas of this V1beta1ReplicaSetStatus.
The number of available replicas (ready for at least minReadySeconds) for this replica set.
:return: The available_replicas of this V1beta1ReplicaSetStatus.
:rtype: int
"""
return self._available_replicas
@available_replicas.setter
def available_replicas(self, available_replicas):
"""
Sets the available_replicas of this V1beta1ReplicaSetStatus.
The number of available replicas (ready for at least minReadySeconds) for this replica set.
:param available_replicas: The available_replicas of this V1beta1ReplicaSetStatus.
:type: int
"""
self._available_replicas = available_replicas
@property
def conditions(self):
"""
Gets the conditions of this V1beta1ReplicaSetStatus.
Represents the latest available observations of a replica set's current state.
:return: The conditions of this V1beta1ReplicaSetStatus.
:rtype: list[V1beta1ReplicaSetCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""
Sets the conditions of this V1beta1ReplicaSetStatus.
Represents the latest available observations of a replica set's current state.
:param conditions: The conditions of this V1beta1ReplicaSetStatus.
:type: list[V1beta1ReplicaSetCondition]
"""
self._conditions = conditions
@property
def fully_labeled_replicas(self):
"""
Gets the fully_labeled_replicas of this V1beta1ReplicaSetStatus.
The number of pods that have labels matching the labels of the pod template of the replicaset.
:return: The fully_labeled_replicas of this V1beta1ReplicaSetStatus.
:rtype: int
"""
return self._fully_labeled_replicas
@fully_labeled_replicas.setter
def fully_labeled_replicas(self, fully_labeled_replicas):
"""
Sets the fully_labeled_replicas of this V1beta1ReplicaSetStatus.
The number of pods that have labels matching the labels of the pod template of the replicaset.
:param fully_labeled_replicas: The fully_labeled_replicas of this V1beta1ReplicaSetStatus.
:type: int
"""
self._fully_labeled_replicas = fully_labeled_replicas
@property
def observed_generation(self):
"""
Gets the observed_generation of this V1beta1ReplicaSetStatus.
ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
:return: The observed_generation of this V1beta1ReplicaSetStatus.
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""
Sets the observed_generation of this V1beta1ReplicaSetStatus.
ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
:param observed_generation: The observed_generation of this V1beta1ReplicaSetStatus.
:type: int
"""
self._observed_generation = observed_generation
@property
def ready_replicas(self):
"""
Gets the ready_replicas of this V1beta1ReplicaSetStatus.
The number of ready replicas for this replica set.
:return: The ready_replicas of this V1beta1ReplicaSetStatus.
:rtype: int
"""
return self._ready_replicas
@ready_replicas.setter
def ready_replicas(self, ready_replicas):
"""
Sets the ready_replicas of this V1beta1ReplicaSetStatus.
The number of ready replicas for this replica set.
:param ready_replicas: The ready_replicas of this V1beta1ReplicaSetStatus.
:type: int
"""
self._ready_replicas = ready_replicas
@property
def replicas(self):
"""
Gets the replicas of this V1beta1ReplicaSetStatus.
        Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
:return: The replicas of this V1beta1ReplicaSetStatus.
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""
Sets the replicas of this V1beta1ReplicaSetStatus.
        Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
:param replicas: The replicas of this V1beta1ReplicaSetStatus.
:type: int
"""
if replicas is None:
raise ValueError("Invalid value for `replicas`, must not be `None`")
self._replicas = replicas
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1ReplicaSetStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
djkonro/client-python
|
kubernetes/client/models/v1beta1_replica_set_status.py
|
Python
|
apache-2.0
| 8,340
|
#!/usr/bin/env python
#coding=utf-8
foo = 'abc'
def show():
print "foo from imptee:",foo
|
licongyu95/learning_python
|
core_python_programming/cap12/imptee.py
|
Python
|
unlicense
| 96
|
'''
Cyberlocker urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
from lib import jsunpack
class CyberlockerResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "cyberlocker"
domains = ["cyberlocker.ch"]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
r = re.findall('<center><h3>File Not Found</h3></center><br>', html, re.I)
if r:
raise UrlResolver.ResolverError('File Not Found or removed')
if not r:
data = {}
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
data[name] = value
data['method_free'] = 'Wait for 0 seconds'
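            # Emulate pressing the "free download" button: POST the hidden
            # form fields back to the same URL to reach the page that embeds
            # the actual video link.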
html = self.net.http_POST(url, data).content
sPattern = '<script type=(?:"|\')text/javascript(?:"|\')>(eval\('
sPattern += 'function\(p,a,c,k,e,d\)(?!.+player_ads.+).+np_vid.+?)'
sPattern += '\s+?</script>'
r = re.search(sPattern, html, re.DOTALL + re.IGNORECASE)
if r:
sJavascript = r.group(1)
sUnpacked = jsunpack.unpack(sJavascript)
sPattern = '<embed id="np_vid"type="video/divx"src="(.+?)'
sPattern += '"custommode='
r = re.search(sPattern, sUnpacked)
if r:
return r.group(1)
else:
num = re.compile('cyberlocker\|(.+?)\|http').findall(html)
pre = 'http://'+num[0]+'.cyberlocker.ch:182/d/'
preb = re.compile('image\|(.+?)\|video\|(.+?)\|').findall(html)
for ext, link in preb:
r = pre+link+'/video.'+ext
return r
def get_url(self, host, media_id):
return 'http://cyberlocker.ch/%s' % media_id
    def get_host_and_id(self, url):
        r = re.search('//(.+?)/([0-9a-zA-Z]+)', url)
        if r:
            return r.groups()
        else:
            return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
        return (re.match('http://(www\.)?cyberlocker\.ch/' +
                         '[0-9A-Za-z]+', url) or
'cyberlocker' in host)
|
xmbcrios/xmbcrios.repository
|
script.module.urlresolver/lib/urlresolver/plugins/cyberlocker.py
|
Python
|
gpl-2.0
| 3,313
|
# coding=utf-8
"""QGIS Expressions which are available in the QGIS GUI interface."""
from qgis.core import (
qgsfunction,
QgsMapLayerRegistry,
QgsExpressionContextUtils,
)
import datetime
from safe.definitions.provenance import provenance_layer_analysis_impacted_id
from safe.utilities.i18n import tr
from safe.utilities.rounding import denomination, round_affected_number
from safe.utilities.utilities import generate_expression_help
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
##
# For QGIS < 2.18.13 and QGIS < 2.14.19, docstrings are used in the QGIS GUI
# in the Expression dialog and also in the InaSAFE Help dialog.
#
# For QGIS >= 2.18.13, QGIS >= 2.14.19 and QGIS 3, the translated variable will
# be used in QGIS.
# help_text is used for QGIS 2.18 and 2.14
# helpText is used for QGIS 3 : https://github.com/qgis/QGIS/pull/5059
##
description = tr('Retrieve a value from a field in the impact analysis layer.')
examples = {
'inasafe_impact_analysis_field_value(\'total_not_exposed\')': 3
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group='InaSAFE', usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def inasafe_impact_analysis_field_value(field, feature, parent):
"""Retrieve a value from a field in the impact analysis layer.
e.g. inasafe_impact_analysis_field_value('total_not_exposed') -> 3
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope()
registry = QgsMapLayerRegistry.instance()
key = provenance_layer_analysis_impacted_id['provenance_key']
if not project_context_scope.hasVariable(key):
return None
layer = registry.mapLayer(project_context_scope.variable(key))
if not layer:
return None
index = layer.fieldNameIndex(field)
if index < 0:
return None
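    # The impact analysis layer is expected to hold a single summary feature,
    # so grab the first one (QGIS 2 / Python 2 iterator API).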
feature = layer.getFeatures().next()
return feature[index]
description = tr(
'Given a number, it will return the place value name. It needs to be used '
'with inasafe_place_value_coefficient.')
examples = {
'inasafe_place_value_name(10)': tr('Ten'),
'inasafe_place_value_name(1700)': tr('Thousand')
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group='InaSAFE', usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def inasafe_place_value_name(number, feature, parent):
"""Given a number, it will return the place value name.
For instance:
* inasafe_place_value_name(10) -> Ten \n
* inasafe_place_value_name(1700) -> Thousand
It needs to be used with inasafe_place_value_coefficient.
"""
_ = feature, parent # NOQA
if number is None:
return None
rounded_number = round_affected_number(
number,
enable_rounding=True,
use_population_rounding=True
)
value, unit = denomination(rounded_number, 1000)
if not unit:
return None
else:
return unit['name']
description = tr(
'Given a number, it will return the coefficient of the place value name. '
'It needs to be used with inasafe_number_denomination_unit.')
examples = {
'inasafe_place_value_coefficient(10)': 1,
'inasafe_place_value_coefficient(1700)': 1.7
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group='InaSAFE', usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def inasafe_place_value_coefficient(number, feature, parent):
"""Given a number, it will return the coefficient of the place value name.
For instance:
* inasafe_place_value_coefficient(10) -> 1
* inasafe_place_value_coefficient(1700) -> 1.7
It needs to be used with inasafe_number_denomination_unit.
"""
_ = feature, parent # NOQA
if number >= 0:
rounded_number = round_affected_number(
number,
enable_rounding=True,
use_population_rounding=True
)
value, unit = denomination(rounded_number, 1000)
return str(round(value, 1))
else:
return None
description = tr(
'Given a number and total, it will return the percentage of the number to '
'the total.')
examples = {
'inasafe_place_value_percentage(inasafe_impact_analysis_field_value('
'\'female_displaced\'), '
'inasafe_impact_analysis_field_value(\'displaced\'))': tr(
'will calculate the percentage of female displaced count to total '
'displaced count.'),
'inasafe_place_value_percentage(50,100)': '50.0%'
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group='InaSAFE', usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def inasafe_place_value_percentage(number, total, feature, parent):
"""Given a number and total, it will return the percentage of the number
to the total.
For instance:
* inasafe_place_value_percentage(inasafe_impact_analysis_field_value(
'female_displaced'), inasafe_impact_analysis_field_value('displaced'))
-> will calculate the percentage of female displaced count to total
displaced count.
    It can also be used with plain numbers (int, float).
"""
_ = feature, parent # NOQA
if number < 0:
return None
percentage_format = '{percentage}%'
percentage = round((float(number) / float(total)) * 100, 1)
return percentage_format.format(percentage=percentage)
description = tr(
'Given an InaSAFE analysis time, it will convert it to a date with '
'year-month-date format.')
examples = {
'beautify_date( @start_datetime )': tr(
'will convert datetime provided by qgis_variable.')
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group='InaSAFE', usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def beautify_date(inasafe_time, feature, parent):
"""Given an InaSAFE analysis time, it will convert it to a date with
year-month-date format.
For instance:
* beautify_date( @start_datetime ) -> will convert datetime provided by
qgis_variable.
"""
_ = feature, parent # NOQA
datetime_object = datetime.datetime.strptime(
inasafe_time, '%Y-%m-%dT%H:%M:%S.%f')
date = datetime_object.strftime('%Y-%m-%d')
return date
description = tr(
'Given an InaSAFE analysis time, it will convert it to a time with '
'hour:minute format.')
examples = {
    'beautify_time( @start_datetime )': tr(
'will convert datetime provided by qgis_variable.')
}
help_message = generate_expression_help(description, examples)
@qgsfunction(
args='auto', group='InaSAFE', usesGeometry=False, referencedColumns=[],
help_text=help_message.to_html(), helpText=help_message.to_html())
def beautify_time(inasafe_time, feature, parent):
"""Given an InaSAFE analysis time, it will convert it to a time with
hour:minute format.
For instance:
    * beautify_time( @start_datetime ) -> will convert datetime provided by
qgis_variable.
"""
_ = feature, parent # NOQA
datetime_object = datetime.datetime.strptime(
inasafe_time, '%Y-%m-%dT%H:%M:%S.%f')
time = datetime_object.strftime('%H:%M')
return time
|
myarjunar/inasafe
|
safe/gis/generic_expressions.py
|
Python
|
gpl-3.0
| 7,673
|
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import mul
from logbook import Logger
import numpy as np
from numpy import float64, int64, nan
import pandas as pd
from pandas import isnull
from pandas.tslib import normalize_date
from six import iteritems
from six.moves import reduce
from zipline.assets import (
Asset,
AssetConvertible,
Equity,
Future,
PricingDataAssociable,
)
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.data.continuous_future_reader import (
ContinuousFutureSessionBarReader,
ContinuousFutureMinuteBarReader
)
from zipline.assets.roll_finder import (
CalendarRollFinder,
VolumeRollFinder
)
from zipline.data.dispatch_bar_reader import (
AssetDispatchMinuteBarReader,
AssetDispatchSessionBarReader
)
from zipline.data.resample import (
DailyHistoryAggregator,
ReindexMinuteBarReader,
ReindexSessionBarReader,
)
from zipline.data.history_loader import (
DailyHistoryLoader,
MinuteHistoryLoader,
)
from zipline.data.us_equity_pricing import NoDataOnDate
from zipline.utils.math_utils import (
nansum,
nanmean,
nanstd
)
from zipline.utils.memoize import remember_last, weak_lru_cache
from zipline.utils.pandas_utils import timedelta_to_integral_minutes
from zipline.errors import (
NoTradeDataAvailableTooEarly,
NoTradeDataAvailableTooLate,
HistoryWindowStartsBeforeData,
)
log = Logger('DataPortal')
BASE_FIELDS = frozenset([
"open",
"high",
"low",
"close",
"volume",
"price",
"contract",
"sid",
"last_traded",
])
OHLCV_FIELDS = frozenset([
"open", "high", "low", "close", "volume"
])
OHLCVP_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price"
])
HISTORY_FREQUENCIES = set(["1m", "1d"])
DEFAULT_MINUTE_HISTORY_PREFETCH = 1560
DEFAULT_DAILY_HISTORY_PREFETCH = 40
_DEF_M_HIST_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH
_DEF_D_HIST_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH
class DataPortal(object):
"""Interface to all of the data that a zipline simulation needs.
This is used by the simulation runner to answer questions about the data,
    like getting the prices of assets on a given day or servicing history
    calls.
Parameters
----------
asset_finder : zipline.assets.assets.AssetFinder
The AssetFinder instance used to resolve assets.
    trading_calendar : zipline.utils.calendar.exchange_calendar.TradingCalendar
The calendar instance used to provide minute->session information.
first_trading_day : pd.Timestamp
The first trading day for the simulation.
equity_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for equities. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
equity_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for equities. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
future_daily_reader : BcolzDailyBarReader, optional
        The daily bar reader for futures. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
future_minute_reader : BcolzFutureMinuteBarReader, optional
The minute bar reader for futures. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
adjustment_reader : SQLiteAdjustmentWriter, optional
The adjustment reader. This is used to apply splits, dividends, and
other adjustment data to the raw data from the readers.
last_available_session : pd.Timestamp, optional
The last session to make available in session-level data.
last_available_minute : pd.Timestamp, optional
The last minute to make available in minute-level data.
"""
def __init__(self,
asset_finder,
trading_calendar,
first_trading_day,
equity_daily_reader=None,
equity_minute_reader=None,
future_daily_reader=None,
future_minute_reader=None,
adjustment_reader=None,
last_available_session=None,
last_available_minute=None,
minute_history_prefetch_length=_DEF_M_HIST_PREFETCH,
daily_history_prefetch_length=_DEF_D_HIST_PREFETCH):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
self._adjustment_reader = adjustment_reader
# caches of sid -> adjustment list
self._splits_dict = {}
self._mergers_dict = {}
self._dividends_dict = {}
# Cache of sid -> the first trading day of an asset.
self._asset_start_dates = {}
self._asset_end_dates = {}
# Handle extra sources, like Fetcher.
self._augmented_sources_map = {}
self._extra_source_df = None
self._first_available_session = first_trading_day
if last_available_session:
self._last_available_session = last_available_session
else:
# Infer the last session from the provided readers.
last_sessions = [
reader.last_available_dt
for reader in [equity_daily_reader, future_daily_reader]
if reader is not None
]
if last_sessions:
self._last_available_session = min(last_sessions)
else:
self._last_available_session = None
if last_available_minute:
self._last_available_minute = last_available_minute
else:
# Infer the last minute from the provided readers.
last_minutes = [
reader.last_available_dt
for reader in [equity_minute_reader, future_minute_reader]
if reader is not None
]
if last_minutes:
self._last_available_minute = min(last_minutes)
else:
self._last_available_minute = None
aligned_equity_minute_reader = self._ensure_reader_aligned(
equity_minute_reader)
aligned_equity_session_reader = self._ensure_reader_aligned(
equity_daily_reader)
aligned_future_minute_reader = self._ensure_reader_aligned(
future_minute_reader)
aligned_future_session_reader = self._ensure_reader_aligned(
future_daily_reader)
self._roll_finders = {
'calendar': CalendarRollFinder(self.trading_calendar,
self.asset_finder),
}
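        # The 'calendar' roll finder is always available; a volume-based roll
        # finder is added further down only if a futures session reader was
        # supplied, since it needs historical volume data to work from.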
aligned_minute_readers = {}
aligned_session_readers = {}
if aligned_equity_minute_reader is not None:
aligned_minute_readers[Equity] = aligned_equity_minute_reader
if aligned_equity_session_reader is not None:
aligned_session_readers[Equity] = aligned_equity_session_reader
if aligned_future_minute_reader is not None:
aligned_minute_readers[Future] = aligned_future_minute_reader
aligned_minute_readers[ContinuousFuture] = \
ContinuousFutureMinuteBarReader(
aligned_future_minute_reader,
self._roll_finders,
)
if aligned_future_session_reader is not None:
aligned_session_readers[Future] = aligned_future_session_reader
self._roll_finders['volume'] = VolumeRollFinder(
self.trading_calendar,
self.asset_finder,
aligned_future_session_reader,
)
aligned_session_readers[ContinuousFuture] = \
ContinuousFutureSessionBarReader(
aligned_future_session_reader,
self._roll_finders,
)
_dispatch_minute_reader = AssetDispatchMinuteBarReader(
self.trading_calendar,
self.asset_finder,
aligned_minute_readers,
self._last_available_minute,
)
_dispatch_session_reader = AssetDispatchSessionBarReader(
self.trading_calendar,
self.asset_finder,
aligned_session_readers,
self._last_available_session,
)
self._pricing_readers = {
'minute': _dispatch_minute_reader,
'daily': _dispatch_session_reader,
}
self._daily_aggregator = DailyHistoryAggregator(
self.trading_calendar.schedule.market_open,
_dispatch_minute_reader,
self.trading_calendar
)
self._history_loader = DailyHistoryLoader(
self.trading_calendar,
_dispatch_session_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=daily_history_prefetch_length,
)
self._minute_history_loader = MinuteHistoryLoader(
self.trading_calendar,
_dispatch_minute_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=minute_history_prefetch_length,
)
self._first_trading_day = first_trading_day
# Get the first trading minute
self._first_trading_minute, _ = (
self.trading_calendar.open_and_close_for_session(
self._first_trading_day
)
if self._first_trading_day is not None else (None, None)
)
# Store the locs of the first day and first minute
self._first_trading_day_loc = (
self.trading_calendar.all_sessions.get_loc(self._first_trading_day)
if self._first_trading_day is not None else None
)
def _ensure_reader_aligned(self, reader):
if reader is None:
return
if reader.trading_calendar.name == self.trading_calendar.name:
return reader
elif reader.data_frequency == 'minute':
return ReindexMinuteBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session
)
elif reader.data_frequency == 'session':
return ReindexSessionBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session
)
def _reindex_extra_source(self, df, source_date_index):
return df.reindex(index=source_date_index, method='ffill')
def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.trading_calendar.sessions_in_range(
sim_params.start_session,
sim_params.end_session
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
        # any given time. Gets overwritten every time there's a new fetcher
# call
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Before reindexing, save the earliest and latest dates
earliest_date = df.index[0]
latest_date = df.index[-1]
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
if not isinstance(identifier, Asset):
# for fake assets we need to store a start/end date
self._asset_start_dates[identifier] = earliest_date
self._asset_end_dates[identifier] = latest_date
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df
def _get_pricing_reader(self, data_frequency):
return self._pricing_readers[data_frequency]
def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
If there is a trade on the dt, the answer is dt provided.
"""
return self._get_pricing_reader(data_frequency).get_last_traded_dt(
asset, dt)
@staticmethod
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and
(isinstance(asset, (Asset, ContinuousFuture))))
def _get_fetcher_value(self, asset, field, dt):
day = normalize_date(dt)
try:
return \
self._augmented_sources_map[field][asset].loc[day, field]
except KeyError:
return np.NaN
def get_spot_value(self, assets, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.
Parameters
----------
assets : Asset, ContinuousFuture, or iterable of same.
The asset or assets whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
            The spot value of ``field`` for ``asset``. The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
            ``field`` is 'volume' the value will be an int. If the ``field`` is
'last_traded' the value will be a Timestamp.
"""
assets_is_scalar = False
if isinstance(assets, (AssetConvertible, PricingDataAssociable)):
assets_is_scalar = True
else:
# If 'assets' was not one of the expected types then it should be
# an iterable.
try:
iter(assets)
except TypeError:
raise TypeError(
"Unexpected 'assets' value of type {}."
.format(type(assets))
)
session_label = self.trading_calendar.minute_to_session_label(dt)
def get_single_asset_value(asset):
if self._is_extra_source(
asset, field, self._augmented_sources_map):
return self._get_fetcher_value(asset, field, dt)
if field not in BASE_FIELDS:
raise KeyError("Invalid column: " + str(field))
if dt < asset.start_date or \
(data_frequency == "daily" and
session_label > asset.end_date) or \
(data_frequency == "minute" and
session_label > asset.end_date):
if field == "volume":
return 0
elif field == "contract":
return None
elif field != "last_traded":
return np.NaN
if data_frequency == "daily":
if field == "contract":
return self._get_current_contract(asset, session_label)
else:
return self._get_daily_spot_value(
asset, field, session_label,
)
else:
if field == "last_traded":
return self.get_last_traded_dt(asset, dt, 'minute')
elif field == "price":
return self._get_minute_spot_value(
asset, "close", dt, ffill=True,
)
elif field == "contract":
return self._get_current_contract(asset, dt)
else:
return self._get_minute_spot_value(asset, field, dt)
if assets_is_scalar:
return get_single_asset_value(assets)
else:
return list(map(get_single_asset_value, assets))
def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
            The timestamp from which the data is being viewed back.
Returns
-------
adjustments : list[Adjustment]
The adjustments to that field.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
def split_adj_factor(x):
return x if field != 'volume' else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset
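    # Worked example of the ratio composition above (added commentary, not
    # part of the original source). Suppose a 2:1 split (ratio 0.5) and a
    # merger adjustment of 0.98 both fall between ``dt`` and
    # ``perspective_dt``. For a price field the combined ratio is
    # reduce(mul, [0.5, 0.98], 1.0) == 0.49. For 'volume' only the split
    # factor applies, and it is inverted by ``split_adj_factor`` to
    # 1.0 / 0.5 == 2.0, scaling share counts up rather than down.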
def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
            the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
            # if this is a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value
def _get_minute_spot_value(self, asset, column, dt, ffill=False):
reader = self._get_pricing_reader('minute')
if ffill:
# If forward filling, we want the last minute with values (up to
# and including dt).
query_dt = reader.get_last_traded_dt(asset, dt)
if pd.isnull(query_dt):
# no last traded dt, bail
if column == 'volume':
return 0
else:
return np.nan
else:
# If not forward filling, we just want dt.
query_dt = dt
try:
result = reader.get_value(asset.sid, query_dt, column)
except NoDataOnDate:
if column == 'volume':
return 0
else:
return np.nan
if not ffill or (dt == query_dt) or (dt.date() == query_dt.date()):
return result
# the value we found came from a different day, so we have to adjust
# the data if there are any adjustments on that day barrier
return self.get_adjusted_value(
asset, column, query_dt,
dt, "minute", spot_value=result
)
def _get_daily_spot_value(self, asset, column, dt):
reader = self._get_pricing_reader('daily')
if column == "last_traded":
last_traded_dt = reader.get_last_traded_dt(asset, dt)
if isnull(last_traded_dt):
return pd.NaT
else:
return last_traded_dt
elif column in OHLCV_FIELDS:
# don't forward fill
try:
return reader.get_value(asset, dt, column)
except NoDataOnDate:
return np.nan
elif column == "price":
found_dt = dt
while True:
try:
value = reader.get_value(
asset, found_dt, "close"
)
if not isnull(value):
if dt == found_dt:
return value
else:
# adjust if needed
return self.get_adjusted_value(
asset, column, found_dt, dt, "minute",
spot_value=value
)
else:
found_dt -= self.trading_calendar.day
except NoDataOnDate:
return np.nan
@remember_last
def _get_days_for_window(self, end_date, bar_count):
tds = self.trading_calendar.all_sessions
end_loc = tds.get_loc(end_date)
start_loc = end_loc - bar_count + 1
if start_loc < self._first_trading_day_loc:
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=tds[
self._first_trading_day_loc + bar_count
].date(),
)
return tds[start_loc:end_loc + 1]
def _get_history_daily_window(self,
assets,
end_dt,
bar_count,
field_to_use,
data_frequency):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
session = self.trading_calendar.minute_to_session_label(end_dt)
days_for_window = self._get_days_for_window(session, bar_count)
if len(assets) == 0:
return pd.DataFrame(None,
index=days_for_window,
columns=None)
data = self._get_history_daily_window_data(
assets, days_for_window, end_dt, field_to_use, data_frequency
)
return pd.DataFrame(
data,
index=days_for_window,
columns=assets
)
def _get_history_daily_window_data(self,
assets,
days_for_window,
end_dt,
field_to_use,
data_frequency):
if data_frequency == 'daily':
            # two cases where we use daily data for the whole range:
            # 1) the history window ends at midnight utc.
            # 2) the last desired day of the window is after the
            #    last trading day.
return self._get_daily_window_data(
assets,
field_to_use,
days_for_window,
extra_slot=False
)
else:
# minute mode, requesting '1d'
daily_data = self._get_daily_window_data(
assets,
field_to_use,
days_for_window[0:-1]
)
if field_to_use == 'open':
minute_value = self._daily_aggregator.opens(
assets, end_dt)
elif field_to_use == 'high':
minute_value = self._daily_aggregator.highs(
assets, end_dt)
elif field_to_use == 'low':
minute_value = self._daily_aggregator.lows(
assets, end_dt)
elif field_to_use == 'close':
minute_value = self._daily_aggregator.closes(
assets, end_dt)
elif field_to_use == 'volume':
minute_value = self._daily_aggregator.volumes(
assets, end_dt)
elif field_to_use == 'sid':
minute_value = [
int(self._get_current_contract(asset, end_dt))
for asset in assets]
# append the partial day.
daily_data[-1] = minute_value
return daily_data
def _handle_minute_history_out_of_bounds(self, bar_count):
cal = self.trading_calendar
first_trading_minute_loc = (
cal.all_minutes.get_loc(
self._first_trading_minute
)
if self._first_trading_minute is not None else None
)
suggested_start_day = cal.minute_to_session_label(
cal.all_minutes[
first_trading_minute_loc + bar_count
] + cal.day
)
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=suggested_start_day.date(),
)
def _get_history_minute_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
try:
minutes_for_window = self.trading_calendar.minutes_window(
end_dt, -bar_count
)
except KeyError:
self._handle_minute_history_out_of_bounds(bar_count)
if minutes_for_window[0] < self._first_trading_minute:
self._handle_minute_history_out_of_bounds(bar_count)
asset_minute_data = self._get_minute_window_data(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(
asset_minute_data,
index=minutes_for_window,
columns=assets
)
def get_history_window(self,
assets,
end_dt,
bar_count,
frequency,
field,
data_frequency,
ffill=True):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
            The assets whose data is desired.
        end_dt: pd.Timestamp
            The timestamp of the last bar of the returned window.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
data_frequency: string
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
"""
if field not in OHLCVP_FIELDS and field != 'sid':
raise ValueError("Invalid field: {0}".format(field))
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(assets, end_dt, bar_count,
"close", data_frequency)
else:
df = self._get_history_daily_window(assets, end_dt, bar_count,
field, data_frequency)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count,
field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if field == "price":
if frequency == "1m":
data_frequency = 'minute'
elif frequency == "1d":
data_frequency = 'daily'
else:
raise Exception(
"Only 1d and 1m are supported for forward-filling.")
assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0]
history_start, history_end = df.index[[0, -1]]
initial_values = []
for asset in df.columns[assets_with_leading_nan]:
last_traded = self.get_last_traded_dt(
asset,
history_start,
data_frequency,
)
if isnull(last_traded):
initial_values.append(nan)
else:
initial_values.append(
self.get_adjusted_value(
asset,
field,
dt=last_traded,
perspective_dt=history_end,
data_frequency=data_frequency,
)
)
# Set leading values for assets that were missing data, then ffill.
df.ix[0, assets_with_leading_nan] = np.array(
initial_values,
dtype=np.float64
)
df.fillna(method='ffill', inplace=True)
# forward-filling will incorrectly produce values after the end of
# an asset's lifetime, so write NaNs back over the asset's
# end_date.
normed_index = df.index.normalize()
for asset in df.columns:
if history_end >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
df.loc[normed_index > asset.end_date, asset] = nan
return df
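    # Example call (added commentary, not part of the original source),
    # assuming a constructed DataPortal ``portal``, an assets list and a
    # minute-aligned ``end_dt`` -- all hypothetical names:
    #
    #     window = portal.get_history_window(
    #         assets, end_dt, bar_count=30, frequency='1d',
    #         field='price', data_frequency='minute', ffill=True,
    #     )
    #
    # The result is a DataFrame indexed by session ('1d') or minute ('1m'),
    # with one column per asset.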
def _get_minute_window_data(self, assets, field, minutes_for_window):
"""
        Internal method that gets a window of adjusted minute data for the
        given assets and specified date range. Used to support the history
        API method for minute bars.
Missing bars are filled with NaN.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
            The specific field to return. "open", "high", "close", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
return self._minute_history_loader.history(assets,
minutes_for_window,
field,
False)
def _get_daily_window_data(self,
assets,
field,
days_in_window,
extra_slot=True):
"""
        Internal method that gets a window of adjusted daily data for the
        given assets and specified date range. Used to support the history
        API method for daily bars.
Parameters
----------
        assets : iterable[Asset]
            The assets whose data is desired.
        field: string
            The specific field to return. "open", "high", "close", etc.
        days_in_window: pandas.DatetimeIndex
            The days over which data is desired, in chronological order.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
dtype = float64 if field != 'sid' else int64
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype)
else:
return_array = np.zeros((bar_count, len(assets)), dtype=dtype)
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array[:] = np.NAN
if bar_count != 0:
data = self._history_loader.history(assets,
days_in_window,
field,
extra_slot)
if extra_slot:
return_array[:len(return_array) - 1, :] = data
else:
return_array[:len(data)] = data
return return_array
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
            A list of (pd.Timestamp, adjustment value) pairs, earliest first
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[sid] = self._adjustment_reader.\
get_adjustments_for_sid(table_name, sid)
return adjustments
def _check_is_currently_alive(self, asset, dt):
sid = int(asset)
if sid not in self._asset_start_dates:
self._get_asset_start_date(asset)
start_date = self._asset_start_dates[sid]
if self._asset_start_dates[sid] > dt:
raise NoTradeDataAvailableTooEarly(
sid=sid,
dt=normalize_date(dt),
start_dt=start_date
)
end_date = self._asset_end_dates[sid]
if self._asset_end_dates[sid] < dt:
raise NoTradeDataAvailableTooLate(
sid=sid,
dt=normalize_date(dt),
end_dt=end_date
)
def _get_asset_start_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_start_dates[asset]
def _get_asset_end_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_end_dates[asset]
def _ensure_asset_dates(self, asset):
sid = int(asset)
if sid not in self._asset_start_dates:
if self._first_trading_day is not None:
self._asset_start_dates[sid] = \
max(asset.start_date, self._first_trading_day)
else:
self._asset_start_dates[sid] = asset.start_date
self._asset_end_dates[sid] = asset.end_date
def get_splits(self, assets, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
assets : container
Assets for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(asset, float)]
            List of splits, where each split is an (asset, ratio) tuple.
"""
if self._adjustment_reader is None or not assets:
return []
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
(seconds,)).fetchall()
splits = [split for split in splits if split[0] in assets]
splits = [(self.asset_finder.retrieve_asset(split[0]), split[1])
for split in splits]
return splits
def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
        list: A list of dicts with all relevant attributes populated.
        The ex_date, pay_date and record_date fields are converted to
        pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9
dividends = self._adjustment_reader.conn.execute(
"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
"ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append({
"declared_date": dividend_tuple[1],
"ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
"payment_sid": dividend_tuple[4],
"ratio": dividend_tuple[5],
"record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
"sid": dividend_tuple[7]
})
return dividend_info
def contains(self, asset, field):
return field in BASE_FIELDS or \
(field in self._augmented_sources_map and
asset in self._augmented_sources_map[field])
def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else []
# cache size picked somewhat loosely. this code exists purely to
# handle deprecated API.
@weak_lru_cache(20)
def _get_minute_count_for_transform(self, ending_minute, days_count):
# This function works in three steps.
# Step 1. Count the minutes from ``ending_minute`` to the start of its
# session.
# Step 2. Count the minutes from the prior ``days_count - 1`` sessions.
# Step 3. Return the sum of the results from steps (1) and (2).
# Example (NYSE Calendar)
        # ending_minute = 2015-12-28 9:40 AM US/Eastern
# days_count = 3
# Step 1. Calculate that there are 10 minutes in the ending session.
# Step 2. Calculate that there are 390 + 210 = 600 minutes in the prior
# two sessions. (Prior sessions are 2015-12-23 and 2015-12-24.)
# 2015-12-24 is a half day.
# Step 3. Return 600 + 10 = 610.
cal = self.trading_calendar
ending_session = cal.minute_to_session_label(
ending_minute,
direction="none", # It's an error to pass a non-trading minute.
)
# Assume that calendar days are always full of contiguous minutes,
# which means we can just take 1 + (number of minutes between the last
# minute and the start of the session). We add one so that we include
# the ending minute in the total.
ending_session_minute_count = timedelta_to_integral_minutes(
ending_minute - cal.open_and_close_for_session(ending_session)[0]
) + 1
if days_count == 1:
# We just need sessions for the active day.
return ending_session_minute_count
# XXX: We're subtracting 2 here to account for two offsets:
# 1. We only want ``days_count - 1`` sessions, since we've already
# accounted for the ending session above.
# 2. The API of ``sessions_window`` is to return one more session than
# the requested number. I don't think any consumers actually want
# that behavior, but it's the tested and documented behavior right
# now, so we have to request one less session than we actually want.
completed_sessions = cal.sessions_window(
cal.previous_session_label(ending_session),
2 - days_count,
)
completed_sessions_minute_count = (
self.trading_calendar.minutes_count_for_sessions_in_range(
completed_sessions[0],
completed_sessions[-1]
)
)
return ending_session_minute_count + completed_sessions_minute_count
def get_simple_transform(self, asset, transform_name, dt, data_frequency,
bars=None):
if transform_name == "returns":
# returns is always calculated over the last 2 days, regardless
# of the simulation's data frequency.
hst = self.get_history_window(
[asset],
dt,
2,
"1d",
"price",
data_frequency,
ffill=True,
)[asset]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
if bars is None:
raise ValueError("bars cannot be None!")
if data_frequency == "minute":
freq_str = "1m"
calculated_bar_count = int(self._get_minute_count_for_transform(
dt, bars
))
else:
freq_str = "1d"
calculated_bar_count = bars
price_arr = self.get_history_window(
[asset],
dt,
calculated_bar_count,
freq_str,
"price",
data_frequency,
ffill=True,
)[asset]
if transform_name == "mavg":
return nanmean(price_arr)
elif transform_name == "stddev":
return nanstd(price_arr, ddof=1)
elif transform_name == "vwap":
volume_arr = self.get_history_window(
[asset],
dt,
calculated_bar_count,
freq_str,
"volume",
data_frequency,
ffill=True,
)[asset]
vol_sum = nansum(volume_arr)
try:
ret = nansum(price_arr * volume_arr) / vol_sum
except ZeroDivisionError:
ret = np.nan
return ret
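    # Worked example of the 'vwap' branch above (added commentary, not part
    # of the original source): with prices [10.0, 11.0, 12.0] and volumes
    # [100, 200, 100],
    #     nansum(price_arr * volume_arr) / nansum(volume_arr)
    #         = (10*100 + 11*200 + 12*100) / 400 = 4400 / 400 = 11.0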
def get_current_future_chain(self, continuous_future, dt):
"""
Retrieves the future chain for the contract at the given `dt` according
the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on.
"""
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
continuous_future.root_symbol, session,
continuous_future.offset)
oc = self.asset_finder.get_ordered_contracts(
continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain)
def _get_current_contract(self, continuous_future, dt):
rf = self._roll_finders[continuous_future.roll_style]
contract_sid = rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset)
if contract_sid is None:
return None
return self.asset_finder.retrieve_asset(contract_sid)
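# --- Illustrative sketch (added commentary, not part of the original file) ---
# A minimal, self-contained demonstration of the forward-fill and end-date
# masking performed at the end of ``get_history_window``: the leading NaN is
# seeded with an initial value, prices are forward-filled, and bars after a
# hypothetical asset end date are written back to NaN. All names below are
# invented for the demo.
if __name__ == '__main__':
    import numpy as _np
    import pandas as _pd

    _index = _pd.date_range('2016-01-04', periods=5, freq='D')
    _demo = _pd.DataFrame({'XYZ': [_np.nan, 10.0, _np.nan, 11.0, _np.nan]},
                          index=_index)
    _demo.iloc[0, 0] = 9.5                    # seed the leading NaN
    _demo = _demo.ffill()                     # forward-fill missing prices
    _end_date = _pd.Timestamp('2016-01-07')
    _demo.loc[_demo.index > _end_date, 'XYZ'] = _np.nan   # mask past end_date
    print(_demo)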
|
humdings/zipline
|
zipline/data/data_portal.py
|
Python
|
apache-2.0
| 51,743
|
from pic18f45k50 import *
|
sgnn7/sgfc
|
io_dev/sgfc_io/devices/pic18f45k50/__init__.py
|
Python
|
lgpl-2.1
| 26
|
# -*- coding: utf-8 -*-
import bpy
from bpy.types import Operator
from mmd_tools import register_wrap
from mmd_tools.bpyutils import SceneOp
from mmd_tools.core.bone import FnBone
from mmd_tools.translations import DictionaryEnum
import mmd_tools.core.model as mmd_model
@register_wrap
class MorphSliderSetup(Operator):
bl_idname = 'mmd_tools.morph_slider_setup'
bl_label = 'Morph Slider Setup'
bl_description = 'Translate MMD morphs of selected object into format usable by Blender'
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
type = bpy.props.EnumProperty(
name='Type',
description='Select type',
items = [
('CREATE', 'Create', 'Create placeholder object for morph sliders', 'SHAPEKEY_DATA', 0),
('BIND', 'Bind', 'Bind morph sliders', 'DRIVER', 1),
('UNBIND', 'Unbind', 'Unbind morph sliders', 'X', 2),
],
default='CREATE',
)
def execute(self, context):
obj = context.active_object
root = mmd_model.Model.findRoot(context.active_object)
rig = mmd_model.Model(root)
if self.type == 'BIND':
rig.morph_slider.bind()
elif self.type == 'UNBIND':
rig.morph_slider.unbind()
else:
rig.morph_slider.create()
SceneOp(context).active_object = obj
return {'FINISHED'}
@register_wrap
class CleanRiggingObjects(Operator):
bl_idname = 'mmd_tools.clean_rig'
bl_label = 'Clean Rig'
bl_description = 'Delete temporary physics objects of selected object and revert physics to default MMD state'
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
root = mmd_model.Model.findRoot(context.active_object)
rig = mmd_model.Model(root)
rig.clean()
SceneOp(context).active_object = root
return {'FINISHED'}
@register_wrap
class BuildRig(Operator):
bl_idname = 'mmd_tools.build_rig'
bl_label = 'Build Rig'
bl_description = 'Translate physics of selected object into format usable by Blender'
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
root = mmd_model.Model.findRoot(context.active_object)
rig = mmd_model.Model(root)
rig.build()
SceneOp(context).active_object = root
return {'FINISHED'}
@register_wrap
class CleanAdditionalTransformConstraints(Operator):
bl_idname = 'mmd_tools.clean_additional_transform'
bl_label = 'Clean Additional Transform'
bl_description = 'Delete shadow bones of selected object and revert bones to default MMD state'
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
obj = context.active_object
root = mmd_model.Model.findRoot(obj)
rig = mmd_model.Model(root)
rig.cleanAdditionalTransformConstraints()
SceneOp(context).active_object = obj
return {'FINISHED'}
@register_wrap
class ApplyAdditionalTransformConstraints(Operator):
bl_idname = 'mmd_tools.apply_additional_transform'
bl_label = 'Apply Additional Transform'
bl_description = 'Translate appended bones of selected object for Blender'
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
obj = context.active_object
root = mmd_model.Model.findRoot(obj)
rig = mmd_model.Model(root)
rig.applyAdditionalTransformConstraints()
SceneOp(context).active_object = obj
return {'FINISHED'}
@register_wrap
class SetupBoneFixedAxes(Operator):
bl_idname = 'mmd_tools.bone_fixed_axis_setup'
bl_label = 'Setup Bone Fixed Axis'
bl_description = 'Setup fixed axis of selected bones'
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
type = bpy.props.EnumProperty(
name='Type',
description='Select type',
items = [
('DISABLE', 'Disable', 'Disable MMD fixed axis of selected bones', 0),
('LOAD', 'Load', 'Load/Enable MMD fixed axis of selected bones from their Y-axis or the only rotatable axis', 1),
('APPLY', 'Apply', 'Align bone axes to MMD fixed axis of each bone', 2),
],
default='LOAD',
)
def execute(self, context):
arm = context.active_object
if not arm or arm.type != 'ARMATURE':
self.report({'ERROR'}, 'Active object is not an armature object')
return {'CANCELLED'}
if self.type == 'APPLY':
FnBone.apply_bone_fixed_axis(arm)
FnBone.apply_additional_transformation(arm)
else:
FnBone.load_bone_fixed_axis(arm, enable=(self.type=='LOAD'))
return {'FINISHED'}
@register_wrap
class SetupBoneLocalAxes(Operator):
bl_idname = 'mmd_tools.bone_local_axes_setup'
bl_label = 'Setup Bone Local Axes'
bl_description = 'Setup local axes of each bone'
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
type = bpy.props.EnumProperty(
name='Type',
description='Select type',
items = [
('DISABLE', 'Disable', 'Disable MMD local axes of selected bones', 0),
('LOAD', 'Load', 'Load/Enable MMD local axes of selected bones from their bone axes', 1),
('APPLY', 'Apply', 'Align bone axes to MMD local axes of each bone', 2),
],
default='LOAD',
)
def execute(self, context):
arm = context.active_object
if not arm or arm.type != 'ARMATURE':
self.report({'ERROR'}, 'Active object is not an armature object')
return {'CANCELLED'}
if self.type == 'APPLY':
FnBone.apply_bone_local_axes(arm)
FnBone.apply_additional_transformation(arm)
else:
FnBone.load_bone_local_axes(arm, enable=(self.type=='LOAD'))
return {'FINISHED'}
@register_wrap
class CreateMMDModelRoot(Operator):
bl_idname = 'mmd_tools.create_mmd_model_root_object'
bl_label = 'Create a MMD Model Root Object'
bl_description = 'Create a MMD model root object with a basic armature'
bl_options = {'REGISTER', 'UNDO'}
name_j = bpy.props.StringProperty(
name='Name',
description='The name of the MMD model',
default='New MMD Model',
)
name_e = bpy.props.StringProperty(
name='Name(Eng)',
description='The english name of the MMD model',
default='New MMD Model',
)
scale = bpy.props.FloatProperty(
name='Scale',
description='Scale',
default=1.0,
)
def execute(self, context):
rig = mmd_model.Model.create(self.name_j, self.name_e, self.scale, add_root_bone=True)
rig.initialDisplayFrames()
return {'FINISHED'}
def invoke(self, context, event):
vm = context.window_manager
return vm.invoke_props_dialog(self)
@register_wrap
class ConvertToMMDModel(Operator):
bl_idname = 'mmd_tools.convert_to_mmd_model'
bl_label = 'Convert to a MMD Model'
bl_description = 'Convert active armature with its meshes to a MMD model (experimental)'
bl_options = {'REGISTER', 'UNDO'}
ambient_color_source = bpy.props.EnumProperty(
name='Ambient Color Source',
description='Select ambient color source',
items = [
('DIFFUSE', 'Diffuse', 'Diffuse color', 0),
('MIRROR', 'Mirror', 'Mirror color (if property "mirror_color" is available)', 1),
],
default='DIFFUSE',
)
edge_threshold = bpy.props.FloatProperty(
name='Edge Threshold',
description='MMD toon edge will not be enabled if freestyle line color alpha less than this value',
min=0,
max=1.001,
precision=3,
step=0.1,
default=0.1,
)
edge_alpha_min = bpy.props.FloatProperty(
name='Minimum Edge Alpha',
description='Minimum alpha of MMD toon edge color',
min=0,
max=1,
precision=3,
step=0.1,
default=0.5,
)
@classmethod
def poll(cls, context):
obj = context.active_object
return obj and obj.type == 'ARMATURE' and obj.mode != 'EDIT'
def invoke(self, context, event):
vm = context.window_manager
return vm.invoke_props_dialog(self)
def execute(self, context):
#TODO convert some basic MMD properties
armature = context.active_object
scale = 1
model_name = 'New MMD Model'
root = mmd_model.Model.findRoot(armature)
if root is None or root != armature.parent:
rig = mmd_model.Model.create(model_name, model_name, scale, armature=armature)
self.__attach_meshes_to(armature, SceneOp(context).id_objects)
self.__configure_rig(mmd_model.Model(armature.parent))
return {'FINISHED'}
def __attach_meshes_to(self, armature, objects):
def __is_child_of_armature(mesh):
if mesh.parent is None:
return False
return mesh.parent == armature or __is_child_of_armature(mesh.parent)
def __is_using_armature(mesh):
for m in mesh.modifiers:
if m.type =='ARMATURE' and m.object == armature:
return True
return False
def __get_root(mesh):
if mesh.parent is None:
return mesh
return __get_root(mesh.parent)
for x in objects:
if __is_using_armature(x) and not __is_child_of_armature(x):
x_root = __get_root(x)
m = x_root.matrix_world
x_root.parent_type = 'OBJECT'
x_root.parent = armature
x_root.matrix_world = m
def __configure_rig(self, rig):
root = rig.rootObject()
armature = rig.armature()
meshes = tuple(rig.meshes())
rig.loadMorphs()
vertex_groups = {g.name for mesh in meshes for g in mesh.vertex_groups}
for pose_bone in armature.pose.bones:
if not pose_bone.parent:
continue
if not pose_bone.bone.use_connect and pose_bone.name not in vertex_groups:
continue
pose_bone.lock_location = (True, True, True)
from mmd_tools.core.material import FnMaterial
for m in {x for mesh in meshes for x in mesh.data.materials if x}:
FnMaterial.convert_to_mmd_material(m)
mmd_material = m.mmd_material
if self.ambient_color_source == 'MIRROR' and hasattr(m, 'mirror_color'):
mmd_material.ambient_color = m.mirror_color
else:
mmd_material.ambient_color = [0.5*c for c in mmd_material.diffuse_color]
if hasattr(m, 'line_color'): # freestyle line color
line_color = list(m.line_color)
mmd_material.enabled_toon_edge = line_color[3] >= self.edge_threshold
mmd_material.edge_color = line_color[:3] + [max(line_color[3], self.edge_alpha_min)]
from mmd_tools.operators.display_item import DisplayItemQuickSetup
DisplayItemQuickSetup.load_bone_groups(root.mmd_root, armature)
rig.initialDisplayFrames(reset=False) # ensure default frames
DisplayItemQuickSetup.load_facial_items(root.mmd_root)
root.mmd_root.active_display_item_frame = 0
@register_wrap
class TranslateMMDModel(Operator):
bl_idname = 'mmd_tools.translate_mmd_model'
bl_label = 'Translate a MMD Model'
bl_description = 'Translate Japanese names of a MMD model'
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
dictionary = bpy.props.EnumProperty(
name='Dictionary',
items=DictionaryEnum.get_dictionary_items,
description='Translate names from Japanese to English using selected dictionary',
)
types = bpy.props.EnumProperty(
name='Types',
description='Select which parts will be translated',
options={'ENUM_FLAG'},
items = [
('BONE', 'Bones', 'Bones', 1),
('MORPH', 'Morphs', 'Morphs', 2),
('MATERIAL', 'Materials', 'Materials', 4),
('DISPLAY', 'Display', 'Display frames', 8),
('PHYSICS', 'Physics', 'Rigidbodies and joints', 16),
('INFO', 'Information', 'Model name and comments', 32),
],
default={'BONE', 'MORPH', 'MATERIAL', 'DISPLAY', 'PHYSICS',},
)
modes = bpy.props.EnumProperty(
name='Modes',
description='Select translation mode',
options={'ENUM_FLAG'},
items = [
('MMD', 'MMD Names', 'Fill MMD English names', 1),
('BLENDER', 'Blender Names', 'Translate blender names (experimental)', 2),
],
default={'MMD'},
)
use_morph_prefix = bpy.props.BoolProperty(
name='Use Morph Prefix',
description='Add/remove prefix to English name of morph',
default=False,
)
overwrite = bpy.props.BoolProperty(
name='Overwrite',
description='Overwrite a translated English name',
default=False,
)
allow_fails = bpy.props.BoolProperty(
name='Allow Fails',
description='Allow incompletely translated names',
default=False,
)
def invoke(self, context, event):
vm = context.window_manager
return vm.invoke_props_dialog(self)
def execute(self, context):
try:
self.__translator = DictionaryEnum.get_translator(self.dictionary)
except Exception as e:
self.report({'ERROR'}, 'Failed to load dictionary: %s'%e)
return {'CANCELLED'}
obj = context.active_object
root = mmd_model.Model.findRoot(obj)
rig = mmd_model.Model(root)
if 'MMD' in self.modes:
for i in self.types:
getattr(self, 'translate_%s'%i.lower())(rig)
if 'BLENDER' in self.modes:
self.translate_blender_names(rig)
translator = self.__translator
txt = translator.save_fails()
if translator.fails:
self.report({'WARNING'}, "Failed to translate %d names, see '%s' in text editor"%(len(translator.fails), txt.name))
return {'FINISHED'}
def translate(self, name_j, name_e):
if not self.overwrite and name_e and self.__translator.is_translated(name_e):
return name_e
if self.allow_fails:
name_e = None
return self.__translator.translate(name_j, name_e)
def translate_blender_names(self, rig):
if 'BONE' in self.types:
for b in rig.armature().pose.bones:
rig.renameBone(b.name, self.translate(b.name, b.name))
if 'MORPH' in self.types:
for i in (x for x in rig.meshes() if x.data.shape_keys):
for kb in i.data.shape_keys.key_blocks:
kb.name = self.translate(kb.name, kb.name)
if 'MATERIAL' in self.types:
for m in (x for x in rig.materials() if x):
m.name = self.translate(m.name, m.name)
if 'DISPLAY' in self.types:
for g in rig.armature().pose.bone_groups:
g.name = self.translate(g.name, g.name)
if 'PHYSICS' in self.types:
for i in rig.rigidBodies():
i.name = self.translate(i.name, i.name)
for i in rig.joints():
i.name = self.translate(i.name, i.name)
if 'INFO' in self.types:
objects = [rig.rootObject(), rig.armature()]
objects.extend(rig.meshes())
for i in objects:
i.name = self.translate(i.name, i.name)
def translate_info(self, rig):
mmd_root = rig.rootObject().mmd_root
mmd_root.name_e = self.translate(mmd_root.name, mmd_root.name_e)
comment_text = bpy.data.texts.get(mmd_root.comment_text, None)
comment_e_text = bpy.data.texts.get(mmd_root.comment_e_text, None)
if comment_text and comment_e_text:
comment_e = self.translate(comment_text.as_string(), comment_e_text.as_string())
comment_e_text.from_string(comment_e)
def translate_bone(self, rig):
bones = rig.armature().pose.bones
for b in bones:
if b.is_mmd_shadow_bone:
continue
b.mmd_bone.name_e = self.translate(b.mmd_bone.name_j, b.mmd_bone.name_e)
def translate_morph(self, rig):
mmd_root = rig.rootObject().mmd_root
attr_list = ('group', 'vertex', 'bone', 'uv', 'material')
prefix_list = ('G_', '', 'B_', 'UV_', 'M_')
for attr, prefix in zip(attr_list, prefix_list):
for m in getattr(mmd_root, attr+'_morphs', []):
m.name_e = self.translate(m.name, m.name_e)
if not prefix:
continue
if self.use_morph_prefix:
if not m.name_e.startswith(prefix):
m.name_e = prefix + m.name_e
elif m.name_e.startswith(prefix):
m.name_e = m.name_e[len(prefix):]
def translate_material(self, rig):
for m in rig.materials():
if m is None:
continue
m.mmd_material.name_e = self.translate(m.mmd_material.name_j, m.mmd_material.name_e)
def translate_display(self, rig):
mmd_root = rig.rootObject().mmd_root
for f in mmd_root.display_item_frames:
f.name_e = self.translate(f.name, f.name_e)
def translate_physics(self, rig):
for i in rig.rigidBodies():
i.mmd_rigid.name_e = self.translate(i.mmd_rigid.name_j, i.mmd_rigid.name_e)
for i in rig.joints():
i.mmd_joint.name_e = self.translate(i.mmd_joint.name_j, i.mmd_joint.name_e)
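# --- Illustrative sketch (added commentary, not part of the original file) ---
# The prefix handling in TranslateMMDModel.translate_morph reduces to a small
# toggle: add the type prefix to the English morph name when requested, strip
# it otherwise. A runnable, Blender-free illustration of that logic (names
# invented):
if __name__ == "__main__":
    def _toggle_prefix(name_e, prefix, use_prefix):
        if not prefix:
            return name_e
        if use_prefix:
            return name_e if name_e.startswith(prefix) else prefix + name_e
        return name_e[len(prefix):] if name_e.startswith(prefix) else name_e

    print(_toggle_prefix("smile", "G_", True))     # -> G_smile
    print(_toggle_prefix("G_smile", "G_", False))  # -> smile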
|
powroupi/blender_mmd_tools
|
mmd_tools/operators/model.py
|
Python
|
gpl-3.0
| 17,794
|
import importlib
import inspect
import pathlib
from .adapters import TerminalProvider
def _is_adapter(member) -> bool:
return (inspect.isclass(member)
and issubclass(member, TerminalProvider)
and member != TerminalProvider)
def _get_adapter_classes() -> [TerminalProvider]:
"""
    This method reads all the modules in the adapters folder, searching for
    all the implementing terminal adapter classes.
    Thanks to / adapted from https://github.com/cclauss/adapter_pattern/
"""
adapter_dir = pathlib.Path(__file__).resolve().parent / 'adapters'
for file in adapter_dir.iterdir():
if file.suffix.lower() == '.py' and not file.name.startswith('__'):
module = importlib.import_module('.' + file.name.split('.')[0], 'pokemonterminal.terminal.adapters')
for _, c in inspect.getmembers(module, _is_adapter):
yield c
def get_current_terminal_adapters() -> [TerminalProvider]:
arr = _get_adapter_classes()
return [x for x in arr if x.is_compatible()]
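# --- Illustrative sketch (added commentary, not part of the original file) ---
# The discovery above hinges on a predicate that keeps concrete subclasses of
# the provider base class. A self-contained, runnable imitation of that filter
# with made-up stand-in classes (no real adapters required):
if __name__ == "__main__":
    import inspect as _inspect

    class _FakeProvider:
        @staticmethod
        def is_compatible() -> bool:
            return False

    class _TmuxLike(_FakeProvider):
        @staticmethod
        def is_compatible() -> bool:
            return True

    def _is_fake_adapter(member) -> bool:
        return (_inspect.isclass(member)
                and issubclass(member, _FakeProvider)
                and member is not _FakeProvider)

    _candidates = [_FakeProvider, _TmuxLike, _is_fake_adapter, 42]
    _adapters = [c for c in _candidates if _is_fake_adapter(c)]
    print([a for a in _adapters if a.is_compatible()])  # only _TmuxLike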
|
LazoCoder/Pokemon-Terminal
|
pokemonterminal/terminal/__init__.py
|
Python
|
gpl-3.0
| 1,053
|
"""
Copyright 2013-2016 Tuomas Jaakola
This file is part of TOE.
TOE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TOE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TOE. If not, see <http://www.gnu.org/licenses/>.
More information about tiles:
http://www.maptiler.org/google-maps-coordinates-tile-bounds-projection/
"""
import sys
import os
import math
import imghdr
from globalmaptiles import GlobalMercator
from downloader import Downloader
class TileLoader(object):
TILE_WIDTH = 256 # tile is square
TILE_FORMAT = 'png'
def __init__(self, min_lat, min_lon, max_lat, max_lon, width, max_zoom = 18):
self.tiles = []
self.min_lat = min_lat
self.min_lon = min_lon
self.max_lat = max_lat
self.max_lon = max_lon
self.mercator = GlobalMercator()
self.downloader = Downloader()
# count how many horizontal tiles we need
self.x_tiles_needed = math.ceil(width / self.TILE_WIDTH)
self.max_zoom = max_zoom
def download(self, cache_dir, url, http_headers):
"""Downloads tiles and returns list of downloaded tiles."""
tile_files = {}
tiles = self._get_tile_list()
for (tx, ty, tz) in tiles:
cx, cy, cz = self._convert_tile(tx, ty, tz)
tile_url = url.replace('{x}', str(cx)).replace('{y}', str(cy)).replace('{z}', str(cz))
tile_file = self._gen_tile_file(tx, ty, tz, cache_dir)
self.downloader.download(tile_file, tile_url, http_headers)
tile_files[tile_url] = tile_file
        # wait for downloads to finish
self.downloader.wait()
# validate all tiles
valid = True
for tile_url, tile_file in tile_files.iteritems():
if self.TILE_FORMAT == 'png' and imghdr.what(tile_file) != 'png':
sys.stderr.write("%s is not PNG image\n" % tile_url)
valid = False
if not valid:
return None
return tile_files.values()
def _get_tile_list(self):
"""Returns list of tiles needed to cover bounding box."""
tiles = []
tile_info = self._find_tiles()
if tile_info is not None:
(tminx, tminy, tmaxx, tmaxy, tz) = tile_info
for ty in range(tminy, tmaxy + 1):
for tx in range(tminx, tmaxx + 1):
tiles.append((tx, ty, tz))
return tiles
def _find_tiles(self):
"""Returns optimal zoom level based on given width."""
for zoom_level in range(1, self.max_zoom + 1):
tminx, tminy = self._lat_lon_to_tile(self.min_lat, self.min_lon, zoom_level)
tmaxx, tmaxy = self._lat_lon_to_tile(self.max_lat, self.max_lon, zoom_level)
x_tiles = tmaxx + 1 - tminx
if x_tiles > self.x_tiles_needed or zoom_level == self.max_zoom:
# optimal zoom level found
return (tminx, tminy, tmaxx, tmaxy, zoom_level)
return None
def _lat_lon_to_tile(self, lat, lon, zoom_level):
"""Converts given latLon to tile XY"""
mx, my = self.mercator.LatLonToMeters(lat, lon)
tx, ty = self.mercator.MetersToTile(mx, my, zoom_level)
return (tx, ty)
def _gen_tile_file(self, tx, ty, tz, cache_dir):
"""Returns filename where tile will be saved as."""
filename = "%d_%d_%d.%s" % (tx, ty, tz, self.TILE_FORMAT)
return os.path.join(cache_dir, filename)
class TMSTileLoader(TileLoader):
def _convert_tile(self, tx, ty, tz):
return tx, ty, tz
class GoogleTileLoader(TileLoader):
def _convert_tile(self, tx, ty, tz):
gx, gy = self.mercator.GoogleTile(tx, ty, tz)
return gx, gy, tz
class FTileLoader(TileLoader):
def _convert_tile(self, tx, ty, tz):
fx = tx - 2**(tz - 1)
fy = ty - 2**(tz - 1)
fz = 18 - tz
return fx, fy, fz
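# --- Illustrative sketch (added commentary, not part of the original file) ---
# The loaders above delegate lat/lon -> tile conversion to GlobalMercator.
# For intuition only, the common Web Mercator "slippy map" formula (XYZ /
# Google numbering, y counted from the top) can be written directly; TMS
# numbering, which GlobalMercator produces, flips y as (2**zoom - 1 - y).
# This is an independent approximation, not a drop-in replacement.
if __name__ == "__main__":
    import math as _math

    def _lat_lon_to_xyz_tile(lat, lon, zoom):
        n = 2 ** zoom
        xtile = int((lon + 180.0) / 360.0 * n)
        lat_rad = _math.radians(lat)
        ytile = int((1.0 - _math.asinh(_math.tan(lat_rad)) / _math.pi) / 2.0 * n)
        return xtile, ytile

    print(_lat_lon_to_xyz_tile(60.1699, 24.9384, 10))  # Helsinki at zoom 10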
|
iqqmuT/toe
|
export/mapnik/tileloader.py
|
Python
|
gpl-3.0
| 4,326
|
"""newsApi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from api import views
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'^$', views.index),
url(r'^getNews/(?P<id>.+)', views.getNews),
url(r'^getWechat/(?P<id>.+)', views.getWechat),
url(r'^getSource', views.getSource),
url(r'^getCatalogue', views.getCatalogue),
url(r'^getNewsList', views.getNewsList),
url(r'^getKeywords', views.getKeywords),
url(r'^getNewsByKey', views.getNewsByKey),
url(r'^getWechatSource', views.getWechatSource),
url(r'^getWechatList', views.getWechatList),
url(r'^news/$', views.News.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
yinzishao/NewsScrapy
|
newsApi/api/urls.py
|
Python
|
lgpl-3.0
| 1,341
|
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
import fixtures
import mock
from nova import exception
from nova.pci import utils
from nova import test
class PciDeviceMatchTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceMatchTestCase, self).setUp()
self.fake_pci_1 = {'vendor_id': 'v1',
'device_id': 'd1',
'capabilities_network': ['cap1', 'cap2', 'cap3']}
def test_single_spec_match(self):
self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1'}]))
self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1, [{'vendor_id': 'V1', 'device_id': 'D1'}]))
def test_multiple_spec_match(self):
self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v1', 'device_id': 'd1'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
def test_spec_dismatch(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v4', 'device_id': 'd4'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
def test_spec_extra_key(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v1', 'device_id': 'd1', 'wrong_key': 'k1'}]))
def test_spec_list(self):
self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1',
'capabilities_network': ['cap1', 'cap2',
'cap3']}]))
self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1',
'capabilities_network': ['cap3', 'cap1']}]))
def test_spec_list_no_matching(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1',
'capabilities_network': ['cap1', 'cap33']}]))
def test_spec_list_wrong_type(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': ['d1']}]))
class PciDeviceAddressParserTestCase(test.NoDBTestCase):
def test_parse_address(self):
self.parse_result = utils.parse_address("0000:04:12.6")
self.assertEqual(self.parse_result, ('0000', '04', '12', '6'))
def test_parse_address_wrong(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
utils.parse_address, "0000:04.12:6")
def test_parse_address_invalid_character(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
utils.parse_address, "0000:h4.12:6")
class GetFunctionByIfnameTestCase(test.NoDBTestCase):
@mock.patch('os.path.isdir', return_value=True)
@mock.patch.object(os, 'readlink')
def test_virtual_function(self, mock_readlink, *args):
mock_readlink.return_value = '../../../0000.00.00.1'
with mock.patch('builtins.open', side_effect=IOError()):
address, physical_function = utils.get_function_by_ifname('eth0')
self.assertEqual(address, '0000.00.00.1')
self.assertFalse(physical_function)
@mock.patch('os.path.isdir', return_value=True)
@mock.patch.object(os, 'readlink')
def test_physical_function(self, mock_readlink, *args):
ifname = 'eth0'
totalvf_path = "/sys/class/net/%s/device/%s" % (ifname,
utils._SRIOV_TOTALVFS)
mock_readlink.return_value = '../../../0000:00:00.1'
with self.patch_open(totalvf_path, '4') as mock_open:
address, physical_function = utils.get_function_by_ifname('eth0')
self.assertEqual(address, '0000:00:00.1')
self.assertTrue(physical_function)
mock_open.assert_called_once_with(totalvf_path)
@mock.patch('os.path.isdir', return_value=False)
def test_exception(self, *args):
address, physical_function = utils.get_function_by_ifname('lo')
self.assertIsNone(address)
self.assertFalse(physical_function)
class IsPhysicalFunctionTestCase(test.NoDBTestCase):
def setUp(self):
super(IsPhysicalFunctionTestCase, self).setUp()
self.pci_args = utils.get_pci_address_fields('0000:00:00.1')
@mock.patch('os.path.isdir', return_value=True)
def test_virtual_function(self, *args):
with mock.patch('builtins.open', side_effect=IOError()):
self.assertFalse(utils.is_physical_function(*self.pci_args))
@mock.patch('os.path.isdir', return_value=True)
def test_physical_function(self, *args):
with mock.patch('builtins.open', mock.mock_open(read_data='4')):
self.assertTrue(utils.is_physical_function(*self.pci_args))
@mock.patch('os.path.isdir', return_value=False)
def test_exception(self, *args):
self.assertFalse(utils.is_physical_function(*self.pci_args))
class GetIfnameByPciAddressTestCase(test.NoDBTestCase):
def setUp(self):
super(GetIfnameByPciAddressTestCase, self).setUp()
self.pci_address = '0000:00:00.1'
@mock.patch.object(os, 'listdir')
def test_physical_function_inferface_name(self, mock_listdir):
mock_listdir.return_value = ['foo', 'bar']
ifname = utils.get_ifname_by_pci_address(
self.pci_address, pf_interface=True)
self.assertEqual(ifname, 'bar')
@mock.patch.object(os, 'listdir')
def test_virtual_function_inferface_name(self, mock_listdir):
mock_listdir.return_value = ['foo', 'bar']
ifname = utils.get_ifname_by_pci_address(
self.pci_address, pf_interface=False)
self.assertEqual(ifname, 'bar')
@mock.patch.object(os, 'listdir')
def test_exception(self, mock_listdir):
mock_listdir.side_effect = OSError('No such file or directory')
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_ifname_by_pci_address,
self.pci_address
)
class GetMacByPciAddressTestCase(test.NoDBTestCase):
def setUp(self):
super(GetMacByPciAddressTestCase, self).setUp()
self.pci_address = '0000:07:00.1'
self.if_name = 'enp7s0f1'
self.tmpdir = self.useFixture(fixtures.TempDir())
self.fake_file = os.path.join(self.tmpdir.path, "address")
with open(self.fake_file, "w") as f:
f.write("a0:36:9f:72:00:00\n")
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_mac(self, mock_join, mock_listdir):
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
mac = utils.get_mac_by_pci_address(self.pci_address)
mock_join.assert_called_once_with(
"/sys/bus/pci/devices/%s/net" % self.pci_address, self.if_name,
"address")
self.assertEqual("a0:36:9f:72:00:00", mac)
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_mac_fails(self, mock_join, mock_listdir):
os.unlink(self.fake_file)
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_mac_by_pci_address, self.pci_address)
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_mac_fails_empty(self, mock_join, mock_listdir):
with open(self.fake_file, "w") as f:
f.truncate(0)
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_mac_by_pci_address, self.pci_address)
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_physical_function_mac(self, mock_join, mock_listdir):
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
mac = utils.get_mac_by_pci_address(self.pci_address, pf_interface=True)
mock_join.assert_called_once_with(
"/sys/bus/pci/devices/%s/physfn/net" % self.pci_address,
self.if_name, "address")
self.assertEqual("a0:36:9f:72:00:00", mac)
class GetVfNumByPciAddressTestCase(test.NoDBTestCase):
def setUp(self):
super(GetVfNumByPciAddressTestCase, self).setUp()
self.pci_address = '0000:00:00.1'
self.paths = [
'/sys/bus/pci/devices/0000:00:00.1/physfn/virtfn3',
]
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_vf_number_found(self, mock_iglob, mock_readlink):
mock_iglob.return_value = self.paths
mock_readlink.return_value = '../../0000:00:00.1'
vf_num = utils.get_vf_num_by_pci_address(self.pci_address)
self.assertEqual(vf_num, '3')
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_vf_number_not_found(self, mock_iglob, mock_readlink):
mock_iglob.return_value = self.paths
mock_readlink.return_value = '../../0000:00:00.2'
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_vf_num_by_pci_address,
self.pci_address
)
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_exception(self, mock_iglob, mock_readlink):
mock_iglob.return_value = self.paths
mock_readlink.side_effect = OSError('No such file or directory')
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_vf_num_by_pci_address,
self.pci_address
)
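# --- Illustrative sketch (added commentary, not part of the original file) ---
# The address format exercised by PciDeviceAddressParserTestCase is the usual
# "domain:bus:slot.function" form. A standalone regex version of the same
# parse, independent of nova.pci.utils (names invented):
if __name__ == "__main__":
    import re

    _PCI_ADDR_RE = re.compile(
        r"^(?P<domain>[0-9a-fA-F]{4}):(?P<bus>[0-9a-fA-F]{2}):"
        r"(?P<slot>[0-9a-fA-F]{2})\.(?P<func>[0-7])$")

    _m = _PCI_ADDR_RE.match("0000:04:12.6")
    print(_m.group("domain", "bus", "slot", "func"))  # ('0000', '04', '12', '6')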
|
klmitch/nova
|
nova/tests/unit/pci/test_utils.py
|
Python
|
apache-2.0
| 10,569
|
import httplib
import json
def application(environ, start_response):
kml = environ['wsgi.input'].read()
api_dev_key = 'd0d28998a18c515e9706f159dbd348fa'
api_option = 'paste'
api_paste_code = kml
# Get the URL of the KML file from paste.kde.org.
request_data = {
'data': kml,
'language': 'xml'
}
request_body = json.dumps(request_data).encode('utf-8')
post_connection = httplib.HTTPSConnection('paste.kde.org')
post_connection.request('POST', '/api/json/create', request_body, {'Content-Type': 'application/json'})
post_data = json.loads(post_connection.getresponse().read())
response_body = 'https://paste.kde.org/{}/{}/raw/routes.kml'.format(post_data['result']['id'], post_data['result']['hash'])
# Send the URL back to the client.
status = '200 OK'
response_headers = [('Content-Type', 'application/vnd.google-earth.kml+xml; charset=utf-8'),
('Content-Length', str(len(response_body)))]
start_response(status, response_headers)
return response_body
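# --- Illustrative sketch (added commentary, not part of the original file) ---
# The handler above follows the plain WSGI calling convention: read the
# request body from environ['wsgi.input'], call start_response with a status
# and headers, and return the body. A tiny self-contained echo app exercised
# with a fake environ (no network involved, all names invented):
if __name__ == '__main__':
    from io import BytesIO

    def echo_app(environ, start_response):
        body = environ['wsgi.input'].read()
        start_response('200 OK', [('Content-Type', 'text/plain'),
                                  ('Content-Length', str(len(body)))])
        return [body]

    print(echo_app({'wsgi.input': BytesIO(b'<kml/>')},
                   lambda status, headers: None))  # [b'<kml/>']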
|
xordspar0/Data61-capstone-2016
|
kmlFileGenerator.py
|
Python
|
gpl-3.0
| 1,069
|
import json
from hashlib import md5
from typing import Dict, List, Optional, Tuple
from zeus.config import redis
from zeus.constants import Permission
from zeus.exceptions import ApiError, ApiUnauthorized, IdentityNeedsUpgrade
from zeus.models import Identity, Repository, User
from zeus.utils.github import GitHubClient
from zeus.utils.ssh import KeyPair
from .base import RepositoryProvider
ONE_DAY = 60 * 60 * 24
def get_github_client(user: User, scopes=()) -> Tuple[GitHubClient, Identity]:
identity = Identity.query.filter(
Identity.provider == "github", Identity.user_id == user.id
).first()
if not identity:
raise ApiUnauthorized
for scope in scopes:
if scope not in identity.scopes:
raise IdentityNeedsUpgrade(scope=scope, identity=identity)
return GitHubClient(token=identity.config["access_token"]), identity
class GitHubRepositoryProvider(RepositoryProvider):
def get_owners(self, user: User) -> List[dict]:
github, identity = get_github_client(user)
response = github.get("/user/orgs")
return [{"name": r["login"]} for r in response]
def get_repos_for_owner(
self, user: User, owner_name: str, include_private_repos=False
) -> List[dict]:
if include_private_repos:
github, identity = get_github_client(user, scopes=["repo"])
else:
github, identity = get_github_client(user)
cache = GitHubCache(user=user, client=github, scopes=identity.scopes)
results = []
for repo_data in cache.get_repos(owner_name, no_cache=not self.cache):
owner_name, repo_name = repo_data["full_name"].split("/", 1)
results.append(
{
"id": repo_data["id"],
"owner_name": owner_name,
"name": repo_name,
"permission": repo_data["permission"],
"url": repo_data["ssh_url"],
"config": {"full_name": repo_data["full_name"]},
}
)
return results
def get_repo(self, user: User, owner_name: str, repo_name: str) -> dict:
github, identity = get_github_client(user)
try:
repo_data = github.get("/repos/{}/{}".format(owner_name, repo_name))
except ApiError as exc:
if exc.code == 404 and "repo" not in identity.scopes:
raise IdentityNeedsUpgrade(scope="repo", identity=identity)
raise
owner_name, repo_name = repo_data["full_name"].split("/", 1)
return {
"id": repo_data["id"],
"owner_name": owner_name,
"name": repo_name,
"url": repo_data["ssh_url"],
"permission": Permission.admin
if repo_data["permissions"].get("admin", False)
else Permission.read,
"config": {"full_name": repo_data["full_name"]},
}
def add_key(self, user: User, owner_name: str, repo_name: str, key: KeyPair):
github, _ = get_github_client(user)
github.post(
"/repos/{}/{}/keys".format(owner_name, repo_name),
json={"title": "zeus", "key": key.public_key, "read_only": True},
)
def get_permission(self, user: User, repo: Repository) -> Optional[bool]:
try:
repo = self.get_repo(user, *repo.data["full_name"].split("/", 1))
except ApiError as exc:
if exc.code == 404:
return None
raise
return repo["permission"]
def has_access(self, user: User, repo: Repository) -> bool:
try:
self.get_repo(user, *repo.data["full_name"].split("/", 1))
except ApiError as exc:
if exc.code == 404:
return False
raise
return True
class GitHubCache(object):
version = 4
def __init__(self, user: User, client: GitHubClient = None, scopes=()):
self.user = user
self.scopes = scopes
if client is None:
self.client, _ = get_github_client(user)
else:
self.client = client
def get_repos(self, owner: str, no_cache=False) -> List[Dict]:
cache_key = "gh:{}:repos:{}:{}:{}".format(
self.version,
md5(self.client.token.encode("utf")).hexdigest(),
md5(b",".join(s.encode("utf") for s in self.scopes)).hexdigest(),
md5(owner.encode("utf-8")).hexdigest() if owner else "",
)
if no_cache:
result = None
else:
result = redis.get(cache_key)
if result is None:
# TODO(dcramer): paginate
if not owner:
endpoint = "/user/repos"
params = {"type": "owner"}
else:
endpoint = "/orgs/{}/repos".format(owner)
params = {}
result = []
has_results = True
while has_results and endpoint:
response = self.client.get(endpoint, params=params)
result.extend(
[
{
"id": r["id"],
"ssh_url": r["ssh_url"],
"full_name": r["full_name"],
"permission": Permission.admin
if r["permissions"].get("admin", False)
else Permission.read,
}
for r in response
]
)
has_results = bool(response)
if has_results:
endpoint = response.rel.get("next")
redis.setex(cache_key, ONE_DAY, json.dumps(result))
else:
result = json.loads(result)
for i in result:
# we need to coerce permission back into our Permission enum
i["permission"] = Permission(i["permission"])
return sorted(result, key=lambda x: x["full_name"])
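# --- Illustrative sketch (added commentary, not part of the original file) ---
# GitHubCache.get_repos derives its redis key from md5 digests of the access
# token, the scope list and the owner, so changing any of them naturally busts
# the cache. A standalone rendition of that key construction with made-up
# inputs:
if __name__ == "__main__":
    from hashlib import md5 as _md5

    _version = 4
    _token = "gho_example_token"        # hypothetical token
    _scopes = ("read:org", "repo")
    _owner = "getsentry"
    print("gh:{}:repos:{}:{}:{}".format(
        _version,
        _md5(_token.encode("utf-8")).hexdigest(),
        _md5(b",".join(s.encode("utf-8") for s in _scopes)).hexdigest(),
        _md5(_owner.encode("utf-8")).hexdigest() if _owner else "",
    ))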
|
getsentry/zeus
|
zeus/vcs/providers/github.py
|
Python
|
apache-2.0
| 6,063
|
#!/usr/bin/python
"""Test to verify presentation of selectable list items."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Control><Shift>n"))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Tab to list item",
["KNOWN ISSUE: We are presenting nothing here",
""]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"2. Right to next list item",
["BRAILLE LINE: 'soffice application Template Manager frame Template Manager dialog Drawings page tab list Presentation Backgrounds list item'",
" VISIBLE: 'Presentation Backgrounds list it', cursor=1",
"SPEECH OUTPUT: 'Presentation Backgrounds'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"3. Left to previous list item",
["BRAILLE LINE: 'soffice application Template Manager frame Template Manager dialog Drawings page tab list My Templates list item'",
" VISIBLE: 'My Templates list item', cursor=1",
"SPEECH OUTPUT: 'My Templates'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
pvagner/orca
|
test/keystrokes/oowriter/ui_role_list_item.py
|
Python
|
lgpl-2.1
| 1,384
|
import asyncio
import gc
import socket
import sys
import time
from unittest import mock
import psycopg2
import psycopg2.extensions
import psycopg2.extras
import pytest
import aiopg
from aiopg import DEFAULT_TIMEOUT, Connection, Cursor
PY_341 = sys.version_info >= (3, 4, 1)
@pytest.fixture
def connect(make_connection):
async def go(**kwargs):
return await make_connection(**kwargs)
return go
async def test_connect(connect):
conn = await connect()
assert isinstance(conn, Connection)
assert not conn._writing
assert conn._conn is conn.raw
assert not conn.echo
async def test_simple_select(connect):
conn = await connect()
cur = await conn.cursor()
assert isinstance(cur, Cursor)
await cur.execute("SELECT 1")
ret = await cur.fetchone()
assert (1,) == ret
async def test_simple_select_with_hstore(connect):
conn = await connect()
cur = await conn.cursor()
await cur.execute(
"""
CREATE EXTENSION IF NOT EXISTS hstore;
CREATE TABLE hfoo (id serial, hcol hstore);
INSERT INTO hfoo (hcol) VALUES ('"col1"=>"456", "col2"=>"zzz"');
"""
)
# Reconnect because this is where the problem happens.
cur.close()
conn.close()
conn = await connect(cursor_factory=psycopg2.extras.RealDictCursor)
cur = await conn.cursor()
await cur.execute("SELECT * FROM hfoo;")
ret = await cur.fetchone()
await cur.execute("DROP TABLE hfoo;")
assert {"hcol": {"col1": "456", "col2": "zzz"}, "id": 1} == ret
async def test_default_event_loop(connect, loop):
asyncio.set_event_loop(loop)
conn = await connect()
cur = await conn.cursor()
assert isinstance(cur, Cursor)
await cur.execute("SELECT 1")
ret = await cur.fetchone()
assert (1,) == ret
assert conn._loop is loop
async def test_close(connect):
conn = await connect()
await conn.close()
assert conn.closed
async def test_close_twice(connect):
conn = await connect()
await conn.close()
await conn.close()
assert conn.closed
async def test_with_cursor_factory(connect):
conn = await connect()
cur = await conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
await cur.execute("SELECT 1 AS a")
ret = await cur.fetchone()
assert 1 == ret["a"]
async def test_closed(connect):
conn = await connect()
assert not conn.closed
await conn.close()
assert conn.closed
async def test_tpc(connect):
conn = await connect()
xid = await conn.xid(1, "a", "b")
assert (1, "a", "b") == tuple(xid)
with pytest.raises(psycopg2.ProgrammingError):
await conn.tpc_begin(xid)
with pytest.raises(psycopg2.ProgrammingError):
await conn.tpc_prepare()
with pytest.raises(psycopg2.ProgrammingError):
await conn.tpc_commit(xid)
with pytest.raises(psycopg2.ProgrammingError):
await conn.tpc_rollback(xid)
with pytest.raises(psycopg2.ProgrammingError):
await conn.tpc_recover()
async def test_reset(connect):
conn = await connect()
with pytest.raises(psycopg2.ProgrammingError):
await conn.reset()
async def test_lobject(connect):
conn = await connect()
with pytest.raises(psycopg2.ProgrammingError):
await conn.lobject()
async def test_set_session(connect):
conn = await connect()
with pytest.raises(psycopg2.ProgrammingError):
await conn.set_session()
async def test_dsn(connect, pg_params):
conn = await connect()
pg_params["password"] = "x" * len(pg_params["password"])
assert "dbname" in conn.dsn
assert "user" in conn.dsn
assert "password" in conn.dsn
assert "host" in conn.dsn
assert "port" in conn.dsn
async def test_get_backend_pid(connect):
conn = await connect()
ret = await conn.get_backend_pid()
assert 0 != ret
async def test_get_parameter_status(connect):
conn = await connect()
ret = await conn.get_parameter_status("integer_datetimes")
assert "on" == ret
async def test_cursor_factory(connect):
conn = await connect(cursor_factory=psycopg2.extras.DictCursor)
assert psycopg2.extras.DictCursor is conn.cursor_factory
async def test_notices(connect):
conn = await connect()
cur = await conn.cursor()
await cur.execute("CREATE TABLE foo (id serial PRIMARY KEY);")
if not conn.notices:
raise pytest.skip("Notices are disabled")
assert [
"NOTICE: CREATE TABLE will create implicit sequence "
'"foo_id_seq" for serial column "foo.id"\n',
"NOTICE: CREATE TABLE / PRIMARY KEY will create "
'implicit index "foo_pkey" for table "foo"\n',
] == conn.notices
async def test_autocommit(connect):
conn = await connect()
assert conn.autocommit
with pytest.raises(psycopg2.ProgrammingError):
conn.autocommit = False
assert conn.autocommit
async def test_isolation_level(connect):
conn = await connect()
assert psycopg2.extensions.ISOLATION_LEVEL_DEFAULT == conn.isolation_level
with pytest.raises(psycopg2.ProgrammingError):
await conn.set_isolation_level(1)
assert psycopg2.extensions.ISOLATION_LEVEL_DEFAULT == conn.isolation_level
async def test_encoding(connect):
conn = await connect()
assert "UTF8" == conn.encoding
with pytest.raises(psycopg2.ProgrammingError):
await conn.set_client_encoding("ascii")
assert "UTF8" == conn.encoding
async def test_get_transaction_status(connect):
conn = await connect()
ret = await conn.get_transaction_status()
assert 0 == ret
async def test_transaction(connect):
conn = await connect()
with pytest.raises(psycopg2.ProgrammingError):
await conn.commit()
with pytest.raises(psycopg2.ProgrammingError):
await conn.rollback()
async def test_status(connect):
conn = await connect()
assert 1 == conn.status
async def test_protocol_version(connect):
conn = await connect()
assert 0 < conn.protocol_version
async def test_server_version(connect):
conn = await connect()
assert 0 < conn.server_version
async def test_cancel_not_supported(connect):
conn = await connect()
with pytest.raises(psycopg2.ProgrammingError):
await conn.cancel()
async def test_close2(connect, loop):
conn = await connect()
conn._writing = True
loop.add_writer(conn._fileno, conn._ready, conn._weakref)
conn.close()
assert not conn._writing
assert conn.closed
async def test_psyco_exception(connect):
conn = await connect()
cur = await conn.cursor()
with pytest.raises(psycopg2.ProgrammingError):
await cur.execute("SELECT * FROM unknown_table")
def test_ready_set_exception(connect, loop):
async def go():
conn = await connect()
impl = mock.Mock()
impl.notifies = []
exc = psycopg2.ProgrammingError("something bad")
impl.poll.side_effect = exc
conn._conn = impl
conn._writing = True
waiter = conn._create_waiter("test")
conn._ready(conn._weakref)
assert not conn._writing
return waiter
waiter = loop.run_until_complete(go())
with pytest.raises(psycopg2.ProgrammingError):
loop.run_until_complete(waiter)
def test_ready_OK_with_waiter(connect, loop):
async def go():
conn = await connect()
impl = mock.Mock()
impl.notifies = []
impl.poll.return_value = psycopg2.extensions.POLL_OK
# keep a reference to underlying psycopg connection, and the fd alive,
# otherwise the event loop will fail under windows
old_conn = conn._conn
conn._conn = impl
conn._writing = True
waiter = conn._create_waiter("test")
conn._ready(conn._weakref)
assert not conn._writing
assert not impl.close.called
conn._conn = old_conn
return waiter
waiter = loop.run_until_complete(go())
assert loop.run_until_complete(waiter) is None
def test_ready_POLL_ERROR(connect, loop):
async def go():
conn = await connect()
impl = mock.Mock()
impl.notifies = []
impl.poll.return_value = psycopg2.extensions.POLL_ERROR
conn._conn = impl
conn._writing = True
waiter = conn._create_waiter("test")
handler = mock.Mock()
loop.set_exception_handler(handler)
conn._ready(conn._weakref)
handler.assert_called_with(
loop,
{
"connection": conn,
"message": "Fatal error on aiopg connection: "
"POLL_ERROR from underlying .poll() call",
},
)
assert not conn._writing
assert impl.close.called
return waiter
waiter = loop.run_until_complete(go())
with pytest.raises(psycopg2.OperationalError):
loop.run_until_complete(waiter)
def test_ready_unknown_answer(connect, loop):
async def go():
conn = await connect()
impl = mock.Mock()
impl.notifies = []
impl.poll.return_value = 9999
conn._conn = impl
conn._writing = True
waiter = conn._create_waiter("test")
handler = mock.Mock()
loop.set_exception_handler(handler)
conn._ready(conn._weakref)
handler.assert_called_with(
loop,
{
"connection": conn,
"message": "Fatal error on aiopg connection: "
"unknown answer 9999 from underlying .poll() call",
},
)
assert not conn._writing
assert impl.close.called
return waiter
waiter = loop.run_until_complete(go())
with pytest.raises(psycopg2.OperationalError):
loop.run_until_complete(waiter)
async def test_execute_twice(connect):
conn = await connect()
cur1 = await conn.cursor()
# cur2 = await conn.cursor()
coro1 = cur1.execute("SELECT 1")
fut1 = next(coro1.__await__())
assert isinstance(fut1, asyncio.Future)
coro2 = cur1.execute("SELECT 2")
with pytest.raises(RuntimeError):
next(coro2.__await__())
async def test_connect_to_unsupported_port(unused_port, loop, pg_params):
port = unused_port()
pg_params["port"] = port
with pytest.raises(psycopg2.OperationalError):
await aiopg.connect(**pg_params)
async def test_binary_protocol_error(connect):
conn = await connect()
s = socket.fromfd(conn._fileno, socket.AF_INET, socket.SOCK_STREAM)
s.send(b"garbage")
s.detach()
cur = await conn.cursor()
with pytest.raises(psycopg2.DatabaseError):
await cur.execute("SELECT 1")
async def test_closing_in_separate_task(connect):
closed_event = asyncio.Event()
exec_created = asyncio.Event()
async def waiter(conn):
cur = await conn.cursor()
fut = cur.execute("SELECT pg_sleep(1000)")
exec_created.set()
await closed_event.wait()
with pytest.raises(psycopg2.InterfaceError):
await fut
async def closer(conn):
await exec_created.wait()
await conn.close()
closed_event.set()
conn = await connect()
await asyncio.gather(waiter(conn), closer(conn))
async def test_connection_timeout(connect):
timeout = 0.1
conn = await connect(timeout=timeout)
assert timeout == conn.timeout
cur = await conn.cursor()
assert timeout == cur.timeout
t1 = time.time()
with pytest.raises(asyncio.TimeoutError):
await cur.execute("SELECT pg_sleep(1)")
t2 = time.time()
dt = t2 - t1
assert 0.08 <= dt <= 0.15, dt
async def test_override_cursor_timeout(connect):
timeout = 0.1
conn = await connect()
assert DEFAULT_TIMEOUT == conn.timeout
cur = await conn.cursor(timeout=timeout)
assert timeout == cur.timeout
t1 = time.time()
with pytest.raises(asyncio.TimeoutError):
await cur.execute("SELECT pg_sleep(1)")
t2 = time.time()
dt = t2 - t1
assert 0.08 <= dt <= 0.15, dt
async def test_echo(connect):
conn = await connect(echo=True)
assert conn.echo
async def test___del__(loop, pg_params, warning):
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
conn = await aiopg.connect(**pg_params)
with warning(ResourceWarning):
del conn
gc.collect()
msg = {
"connection": mock.ANY, # conn was deleted
"message": "Unclosed connection",
}
if loop.get_debug():
msg["source_traceback"] = mock.ANY
exc_handler.assert_called_with(loop, msg)
async def test_notifies(connect):
conn1 = await connect()
conn2 = await connect()
async with await conn1.cursor() as cur1, await conn2.cursor() as cur2:
await cur1.execute("LISTEN test")
assert conn2.notifies.empty()
await cur2.execute("NOTIFY test, 'hello'")
val = await conn1.notifies.get()
assert "test" == val.channel
assert "hello" == val.payload
await conn1.close()
with pytest.raises(psycopg2.OperationalError):
await conn1.notifies.get()
async def test_close_connection_on_timeout_error(connect):
conn = await connect()
cur = await conn.cursor(timeout=0.01)
with pytest.raises(asyncio.TimeoutError):
await cur.execute("SELECT pg_sleep(10)")
assert conn.closed
async def test_issue_111_crash_on_connect_error():
import aiopg.connection
with pytest.raises(psycopg2.ProgrammingError):
await aiopg.connection.connect("baddsn:1")
async def test_remove_reader_from_alive_fd(connect):
conn = await connect()
# keep a reference to underlying psycopg connection, and the fd alive
_conn = conn._conn # noqa
fileno = conn._fileno
impl = mock.Mock()
exc = psycopg2.OperationalError("Test")
impl.poll.side_effect = exc
conn._conn = impl
conn._fileno = fileno
m_remove_reader = mock.Mock()
conn._loop.remove_reader = m_remove_reader
conn._ready(conn._weakref)
assert not m_remove_reader.called
conn.close()
assert m_remove_reader.called_with(fileno)
async def test_remove_reader_from_dead_fd(connect):
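    # Verify that when the underlying file descriptor is already closed,
    # _ready() removes the reader from the event loop itself, and the later
    # conn.close() does not try to remove it a second time.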
conn = await connect()
fileno = conn._conn.fileno()
_conn = conn._conn
impl = mock.Mock()
exc = psycopg2.OperationalError("Test")
impl.poll.side_effect = exc
conn._conn = impl
conn._fileno = fileno
_conn.close()
m_remove_reader = mock.Mock()
old_remove_reader = conn._loop.remove_reader
conn._loop.remove_reader = m_remove_reader
conn._ready(conn._weakref)
assert m_remove_reader.called_with(fileno)
m_remove_reader.reset_mock()
conn.close()
assert not m_remove_reader.called
old_remove_reader(fileno)
async def test_connection_on_server_restart(connect, pg_server, docker):
# Operation on closed connection should raise OperationalError
conn = await connect()
cur = await conn.cursor()
await cur.execute("SELECT 1")
ret = await cur.fetchone()
assert (1,) == ret
docker.restart(container=pg_server["Id"])
with pytest.raises(psycopg2.OperationalError):
await cur.execute("SELECT 1")
conn.close()
# Wait for postgres to be up and running again before moving on
    # so that the restart won't affect other tests
delay = 0.001
for i in range(100):
try:
conn = await connect()
conn.close()
break
except psycopg2.Error:
time.sleep(delay)
delay *= 2
else:
pytest.fail("Cannot connect to the restarted server")
async def test_connection_notify_on_disconnect(
connect, pg_params, tcp_proxy, unused_port, loop
):
server_port = pg_params["port"]
proxy_port = unused_port()
tcp_proxy = await tcp_proxy(proxy_port, server_port)
async with await connect(port=proxy_port) as connection:
async def read_notifies(c):
while True:
await c.notifies.get()
reader_task = loop.create_task(read_notifies(connection))
await asyncio.sleep(0.1)
await tcp_proxy.disconnect()
try:
with pytest.raises(psycopg2.OperationalError):
await asyncio.wait_for(reader_task, 10)
finally:
reader_task.cancel()
|
aio-libs/aiopg
|
tests/test_connection.py
|
Python
|
bsd-2-clause
| 16,350
|
class IndexedTextSearch:
"""
:param statement_comparison_function: A comparison class.
Defaults to ``LevenshteinDistance``.
:param search_page_size:
The maximum number of records to load into memory at a time when searching.
Defaults to 1000
"""
name = 'indexed_text_search'
def __init__(self, chatbot, **kwargs):
from chatterbot.comparisons import LevenshteinDistance
self.chatbot = chatbot
statement_comparison_function = kwargs.get(
'statement_comparison_function',
LevenshteinDistance
)
self.compare_statements = statement_comparison_function(
language=self.chatbot.storage.tagger.language
)
self.search_page_size = kwargs.get(
'search_page_size', 1000
)
def search(self, input_statement, **additional_parameters):
"""
        Search for close matches to the input. Confidence scores for
        subsequent results will be in order of increasing value.
:param input_statement: A statement.
:type input_statement: chatterbot.conversation.Statement
:param **additional_parameters: Additional parameters to be passed
to the ``filter`` method of the storage adapter when searching.
:rtype: Generator yielding one closest matching statement at a time.
"""
self.chatbot.logger.info('Beginning search for close text match')
input_search_text = input_statement.search_text
if not input_statement.search_text:
self.chatbot.logger.warn(
'No value for search_text was available on the provided input'
)
input_search_text = self.chatbot.storage.tagger.get_text_index_string(
input_statement.text
)
search_parameters = {
'search_text_contains': input_search_text,
'persona_not_startswith': 'bot:',
'page_size': self.search_page_size
}
if additional_parameters:
search_parameters.update(additional_parameters)
statement_list = self.chatbot.storage.filter(**search_parameters)
best_confidence_so_far = 0
self.chatbot.logger.info('Processing search results')
# Find the closest matching known statement
for statement in statement_list:
confidence = self.compare_statements(input_statement, statement)
if confidence > best_confidence_so_far:
best_confidence_so_far = confidence
statement.confidence = confidence
self.chatbot.logger.info('Similar text found: {} {}'.format(
statement.text, confidence
))
yield statement
class TextSearch:
"""
:param statement_comparison_function: A comparison class.
Defaults to ``LevenshteinDistance``.
:param search_page_size:
The maximum number of records to load into memory at a time when searching.
Defaults to 1000
"""
name = 'text_search'
def __init__(self, chatbot, **kwargs):
from chatterbot.comparisons import LevenshteinDistance
self.chatbot = chatbot
statement_comparison_function = kwargs.get(
'statement_comparison_function',
LevenshteinDistance
)
self.compare_statements = statement_comparison_function(
language=self.chatbot.storage.tagger.language
)
self.search_page_size = kwargs.get(
'search_page_size', 1000
)
def search(self, input_statement, **additional_parameters):
"""
        Search for close matches to the input. Confidence scores for
        subsequent results will be in order of increasing value.
:param input_statement: A statement.
:type input_statement: chatterbot.conversation.Statement
:param **additional_parameters: Additional parameters to be passed
to the ``filter`` method of the storage adapter when searching.
:rtype: Generator yielding one closest matching statement at a time.
"""
self.chatbot.logger.info('Beginning search for close text match')
search_parameters = {
'persona_not_startswith': 'bot:',
'page_size': self.search_page_size
}
if additional_parameters:
search_parameters.update(additional_parameters)
statement_list = self.chatbot.storage.filter(**search_parameters)
best_confidence_so_far = 0
self.chatbot.logger.info('Processing search results')
# Find the closest matching known statement
for statement in statement_list:
confidence = self.compare_statements(input_statement, statement)
if confidence > best_confidence_so_far:
best_confidence_so_far = confidence
statement.confidence = confidence
self.chatbot.logger.info('Similar text found: {} {}'.format(
statement.text, confidence
))
yield statement
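# Illustrative usage sketch (not part of the original module). It assumes an
# already configured ``ChatBot`` instance named ``bot`` with a populated
# storage adapter; the statement text is a placeholder:
#
#     from chatterbot.conversation import Statement
#
#     search = IndexedTextSearch(bot)
#     for match in search.search(Statement(text='Good morning!')):
#         print(match.text, match.confidence)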
|
vkosuri/ChatterBot
|
chatterbot/search.py
|
Python
|
bsd-3-clause
| 5,138
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-13 22:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0019_auto_20161013_1702'),
]
operations = [
migrations.AlterField(
model_name='action',
name='classification',
field=models.CharField(max_length=100, null=True),
),
]
|
datamade/django-councilmatic
|
councilmatic_core/migrations/0020_auto_20161013_1704.py
|
Python
|
mit
| 480
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Geos(Package):
"""GEOS (Geometry Engine - Open Source) is a C++ port of the Java
Topology Suite (JTS). As such, it aims to contain the complete
functionality of JTS in C++. This includes all the OpenGIS
Simple Features for SQL spatial predicate functions and spatial
operators, as well as specific JTS enhanced topology functions."""
homepage = "http://trac.osgeo.org/geos/"
url = "http://download.osgeo.org/geos/geos-3.4.2.tar.bz2"
    # Version 3.5.0 supports Autotools and CMake
version('3.5.0', '136842690be7f504fba46b3c539438dd')
# Versions through 3.4.2 have CMake, but only Autotools is supported
version('3.4.2', 'fc5df2d926eb7e67f988a43a92683bae')
version('3.4.1', '4c930dec44c45c49cd71f3e0931ded7e')
version('3.4.0', 'e41318fc76b5dc764a69d43ac6b18488')
version('3.3.9', '4794c20f07721d5011c93efc6ccb8e4e')
version('3.3.8', '75be476d0831a2d14958fed76ca266de')
version('3.3.7', '95ab996d22672b067d92c7dee2170460')
version('3.3.6', '6fadfb941541875f4976f75fb0bbc800')
version('3.3.5', '2ba61afb7fe2c5ddf642d82d7b16e75b')
version('3.3.4', '1bb9f14d57ef06ffa41cb1d67acb55a1')
version('3.3.3', '8454e653d7ecca475153cc88fd1daa26')
# # Python3 is not supported.
# variant('python', default=False, description='Enable Python support')
# extends('python', when='+python')
# depends_on('python', when='+python')
# depends_on('swig', when='+python')
def install(self, spec, prefix):
args = ["--prefix=%s" % prefix]
# if '+python' in spec:
# os.environ['PYTHON'] = spec['python'].command.path
# os.environ['SWIG'] = spec['swig'].command.path
#
# args.append("--enable-python")
configure(*args)
make()
make("install")
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/geos/package.py
|
Python
|
lgpl-2.1
| 3,067
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class GjqyxyxxcxxtItem(scrapy.Item):
#base
    UNISCID = scrapy.Field() # unified social credit code
    ENTNAME = scrapy.Field() # name || enterprise name
    ENTTYPE = scrapy.Field() # type
    LEREP = scrapy.Field() # legal representative || investor
    REGCAP = scrapy.Field() # registered capital
    ESTDATE = scrapy.Field() # registration date || establishment date
    OPFROM = scrapy.Field() # business term from
    OPTO = scrapy.Field() # business term to
    REGORG = scrapy.Field() # registration authority
    APPRDATE = scrapy.Field() # approved registration date || approval date
    REGSTATE = scrapy.Field() # registration status
    DOM = scrapy.Field() # business premises || domicile
    OPSCOPE = scrapy.Field() # business scope
    REGNO = scrapy.Field() # business registration number || registration number
    ORGAN_CODE = scrapy.Field() # organization code
IXINNUOBM = scrapy.Field()
ID = scrapy.Field()
#czxx
LEGINFO = scrapy.Field()
"""
    name = scrapy.Field() # shareholder name || partner
    name = scrapy.Field() # shareholder type || partner type
    name = scrapy.Field() # certificate/ID type || certificate/ID number || certificate/ID code
    name = scrapy.Field() # details
"""
#ryxx
PERINFO = scrapy.Field()
"""
    name = scrapy.Field() # name
    name = scrapy.Field() # role
"""
#bgxx
CHGINFO = scrapy.Field()
"""
    name = scrapy.Field() # changed item
    name = scrapy.Field() # content before change
    name = scrapy.Field() # content after change
    name = scrapy.Field() # date of change
"""
#ryjgxx
    BRANINFO = scrapy.Field() # branch information
    PARTNERCHAGEINFO = scrapy.Field() # shareholder change information
    MORTINFO = scrapy.Field() # chattel mortgage registration information
    EQUINFO = scrapy.Field() # equity pledge registration information
    FREEZEINFO = scrapy.Field() # judicial equity freeze information
    LAWINFO = scrapy.Field() # serious violation information
    EXCEINFO = scrapy.Field() # abnormal business operation information
    PUNINFO = scrapy.Field() # administrative penalties
    industry = scrapy.Field() # industry (source: Tianyancha)
company_md5 = scrapy.Field()
company_id = scrapy.Field()
    crawl_time = scrapy.Field() # crawl time
detail_id = scrapy.Field()
detail_formdata = scrapy.Field()
czxx_formdata = scrapy.Field()
ryxx_formdata = scrapy.Field()
bgxx_formdata = scrapy.Field()
|
AisinoPythonTeam/PythonAiniso
|
gjqyxyxxcxxt/gjqyxyxxcxxt/items.py
|
Python
|
apache-2.0
| 2,430
|
'''
Created on Sep 4, 2014
@author: sergio
'''
import neo.core
import numpy
import psycopg2
from .. import dbutils
from quantities import s
class SpikeDB(neo.core.Spike):
'''
'''
#TODO: Documentation of SpikeDB.
def __init__(self, id_unit = None, id_segment = None, id_recordingchannel = None,
time = None, waveform = None, left_sweep = None,
sampling_rate = None, name = None, description = None,
file_origin = None, index = None):
self.id = None
self.id_unit = id_unit
self.id_segment = id_segment
self.id_recordingchannel = id_recordingchannel
self.index = index
if time != None:
if (type(time) == numpy.float64) or (type(time) == numpy.float):
self.time = numpy.array(time)*s
else:
self.time = float(time.simplified)
else:
self.time = numpy.array(0.0)*s
self.waveform = waveform
if left_sweep != None:
self.left_sweep = float(left_sweep.simplified)
else:
self.left_sweep = left_sweep
if sampling_rate != None:
            if (type(sampling_rate) == numpy.float64) or (type(sampling_rate) == numpy.float):
self.sampling_rate = sampling_rate
else:
self.sampling_rate = float(sampling_rate.simplified)
self.name = name
self.description = description
self.file_origin = file_origin
def save(self, connection):
# Check mandatory values
if self.id_segment == None:
raise StandardError("Spike must have id_segment.")
if self.waveform == []:
raise StandardError("Spike must have a signal (waveform).")
if self.index == []:
raise StandardError("""Spike must have a index, it is the index of
the maximum point of the signal.""")
if self.left_sweep != None:
left_sweep = float(self.left_sweep)
else:
left_sweep = self.left_sweep
if self.time != None:
time = float(self.time)
else:
time = self.time
if self.sampling_rate != None:
sampling_rate = float(self.sampling_rate)
else:
sampling_rate = self.sampling_rate
# Format signal
signalb = numpy.int16(self.waveform)
# QUERY
cursor = connection.cursor()
if self.id == None:
query = """INSERT INTO spike
(id_unit, id_segment, id_recordingchannel, time, waveform,
left_sweep, sampling_rate, name, description, file_origin, index)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
cursor.execute(query,[self.id_unit, self.id_segment,
self.id_recordingchannel, float(time),
psycopg2.Binary(signalb), left_sweep,
sampling_rate, self.name,
self.description, self.file_origin,
self.index])
else:
query = """ UPDATE public.spike SET id_unit = %s, id_segment = %s,
id_recordingchannel = %s,
TIME = %s, waveform = %s, left_sweep = %s,
sampling_rate = %s, name = %s, description = %s,
file_origin = %s, index = %s
WHERE id = %s"""
cursor.execute(query,[self.id_unit, self.id_segment,
self.id_recordingchannel, float(self.time),
psycopg2.Binary(signalb), float(self.left_sweep),
int(self.sampling_rate), self.name,
self.description, self.file_origin,
self.index])
connection.commit()
# Get ID
try:
[(id, _)] = dbutils.get_id(connection, 'spike',
index = self.index,
id_segment = self.id_segment,
id_recordingchannel = self.id_recordingchannel)
self.id = id
return id
except:
print dbutils.get_id(connection, 'spike',
index = self.index,
id_segment = self.id_segment,
id_recordingchannel = self.id_recordingchannel)
def get_from_db(connection, id_block, channel, **kwargs):
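    # Fetch spikes for the given block and channel, optionally narrowed by the
    # keyword filters below (id, id_segment, id_recordingchannel, index, time),
    # and return them as a list of SpikeDB objects.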
for parameter in kwargs.keys():
if parameter not in ["id", "id_segment", "id_recordingchannel", "index", "time"]:
raise StandardError("""Parameter %s do not belong to SpikeDB.""")%parameter
if id_block == None:
raise StandardError(""" You must specify id_block.""")
if channel == None:
raise StandardError(""" You must specify number of channel.""")
# QUERY
cursor = connection.cursor()
query = """SELECT spike.id,
spike.id_unit,
spike.id_segment,
spike.id_recordingchannel,
spike.time,
spike.waveform,
spike.index,
spike.sampling_rate
FROM spike
JOIN recordingchannel ON id_recordingchannel = recordingchannel.id
WHERE recordingchannel.id_block = %s and
recordingchannel.index = %s """%(id_block, channel)
constraint = ""
for key, value in kwargs.iteritems():
constraint = "%s and spike.%s='%s'"%(constraint,key,value)
if constraint != "":
query = query + constraint
cursor.execute(query)
results = cursor.fetchall()
spikes = []
for result in results:
spike = SpikeDB(id_unit = result[1], id_segment = result[2],
id_recordingchannel = result[3], time = result[4],
waveform = numpy.frombuffer(result[5], numpy.int16),
index = result[6], sampling_rate = result[7])
spike.id = result[0]
spikes.append(spike)
return spikes
def get_ids_from_db(connection, id_block, channel):
# QUERY
cursor = connection.cursor()
query = """SELECT spike.id
FROM spike
JOIN recordingchannel ON id_recordingchannel = recordingchannel.id
WHERE recordingchannel.id_block = %s and
recordingchannel.index = %s """%(id_block, channel)
cursor.execute(query)
results = cursor.fetchall()
ids = []
if results != []:
for tuple in results:
ids.append(tuple[0])
return ids
def update(connection, id, **kwargs):
#TODO: add this function in Class SpikeDB
cursor = connection.cursor()
query = """UPDATE spike
SET """
columns = dbutils.column_names('spike', connection)
for parameter in kwargs.keys():
if parameter not in columns:
raise StandardError("Parameter %s do not belong to SpikeDB."%parameter)
parameters = ""
for key, value in kwargs.iteritems():
parameters = "%s %s= '%s', "%(parameters, key, value)
parameters = parameters[0:len(parameters)-2]
query = query + parameters
query = query + " WHERE id = %s"%id
cursor.execute(query)
connection.commit()
if __name__ == '__main__':
username = 'postgres'
password = 'postgres'
host = '192.168.2.2'
dbname = 'demo'
url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname)
dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
# spike = SpikeDB(id_segment = 33, waveform = [1,2,3,4,5,6], index = 156)
# spike.save(dbconn)
#get_from_db(dbconn, id_block = 54, channel = 3, index = 493638)
#spikes_id = get_ids_from_db(dbconn, id_block = 54, channel = 3)
update(dbconn, 1035, p1 = 1, p2 = 2, p3 = 3)
pass
|
sergiohr/NeoDB
|
neodb/core/spikedb.py
|
Python
|
gpl-3.0
| 8,447
|
from __future__ import unicode_literals
import frappe
import frappe.utils
import frappe.defaults
from frappe.utils import add_days, cint, cstr, date_diff, flt, getdate, nowdate, \
get_first_day, get_last_day, comma_and, split_emails
from frappe.model.naming import make_autoname
from frappe import _, msgprint, throw
from erpnext.accounts.party import get_party_account, get_due_date, get_party_details
from frappe.model.mapper import get_mapped_doc
month_map = {'Monthly': 1, 'Quarterly': 3, 'Half-yearly': 6, 'Yearly': 12}
date_field_map = {
"Sales Order": "transaction_date",
"Sales Invoice": "posting_date",
"Purchase Order": "transaction_date",
"Purchase Invoice": "posting_date"
}
def create_recurring_documents():
manage_recurring_documents("Sales Order")
manage_recurring_documents("Sales Invoice")
manage_recurring_documents("Purchase Order")
manage_recurring_documents("Purchase Invoice")
def manage_recurring_documents(doctype, next_date=None, commit=True):
"""
	Create recurring documents on a specific date by copying the original one
and notify the concerned people
"""
next_date = next_date or nowdate()
date_field = date_field_map[doctype]
condition = " and ifnull(status, '') != 'Stopped'" if doctype in ("Sales Order", "Purchase Order") else ""
recurring_documents = frappe.db.sql("""select name, recurring_id
from `tab{0}` where ifnull(is_recurring, 0)=1
and docstatus=1 and next_date=%s
and next_date <= ifnull(end_date, '2199-12-31') {1}""".format(doctype, condition), next_date)
exception_list = []
for ref_document, recurring_id in recurring_documents:
if not frappe.db.sql("""select name from `tab%s`
where %s=%s and recurring_id=%s and docstatus=1"""
% (doctype, date_field, '%s', '%s'), (next_date, recurring_id)):
try:
ref_wrapper = frappe.get_doc(doctype, ref_document)
if hasattr(ref_wrapper, "before_recurring"):
ref_wrapper.before_recurring()
new_document_wrapper = make_new_document(ref_wrapper, date_field, next_date)
send_notification(new_document_wrapper)
if commit:
frappe.db.commit()
except:
if commit:
frappe.db.rollback()
frappe.db.begin()
frappe.db.sql("update `tab%s` \
set is_recurring = 0 where name = %s" % (doctype, '%s'),
(ref_document))
notify_errors(ref_document, doctype, ref_wrapper.get("customer") or ref_wrapper.get("supplier"),
ref_wrapper.owner)
frappe.db.commit()
exception_list.append(frappe.get_traceback())
finally:
if commit:
frappe.db.begin()
if exception_list:
exception_message = "\n\n".join([cstr(d) for d in exception_list])
frappe.throw(exception_message)
def make_new_document(ref_wrapper, date_field, posting_date):
from erpnext.accounts.utils import get_fiscal_year
new_document = frappe.copy_doc(ref_wrapper)
mcount = month_map[ref_wrapper.recurring_type]
from_date = get_next_date(ref_wrapper.from_date, mcount)
	# Get the last day of the month to keep the period length when from_date is the first day of its month
	# and to_date is the last day of its month
if (cstr(get_first_day(ref_wrapper.from_date)) == \
cstr(ref_wrapper.from_date)) and \
(cstr(get_last_day(ref_wrapper.to_date)) == \
cstr(ref_wrapper.to_date)):
to_date = get_last_day(get_next_date(ref_wrapper.to_date,
mcount))
else:
to_date = get_next_date(ref_wrapper.to_date, mcount)
new_document.update({
date_field: posting_date,
"from_date": from_date,
"to_date": to_date,
"fiscal_year": get_fiscal_year(posting_date)[0],
"owner": ref_wrapper.owner,
})
if ref_wrapper.doctype == "Sales Order":
new_document.update({
"delivery_date": get_next_date(ref_wrapper.delivery_date, mcount,
cint(ref_wrapper.repeat_on_day_of_month))
})
new_document.submit()
return new_document
def get_next_date(dt, mcount, day=None):
dt = getdate(dt)
from dateutil.relativedelta import relativedelta
dt += relativedelta(months=mcount, day=day)
return dt
def send_notification(new_rv):
"""Notify concerned persons about recurring document generation"""
frappe.sendmail(new_rv.notification_email_address,
subject= _("New {0}: #{1}").format(new_rv.doctype, new_rv.name),
message = _("Please find attached {0} #{1}").format(new_rv.doctype, new_rv.name),
attachments = [frappe.attach_print(new_rv.doctype, new_rv.name, file_name=new_rv.name, print_format=new_rv.recurring_print_format)])
def notify_errors(doc, doctype, party, owner):
from frappe.utils.user import get_system_managers
recipients = get_system_managers(only_name=True)
frappe.sendmail(recipients + [frappe.db.get_value("User", owner, "email")],
subject="[Urgent] Error while creating recurring %s for %s" % (doctype, doc),
message = frappe.get_template("templates/emails/recurring_document_failed.html").render({
"type": doctype,
"name": doc,
"party": party
}))
assign_task_to_owner(doc, doctype, "Recurring Invoice Failed", recipients)
def assign_task_to_owner(doc, doctype, msg, users):
for d in users:
from frappe.desk.form import assign_to
args = {
'assign_to' : d,
'doctype' : doctype,
'name' : doc,
'description' : msg,
'priority' : 'High'
}
assign_to.add(args)
def validate_recurring_document(doc):
if doc.is_recurring:
validate_notification_email_id(doc)
if not doc.recurring_type:
msgprint(_("Please select {0}").format(doc.meta.get_label("recurring_type")),
raise_exception=1)
elif not (doc.from_date and doc.to_date):
throw(_("Period From and Period To dates mandatory for recurring {0}").format(doc.doctype))
#
def convert_to_recurring(doc, posting_date):
if doc.is_recurring:
if not doc.recurring_id:
frappe.db.set(doc, "recurring_id", doc.name)
set_next_date(doc, posting_date)
elif doc.recurring_id:
frappe.db.sql("""update `tab%s` set is_recurring = 0
where recurring_id = %s""" % (doc.doctype, '%s'), (doc.recurring_id))
#
def validate_notification_email_id(doc):
if doc.notification_email_address:
email_list = split_emails(doc.notification_email_address.replace("\n", ""))
from frappe.utils import validate_email_add
for email in email_list:
if not validate_email_add(email):
throw(_("{0} is an invalid email address in 'Notification \
Email Address'").format(email))
else:
frappe.throw(_("'Notification Email Addresses' not specified for recurring %s") \
% doc.doctype)
def set_next_date(doc, posting_date):
""" Set next date on which recurring document will be created"""
if not doc.repeat_on_day_of_month:
msgprint(_("Please enter 'Repeat on Day of Month' field value"), raise_exception=1)
next_date = get_next_date(posting_date, month_map[doc.recurring_type],
cint(doc.repeat_on_day_of_month))
frappe.db.set(doc, 'next_date', next_date)
msgprint(_("Next Recurring {0} will be created on {1}").format(doc.doctype, next_date))
|
susuchina/ERPNEXT
|
erpnext/controllers/recurring_document.py
|
Python
|
agpl-3.0
| 6,909
|
import shlex
import envi.archs.i386 as e_i386
def eflags(vdb, line):
'''
Shows or flips the status of the eflags register bits.
Usage: eflags [flag short name]
'''
trace = vdb.getTrace()
argv = shlex.split(line)
if len(argv) not in (0, 1):
return vdb.do_help('eflags')
if len(argv) > 0:
flag = argv[0].upper()
valid_flags = trace.getStatusFlags().keys()
if flag not in valid_flags:
raise Exception('invalid flag: %s, valid flags %s' % (flag, valid_flags))
value = trace.getRegisterByName(flag)
trace.setRegisterByName(flag, not bool(value))
# TODO: this is not plumbed through to flags gui due to new gui
# eventing coming soon.
vdb.vdbUIEvent('vdb:setflags')
return
ef = trace.getRegisterByName('eflags')
vdb.vprint('%16s: %s' % ('Carry', bool(ef & e_i386.EFLAGS_CF)))
vdb.vprint('%16s: %s' % ('Parity', bool(ef & e_i386.EFLAGS_PF)))
vdb.vprint('%16s: %s' % ('Adjust', bool(ef & e_i386.EFLAGS_AF)))
vdb.vprint('%16s: %s' % ('Zero', bool(ef & e_i386.EFLAGS_ZF)))
vdb.vprint('%16s: %s' % ('Sign', bool(ef & e_i386.EFLAGS_SF)))
vdb.vprint('%16s: %s' % ('Trap', bool(ef & e_i386.EFLAGS_TF)))
vdb.vprint('%16s: %s' % ('Interrupt', bool(ef & e_i386.EFLAGS_IF)))
vdb.vprint('%16s: %s' % ('Direction', bool(ef & e_i386.EFLAGS_DF)))
vdb.vprint('%16s: %s' % ('Overflow', bool(ef & e_i386.EFLAGS_OF)))
def vdbExtension(vdb, trace):
vdb.addCmdAlias('db','mem -F bytes')
vdb.addCmdAlias('dw','mem -F u_int_16')
vdb.addCmdAlias('dd','mem -F u_int_32')
vdb.addCmdAlias('dq','mem -F u_int_64')
vdb.addCmdAlias('dr','mem -F "Deref View"')
vdb.addCmdAlias('ds','mem -F "Symbols View"')
vdb.registerCmdExtension(eflags)
|
imjonsnooow/vivisect
|
vdb/extensions/i386.py
|
Python
|
apache-2.0
| 1,800
|
# Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
from _emerge.SlotObject import SlotObject
from collections import deque
class SequentialTaskQueue(SlotObject):
__slots__ = ("max_jobs", "running_tasks") + \
("_dirty", "_scheduling", "_task_queue")
def __init__(self, **kwargs):
SlotObject.__init__(self, **kwargs)
self._task_queue = deque()
self.running_tasks = set()
if self.max_jobs is None:
self.max_jobs = 1
self._dirty = True
def add(self, task):
self._task_queue.append(task)
self._dirty = True
def addFront(self, task):
self._task_queue.appendleft(task)
self._dirty = True
def schedule(self):
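		# Start queued tasks until the queue is drained or max_jobs tasks are
		# running; return True if at least one queued task was started.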
if not self._dirty:
return False
if not self:
return False
if self._scheduling:
# Ignore any recursive schedule() calls triggered via
# self._task_exit().
return False
self._scheduling = True
task_queue = self._task_queue
running_tasks = self.running_tasks
max_jobs = self.max_jobs
state_changed = False
while task_queue and \
(max_jobs is True or len(running_tasks) < max_jobs):
task = task_queue.popleft()
cancelled = getattr(task, "cancelled", None)
if not cancelled:
running_tasks.add(task)
task.addExitListener(self._task_exit)
task.start()
state_changed = True
self._dirty = False
self._scheduling = False
return state_changed
def _task_exit(self, task):
"""
Since we can always rely on exit listeners being called, the set of
running tasks is always pruned automatically and there is never any need
to actively prune it.
"""
self.running_tasks.remove(task)
if self._task_queue:
self._dirty = True
def clear(self):
self._task_queue.clear()
running_tasks = self.running_tasks
while running_tasks:
task = running_tasks.pop()
task.removeExitListener(self._task_exit)
task.cancel()
self._dirty = False
def __bool__(self):
return bool(self._task_queue or self.running_tasks)
if sys.hexversion < 0x3000000:
__nonzero__ = __bool__
def __len__(self):
return len(self._task_queue) + len(self.running_tasks)
|
Neuvoo/legacy-portage
|
pym/_emerge/SequentialTaskQueue.py
|
Python
|
gpl-2.0
| 2,131
|
#! /usr/bin/env python
"""Wrapper for the BioTaxIDMapper package."""
from os import sys
import argparse
from taxonomydb import TaxDb
from own_exceptions import NoRecord, NoProteinLink
usage = """Biological Taxonomies ID Mapper.
This simple tool allows you to map NCBI taxonomy database information onto files
containing FASTA-like definition lines. Taxonomic lineage is appended to the
defline between '#|' and '|#' delimiters. Each node is separated with '<->'.
e.g.:
> (...) #|cellular organisms <-> Bacteria |#
To run simply type:
./mapper.py -i [IN_FILE] -o [OUT_FILE]
"""
def parse_arguments(argv):
"""Parses user arguments."""
parser = argparse.ArgumentParser(description=usage,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i',
'--input-file',
help='Input file with FASTA-like deflines',
type=str,
required=True)
parser.add_argument('-o',
'--output-file',
help='Output file with taxonomies marked. ' +
'If not specified results will be written to' +
'\'annotated.txt\'',
type=str,
required=False,
default='annotated.txt')
args = parser.parse_args(argv)
return args
def version_to_accession(protein_ver):
"""Extracts accession from a version.
Params:
protein_ver (str): Protein version ID
Returns:
protein_acc (str): Protein accession ID
"""
if '.' in protein_ver:
return protein_ver[:protein_ver.rfind('.')]
else:
return protein_ver
def read_protein_acc(defline):
"""Retrieves protein accession from a definition line.
Params:
defline (str): Definition line in FASTA-like format
Returns:
protein_acc (str): Protein accession ID
"""
if defline.startswith('gi|'):
protein_version = defline.split('|')[3]
return version_to_accession(protein_version)
elif ( defline.startswith('sp|') or
defline.startswith('tr|') ):
return defline.split('|')[1]
else:
protein_version = defline.split()[0]
return version_to_accession(protein_version)
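# Illustrative examples (not part of the original file) of how deflines are
# reduced to accessions by the two helpers above:
#   'gi|123456|ref|NP_001234.1| some protein'  -> 'NP_001234'
#   'sp|P12345|FOO_HUMAN some protein'         -> 'P12345'
#   'XP_004321.2 hypothetical protein'         -> 'XP_004321'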
def map_taxonomies(in_file, out_file):
"""Maps taxonomies onto deflines from input file.
Params:
in_file (str): Input filename
out_file (str): Output filename
Returns:
Writes output file, as specified in input parameters, with taxonomy
markings.
"""
# Connect to the database
database = TaxDb()
# Open input and output files for reading / writing
with open(in_file, 'r') as ifile, open(out_file, 'w') as ofile:
# Iterate in line per line fashion
for line in ifile:
            # If it is not a defline, write it to output as it is
if not line.startswith('>'):
ofile.write(line)
continue
# Retrieve data from the database
protein_acc = read_protein_acc(line[1:])
            # Catch exceptions signalling that the entry does not exist in
            # the database. In such a case there is no mapping and the
            # defline is written to output as it is
try:
taxid = database.protein_taxid(protein_acc)
lineage = database.get_lineage_from_db(taxid)
except (NoRecord, NoProteinLink) as e:
ofile.write(line)
continue
# Create lineage string in a human-readable way
lineage = "<->".join(lineage)
# Create new definition line containing lineage
new_defline = '%s #| %s |#\n' % (line.strip(),
lineage)
# Write to output
ofile.write(new_defline)
# Just in case - disconnect from the database
database.disconnect()
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
map_taxonomies(args.input_file, args.output_file)
|
mkorycinski/BioTaxIDMapper
|
mapper.py
|
Python
|
apache-2.0
| 4,168
|
# -*- coding: utf-8 -*-
# flake8: noqa
# Vendored copy from git://github.com/reallistic/rq-gevent-worker.git
# because there isn't a working release.
from __future__ import absolute_import, division, print_function, unicode_literals
from gevent import monkey
from gevent.hub import LoopExit
monkey.patch_all()
import logging
import signal
import gevent
import gevent.pool
from rq import Worker
from rq.job import JobStatus
from rq.timeouts import BaseDeathPenalty, JobTimeoutException
from rq.worker import StopRequested, green, blue, WorkerStatus
from rq.exceptions import DequeueTimeout
from rq.logutils import setup_loghandlers
from rq.version import VERSION
class GeventDeathPenalty(BaseDeathPenalty):
def setup_death_penalty(self):
exception = JobTimeoutException(
"Gevent Job exceeded maximum timeout value (%d seconds)." % self._timeout
)
self.gevent_timeout = gevent.Timeout(self._timeout, exception)
self.gevent_timeout.start()
def cancel_death_penalty(self):
self.gevent_timeout.cancel()
class GeventWorker(Worker):
death_penalty_class = GeventDeathPenalty
DEFAULT_POOL_SIZE = 20
def __init__(self, *args, **kwargs):
pool_size = self.DEFAULT_POOL_SIZE
if "pool_size" in kwargs:
pool_size = kwargs.pop("pool_size")
self.gevent_pool = gevent.pool.Pool(pool_size)
self.children = []
self.gevent_worker = None
super(GeventWorker, self).__init__(*args, **kwargs)
def register_birth(self):
super(GeventWorker, self).register_birth()
self.connection.hset(self.key, "pool_size", self.gevent_pool.size)
def heartbeat(self, timeout=0, pipeline=None):
connection = pipeline if pipeline is not None else self.connection
super(GeventWorker, self).heartbeat(timeout)
connection.hset(self.key, "curr_pool_len", len(self.gevent_pool))
def _install_signal_handlers(self):
def request_force_stop():
self.log.warning("Cold shut down.")
self.gevent_pool.kill()
raise SystemExit()
def request_stop():
if not self._stop_requested:
gevent.signal_handler(signal.SIGINT, request_force_stop)
gevent.signal_handler(signal.SIGTERM, request_force_stop)
self.log.warning("Warm shut down requested.")
self.log.warning(
"Stopping after all greenlets are finished. "
"Press Ctrl+C again for a cold shutdown."
)
self._stop_requested = True
self.gevent_pool.join()
if self.gevent_worker is not None:
self.gevent_worker.kill(StopRequested)
gevent.signal_handler(signal.SIGINT, request_stop)
gevent.signal_handler(signal.SIGTERM, request_stop)
def set_current_job_id(self, job_id, pipeline=None):
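        # No-op override: with a greenlet pool this worker may execute several
        # jobs concurrently, so tracking a single per-worker current job id
        # is not meaningful here.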
pass
def _work(self, burst=False, logging_level=logging.INFO):
"""Starts the work loop.
Pops and performs all jobs on the current list of queues. When all
queues are empty, block and wait for new jobs to arrive on any of the
queues, unless `burst` mode is enabled.
The return value indicates whether any jobs were processed.
"""
setup_loghandlers(logging_level)
self._install_signal_handlers()
self.did_perform_work = False
self.register_birth()
self.log.info(
"RQ GEVENT worker (Greenlet pool size={0}) {1!r} started, version {2}".format(
self.gevent_pool.size, self.key, VERSION
)
)
self.set_state(WorkerStatus.STARTED)
try:
while True:
try:
self.check_for_suspension(burst)
if self.should_run_maintenance_tasks:
self.clean_registries()
if self._stop_requested:
self.log.info("Stopping on request.")
break
timeout = None if burst else max(1, self.default_worker_ttl - 60)
result = self.dequeue_job_and_maintain_ttl(timeout)
if result is None and burst:
self.log.info("RQ worker {0!r} done, quitting".format(self.key))
try:
                            # Make sure dependent jobs are enqueued.
gevent.wait(self.children)
except LoopExit:
pass
result = self.dequeue_job_and_maintain_ttl(timeout)
if result is None:
break
except StopRequested:
break
job, queue = result
self.execute_job(job, queue)
finally:
if not self.is_horse:
self.register_death()
return self.did_perform_work
def work(self, burst=False, logging_level=logging.INFO):
"""
        Spawn a greenlet so that it can be killed when it is blocked dequeueing a job.
        :param burst: if this is a burst worker there is no need to spawn a greenlet
"""
        # If this is a burst worker there is no need to spawn a greenlet
if burst:
return self._work(burst, logging_level=logging_level)
self.gevent_worker = gevent.spawn(self._work, burst)
self.gevent_worker.join()
return self.gevent_worker.value
def execute_job(self, job, queue):
def job_done(child):
self.children.remove(child)
self.did_perform_work = True
self.heartbeat()
if job.get_status() == JobStatus.FINISHED:
queue.enqueue_dependents(job)
child_greenlet = self.gevent_pool.spawn(self.perform_job, job, queue)
child_greenlet.link(job_done)
self.children.append(child_greenlet)
def dequeue_job_and_maintain_ttl(self, timeout):
if self._stop_requested:
raise StopRequested()
result = None
while True:
if self._stop_requested:
raise StopRequested()
self.heartbeat()
if self.gevent_pool.full():
self.set_state(WorkerStatus.BUSY)
self.log.warning(
"RQ GEVENT worker greenlet pool empty current size %s",
self.gevent_pool.size,
)
while self.gevent_pool.full():
gevent.sleep(0.1)
if self._stop_requested:
raise StopRequested()
try:
result = self.queue_class.dequeue_any(
self.queues, timeout, connection=self.connection
)
self.set_state(WorkerStatus.IDLE)
if result is not None:
job, queue = result
self.log.info(
"%s: %s (%s)"
% (green(queue.name), blue(job.description), job.id)
)
break
except DequeueTimeout:
pass
self.heartbeat()
return result
def main():
import sys
from rq.cli import worker as rq_main
if "-w" in sys.argv or "--worker-class" in sys.argv:
print(
"You cannot specify worker class when using this script,"
"use the official rqworker instead"
)
sys.exit(1)
sys.argv.extend(["-w", "rq_gevent_worker.GeventWorker"])
rq_main()
|
Dallinger/Dallinger
|
dallinger/heroku/rq_gevent_worker.py
|
Python
|
mit
| 7,652
|
"""
FLI.focuser.py
Object-oriented interface for handling FLI (Finger Lakes Instrumentation) USB
focusers
author: Craig Wm. Versek, Yankee Environmental Systems
author_email: cwv@yesinc.com
"""
__author__ = 'Craig Wm. Versek'
__date__ = '2012-08-16'
from ctypes import byref, c_char, c_char_p, c_long, c_ubyte, c_double
from lib import FLILibrary, FLIError, FLIWarning, flidomain_t, flidev_t,\
fliframe_t, FLIDOMAIN_USB, FLIDEVICE_FOCUSER,\
FLI_TEMPERATURE_INTERNAL, FLI_TEMPERATURE_EXTERNAL
from device import USBDevice
###############################################################################
DEBUG = False
###############################################################################
class USBFocuser(USBDevice):
#load the DLL
_libfli = FLILibrary.getDll(debug=DEBUG)
_domain = flidomain_t(FLIDOMAIN_USB | FLIDEVICE_FOCUSER)
def __init__(self, dev_name, model):
USBDevice.__init__(self, dev_name = dev_name, model = model)
self.stepper_position = None
extent = c_long()
self._libfli.FLIGetFocuserExtent(self._dev, byref(extent))
self.stepper_max_extent = extent.value
def get_steps_remaining(self):
steps = c_long()
self._libfli.FLIGetStepsRemaining(self._dev, byref(steps))
return steps.value
def step_motor(self, steps, blocking=True, force=False):
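        # Step the focuser motor by `steps`. Unless force=True, refuse to move
        # while a previous move is still running or past the maximum extent.
        # A blocking call returns the new position; an async call returns None.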
if not force:
if self.get_steps_remaining() > 0:
raise FLIError("""'step_motor' command ignored because motor is still moving! Use force=True to bypass."""
)
if self.stepper_position is None:
self.get_stepper_position()
end_pos = self.stepper_position + steps
if end_pos > self.stepper_max_extent:
raise FLIError("""'step_motor' command ignored because user tried to drive stepper motor to end position %d, which is beyond its max exent, %d. Use force=True to bypass"""
% (end_pos, self.stepper_max_extent)
)
if blocking:
self._libfli.FLIStepMotor(self._dev, c_long(steps))
return self.get_stepper_position()
else:
self.stepper_position = None
self._libfli.FLIStepMotorAsync(self._dev, c_long(steps))
return None
def get_stepper_position(self):
pos = c_long()
self._libfli.FLIGetStepperPosition(self._dev, byref(pos))
self.stepper_position = pos.value
return pos.value
def home_focuser(self):
self._libfli.FLIHomeFocuser(self._dev)
return self.get_stepper_position()
def read_internal_temperature(self):
temp = c_double()
self._libfli.FLIReadTemperature(self._dev, FLI_TEMPERATURE_INTERNAL, byref(temp))
return temp.value
def read_external_temperature(self):
temp = c_double()
self._libfli.FLIReadTemperature(self._dev, FLI_TEMPERATURE_EXTERNAL, byref(temp))
return temp.value
###############################################################################
# TEST CODE
###############################################################################
if __name__ == "__main__":
focs = USBFocuser.find_devices()
foc0 = focs[0]
|
cversek/python-FLI
|
src/FLI/focuser.py
|
Python
|
mit
| 3,382
|
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import json
import os
from cosmo_tester.test_suites.test_blueprints.hello_world_bash_test \
import clone_hello_world
from cosmo_tester.framework.testenv import TestCase, bootstrap, teardown
EXECUTION_TIMEOUT = 120
def setUp():
bootstrap()
def tearDown():
teardown()
class SnapshotsHelloWorldTest(TestCase):
def _assert_manager_clean(self):
state = self.get_manager_state()
for v in state.itervalues():
self.assertFalse(bool(v), 'Manager is not clean')
def _assert_manager_state(self, blueprints_ids, deployments_ids):
state = self.get_manager_state()
self.assertEquals(set(blueprints_ids), set(state['blueprints'].keys()))
self.assertEquals(set(deployments_ids),
set(state['deployments'].keys()))
def setUp(self):
super(SnapshotsHelloWorldTest, self).setUp()
self._assert_manager_clean()
self.repo_dir = clone_hello_world(self.workdir)
self.blueprint_yaml = os.path.join(self.repo_dir, 'blueprint.yaml')
self.counter = 0
def tearDown(self):
state = self.get_manager_state()
for d in state['deployments']:
self.wait_until_all_deployment_executions_end(d)
self.client.deployments.delete(d)
for b in state['blueprints']:
self.client.blueprints.delete(b)
for snapshot in self.client.snapshots.list():
self.client.snapshots.delete(snapshot.id)
super(SnapshotsHelloWorldTest, self).tearDown()
def _deploy(self, deployment_id, blueprint_id=None):
if blueprint_id is None:
blueprint_id = deployment_id
self.upload_blueprint(blueprint_id)
inputs = {
'agent_user': self.env.cloudify_agent_user,
'image': self.env.ubuntu_trusty_image_name,
'flavor': self.env.flavor_name
}
self.create_deployment(blueprint_id, deployment_id, inputs=inputs)
self.wait_until_all_deployment_executions_end(deployment_id)
def _delete(self, deployment_id, blueprint_id=None):
if blueprint_id is None:
blueprint_id = deployment_id
self.client.deployments.delete(deployment_id)
self.client.blueprints.delete(blueprint_id)
def _uuid(self):
self.counter += 1
return '{0}_{1}'.format(self.test_id, self.counter)
def _create_snapshot(self, snapshot_id):
self.logger.info('Creating snapshot {0}'.format(snapshot_id))
execution = self.client.snapshots.create(snapshot_id,
include_metrics=False,
include_credentials=False)
self.wait_for_execution(execution, timeout=EXECUTION_TIMEOUT)
def _restore_snapshot(self, snapshot_id, force=False, assert_success=True):
self.logger.info('Restoring snapshot {0}'.format(snapshot_id))
execution = self.client.snapshots.restore(snapshot_id, force=force)
self.wait_for_execution(execution, timeout=EXECUTION_TIMEOUT,
assert_success=assert_success)
return self.client.executions.get(execution_id=execution.id)
def _restore_snapshot_failure_expected(self, snapshot_id, force=False):
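        # Restore a snapshot that is expected to fail and assert that the
        # manager's blueprints, deployments and nodes are unchanged by the
        # failed restore attempt.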
old_state = self.get_manager_state()
execution = self._restore_snapshot(snapshot_id, force=force,
assert_success=False)
self.assertEquals(execution.status, 'failed')
new_state = self.get_manager_state()
for k in ['blueprints', 'deployments', 'nodes']:
error_msg = ('State changed for key {0}\nBefore:\n'
'{1}\nAfter:\n{2}').format(
k,
json.dumps(old_state[k], indent=2),
json.dumps(new_state[k], indent=2)
)
self.assertEquals(old_state[k], new_state[k], error_msg)
self.logger.info('Restoring snapshot {0} failed as expected'.format(
snapshot_id))
return execution
def test_simple(self):
dep = self._uuid()
self._deploy(dep)
self._create_snapshot(dep)
self._delete(dep)
self._assert_manager_clean()
self._restore_snapshot(dep)
self._assert_manager_state(blueprints_ids={dep},
deployments_ids={dep})
self.client.snapshots.delete(dep)
self._delete(dep)
def test_not_clean(self):
dep = self._uuid()
self._deploy(dep)
self._create_snapshot(dep)
self._restore_snapshot_failure_expected(dep)
def test_force_with_conflict(self):
dep = self._uuid()
snapshot = self._uuid()
self._deploy(dep)
self._create_snapshot(snapshot)
execution = self._restore_snapshot_failure_expected(snapshot,
force=True)
self.assertIn(dep, execution.error)
def test_force_with_deployment_conflict(self):
deployment = self._uuid()
blueprint = self._uuid()
snapshot = self._uuid()
self._deploy(deployment_id=deployment, blueprint_id=blueprint)
self._assert_manager_state(blueprints_ids={blueprint},
deployments_ids={deployment})
self._create_snapshot(snapshot)
self._delete(deployment_id=deployment, blueprint_id=blueprint)
new_blueprint = self._uuid()
self._deploy(deployment_id=deployment, blueprint_id=new_blueprint)
self._assert_manager_state(blueprints_ids={new_blueprint},
deployments_ids={deployment})
execution = self._restore_snapshot_failure_expected(snapshot,
force=True)
self.assertIn(deployment, execution.error)
def test_force_with_blueprint_conflict(self):
blueprint = self._uuid()
self.upload_blueprint(blueprint)
snapshot = self._uuid()
self._create_snapshot(snapshot)
execution = self._restore_snapshot_failure_expected(snapshot,
force=True)
self.assertIn(blueprint, execution.error)
def test_force_no_conflict(self):
dep = self._uuid()
self._deploy(dep)
self._create_snapshot(dep)
self._delete(dep)
self._assert_manager_clean()
dep2 = self._uuid()
self._deploy(dep2)
self._restore_snapshot(dep, force=True)
self._assert_manager_state(blueprints_ids={dep, dep2},
deployments_ids={dep, dep2})
self.client.snapshots.delete(dep)
self._delete(dep)
self._delete(dep2)
|
codilime/cloudify-system-tests
|
cosmo_tester/test_suites/test_snapshots/snapshots_single_manager_helloworld_test.py
|
Python
|
apache-2.0
| 7,453
|
from skoolkittest import SkoolKitTestCase
from skoolkit.textutils import find_unquoted, split_unquoted, partition_unquoted, split_quoted
TEST_FIND = (
# args, kwargs, result
(('abc', 'b'), {}, 1),
(('abc', 'd'), {}, 3),
(('abc', 'd'), {'neg': True}, -1),
(('abc', 'c'), {'end': 1}, 1),
(('abc', 'c'), {'end': 1, 'neg': True}, -1),
(('abc', 'b'), {'start': 1}, 1),
(('abc', 'a'), {'start': 1}, 3),
(('abc', 'a'), {'start': 1, 'neg': True}, -1),
(('c",",d', ','), {}, 4),
(('abc","d', ','), {}, 7),
(('abc","d', ','), {'neg': True}, -1),
(('c",",d', ','), {'end': 3}, 3),
(('c",",d', ','), {'end': 3, 'neg': True}, -1),
(('c",",d', ','), {'start': 1}, 4),
(('c",",d', ','), {'start': 2}, 2),
(('c",",d', ','), {'start': 3}, 6),
(('c",",d', ','), {'start': 5}, 6),
(('c",",d', ','), {'start': 5, 'neg': True}, -1),
(('e,";",f;g', ';'), {'start': 6}, 7),
)
TEST_SPLIT = (
# args, result
(('ab', ','), ['ab']),
(('a,b', ','), ['a', 'b']),
(('a":"b', ':'), ['a":"b']),
(('a":"b:c', ':'), ['a":"b', 'c']),
(('a-b-c', '-', 1), ['a', 'b-c']),
(('a"-"b-c-d', '-', 1), ['a"-"b', 'c-d'])
)
TEST_PARTITION = (
# args, result
(('ab', ','), ('ab', '', '')),
(('a,b', ','), ('a', ',', 'b')),
(('a":"b', ':'), ('a":"b', '', '')),
(('a":"b:c', ':'), ('a":"b', ':', 'c')),
(('a":"b', ':', 'x'), ('a":"b', '', 'x')),
(('a":"b:c', ':', 'x'), ('a":"b', ':', 'c'))
)
TEST_SPLIT_QUOTED = (
# text, result
('a', ['a']),
('"', ['"']),
('"...', ['"...']),
('""', ['""']),
('"":3', ['""', ':3']),
('"a"', ['"a"']),
('"a"+4', ['"a"', '+4']),
('"b"+5+"c"', ['"b"', '+5+', '"c"']),
('6+"d"+7', ['6+', '"d"', '+7']),
('"e"-"f"', ['"e"', '-', '"f"']),
(r'"\""', [r'"\""']),
(r'"\""*10', [r'"\""', '*10']),
(r'"\\"+1', [r'"\\"', '+1']),
(r'\"abc","d"', ['\\', '"abc"', ',', '"d"'])
)
class TextUtilsTest(SkoolKitTestCase):
def setUp(self):
SkoolKitTestCase.setUp(self)
self.longMessage = True
def _test_function(self, exp_result, func, *args, **kwargs):
args_str = ', '.join([repr(a) for a in args])
kwargs_str = ', '.join(['{}={!r}'.format(k, v) for k, v in kwargs.items()])
if kwargs_str:
kwargs_str = ', ' + kwargs_str
msg = "{}({}{}) failed".format(func.__name__, args_str, kwargs_str)
self.assertEqual(exp_result, func(*args, **kwargs), msg)
def test_find_unquoted(self):
for args, kwargs, exp_result in TEST_FIND:
self._test_function(exp_result, find_unquoted, *args, **kwargs)
def test_split_unquoted(self):
for args, exp_result in TEST_SPLIT:
self._test_function(exp_result, split_unquoted, *args)
def test_partition_unquoted(self):
for args, exp_result in TEST_PARTITION:
self._test_function(exp_result, partition_unquoted, *args)
def test_split_quoted(self):
for text, exp_result in TEST_SPLIT_QUOTED:
self._test_function(exp_result, split_quoted, text)
|
skoolkid/skoolkit
|
tests/test_textutils.py
|
Python
|
gpl-3.0
| 3,109
|
"""PyDbLite.py adapted for MySQL backend
Differences with PyDbLite:
- pass the connection to the MySQL db as argument to Base()
- in create(), field definitions must specify a type
- no index
- the Base() instance has a cursor attribute, so that SQL requests
can be executed :
db.cursor.execute(an_sql_request)
result = db.cursor.fetchall()
Fields must be declared
Syntax :
from PyDbLite.MySQL import Base
import MySQLdb
# connect to a MySQL server and use database "test"
connection = MySQLdb.connect("localhost","root","admin")
connection.cursor().execute("USE test")
# pass the connection as argument to Base creation
db = Base('dummy',connection)
# create new base with field names
    db.create(('name','TEXT'),('age','INTEGER'),('size','REAL'))
# existing base
db.open()
# insert new record
db.insert(name='homer',age=23,size=1.84)
# records are dictionaries with a unique integer key __id__
# selection by list comprehension
res = [ r for r in db if 30 > r['age'] >= 18 and r['size'] < 2 ]
# or generator expression
for r in (r for r in db if r['name'] in ('homer','marge') ):
# simple selection (equality test)
res = db(age=30)
# delete a record or a list of records
db.delete(one_record)
db.delete(list_of_records)
# delete a record by its id
del db[rec_id]
# direct access by id
record = db[rec_id] # the record such that record['__id__'] == rec_id
# update
db.update(record,age=24)
# add and drop fields
db.add_field('new_field')
db.drop_field('name')
# save changes on disk
db.commit()
"""
import os
import datetime
import cPickle
import bisect
import MySQLdb
# compatibility with Python 2.3
try:
set([])
except NameError:
from sets import Set as set
class Base:
def __init__(self,basename,connection):
"""basename = name of the PyDbLite database = a MySQL table
connection = a connection to a MySQL database"""
self.name = basename
self.conn = connection
self.cursor = connection.cursor()
self._iterating = False
def create(self,*fields,**kw):
"""Create a new base with specified field names
        A keyword argument mode can be specified; it is used if a table
        with the base name already exists
- if mode = 'open' : open the existing base, ignore the fields
- if mode = 'override' : erase the existing base and create a
new one with the specified fields"""
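        # A minimal usage sketch (illustrative names; assumes an open MySQLdb
        # connection as in the module docstring):
        #   db = Base('people', connection)
        #   db.create(('name', 'TEXT'), ('age', 'INTEGER'), mode='override')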
self.mode = mode = kw.get("mode",None)
if self._table_exists():
if mode == "override":
self.cursor.execute("DROP TABLE %s" %self.name)
elif mode == "open":
return self.open()
else:
raise IOError,"Base %s already exists" %self.name
self.fields = [ f[0] for f in fields ]
self.all_fields = ["__id__","__version__"]+self.fields
_types = ["INTEGER PRIMARY KEY AUTO_INCREMENT","INTEGER"] + \
[f[1] for f in fields]
f_string = [ "%s %s" %(f,t) for (f,t) in zip(self.all_fields,_types)]
sql = "CREATE TABLE %s (%s)" %(self.name,
",".join(f_string))
self.cursor.execute(sql)
return self
def open(self):
"""Open an existing database"""
if self._table_exists():
self.mode = "open"
self._get_table_info()
return self
# table not found
raise IOError,"Table %s doesn't exist" %self.name
def _table_exists(self):
"""Database-specific method to see if the table exists"""
self.cursor.execute("SHOW TABLES")
for table in self.cursor.fetchall():
if table[0].lower() == self.name.lower():
return True
return False
def _get_table_info(self):
"""Database-specific method to get field names"""
self.cursor.execute('DESCRIBE %s' %self.name)
self.all_fields = [ f[0] for f in self.cursor.fetchall() ]
self.fields = self.all_fields[2:]
def commit(self):
"""No use here ???"""
pass
def insert(self,*args,**kw):
"""Insert a record in the database
Parameters can be positional or keyword arguments. If positional
they must be in the same order as in the create() method
If some of the fields are missing the value is set to None
Returns the record identifier
"""
if args:
kw = dict([(f,arg) for f,arg in zip(self.all_fields[2:],args)])
kw["__version__"] = 0
vals = self._make_sql_params(kw)
sql = "INSERT INTO %s SET %s" %(self.name,",".join(vals))
res = self.cursor.execute(sql)
self.cursor.execute("SELECT LAST_INSERT_ID()")
__id__ = self.cursor.fetchone()[0]
return __id__
def delete(self,removed):
"""Remove a single record, or the records in an iterable
        Before starting deletion, test that all records are in the base
        and that the same __id__ does not appear twice
Return the number of deleted items
"""
if isinstance(removed,dict):
# remove a single record
removed = [removed]
else:
# convert iterable into a list (to be able to sort it)
removed = [ r for r in removed ]
if not removed:
return 0
_ids = [ r['__id__'] for r in removed ]
_ids.sort()
sql = "DELETE FROM %s WHERE __id__ IN (%s)" %(self.name,
",".join([str(_id) for _id in _ids]))
self.cursor.execute(sql)
return len(removed)
def update(self,record,**kw):
"""Update the record with new keys and values"""
# increment version number
kw["__version__"] = record["__version__"] + 1
vals = self._make_sql_params(kw)
sql = "UPDATE %s SET %s WHERE __id__=%s" %(self.name,
",".join(vals),record["__id__"])
self.cursor.execute(sql)
def _make_sql_params(self,kw):
"""Make a list of strings to pass to an SQL statement
from the dictionary kw with Python types"""
vals = []
for k,v in kw.iteritems():
vals.append('%s=%s' %(k,self._conv(v)))
return vals
def _conv(self,v):
if isinstance(v,str):
v = v.replace('"','""')
return '"%s"' %v
elif isinstance(v,datetime.date):
return v.strftime("%Y%m%d")
else:
return v
def _make_record(self,row):
"""Make a record dictionary from the result of a fetch_"""
return dict(zip(self.all_fields,row))
def add_field(self,field,default=None):
fname,ftype = field
if fname in self.all_fields:
raise ValueError,'Field "%s" already defined' %fname
sql = "ALTER TABLE %s ADD %s %s" %(self.name,fname,ftype)
if default is not None:
sql += " DEFAULT %s" %self._conv(default)
self.cursor.execute(sql)
self.commit()
self._get_table_info()
def drop_field(self,field):
if field in ["__id__","__version__"]:
raise ValueError,"Can't delete field %s" %field
if not field in self.fields:
raise ValueError,"Field %s not found in base" %field
sql = "ALTER TABLE %s DROP %s" %(self.name,field)
self.cursor.execute(sql)
self._get_table_info()
def __call__(self,**kw):
"""Selection by field values
db(key=value) returns the list of records where r[key] = value"""
for key in kw:
if not key in self.all_fields:
raise ValueError,"Field %s not in the database" %key
vals = self._make_sql_params(kw)
sql = "SELECT * FROM %s WHERE %s" %(self.name,",".join(vals))
self.cursor.execute(sql)
return [self._make_record(row) for row in self.cursor.fetchall() ]
def __getitem__(self,record_id):
"""Direct access by record id"""
sql = "SELECT * FROM %s WHERE __id__=%s" %(self.name,record_id)
self.cursor.execute(sql)
res = self.cursor.fetchone()
if res is None:
raise IndexError,"No record at index %s" %record_id
else:
return self._make_record(res)
def __len__(self):
        self.cursor.execute("SELECT COUNT(*) FROM %s" %self.name)
        return self.cursor.fetchone()[0]
def __delitem__(self,record_id):
"""Delete by record id"""
self.delete(self[record_id])
def __iter__(self):
"""Iteration on the records"""
self.cursor.execute("SELECT * FROM %s" %self.name)
results = [ self._make_record(r) for r in self.cursor.fetchall() ]
return iter(results)
if __name__ == '__main__':
connection = MySQLdb.connect("localhost","root","admin")
cursor = connection.cursor()
cursor.execute("USE test")
db = Base("pydbtest",connection).create(("name","TEXT"),("age","INTEGER"),
("size","REAL"),("birth","DATE"),
mode="override")
try:
db.add_field(("name","TEXT"))
except:
pass
import random
import datetime
names = ['pierre','claire','simon','camille','jean',
'florence','marie-anne']
#db = Base('PyDbLite_test')
#db.create('name','age','size','birth',mode="override")
for i in range(1000):
db.insert(name=random.choice(names),
age=random.randint(7,47),size=random.uniform(1.10,1.95),
birth=datetime.date(1990,10,10))
db.commit()
print 'Record #20 :',db[20]
print '\nRecords with age=30 :'
for rec in [ r for r in db if r["age"]==30 ]:
print '%-10s | %2s | %s' %(rec['name'],rec['age'],round(rec['size'],2))
print "\nSame with __call__"
# same with select
for rec in db(age=30):
print '%-10s | %2s | %s' %(rec['name'],rec['age'],round(rec['size'],2))
print [ r for r in db if r["age"]==30 ] == db(age=30)
raw_input()
db.insert(name=random.choice(names)) # missing fields
print '\nNumber of records with 30 <= age < 33 :',
print sum([1 for r in db if 33 > r['age'] >= 30])
print db.delete([])
d = db.delete([r for r in db if 32> r['age'] >= 30 and r['name']==u'pierre'])
print "\nDeleting %s records with name == 'pierre' and 30 <= age < 32" %d
print '\nAfter deleting records '
for rec in db(age=30):
print '%-10s | %2s | %s' %(rec['name'],rec['age'],round(rec['size'],2))
print '\n',sum([1 for r in db]),'records in the database'
print '\nMake pierre uppercase for age > 27'
for record in ([r for r in db if r['name']=='pierre' and r['age'] >27]) :
db.update(record,name="Pierre")
print len([r for r in db if r['name']=='Pierre']),'Pierre'
print len([r for r in db if r['name']=='pierre']),'pierre'
print len([r for r in db if r['name'] in ['pierre','Pierre']]),'p/Pierre'
print 'is unicode :',isinstance(db[20]['name'],unicode)
db.commit()
db.open()
print '\nSame operation after commit + open'
print len([r for r in db if r['name']=='Pierre']),'Pierre'
print len([r for r in db if r['name']=='pierre']),'pierre'
print len([r for r in db if r['name'] in ['pierre','Pierre']]),'p/Pierre'
print 'is unicode :',isinstance(db[20]['name'],unicode)
print "\nDeleting record #21"
del db[21]
if not 21 in db:
print "record 21 removed"
print db[22]
db.drop_field('name')
print db[22]
db.add_field(('adate',"DATE"),datetime.date.today())
print db[22]
|
leleobhz/scripts
|
python/chat_back_machine/engine/PyDbLite/MySQL.py
|
Python
|
gpl-2.0
| 11,904
|
# py.test --cov=xcs C:\Users\Metron\Documents\GitHub\pyalcs\tests\lcs\agents\xncs
|
ParrotPrediction/pyalcs
|
tests/lcs/agents/xcs/__init__.py
|
Python
|
mit
| 82
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Needs to stay compatible with Python 2.5 due to GAE.
#
# Copyright 2007 Google Inc. All Rights Reserved.
__version__ = '3.0.0a4.dev0'
|
google/mysql-protobuf
|
protobuf/python/google/protobuf/__init__.py
|
Python
|
gpl-2.0
| 1,768
|
#coding: utf-8
from __future__ import division, absolute_import, print_function, unicode_literals
from kasaya.conf import settings
from kasaya.core.protocol import messages
from kasaya.core import exceptions
#from kasaya.core.lib.binder import get_bind_address
from kasaya.core.protocol.comm import MessageLoop, send_and_receive_response
from kasaya.core.lib.control_tasks import ControlTasks, RedirectRequiredToAddr
from kasaya.core.lib import LOG, servicesctl
from kasaya.core.events import add_event_handler, emit
#from kasaya.workers.kasayad.pong import PingDB
from datetime import datetime, timedelta
import gevent
from signal import SIGKILL, SIGTERM
from os import kill
import random
__all__=("SyncWorker",)
def _worker_addr( wrkr ):
return "tcp://%s:%i" % (wrkr['ip'],wrkr['port'])
class RedirectRequiredEx(RedirectRequiredToAddr):
def __init__(self, host_id):
self.remote_id = host_id
class SyncWorker(object):
def __init__(self, server, database):
self.DAEMON = server
self.DB = database
#self.BC = broadcaster
#self.pinger = PingDB()
# bind events
add_event_handler("worker-local-start", self.worker_start_local )
add_event_handler("worker-local-stop", self.worker_stop_local )
add_event_handler("worker-local-wait", self.worker_prepare )
add_event_handler("worker-remote-join", self.worker_start_remote )
add_event_handler("worker-remote-leave", self.worker_stop_remote )
add_event_handler("connection-end", self.handle_connection_end )
# cache
self.__services = None
# kasayad <--> kasayad communication
self.intersync = MessageLoop( 'tcp://0.0.0.0:'+str(settings.KASAYAD_CONTROL_PORT) )
self.intersync.register_message(messages.CTL_CALL, self.handle_global_control_request)
# local worker <-> kasayad dialog on public port
self.intersync.register_message(messages.WORKER_LIVE, self.handle_worker_live)
self.intersync.register_message(messages.WORKER_LEAVE, self.handle_worker_leave)
self.intersync.register_message(messages.QUERY, self.handle_name_query, raw_msg_response=True)
# service control tasks
self.ctl = ControlTasks(allow_redirect=True)
self.ctl.register_task("svbus.status", self.CTL_global_services)
self.ctl.register_task("worker.stop", self.CTL_worker_stop)
self.ctl.register_task("worker.stats", self.CTL_worker_stats)
self.ctl.register_task("worker.exists", self.CTL_worker_exists)
self.ctl.register_task("service.start", self.CTL_service_start)
self.ctl.register_task("service.stop", self.CTL_service_stop)
self.ctl.register_task("host.rescan", self.CTL_host_rescan)
@property
def replaces_broadcast(self):
return self.DB.replaces_broadcast
@property
def own_ip(self):
return self.intersync.ip
@property
def own_addr(self):
"""
Own network address
"""
return self.intersync.address
# closing and quitting
def stop(self):
#self.local_input.stop()
#self.queries.stop()
pass
def close(self):
#self.local_input.close()
#self.queries.close()
pass
# all message loops used in kasayad
def get_loops(self):
return [
#self.pinger.loop,
self.intersync.loop,
]
# local services management
# -------------------------
#def local_services_scan(self):
def local_services_list(self, rescan=False):
"""
List of available local services.
        If a rescan brings changes, the database is updated and a broadcast is triggered.
"""
scan = rescan or (self.__services is None)
if scan:
self.__services = servicesctl.local_services()
lst = self.__services.keys()
if rescan:
ID = self.DAEMON.ID
changes = self.DB.service_update_list(ID, lst)
if changes:
self.DAEMON.notify_kasayad_refresh(ID, lst, local=True)
return lst
def get_service_ctl(self, name):
"""
Return ServiceCtl object for given service name
"""
return self.__services[name]
# local message handlers
# -----------------------------------
def handle_worker_live(self, msg):
"""
        Receive worker's ping signal.
This function is triggered only by local worker.
"""
        # Example of the incoming message:
        # {
        #     u'status': 1,
        #     u'addr': u'tcp://0.0.0.0:5000',
        #     u'service': u'locka',
        #     u'pid': 6222,
        #     u'id': u'WLUKJGE5AOKF4E'
        # }
wrkr = self.DB.worker_get(msg['id'])
if wrkr is None:
# new local worker just started
emit("worker-local-start", msg['id'], msg['addr'], msg['service'], msg['pid'] )
return
if (msg['addr']!=wrkr['addr']) or (msg['service']!=wrkr['service']):
# worker properties are different, assume that
# old worker died silently and new appears under same ID
# (it's just impossible!)
emit("worker-local-stop", worker_id )
return
def handle_connection_end(self, addr, ssid):
"""
This is event handler for connection-end.
"""
if ssid==None: return
# unexpected connection lost with local worker
wrkr = self.DB.worker_get(ssid)
if wrkr is not None:
emit("worker-local-stop", ssid )
def handle_worker_leave(self, msg):
"""
Local worker is going down,
generate event worker-local-stop
"""
emit("worker-local-stop", msg['id'] )
def handle_name_query(self, msg):
"""
        Reply to a query for a worker's address
"""
name = msg['service']
addr = self.DB.choose_worker_for_service(name)
if not addr is None:
addr = addr['addr']
return {
'message':messages.WORKER_ADDR,
'service':name,
'addr':addr,
}
def handle_local_control_request(self, msg):
"""
control requests from localhost
"""
result = self.ctl.handle_request(msg)
return result
# worker state changes, high level functions
# ------------------------------------------
def worker_prepare(self, worker_id):
"""
        After start, a worker is in the offline state.
        It needs to be configured before it can be activated and go online.
        This function performs the required setup and, once the worker is online,
        broadcasts the new worker to the network.
"""
wrknfo = self.DB.worker_get(worker_id)
# all configuration of worker should be there
pass
# send information to worker to start processing tasks
msg = {
'message':messages.CTL_CALL,
'method':'start'
}
res = send_and_receive_response(wrknfo['addr'], msg)
LOG.debug("Local worker [%s] on [%s] is now online" % (wrknfo['service'], wrknfo['addr']) )
# broadcast new worker state
self.DB.worker_set_state( worker_id, True )
#self.BC.broadcast_worker_live(self.DAEMON.ID, worker_id, wrknfo['addr'], wrknfo['service'])
def worker_start_local(self, worker_id, address, service, pid):
"""
Local worker started
"""
self.DB.worker_register(self.DAEMON.ID, worker_id, service, address, pid, False)
LOG.info("Local worker [%s] started, address [%s] [id:%s]" % (service, address, worker_id) )
# emit signal
emit("worker-local-wait", worker_id)
def worker_start_remote(self, worker_id, host_id, address, service):
"""
Remote worker started
"""
self.DB.worker_register(host_id, worker_id, service, address)
LOG.info("Remote worker [%s] started, address [%s] [id:%s]" % (service, address, worker_id) )
def worker_stop_local(self, worker_id):
"""
Local worker stopped
"""
self.DB.worker_unregister(ID=worker_id)
LOG.info("Local worker stopped [id:%s]" % worker_id )
#self.BC.broadcast_worker_stop(worker_id)
def worker_stop_remote(self, worker_id):
"""
Remote worker stopped
"""
self.DB.worker_unregister(ID=worker_id)
LOG.info("Remote worker stopped [id:%s]" % worker_id )
# heartbeat
def hearbeat_loop(self):
"""
        Periodically check the ping time of all locally registered workers and unregister dead ones.
"""
maxpinglife = timedelta( seconds = settings.HEARTBEAT_TIMEOUT + settings.WORKER_HEARTBEAT )
unreglist = []
while True:
now = datetime.now()
for ID, nfo in self.__pingdb.iteritems():
# find outdated timeouts
to = nfo['t'] + maxpinglife
if to<now:
LOG.warning("Worker [%s] with id [%s] died. Unregistering." % (nfo['s'], ID) )
unreglist.append(ID)
# unregister all dead workers
while len(unreglist)>0:
ID = unreglist.pop()
self.worker_stop( ID )
gevent.sleep(settings.WORKER_HEARTBEAT)
# inter sync communication and global management
# -------------------------------------------------
def redirect_or_pass_by_id(self, host_id):
"""
Check if given host id is own host, if not then raise exception
to redirect message to proper destination.
"""
if host_id is None:
raise exceptions.ServiceBusException("Unknown address")
ownip = host_id==self.DAEMON.ID
if not ownip:
raise RedirectRequiredEx(host_id)
def handle_global_control_request(self, msg):
"""
Control requests from remote hosts
"""
result = self.ctl.handle_request(msg)
return result
#return {"message":messages.RESULT, "result":result }
# kasayad host tasks
def CTL_global_services(self):
"""
List of all working hosts and services in network
"""
lst = []
# all kasayad hosts
for hst in self.DB.host_list():
# workers on host
hst ['services'] = self.CTL_services_on_host( hst['id'] )
lst.append( hst )
return lst
# this command is not currently exposed via control interface
def CTL_services_on_host(self, host_id):
"""
List of all services on host
"""
lst = []
# managed services set
managed = set()
for s in self.DB.service_list(host_id):
managed.add(s['service'])
# currently running services
running = set()
for wnfo in self.DB.worker_list(host_id):
running.add(wnfo['service'])
wnfo['running'] = True
wnfo['managed'] = wnfo['service'] in managed
lst.append(wnfo)
# offline services
for sv in managed:
if not sv in running:
lst.append( {'service':sv,'running':False, 'managed':True} )
return lst
def CTL_worker_exists(self, worker_id):
"""
Check if worker with given id is existing
"""
wrkr = self.DB.worker_get(worker_id)
return not wrkr is None
def CTL_worker_stop(self, ID, terminate=False, sigkill=False):
"""
Send stop signal to worker
"""
wrkr = self.DB.worker_get(ID)
self.redirect_or_pass_by_id( wrkr['ip'] )
if terminate:
signal = SIGTERM
if sigkill:
signal = SIGKILL
            kill(wrkr['pid'], signal)
return True
else:
addr = _worker_addr(wrkr)
msg = {
'message':messages.CTL_CALL,
'method':'stop'
}
            res = send_and_receive_response(addr, msg)
return res
def CTL_worker_stats(self, ID):
"""
Return full stats of worker
"""
wrkr = self.DB.worker_get(ID)
self.redirect_or_pass_by_id( wrkr['ip'] )
# call worker for stats
addr = _worker_addr(wrkr)
msg = {
'message':messages.CTL_CALL,
'method':'stats'
}
        res = send_and_receive_response(addr, msg)
return res
def CTL_service_start(self, name, ip=None):
"""
Start service on host, or locally if host is not given.
name - name of service to start
        ip - ip address of the host on which the service should be started,
if not given then worker will be started on localhost.
"""
if ip is None:
ip = self.own_ip
self.redirect_or_pass_by_id(ip)
try:
svc = self.get_service_ctl(name)
except KeyError:
raise exceptions.ServiceBusException("There is no service [%s] on this host" % name)
svc.start_service()
return True
def CTL_service_stop(self, name, ip=None):
"""
Stop all workers serving given service.
name - name of service to stop
"""
if ip is None:
ip = self.own_ip
self.redirect_or_pass_by_id(ip)
services = []
for wrk in self.DB.worker_list_local():
if wrk['service']==name:
services.append(wrk['ID'])
if len(services)==0:
raise exceptions.ServiceBusException("There is no [%s] service running" % name)
for u in services:
self.CTL_worker_stop(u)
def CTL_host_rescan(self, ip=None):
"""
Rescan services available on local host
"""
if ip is None:
ip = self.own_ip
self.redirect_or_pass_by_id(ip)
svlist = self.local_services_list(rescan=True)
        # send new list of services to the kasaya daemon instance
#self.DAEMON.notify_kasayad_refresh(self.DAEMON.ID, svlist, True)
|
AYAtechnologies/Kasaya-esb
|
kasaya/workers/kasayad/syncworker.py
|
Python
|
bsd-2-clause
| 14,173
|
"""
This is the toy example problem in the SNOPT documentation.
min x1
s.t . x1**2 + 4x2**2 <= 4
(x1-2)**2 + x2**2 <= 5
x1 >=0
We define the function F(x):
F(x) = [ x1 ]
[ x1**2 + 4x2**2 ]
[ (x1-2)**2 + x2**2 ]
with ObjRow = 1 to indicate the objective.
The Jacobian is:
F'(x) = [ 1 0 ] [ 1 0 ] [ 0 0 ]
[ 2x1 8x2 ] = [ 0 0 ] + [ 2x1 8x2 ]
[ 2(x1-2) 2x2 ] [ 0 0 ] [ 2(x1-2) 2x2 ]
linear(A) nonlinear (G)
"""
import numpy as np
import scipy.sparse as sp
from snopt import snopta, SNOPT_options
def sntoya_objF(status,x,needF,F,needG,G):
F[0] = x[1] # objective row
F[1] = x[0]**2 + 4.0*x[1]**2
F[2] = (x[0] - 2.0)**2 + x[1]**2
return status, F, G
def sntoya_objFG(status,x,needF,F,needG,G):
F[0] = x[1] # objective row
F[1] = x[0]**2 + 4.0*x[1]**2
F[2] = (x[0] - 2.0)**2 + x[1]**2
G[0] = 2*x[0]
G[1] = 8*x[1]
G[2] = 2*(x[0]-2)
G[3] = 2*x[1]
return status, F, G
inf = 1.0e20
options = SNOPT_options()
options.setOption('Verbose',True)
options.setOption('Solution print',True)
options.setOption('Print filename','sntoya.out')
options.setOption('Summary frequency',1)
# Name arrays have to be dtype='|S8' (matching the 8-character names
# below) and also have to be the correct length, else they are ignored by SNOPT:
xnames = np.empty(2,dtype='|S8')
xnames[0] = " x0"
xnames[1] = " x1"
Fnames = np.empty(3,dtype='|S8')
Fnames[0] = " F0"
Fnames[1] = " F1"
Fnames[2] = " F2"
x0 = np.array([ 1.0, 1.0 ])
xlow = np.array([ 0.0, -inf])
xupp = np.array([ inf, inf])
Flow = np.array([ -inf, -inf, -inf ])
Fupp = np.array([ inf, 4.0, 5.0 ])
n = 2
nF = 3
ObjRow = 1
# We first solve the problem without providing derivative info
result = snopta(sntoya_objF,n,nF,x0=x0,
xlow=xlow,xupp=xupp,
Flow=Flow,Fupp=Fupp,
ObjRow=ObjRow,
xnames=xnames,Fnames=Fnames,
name=' sntoyaF',options=options)
# Now we set up the derivative structures...
# A and G provide the linear and nonlinear components of
# the Jacobian matrix, respectively. Here, G and A are given
# as dense matrices.
#
# For the nonlinear components, enter any nonzero value to
# indicate the location of the nonlinear derivatives (in this case, 2).
# A must be properly defined with the correct derivative values.
A = np.array([ [0, 1],
[0, 0],
[0, 0]])
G = np.array([ [0, 0],
[2, 2],
[2, 2]])
# Alternatively, A and G can be input in coordinate form via scipy's
# coordinate matrix
# A = sp.coo_matrix(A)
# G = sp.coo_matrix(G)
# or explicitly in coordinate form as a tuple
# iAfun = row indices of A
# jAvar = col indices of A
# A = matrix values of A
# (A,iAfun,jAvar)
#
# iGfun = row indices of G
# jGvar = col indices of G
# (iGfun,jGvar)
#
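# A sketch of the explicit coordinate form for this problem (commented out;
# indices are 1-based and mirror the dense A and G above -- the exact tuple
# spelling accepted by snopta is an assumption of this sketch):
# Aval = np.array([1.0]); iAfun = np.array([1]); jAvar = np.array([2])
# iGfun = np.array([2, 2, 3, 3]); jGvar = np.array([1, 2, 1, 2])
# result = snopta(sntoya_objFG, n, nF, x0=x0, xlow=xlow, xupp=xupp,
#                 Flow=Flow, Fupp=Fupp, ObjRow=ObjRow,
#                 A=(Aval, iAfun, jAvar), G=(iGfun, jGvar),
#                 xnames=xnames, Fnames=Fnames)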
result = snopta(sntoya_objFG,n,nF,x0=x0,name='sntoyaFG',xlow=xlow,xupp=xupp,
Flow=Flow,Fupp=Fupp,ObjRow=ObjRow,A=A,G=G,xnames=xnames,Fnames=Fnames)
print(result)
|
snopt/snopt-python
|
examples/sntoya.py
|
Python
|
mit
| 3,256
|
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_parse_qs,
compat_str,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_xml_parse_error,
compat_HTTPError,
)
from ..utils import (
determine_ext,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
js_to_json,
int_or_none,
parse_iso8601,
unescapeHTML,
unsmuggle_url,
update_url_query,
clean_html,
mimetype2ext,
)
class BrightcoveLegacyIE(InfoExtractor):
IE_NAME = 'brightcove:legacy'
_VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL = 'http://c.brightcove.com/services/viewer/htmlFederated'
_TESTS = [
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
'id': '2371591881001',
'ext': 'mp4',
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
'timestamp': 1368213670,
'upload_date': '20130510',
'uploader_id': '1589608506001',
}
},
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
'info_dict': {
'id': '1785452137001',
'ext': 'flv',
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
'timestamp': 1344975024,
'upload_date': '20120814',
'uploader_id': '1460825906',
},
},
{
# From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
'info_dict': {
'id': '2750934548001',
'ext': 'mp4',
'title': 'This Bracelet Acts as a Personal Thermostat',
'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
'uploader': 'Mashable',
'timestamp': 1382041798,
'upload_date': '20131017',
'uploader_id': '1130468786001',
},
},
{
# test that the default referer works
# from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
'info_dict': {
'id': '2878862109001',
'ext': 'mp4',
'title': 'Lost in Motion II',
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
'skip': 'Video gone',
},
{
# test flv videos served by akamaihd.net
# From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3Aevent-stream-356&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D',
# The md5 checksum changes on each download
'info_dict': {
'id': '3750436379001',
'ext': 'flv',
'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'uploader': 'RBTV Old (do not use)',
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'timestamp': 1409122195,
'upload_date': '20140827',
'uploader_id': '710858724001',
},
},
{
# playlist with 'videoList'
# from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
'id': '3550319591001',
},
'playlist_mincount': 7,
},
{
# playlist with 'playlistTab' (https://github.com/rg3/youtube-dl/issues/9965)
'url': 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=AQ%7E%7E,AAABXlLMdok%7E,NJ4EoMlZ4rZdx9eU1rkMVd8EaYPBBUlg',
'info_dict': {
'id': '1522758701001',
'title': 'Lesson 08',
},
'playlist_mincount': 10,
},
]
FLV_VCODECS = {
1: 'SORENSON',
2: 'ON2',
3: 'H264',
4: 'VP8',
}
@classmethod
def _build_brighcove_url(cls, object_str):
"""
        Build a Brightcove url from an XML string containing
<object class="BrightcoveExperience">{params}</object>
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>',
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
# remove namespace to simplify extraction
object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
try:
object_doc = compat_etree_fromstring(object_str.encode('utf-8'))
except compat_xml_parse_error:
return
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
flashvars = dict(
(k, v[0])
for k, v in compat_parse_qs(fv_el.attrib['value']).items())
else:
flashvars = {}
data_url = object_doc.attrib.get('data', '')
data_url_params = compat_parse_qs(compat_urllib_parse_urlparse(data_url).query)
def find_param(name):
if name in flashvars:
return flashvars[name]
node = find_xpath_attr(object_doc, './param', 'name', name)
if node is not None:
return node.attrib['value']
return data_url_params.get(name)
params = {}
playerID = find_param('playerID') or find_param('playerId')
if playerID is None:
raise ExtractorError('Cannot find player ID')
params['playerID'] = playerID
playerKey = find_param('playerKey')
# Not all pages define this value
if playerKey is not None:
params['playerKey'] = playerKey
# These fields hold the id of the video
videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') or find_param('@videoList')
if videoPlayer is not None:
if isinstance(videoPlayer, list):
videoPlayer = videoPlayer[0]
if not (videoPlayer.isdigit() or videoPlayer.startswith('ref:')):
return None
params['@videoPlayer'] = videoPlayer
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
return cls._make_brightcove_url(params)
@classmethod
def _build_brighcove_url_from_js(cls, object_js):
# The layout of JS is as follows:
# customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
# // build Brightcove <object /> XML
# }
m = re.search(
r'''(?x)customBC\.createVideo\(
.*? # skipping width and height
["\'](?P<playerID>\d+)["\']\s*,\s* # playerID
["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters
# in length, however it's appended to itself
# in places, so truncate
["\'](?P<videoID>\d+)["\'] # @videoPlayer
''', object_js)
if m:
return cls._make_brightcove_url(m.groupdict())
@classmethod
def _make_brightcove_url(cls, params):
return update_url_query(cls._FEDERATED_URL, params)
@classmethod
def _extract_brightcove_url(cls, webpage):
"""Try to extract the brightcove url from the webpage, returns None
if it can't be found
"""
urls = cls._extract_brightcove_urls(webpage)
return urls[0] if urls else None
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
url_m = re.search(
r'''(?x)
<meta\s+
(?:property|itemprop)=([\'"])(?:og:video|embedURL)\1[^>]+
content=([\'"])(?P<url>https?://(?:secure|c)\.brightcove.com/(?:(?!\2).)+)\2
''', webpage)
if url_m:
url = unescapeHTML(url_m.group('url'))
# Some sites don't add it, we can't download with this url, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
if 'playerKey' in url or 'videoId' in url or 'idVideo' in url:
return [url]
matches = re.findall(
r'''(?sx)<object
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?>\s*</object>''',
webpage)
if matches:
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
return list(filter(None, [
cls._build_brighcove_url_from_js(custom_bc)
for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)]))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
# Change the 'videoId' and others field to '@videoPlayer'
url = re.sub(r'(?<=[?&])(videoI(d|D)|idVideo|bctid)', '%40videoPlayer', url)
# Change bckey (used by bcove.me urls) to playerKey
url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
mobj = re.match(self._VALID_URL, url)
query_str = mobj.group('query')
query = compat_urlparse.parse_qs(query_str)
videoPlayer = query.get('@videoPlayer')
if videoPlayer:
# We set the original url as the default 'Referer' header
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query, referer=referer)
elif 'playerKey' in query:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
else:
raise ExtractorError(
'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
expected=True)
def _get_video_info(self, video_id, query, referer=None):
headers = {}
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
if referer is not None:
headers['Referer'] = referer
webpage = self._download_webpage(self._FEDERATED_URL, video_id, headers=headers, query=query)
error_msg = self._html_search_regex(
r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
'brightcove said: %s' % error_msg, expected=True)
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
return self._extract_video_info(video_info)
def _get_playlist_info(self, player_key):
info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
playlist_info = self._download_webpage(
info_url, player_key, 'Downloading playlist information')
json_data = json.loads(playlist_info)
if 'videoList' in json_data:
playlist_info = json_data['videoList']
playlist_dto = playlist_info['mediaCollectionDTO']
elif 'playlistTabs' in json_data:
playlist_info = json_data['playlistTabs']
playlist_dto = playlist_info['lineupListDTO']['playlistDTOs'][0]
else:
raise ExtractorError('Empty playlist')
videos = [self._extract_video_info(video_info) for video_info in playlist_dto['videoDTOs']]
return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
playlist_title=playlist_dto['displayName'])
def _extract_video_info(self, video_info):
video_id = compat_str(video_info['id'])
publisher_id = video_info.get('publisherId')
info = {
'id': video_id,
'title': video_info['displayName'].strip(),
'description': video_info.get('shortDescription'),
'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
'uploader': video_info.get('publisherName'),
'uploader_id': compat_str(publisher_id) if publisher_id else None,
'duration': float_or_none(video_info.get('length'), 1000),
'timestamp': int_or_none(video_info.get('creationDate'), 1000),
}
renditions = video_info.get('renditions', []) + video_info.get('IOSRenditions', [])
if renditions:
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
ext = None
if rend['remote']:
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(
url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
continue
elif 'akamaihd.net' in url_comp.netloc:
# This type of renditions are served through
# akamaihd.net, but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
if ext is None:
ext = determine_ext(url)
tbr = int_or_none(rend.get('encodingRate'), 1000)
a_format = {
'format_id': 'http%s' % ('-%s' % tbr if tbr else ''),
'url': url,
'ext': ext,
'filesize': int_or_none(rend.get('size')) or None,
'tbr': tbr,
}
if rend.get('audioOnly'):
a_format.update({
'vcodec': 'none',
})
else:
a_format.update({
'height': int_or_none(rend.get('frameHeight')),
'width': int_or_none(rend.get('frameWidth')),
'vcodec': rend.get('videoCodec'),
})
# m3u8 manifests with remote == false are media playlists
# Not calling _extract_m3u8_formats here to save network traffic
if ext == 'm3u8':
a_format.update({
'format_id': 'hls%s' % ('-%s' % tbr if tbr else ''),
'ext': 'mp4',
'protocol': 'm3u8_native',
})
formats.append(a_format)
self._sort_formats(formats)
info['formats'] = formats
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
'vcodec': self.FLV_VCODECS.get(video_info.get('FLVFullCodec')),
'filesize': int_or_none(video_info.get('FLVFullSize')),
})
if self._downloader.params.get('include_ads', False):
adServerURL = video_info.get('_youtubedl_adServerURL')
if adServerURL:
ad_info = {
'_type': 'url',
'url': adServerURL,
}
if 'url' in info:
return {
'_type': 'playlist',
'title': info['title'],
'entries': [ad_info, info],
}
else:
return ad_info
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % video_id)
return info
class BrightcoveNewIE(InfoExtractor):
IE_NAME = 'brightcove:new'
_VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>\d+|ref:[^&]+)'
_TESTS = [{
'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001',
'md5': 'c8100925723840d4b0d243f7025703be',
'info_dict': {
'id': '4463358922001',
'ext': 'mp4',
'title': 'Meet the man behind Popcorn Time',
'description': 'md5:eac376a4fe366edc70279bfb681aea16',
'duration': 165.768,
'timestamp': 1441391203,
'upload_date': '20150904',
'uploader_id': '929656772001',
'formats': 'mincount:22',
},
}, {
# with rtmp streams
'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001',
'info_dict': {
'id': '4279049078001',
'ext': 'mp4',
'title': 'Titansgrave: Chapter 0',
'description': 'Titansgrave: Chapter 0',
'duration': 1242.058,
'timestamp': 1433556729,
'upload_date': '20150606',
'uploader_id': '4036320279001',
'formats': 'mincount:41',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# ref: prefixed video id
'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442',
'only_matching': True,
}, {
# non numeric ref: prefixed video id
'url': 'http://players.brightcove.net/710858724001/default_default/index.html?videoId=ref:event-stream-356',
'only_matching': True,
}, {
# unavailable video without message but with error_code
'url': 'http://players.brightcove.net/1305187701/c832abfb-641b-44eb-9da0-2fe76786505f_default/index.html?videoId=4377407326001',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
urls = BrightcoveNewIE._extract_urls(webpage)
return urls[0] if urls else None
@staticmethod
def _extract_urls(webpage):
# Reference:
# 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe
# 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript
# 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/embed-in-page.html
# 4. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player
entries = []
# Look for iframe embeds [1]
for _, url in re.findall(
r'<iframe[^>]+src=(["\'])((?:https?:)?//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage):
entries.append(url if url.startswith('http') else 'http:' + url)
# Look for embed_in_page embeds [2]
for video_id, account_id, player_id, embed in re.findall(
# According to examples from [3] it's unclear whether video id
# may be optional and what to do when it is
# According to [4] data-video-id may be prefixed with ref:
r'''(?sx)
<video[^>]+
data-video-id=["\'](\d+|ref:[^"\']+)["\'][^>]*>.*?
</video>.*?
<script[^>]+
src=["\'](?:https?:)?//players\.brightcove\.net/
(\d+)/([^/]+)_([^/]+)/index(?:\.min)?\.js
''', webpage):
entries.append(
'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
% (account_id, player_id, embed, video_id))
return entries
def _real_extract(self, url):
account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(
'http://players.brightcove.net/%s/%s_%s/index.min.js'
% (account_id, player_id, embed), video_id)
policy_key = None
catalog = self._search_regex(
r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
if catalog:
catalog = self._parse_json(
js_to_json(catalog), video_id, fatal=False)
if catalog:
policy_key = catalog.get('policyKey')
if not policy_key:
policy_key = self._search_regex(
r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
webpage, 'policy key', group='pk')
api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account_id, video_id)
try:
json_data = self._download_json(api_url, video_id, headers={
'Accept': 'application/json;pk=%s' % policy_key
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
json_data = self._parse_json(e.cause.read().decode(), video_id)[0]
raise ExtractorError(
json_data.get('message') or json_data['error_code'], expected=True)
raise
title = json_data['name'].strip()
formats = []
for source in json_data.get('sources', []):
container = source.get('container')
ext = mimetype2ext(source.get('type'))
src = source.get('src')
if ext == 'ism' or container == 'WVM':
continue
elif ext == 'm3u8' or container == 'M2TS':
if not src:
continue
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
elif ext == 'mpd':
if not src:
continue
formats.extend(self._extract_mpd_formats(src, video_id, 'dash', fatal=False))
else:
streaming_src = source.get('streaming_src')
stream_name, app_name = source.get('stream_name'), source.get('app_name')
if not src and not streaming_src and (not stream_name or not app_name):
continue
tbr = float_or_none(source.get('avg_bitrate'), 1000)
height = int_or_none(source.get('height'))
width = int_or_none(source.get('width'))
f = {
'tbr': tbr,
'filesize': int_or_none(source.get('size')),
'container': container,
'ext': ext or container.lower(),
}
if width == 0 and height == 0:
f.update({
'vcodec': 'none',
})
else:
f.update({
'width': width,
'height': height,
'vcodec': source.get('codec'),
})
def build_format_id(kind):
format_id = kind
if tbr:
format_id += '-%dk' % int(tbr)
if height:
format_id += '-%dp' % height
return format_id
if src or streaming_src:
f.update({
'url': src or streaming_src,
'format_id': build_format_id('http' if src else 'http-streaming'),
'source_preference': 0 if src else -1,
})
else:
f.update({
'url': app_name,
'play_path': stream_name,
'format_id': build_format_id('rtmp'),
})
formats.append(f)
errors = json_data.get('errors')
if not formats and errors:
error = errors[0]
raise ExtractorError(
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
self._sort_formats(formats)
subtitles = {}
for text_track in json_data.get('text_tracks', []):
if text_track.get('src'):
subtitles.setdefault(text_track.get('srclang'), []).append({
'url': text_track['src'],
})
is_live = False
duration = float_or_none(json_data.get('duration'), 1000)
if duration and duration < 0:
is_live = True
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'description': clean_html(json_data.get('description')),
'thumbnail': json_data.get('thumbnail') or json_data.get('poster'),
'duration': duration,
'timestamp': parse_iso8601(json_data.get('published_at')),
'uploader_id': account_id,
'formats': formats,
'subtitles': subtitles,
'tags': json_data.get('tags', []),
'is_live': is_live,
}
|
Dunkas12/BeepBoopBot
|
lib/youtube_dl/extractor/brightcove.py
|
Python
|
gpl-3.0
| 28,139
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-PF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_7891',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._numa_topologies = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
self._fake_instance_extra_get_by_instance_uuid)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'last_seen_up': None,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in flavors.system_metadata_flavor_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, flavor=None, **kwargs):
# NOTE(danms): Remove this when all the compute_node stuff is
# converted to objects
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
sys_meta = self._fake_instance_system_metadata(flavor)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(flavor, 'new_') +
self._fake_instance_system_metadata(flavor, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': sys_meta,
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
}
extra = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': instance['uuid'],
'numa_topology': None,
'pci_requests': None,
}
numa_topology = kwargs.pop('numa_topology', None)
if numa_topology:
extra['numa_topology'] = numa_topology._to_json()
instance.update(kwargs)
instance['extra'] = extra
self._instances[instance_uuid] = instance
self._numa_topologies[instance_uuid] = extra
return instance
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename,
columns_to_join=None):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_instance_extra_get_by_instance_uuid(self, context,
instance_uuid, columns=None):
return self._numa_topologies.get(instance_uuid)
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node()
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.compute_node = None
self.tracker._get_service = mock.Mock(return_value=None)
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self.stubs.Set(self.tracker.scheduler_client, 'update_resource_stats',
self._fake_compute_node_update)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
self.tracker.compute_node['pci_device_pools'])
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock(
side_effect=self._fake_compute_node_update)
def test_update_resource(self):
# change a compute node value to simulate a change
self.tracker.compute_node['local_gb_used'] += 1
expected = copy.deepcopy(self.tracker.compute_node)
self.tracker._update(self.context)
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
def test_no_update_resource(self):
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
# NOTE(danms): PciDeviceStats only supports iteration, so we have to
# listify it before we can examine the contents by index.
pools = list(self.tracker.compute_node['pci_device_pools'])
self.assertEqual(driver.pci_stats[0]['product_id'],
pools[0]['product_id'])
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def setUp(self):
super(TrackerExtraResourcesTestCase, self).setUp()
self.driver = self._driver()
def _driver(self):
return FakeVirtDriver()
def test_set_empty_ext_resources(self):
resources = self.driver.get_available_resource(self.tracker.nodename)
self.assertNotIn('stats', resources)
self.tracker._write_ext_resources(resources)
self.assertIn('stats', resources)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self.driver.get_available_resource(self.tracker.nodename)
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": 12}
self.assertEqual(sorted(expected),
sorted(resources['stats']))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
instance = self._fake_instance_obj(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance_obj(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node['local_gb_used'])
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instance_context_claim(self, mock_get_all, mock_save, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
mock_get_all.return_value = [instance]
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
migration_type='resize',
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_instances_with_live_migrations(self, mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(self.context, inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.instance = self._fake_instance_obj()
self.instance_type = self._fake_flavor_create()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance_obj()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_resize_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_set_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.flags(compute_monitors=['FakeMontorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertTrue(len(metrics) > 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
self.flags(compute_monitors=['FakeMontorClass1'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
self.tracker.monitors = [class1]
with mock.patch.object(class1, 'get_metrics',
side_effect=test.TestingException()):
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', class1)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
|
Stavitsky/nova
|
nova/tests/unit/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 61,929
|
from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
#from rllab.envs.grid_world_env import GridWorldEnv
from envs.grid_world_2D_env import GridWorld2DEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy
# run_experiment_lite() is called at the bottom of this script but was never
# imported; rllab provides it in rllab.misc.instrument.
from rllab.misc.instrument import run_experiment_lite
def run_task(*_):
env = normalize(GridWorld2DEnv())
policy = CategoricalMLPPolicy(
env_spec=env.spec,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
n_itr=5,
# Uncomment both lines (this and the plot parameter below) to enable plotting
#plot=True,
)
algo.train()
run_experiment_lite(
run_task,
# Number of parallel workers for sampling
n_parallel=1,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="last",
# Specifies the seed for the experiment. If this is not provided, a random seed
# will be used
seed=1,
# plot=True,
)
|
roboticslab-uc3m/xgnitive
|
programs/drl/grid_2D_world.py
|
Python
|
lgpl-2.1
| 1,063
|
from pulsar.utils.log import lazyproperty
from lux.models import fields
from lux.utils.data import as_tuple
from sqlalchemy.orm.exc import NoResultFound
def get_primary_keys(model):
"""Get primary key properties for a SQLAlchemy model.
:param model: SQLAlchemy model class
"""
mapper = model.__mapper__
return tuple((
mapper.get_property_by_column(column)
for column in mapper.primary_key
))
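# A minimal sketch of what get_primary_keys() returns, using a hypothetical
# declarative model (not part of this module): one mapper ColumnProperty per
# primary-key column, in primary-key order.
#
#     class Author(Base):                      # Base: any declarative base
#         __tablename__ = 'author'
#         id = Column(Integer, primary_key=True)
#         name = Column(String)
#
#     get_primary_keys(Author)                 # -> (<ColumnProperty 'id'>,)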
class Related(fields.Field):
"""Related data represented by a SQLAlchemy `relationship`.
Must be attached to a Schema class whose options includes
a SQLAlchemy model.
:param list columns: Optional column names on related model.
If not provided, the primary key(s) of the related model
will be used.
"""
default_error_messages = {
'invalid': 'Could not deserialize related value {value!r}; '
'expected a dictionary with keys {keys!r}'
}
def __init__(self, column=None, **kwargs):
super().__init__(**kwargs)
self.columns = as_tuple(column)
@property
def related_model(self):
model = self.root.model
field = getattr(model.db_model, self.attribute or self.name)
return field.property.mapper.class_
@property
def session(self):
return self.root.session
@lazyproperty
def related_keys(self):
if self.columns:
return tuple((
self.related_model.__mapper__.columns[column]
for column in self.columns
))
return get_primary_keys(self.related_model)
def _serialize(self, value, attr, obj):
ret = {
prop.key: getattr(value, prop.key, None)
for prop in self.related_keys
}
return ret if len(ret) > 1 else list(ret.values())[0]
def _deserialize(self, value, *args, **kwargs):
if not isinstance(value, dict):
if len(self.related_keys) != 1:
self.fail(
'invalid',
value=value,
keys=[prop.key for prop in self.related_keys]
)
value = {self.related_keys[0].key: value}
query = self.session.query(self.related_model)
try:
if self.columns:
result = query.filter_by(**{
prop.key: value.get(prop.key)
for prop in self.related_keys
}).one()
else:
# Use a faster path if the related key is the primary key.
result = query.get([
value.get(prop.key) for prop in self.related_keys
])
if result is None:
raise NoResultFound
except NoResultFound:
# The related-object DNE in the DB
# but we still want to deserialize it
# ...perhaps we want to add it to the DB later
return self.related_model(**value)
return result
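# Rough behaviour sketch (hypothetical values, single-column related key):
# _serialize collapses {'id': 4} to the bare value 4, while _deserialize
# accepts either the bare value or the dict form, resolves it to an existing
# row via the session, and falls back to an unsaved related_model(**value)
# when no matching row is found.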
|
quantmind/lux
|
lux/ext/odm/fields.py
|
Python
|
bsd-3-clause
| 3,012
|
import Logger
|
samdmarshall/pyLoggingHelper
|
Logger/__init__.py
|
Python
|
bsd-3-clause
| 13
|
# This is a distutils/py2exe script to build the Windows binary version
# of gambit-enumphc
from distutils.core import setup
import py2exe
setup(console=["enumphc.py"],
data_files=[(".",
[ "phc.exe", "README" ])])
|
chumsley/gambit
|
contrib/scripts/enumpoly/setup.py
|
Python
|
gpl-2.0
| 241
|
"""
Module for declaration of common constants available throughout Open Civic Data code.
"""
import re
DIVISION_ID_REGEX = r'^ocd-division/country:[a-z]{2}(/[^\W\d]+:[\w.~-]+)*$'
JURISDICTION_ID_REGEX = r'^ocd-jurisdiction/country:[a-z]{2}(/[^\W\d]+:[\w.~-]+)*/\w+$'
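# Illustrative check of the two patterns above (the example IDs are
# hypothetical, not defined by this module): both formats anchor on a
# two-letter country code followed by typed "type:value" path segments.
assert re.match(DIVISION_ID_REGEX, 'ocd-division/country:us/state:ak')
assert re.match(JURISDICTION_ID_REGEX,
                'ocd-jurisdiction/country:us/state:ak/government')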
# helper for making options-only lists
_keys = lambda allopts: [opt[0] for opt in allopts]
"""
Policy on addition of new types here:
Because these lists are strictly enforced in lots of code for the purposes of data quality
we have a fairly liberal policy on amendment.
If a type is needed and is not duplicative of another type, it will be accepted.
At the moment, because of this policy, no method exists to extend these lists, instead we will
strive for them to be comprehensive.
The only exception to this would be translations, which should simply exist as translations of
the display name (2nd attribute).
"""
CONTACT_TYPE_CHOICES = (
('address', 'Postal Address'),
('email', 'Email'),
('url', 'URL'),
('fax', 'Fax'),
('text', 'Text Phone'),
('voice', 'Voice Phone'),
('video', 'Video Phone'),
('pager', 'Pager'),
('textphone', 'Device for people with hearing impairment'),
# NOTE: this list explicitly does not include RFC 6350s 'cell' as that is redundant with
# voice and the distinction will only lead to confusion. contact_detail.note can be
# used to indicate if something is a home, work, cell, etc.
)
CONTACT_TYPES = _keys(CONTACT_TYPE_CHOICES)
JURISDICTION_CLASSIFICATION_CHOICES = (
('government', 'Government'),
('legislature', 'Legislature'),
('executive', 'Executive'),
('school_system', 'School System'),
)
JURISDICTION_CLASSIFICATIONS = _keys(JURISDICTION_CLASSIFICATION_CHOICES)
SESSION_CLASSIFICATION_CHOICES = (
('primary', 'Primary'),
('special', 'Special'),
)
SESSION_CLASSIFICATIONS = _keys(SESSION_CLASSIFICATION_CHOICES)
ORGANIZATION_CLASSIFICATION_CHOICES = (
('legislature', 'Legislature'),
('executive', 'Executive'),
('upper', 'Upper Chamber'),
('lower', 'Lower Chamber'),
('party', 'Party'),
('committee', 'Committee'),
('commission', 'Commission'),
('department', 'Department'),
('office', 'Office'),
('company', 'Company'),
('political action committee', 'Political Action Committee (PAC)')
)
ORGANIZATION_CLASSIFICATIONS = _keys(ORGANIZATION_CLASSIFICATION_CHOICES)
BILL_CLASSIFICATION_CHOICES = (
('bill', 'Bill'),
('resolution', 'Resolution'),
('concurrent resolution', 'Concurrent Resolution'),
('joint resolution', 'Joint Resolution'),
('memorial', 'Memorial'),
('commemoration', 'Commemoration'),
('concurrent memorial', 'Concurrent Memorial'),
('joint memorial', 'Joint Memorial'),
('proposed bill', 'Proposed Bill'),
('proclamation', 'Proclamation'),
('nomination', 'Nomination'),
('contract', 'Contract'),
('claim', 'Claim'),
('appointment', 'Appointment'),
('constitutional amendment', 'Constitutional Amendment'),
('petition', 'Petition'),
('order', 'Order'),
('concurrent order', 'Concurrent Order'),
('appropriation', 'Appropriation'),
('ordinance', 'Ordinance')
)
BILL_CLASSIFICATIONS = _keys(BILL_CLASSIFICATION_CHOICES)
BILL_RELATION_TYPE_CHOICES = (
('companion', 'Companion'), # a companion in another chamber
('prior-session', 'Prior Session'), # an introduction from a prior session
('replaced-by', 'Replaced By'), # a bill has been replaced by another
('replaces', 'Replaces'), # a bill that replaces another
)
BILL_RELATION_TYPES = _keys(BILL_RELATION_TYPE_CHOICES)
BILL_ACTION_CLASSIFICATION_CHOICES = (
('filing', 'Filing'),
('introduction', 'Introduced'),
('reading-1', 'First Reading'),
('reading-2', 'Second Reading'),
('reading-3', 'Third Reading'),
('passage', 'Passage'),
('failure', 'Passage Failure'),
('withdrawal', 'Withdrawal'),
('substitution', 'Substitution'),
('amendment-introduction', 'Amendment Introduction'),
('amendment-passage', 'Amendment Passage'),
('amendment-withdrawal', 'Amendment Withdrawal'),
('amendment-failure', 'Amendment Failure'),
('amendment-amended', 'Amendment Amended'),
('committee-referral', 'Committee Referral'),
('committee-passage', 'Passage from Committee'),
('committee-passage-favorable', 'Favorable Passage from Committee'),
('committee-passage-unfavorable', 'Unfavorable Passage from Committee'),
('committee-failure', 'Failure in Committee'),
('executive-received', 'Received By Executive'),
('executive-signature', 'Signed By Executive'),
('executive-veto', 'Veto By Executive'),
('executive-veto-line-item', 'Line Item Veto By Executive'),
('veto-override-passage', 'Veto Override Passage'),
('veto-override-failure', 'Veto Override Failure'),
)
BILL_ACTION_CLASSIFICATIONS = _keys(BILL_ACTION_CLASSIFICATION_CHOICES)
VOTE_CLASSIFICATION_CHOICES = (
('bill-passage', 'Bill Passage'),
('amendment-passage', 'Amendment Passage'),
('veto-override', 'Veto Override'),
)
VOTE_CLASSIFICATIONS = _keys(VOTE_CLASSIFICATION_CHOICES)
VOTE_OPTION_CHOICES = (
('yes', 'Yes'),
('no', 'No'),
('absent', 'Absent'),
('abstain', 'Abstain'),
('not voting', 'Not Voting'),
('paired', 'Paired'),
('excused', 'Excused'),
# Only for open states.
('other', 'Other'),
)
VOTE_OPTIONS = _keys(VOTE_OPTION_CHOICES)
VOTE_RESULT_CHOICES = (
('pass', 'Pass'),
('fail', 'Fail'),
)
VOTE_RESULTS = _keys(VOTE_RESULT_CHOICES)
DISCLOSURE_CLASSIFICATION_CHOICES = (
('lobbying', 'Lobbying'),
('contributions', 'Contributions'),
('post_employment', 'Post Employment')
)
DISCLOSURE_CLASSIFICATIONS = _keys(DISCLOSURE_CLASSIFICATION_CHOICES)
|
influence-usa/python-opencivicdata-django
|
opencivicdata/common.py
|
Python
|
bsd-3-clause
| 5,815
|
# -*- coding: utf-8 -*-
__all__ = ['bt', 'ps3', 'defs', 'ps3_remote', 'xbmcclient', 'zeroconf']
|
French77/osmc
|
package/mediacenter-eventclients-common-osmc/files/usr/share/pyshared/xbmc/__init__.py
|
Python
|
gpl-2.0
| 96
|
r"""``sphobjinv`` *package execution module*.
``sphobjinv`` is a toolkit for manipulation and inspection of
Sphinx |objects.inv| files.
**Author**
Brian Skinn (bskinn@alum.mit.edu)
**File Created**
15 May 2020
**Copyright**
\(c) Brian Skinn 2016-2022
**Source Repository**
https://github.com/bskinn/sphobjinv
**Documentation**
https://sphobjinv.readthedocs.io/en/latest
**License**
The MIT License; see |license_txt|_ for full license terms
**Members**
"""
import sys
from sphobjinv.cli.core import main
if __name__ == "__main__":
# Spoof so 'help' usage display shows "sphobjinv" and
# not "__main__.py"
sys.argv[0] = "sphobjinv"
sys.exit(main())
|
bskinn/sphobjinv
|
src/sphobjinv/__main__.py
|
Python
|
mit
| 701
|
"""
LTI Provider API endpoint urls.
"""
from django.conf import settings
from django.conf.urls import url
from lti_provider import views
urlpatterns = [
url(
r'^courses/{course_id}/{usage_id}$'.format(
course_id=settings.COURSE_ID_PATTERN,
usage_id=settings.USAGE_ID_PATTERN
),
views.lti_launch, name="lti_provider_launch"),
url(r'^test/?$', views.test_launch, name="lti_provider_test_launch"),
]
|
CredoReference/edx-platform
|
lms/djangoapps/lti_provider/urls.py
|
Python
|
agpl-3.0
| 456
|
import bh_plugin
import re
import sublime
class BracketRemove(bh_plugin.BracketPluginCommand):
def decrease_indent_level(self, edit, row_first, row_last):
"""
Decrease indent level on removal
"""
tab_size = self.view.settings().get("tab_size", 4)
indents = re.compile(r"^(?:\t| {%d}| *)((?:\t| {%d}| )*)([\s\S]*)" % (tab_size, tab_size))
if not self.single_line:
for x in reversed(range(row_first, row_last + 1)):
line = self.view.full_line(self.view.text_point(x, 0))
text = self.view.substr(line)
m = indents.match(text)
if m:
self.view.replace(edit, line, m.group(1) + m.group(2))
def run(self, edit, name, remove_content=False, remove_indent=False, remove_block=False):
"""
Remove the given bracket and adjust its indentation if desired
"""
if remove_content:
self.view.replace(edit, sublime.Region(self.left.begin, self.right.end), "")
else:
row_first = self.view.rowcol(self.left.end)[0] + 1
row_last = self.view.rowcol(self.right.begin)[0] - 1
self.single_line = not row_first <= row_last
if remove_block and not self.single_line:
self.view.replace(edit, self.view.full_line(self.right.toregion()), "")
else:
self.view.replace(edit, self.right.toregion(), "")
if remove_indent:
self.decrease_indent_level(edit, row_first, row_last)
if remove_block and not self.single_line:
self.view.replace(edit, self.view.full_line(self.left.toregion()), "")
else:
self.view.replace(edit, self.left.toregion(), "")
self.left = None
self.right = None
self.nobracket = True
def plugin():
return BracketRemove
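# Quick illustration (hypothetical input, assumes tab_size == 4) of the indent
# regex used in BracketRemove.decrease_indent_level above: group 1 captures
# everything past the first indent unit, so joining groups 1 and 2 drops
# exactly one level of indentation.
if __name__ == "__main__":
    _indents = re.compile(r"^(?:\t| {%d}| *)((?:\t| {%d}| )*)([\s\S]*)" % (4, 4))
    _m = _indents.match("        value = 1")   # two levels deep (8 spaces)
    assert _m.group(1) + _m.group(2) == "    value = 1"  # one level (4 spaces)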
|
herove/dotfiles
|
sublime/Packages/BracketHighlighter/bh_modules/bracketremove.py
|
Python
|
mit
| 1,916
|
import glob
import os
from .. import *
@skip_if('java' not in test_features, 'skipping java tests')
@skip_if_backend('msbuild')
class TestJava(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'java'), install=True,
*args, **kwargs)
def test_build(self):
self.build('program.jar')
for i in glob.glob('*.class*'):
os.remove(i)
self.assertOutput(['java', '-jar', 'program.jar'],
'hello from java!\n')
def test_install(self):
self.build('install')
self.assertDirectory(self.installdir, [
os.path.join(self.libdir, 'program.jar'),
])
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertOutput(
['java', '-jar', os.path.join(self.libdir, 'program.jar')],
'hello from java!\n'
)
@skip_if('gcj' not in test_features, 'skipping gcj tests')
class TestGcj(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'java'),
extra_env={'JAVAC': os.getenv('GCJ', 'gcj')},
*args, **kwargs)
def test_build(self):
self.build('program')
self.assertOutput([executable('program')], 'hello from java!\n')
@skip_if('java' not in test_features, 'skipping java tests')
@skip_if_backend('msbuild')
class TestJavaLibrary(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'java_library'),
install=True, *args, **kwargs)
def test_build(self):
self.build('program.jar')
for i in glob.glob('*.class*'):
os.remove(i)
self.assertOutput(['java', '-jar', 'program.jar'],
'hello from library!\n')
def test_install(self):
self.build('install')
self.assertDirectory(self.installdir, [
os.path.join(self.libdir, 'lib.jar'),
os.path.join(self.libdir, 'program.jar'),
])
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertOutput(
['java', '-jar', os.path.join(self.libdir, 'program.jar')],
'hello from library!\n'
)
def test_package(self):
self.build('install')
self.configure(
srcdir=os.path.join('languages', 'java_package'), installdir=None,
extra_env={'CLASSPATH': os.path.join(self.libdir, '*')}
)
self.build()
self.assertOutput(['java', '-jar', 'program.jar'],
'hello from library!\n')
|
jimporter/bfg9000
|
test/integration/languages/test_java.py
|
Python
|
bsd-3-clause
| 2,704
|
class PackedBinary(object):
def __init__(self, i=0):
self.id = i
def __repr__(self):
return str(self.id)
def __getitem__(self, v):
b, m = v
return (self.id >> b) & m
def __setitem__(self, b, v):
b, m = b
self.id = (self.id & ~(m << b)) | ((v & m) << b)
def __long__(self):
return self.id
class GID(PackedBinary):
@property
def sequence(self):
return self[0, 0xFFFFF]
@sequence.setter
def sequence(self, value):
self[0, 0xFFFFF] = value
@property
def start_time(self):
return self[20, 0x3FFFFFFF] + 1104537600
@start_time.setter
def start_time(self, value):
self[20, 0x3FFFFFFF] = value - 1104537600
@property
def process(self):
return self[50, 0xF]
@process.setter
def process(self, value):
self[50, 0xF] = value
@property
def box(self):
return self[54, 0x3FF]
@box.setter
def box(self, value):
self[54, 0x3FF] = value
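# --- Editor's illustrative sketch (not part of the original module) ---
# PackedBinary's item protocol takes a (bit offset, mask) pair, so every GID
# property above reads or writes one bit field of the packed id: sequence in
# bits 0-19, start_time in bits 20-49 (relative to the 2005-01-01 epoch),
# process in bits 50-53 and box in bits 54-63. The demo values below are
# arbitrary.
if __name__ == '__main__':
    gid = GID()
    gid.sequence = 42
    gid.start_time = 1104537600 + 123
    gid.process = 3
    gid.box = 17
    assert gid.sequence == 42
    assert gid.start_time == 1104537600 + 123
    assert gid.process == 3
    assert gid.box == 17
    print(gid)  # prints the packed integer id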
|
lunixbochs/vaporbat
|
vaporbat/steam/ids.py
|
Python
|
mit
| 1,035
|
###########################################################
# #
#   This specific module was written by ASHWIN RAVISHANKAR.
#   It includes some of the HermiT Java classes.
# #
###########################################################
import sys
from eddy.core.output import getLogger
LOGGER = getLogger()
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from eddy.core.reasoner import AbstractReasoner
from eddy.core.common import HasThreadingSystem
from jnius import autoclass, cast, detach
class HermitReasoner(AbstractReasoner, HasThreadingSystem):
def __init__(self, spec, session):
super().__init__(spec, session)
self.afwset = set()
#############################################
# SLOTS
#################################
def onErrored(self, exception):
"""
"""
LOGGER.info('exception occured -')
#############################################
# HOOKS
#################################
def dispose(self):
"""
Executed whenever the plugin is going to be destroyed.
"""
detach()
def start(self):
"""
Perform initialization tasks for the plugin.
"""
try:
self.Configuration = autoclass('org.semanticweb.HermiT.Configuration')
self.Reasoner = autoclass('org.semanticweb.HermiT.Reasoner')
self.ReasonerFactory = autoclass('org.semanticweb.HermiT.ReasonerFactory')
self.Explanation = autoclass('org.semanticweb.owl.explanation.api.Explanation')
self.ExplanationGenerator = autoclass('org.semanticweb.owl.explanation.api.ExplanationGenerator')
self.InconsistentOntologyExplanationGeneratorFactory = autoclass(
'org.semanticweb.owl.explanation.impl.blackbox.checker.InconsistentOntologyExplanationGeneratorFactory')
self.BlackBoxExplanation = autoclass('com.clarkparsia.owlapi.explanation.BlackBoxExplanation')
except Exception as e:
self.onErrored(e)
|
ashwingoldfish/eddy
|
eddy/reasoners/HermiT/HermiT.py
|
Python
|
gpl-3.0
| 2,173
|
#!/usr/bin/env python
#coding=utf-8
# Copyright (C) 2011, Alibaba Cloud Computing
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from oss.oss_api import *
from oss.oss_util import *
from oss.oss_xml_handler import *
from aliyunCliParser import aliyunCliParser
import signal
import ConfigParser
from optparse import OptionParser
from optparse import Values
import os
import re
import time
import Queue
import sys
import socket
import shutil
reload(sys)
sys.setdefaultencoding("utf-8")
CMD_LIST = {}
HELP_CMD_LIST = ['--help','-h','help']
ACL_LIST = ['private', 'public-read', 'public-read-write']
OSS_PREFIX = 'oss://'
CONFIGFILE = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~')
CONFIGSECTION = 'OSSCredentials'
DEFAUL_HOST = "oss.aliyuncs.com"
OSS_HOST = DEFAUL_HOST
ID = ""
KEY = ""
STS_TOKEN = None
TOTAL_PUT = AtomicInt()
PUT_OK = AtomicInt()
PUT_FAIL = AtomicInt()
PUT_SKIP = AtomicInt()
FILE_NUM_TOTAL = AtomicInt()
FILE_NUM_OK = AtomicInt()
GET_OK = AtomicInt()
GET_FAIL = AtomicInt()
GET_SKIP = AtomicInt()
DELETE_OK = AtomicInt()
COPY_OK = AtomicInt()
SEND_BUF_SIZE = 8192
RECV_BUF_SIZE = 1024*1024*10
MAX_OBJECT_SIZE = 5*1024*1024*1024
MAX_RETRY_TIMES = 3
IS_DEBUG = False
ERROR_FILE_LIST = []
AUTO_DUMP_FILE_NUM = 50
RET_OK = 0
RET_FAIL = -1
RET_SKIP = 1
lock = threading.Lock()
HELP = \
'''The valid commands are as follows:
GetAllBucket
CreateBucket oss://bucket --acl [acl] --location [location]
DeleteBucket oss://bucket
DeleteWholeBucket oss://bucket
GetBucketLocation oss://bucket
PutBucketCors oss://bucket localfile
GetBucketCors oss://bucket
DeleteBucketCors oss://bucket
PutBucketLogging oss://source_bucket oss://target_bucket/[prefix]
GetBucketLogging oss://bucket
DeleteBucketLogging oss://bucket
PutBucketWebsite oss://bucket indexfile [errorfile]
GetBucketWebsite oss://bucket
DeleteBucketWebsite oss://bucket
PutBucketLifeCycle oss://bucket localfile
GetBucketLifeCycle oss://bucket
DeleteBucketLifeCycle oss://bucket
PutBucketReferer oss://bucket --allow_empty_referer true --referer "referer1,referer2,...,refererN"
GetBucketReferer oss://bucket
GetAcl oss://bucket
SetAcl oss://bucket --acl [acl]
allow private, public-read, public-read-write
List oss://bucket/[prefix] [marker] [delimiter] [maxkeys]
oss://bucket/[prefix] --marker xxx --delimiter xxx --maxkeys xxx
MkDir oss://bucket/dirname
ListAllObject oss://bucket/[prefix]
ListAllDir oss://bucket/[prefix]
DeleteAllObject oss://bucket/[prefix] --force false
DownloadAllObject oss://bucket/[prefix] localdir --replace false --thread_num 5
DownloadToDir oss://bucket/[prefix] localdir --replace false --temp_dir xxx --thread_num 5
UploadObjectFromLocalDir localdir oss://bucket/[prefix] --check_point check_point_file --replace false --check_md5 false --thread_num 5
Put oss://bucket/object --content_type [content_type] --headers \"key1:value1#key2:value2\" --check_md5 false
Get oss://bucket/object localfile
MultiGet oss://bucket/object localfile --thread_num 5
Cat oss://bucket/object
Meta oss://bucket/object
Info oss://bucket/object
Copy oss://source_bucket/source_object oss://target_bucket/target_object --headers \"key1:value1#key2:value2\"
CopyLargeFile oss://source_bucket/source_object oss://target_bucket/target_object --part_size 10*1024*1024 --upload_id xxx
CopyBucket oss://source_bucket/[prefix] oss://target_bucket/[prefix] --headers \"key1:value1\" --replace false
Delete oss://bucket/object
SignUrl oss://bucket/object --timeout [timeout_seconds]
CreateLinkFromFile oss://bucket/object object_name_list_file
CreateLink oss://bucket/object object1 object2 ... objectN
GetLinkIndex oss://bucket/object
Options oss://bucket/[object] --origin xxx --method [GET, PUT, DELETE, HEAD, POST]
UploadDisk localdir oss://bucket/[prefix] [--check_point check_point_file --filename filename_file --replace false --content_type xxx --skip_dir false --skip_suffix false --out xxx] --device_id xxx --check_md5 false
Init oss://bucket/object
ListPart oss://bucket/object --upload_id xxx
ListParts oss://bucket
GetAllPartSize oss://bucket
Cancel oss://bucket/object --upload_id xxx
MultiUpload localfile oss://bucket/object --upload_id xxx --thread_num 10 --max_part_num 1000 --check_md5 false
UploadPartFromFile localfile oss://bucket/object --upload_id xxx --part_number xxx
UploadPartFromString oss://bucket/object --upload_id xxx --part_number xxx --data xxx
Config --host oss.aliyuncs.com --accessid accessid --accesskey accesskey --sts_token token
'''
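# Editor's illustrative invocations (the command name "osscmd" is an
# assumption; only the sub-command syntax comes from the HELP text above):
#   osscmd Get oss://mybucket/photos/cat.jpg cat.jpg
#   osscmd MultiUpload big.iso oss://mybucket/big.iso --thread_num 10 --check_md5 true
#   osscmd List oss://mybucket/photos/ --marker xxx --maxkeys 100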
def print_result(cmd, res):
'''
    Print the HTTP response details if the request failed.
'''
try:
if res.status / 100 == 2:
pass
else:
body = res.read()
print "Error Headers:\n"
print res.getheaders()
print "Error Body:\n"
print body[0:1024]
print "Error Status:\n"
print res.status
print cmd, "Failed!"
if res.status == 403:
check_endpoint_error(body)
exit(-1)
except AttributeError:
pass
def format_size(size):
size = float(size)
coeffs = ['K', 'M', 'G', 'T']
coeff = ""
while size > 2048:
size /= 1024
coeff = coeffs.pop(0)
return str("%.2f"%size) + coeff + "B"
def format_utf8(string):
string = smart_code(string)
if isinstance(string, unicode):
string = string.encode('utf-8')
return string
def split_path(path):
if not path.lower().startswith(OSS_PREFIX):
print "%s parameter %s invalid, " \
"must be start with %s" % \
(args[0], args[1], OSS_PREFIX)
sys.exit(1)
pather = path[len(OSS_PREFIX):].split('/')
return pather
def check_upload_id(upload_id):
upload_id_len = 32
if len(upload_id) != upload_id_len:
print "upload_id is a 32-bit string generated by OSS"
print "you can get valid upload_id by init or listparts command"
sys.exit(1)
def check_bucket(bucket):
if len(bucket) == 0:
print "Bucket should not be empty!"
print "Please input oss://bucket"
sys.exit(1)
def check_object(object):
if len(object) == 0:
print "Object should not be empty!"
print "Please input oss://bucket/object"
sys.exit(1)
if object.startswith("/"):
print "object name should not begin with / "
sys.exit(-1)
def check_localfile(localfile):
if not os.path.isfile(localfile):
print "%s is not existed!" % localfile
sys.exit(1)
def check_args(argv, args=None):
if not args:
args = []
if len(args) < argv:
print "%s miss parameters" % args[0]
sys.exit(1)
def check_bucket_object(bucket, object):
check_bucket(bucket)
check_object(object)
def parse_bucket_object(path):
pather = split_path(path)
bucket = ""
object = ""
if len(pather) > 0:
bucket = pather[0]
if len(pather) > 1:
object += '/'.join(pather[1:])
object = smart_code(object)
if object.startswith("/"):
print "object name SHOULD NOT begin with /"
sys.exit(1)
return (bucket, object)
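# Editor's note (illustrative): parse_bucket_object splits an OSS path into
# its bucket and object key, e.g.
#   parse_bucket_object("oss://mybucket/a/b/c.txt") -> ("mybucket", "a/b/c.txt")
#   parse_bucket_object("oss://mybucket")           -> ("mybucket", "")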
def parse_bucket(path):
bucket = path
if bucket.startswith(OSS_PREFIX):
bucket = bucket[len(OSS_PREFIX):]
tmp_list = bucket.split("/")
if len(tmp_list) > 0:
bucket = tmp_list[0]
return bucket
def check_endpoint_error(xml_string):
try:
xml = minidom.parseString(xml_string)
end_point = get_tag_text(xml, 'Endpoint')
if end_point:
            print 'You should send all requests to %s' % end_point
except:
pass
def cmd_listing(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
(bucket, object) = parse_bucket_object(args[1])
if len(bucket) == 0:
return cmd_getallbucket(args, options)
prefix = object
marker = ''
delimiter = ''
maxkeys = 1000
if options.marker:
marker = options.marker
if options.delimiter:
delimiter = options.delimiter
if options.maxkeys:
maxkeys = options.maxkeys
if len(args) == 3:
marker = args[2]
elif len(args) == 4:
marker = args[2]
delimiter = args[3]
elif len(args) >= 5:
marker = args[2]
delimiter = args[3]
maxkeys = args[4]
prefix = smart_code(prefix)
marker = smart_code(marker)
delimiter = smart_code(delimiter)
maxkeys = smart_code(maxkeys)
exclude = options.exclude
res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
if (res.status / 100) == 2:
body = res.read()
hh = GetBucketXml(body)
(fl, pl) = hh.list()
print "prefix list is: "
for i in pl:
if exclude and i.startswith(exclude):
continue
print i
print "object list is: "
for i in fl:
if len(i) == 7:
try:
if exclude and i[0].startswith(exclude):
continue
print "%16s %6s %8s %s/%s" % (convert_to_localtime(i[1]), format_size((int)(i[3])), i[6], OSS_PREFIX + bucket, i[0])
except:
print "Exception when print :", i
print "\nprefix list number is: %s " % len(pl)
print "object list number is: %s " % len(fl)
return res
def cmd_listparts(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
(bucket, object) = parse_bucket_object(args[1])
if len(bucket) == 0:
return cmd_getallbucket(args, options)
print "%20s %20s %20s" % ("UploadId", "Path", "InitTime")
for i in get_all_upload_id_list(get_oss(), bucket, object):
print "%20s oss://%s/%s %20s" % (i[1], bucket, i[0], convert_to_localtime(i[2]))
def cmd_getallpartsize(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
(bucket, object) = parse_bucket_object(args[1])
if len(bucket) == 0:
return cmd_getallbucket(args, options)
total_part_size = 0
print "%5s %20s %20s %s" % ("Number", "UploadId", "Size", "Path")
for i in get_all_upload_id_list(get_oss(), bucket):
upload_id = i[1]
object = i[0]
for i in get_part_list(get_oss(), bucket, object, upload_id):
part_size = (int)(i[2])
total_part_size += part_size
print "%5s %20s %10s oss://%s/%s" % (i[0], upload_id, format_size(part_size), bucket, object)
print "totalsize is: real:%s, format:%s " % (total_part_size, format_size(total_part_size))
def cmd_init_upload(args, options):
check_args(2, args)
path = args[1]
(bucket, object) = parse_bucket_object(path)
check_bucket_object(bucket, object)
upload_id = get_upload_id(get_oss(), bucket, object)
print 'Upload Id: %s' % (upload_id)
def cmd_listpart(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
path = args[1]
(bucket, object) = parse_bucket_object(path)
if len(bucket) == 0:
return cmd_getallbucket(args, options)
if options.upload_id is None:
print "upload_id invalid, please set with --upload_id=xxx"
sys.exit(1)
print "%5s %32s %20s %20s" % ("PartNumber".ljust(10), "ETag".ljust(34), "Size".ljust(20), "LastModifyTime".ljust(32))
for i in get_part_list(get_oss(), bucket, object, options.upload_id):
if len(i) >= 4:
print "%s %s %s %s" % (str(i[0]).ljust(10), str(i[1]).ljust(34), str(i[2]).ljust(20), str(i[3]).ljust(32))
def cmd_upload_part_from_file(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
path = args[2]
(bucket, object) = parse_bucket_object(path)
check_bucket_object(bucket, object)
if options.upload_id is None:
print "upload_id invalid, please set with --upload_id=xxx"
sys.exit(1)
if options.part_number is None:
print "part_number invalid, please set with --part_number=xxx"
sys.exit(1)
res = get_oss().upload_part(bucket, object, localfile, options.upload_id, options.part_number)
return res
def cmd_upload_part_from_string(args, options):
check_args(2, args)
path = args[1]
(bucket, object) = parse_bucket_object(path)
check_bucket_object(bucket, object)
if options.upload_id is None:
print "upload_id invalid, please set with --upload_id=xxx"
sys.exit(1)
if options.part_number is None:
print "part_number invalid, please set with --part_number=xxx"
sys.exit(1)
if options.data is None:
print "data invalid, please set with --data=xxx"
sys.exit(1)
res = get_oss().upload_part_from_string(bucket, object, options.data, options.upload_id, options.part_number)
return res
def cmd_listallobject(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
path = args[1]
(bucket, object) = parse_bucket_object(path)
if len(bucket) == 0:
return cmd_getallbucket(args, options)
prefix = object
marker = ""
total_object_num = 0
totalsize = 0
totaltimes = 0
delimiter = ''
maxkeys = '1000'
if options.out:
f = open(options.out, "w")
while 1:
res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
if res.status != 200:
return res
body = res.read()
(tmp_object_list, marker) = get_object_list_marker_from_xml(body)
for i in tmp_object_list:
object = i[0]
length = i[1]
last_modify_time = i[2]
total_object_num += 1
totalsize += (int)(length)
if options.exclude:
exclude = options.exclude
if object.startswith(exclude):
continue
msg = "%s%s/%s" % (OSS_PREFIX, bucket, object)
print "%16s %6s %s/%s " % (convert_to_localtime(last_modify_time), format_size(length), OSS_PREFIX + bucket, object)
if options.out:
f.write(msg)
f.write("\n")
totaltimes += 1
if len(marker) == 0:
break
if options.out:
f.close()
print "the object list result is saved into %s" % options.out
print "object list number is: %s " % total_object_num
print "totalsize is: real:%s, format:%s " % (totalsize, format_size(totalsize))
print "request times is: %s" % totaltimes
return res
def cmd_listalldir(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
path = args[1]
(bucket, object) = parse_bucket_object(path)
if len(bucket) == 0:
return cmd_getallbucket(args, options)
prefix = object
if prefix and not prefix.endswith("/"):
prefix = "%s/" % prefix
marker = ""
total_object_num = 0
totalsize = 0
totaltimes = 0
delimiter = '/'
maxkeys = '1000'
while 1:
res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
if res.status != 200:
return res
body = res.read()
(tmp_object_list, marker) = get_dir_list_marker_from_xml(body)
for i in tmp_object_list:
if i.endswith("/"):
i = i[:-1]
msg = "%s" % (os.path.basename(i))
print msg
total_object_num += 1
totaltimes += 1
if len(marker) == 0:
break
print "\ncommon prefix list number is: %s " % total_object_num
print "request times is: %s" % totaltimes
return res
def get_object(bucket, object, object_prefix, local_path, length, last_modify_time, replace, retry_times = MAX_RETRY_TIMES, temp_dir = None):
'''
return RET_OK, RET_FAIL, RET_SKIP
'''
show_bar = False
object = smart_code(object)
tmp_object = object
if object_prefix == object[:len(object_prefix)]:
tmp_object = object[len(object_prefix):]
while 1:
if not tmp_object.startswith("/"):
break
tmp_object = tmp_object[1:]
localfile = os.path.join(local_path, tmp_object)
localfile = smart_code(localfile)
temp_filename = ''
if temp_dir:
temp_filename = get_unique_temp_filename(temp_dir, localfile)
for i in xrange(retry_times):
try:
if os.path.isfile(localfile):
if replace:
os.remove(localfile)
else:
t1 = last_modify_time
t2 = (int)(os.path.getmtime(localfile))
if (int)(length) == os.path.getsize(localfile) and t1 < t2:
                        # skip downloading this object when these conditions match
print "no need to get %s/%s to %s" % (bucket, object, localfile)
return RET_SKIP
else:
try:
dirname = os.path.dirname(localfile)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if temp_dir:
dirname = os.path.dirname(temp_filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
except:
pass
filename = localfile
if temp_dir:
filename = temp_filename
if os.path.isdir(filename):
print "no need to get %s/%s to %s" % (bucket, object, filename)
return RET_SKIP
ret = continue_get(bucket, object, filename)
if ret:
print "get %s/%s to %s OK" % (bucket, object, localfile)
if temp_dir:
shutil.move(temp_filename, localfile)
pass
return RET_OK
else:
print "get %s/%s to %s FAIL" % (bucket, object, localfile)
except:
print "get %s/%s to %s exception" % (bucket, object, localfile)
print sys.exc_info()[0], sys.exc_info()[1]
            if temp_dir and os.path.isfile(temp_filename):
                os.remove(temp_filename)
return RET_FAIL
class DownloadObjectWorker(threading.Thread):
def __init__(self, retry_times, queue):
threading.Thread.__init__(self)
self.queue = queue
self.retry_times = retry_times
self.ok_num = 0
self.fail_num = 0
self.skip_num = 0
def run(self):
while 1:
try:
(get_object, bucket, object, object_prefix, local_path, length, last_modify_time, replace, retry_times, temp_dir) = self.queue.get(block=False)
ret = get_object(bucket, object, object_prefix, local_path, length, last_modify_time, replace, self.retry_times, temp_dir)
if ret == RET_OK:
self.ok_num += 1
elif ret == RET_SKIP:
self.skip_num += 1
else:
self.fail_num += 1
self.queue.task_done()
except Queue.Empty:
break
except:
self.fail_num += 1
print sys.exc_info()[0], sys.exc_info()[1]
self.queue.task_done()
global GET_SKIP
global GET_OK
global GET_FAIL
lock.acquire()
GET_SKIP += self.skip_num
GET_OK += self.ok_num
GET_FAIL += self.fail_num
lock.release()
def cmd_downloadallobject(args, options):
check_args(3, args)
path = args[1]
(bucket, object) = parse_bucket_object(path)
check_bucket(bucket)
local_path = args[2]
if os.path.isfile(local_path):
print "%s is not dir, please input localdir" % local_path
exit(-1)
replace = False
if options.replace is not None and options.replace.lower() == "true":
replace = True
prefix = object
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
temp_dir = None
if options.temp_dir:
temp_dir = options.temp_dir
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
marker = ""
delimiter = ''
maxkeys = '1000'
handled_obj_num = 0
while 1:
queue = Queue.Queue(0)
for i in xrange(0, retry_times):
res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
if res.status/100 == 5:
continue
else:
break
if res.status != 200:
return res
body = res.read()
(tmp_object_list, marker) = get_object_list_marker_from_xml(body)
for i in tmp_object_list:
object = i[0]
length = i[1]
last_modify_time = format_unixtime(i[2])
if str(length) == "0" and object.endswith("/"):
continue
handled_obj_num += 1
queue.put((get_object, bucket, object, prefix, local_path, length, last_modify_time, replace, MAX_RETRY_TIMES, temp_dir))
thread_pool = []
for i in xrange(thread_num):
current = DownloadObjectWorker(retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
if len(marker) == 0:
break
global GET_OK
global GET_SKIP
global GET_FAIL
print "Total being downloaded objects num: %s, they are downloaded into %s" % (GET_OK + GET_FAIL + GET_SKIP, local_path)
print "OK num:%s, SKIP num:%s, FAIL num:%s" % (GET_OK, GET_SKIP, GET_FAIL)
if temp_dir and os.path.abspath(local_path) != os.path.abspath(temp_dir):
shutil.rmtree(temp_dir, True)
if GET_FAIL != 0:
exit(-1)
def put_object(bucket, object, local_file, local_modify_time, is_replace, is_check_md5=False, content_type="", multipart_threshold=100*1024*1024, retry_times=2):
'''
return RET_OK, RET_FAIL, RET_SKIP
'''
if not os.path.isfile(local_file):
print "upload %s FAIL, no such file." % (local_file)
return RET_FAIL
show_bar = False
oss = get_oss(show_bar)
object = smart_code(object)
if len(object) == 0:
print "object is empty when put /%s/%s, skip" % (bucket, object)
return RET_SKIP
local_file_size = os.path.getsize(local_file)
if not is_replace:
try:
res = oss.head_object(bucket, object)
if res.status == 200 and str(local_file_size) == res.getheader('content-length'):
oss_gmt = res.getheader('last-modified')
format = "%a, %d %b %Y %H:%M:%S GMT"
oss_last_modify_time = format_unixtime(oss_gmt, format)
if not local_modify_time:
local_modify_time = (int)(os.path.getmtime(local_file))
if oss_last_modify_time >= local_modify_time:
#print "upload %s is skipped" % (local_file)
return RET_SKIP
except:
print "%s %s" % (sys.exc_info()[0], sys.exc_info()[1])
if is_check_md5:
md5string, base64md5 = get_file_md5(local_file)
for i in xrange(retry_times):
try:
if local_file_size > multipart_threshold:
upload_id = ""
thread_num = 5
max_part_num = 10000
headers = {}
if is_check_md5:
headers['x-oss-meta-md5'] = md5string
if content_type:
headers['Content-Type'] = content_type
res = oss.multi_upload_file(bucket, object, local_file, upload_id, thread_num, max_part_num, headers, check_md5=is_check_md5)
else:
headers = {}
if is_check_md5:
headers['Content-MD5'] = base64md5
headers['x-oss-meta-md5'] = md5string
res = oss.put_object_from_file(bucket, object, local_file, content_type, headers)
if 200 == res.status:
return RET_OK
else:
print "upload %s to /%s/%s FAIL, status:%s, request-id:%s" % (local_file, bucket, object, res.status, res.getheader("x-oss-request-id"))
except:
print "upload %s/%s from %s exception" % (bucket, object, local_file)
print sys.exc_info()[0], sys.exc_info()[1]
return RET_FAIL
class UploadObjectWorker(threading.Thread):
def __init__(self, check_point_file, retry_times, queue):
threading.Thread.__init__(self)
self.check_point_file = check_point_file
self.queue = queue
self.file_time_map = {}
self.error_file_list = []
self.retry_times = retry_times
self.ok_num = 0
self.fail_num = 0
self.skip_num = 0
def run(self):
global PUT_SKIP
global PUT_OK
global PUT_FAIL
global TOTAL_PUT
global FILE_NUM_OK
while 1:
try:
(put_object, bucket, object, local_file, local_modify_time, is_replace, is_check_md5, content_type, multipart_threshold) = self.queue.get(block=False)
ret = put_object(bucket, object, local_file, local_modify_time, is_replace, is_check_md5, content_type, multipart_threshold, self.retry_times)
is_ok = False
if ret == RET_OK:
is_ok = True
self.ok_num += 1
PUT_OK += 1
FILE_NUM_OK += 1
elif ret == RET_SKIP:
is_ok = True
self.skip_num += 1
PUT_SKIP += 1
FILE_NUM_OK += 1
else:
self.fail_num += 1
PUT_FAIL += 1
self.error_file_list.append(local_file)
if is_ok:
local_file_full_path = os.path.abspath(local_file)
local_file_full_path = format_utf8(local_file_full_path)
self.file_time_map[local_file_full_path] = (int)(os.path.getmtime(local_file))
sum = (PUT_SKIP + PUT_OK + PUT_FAIL)
if TOTAL_PUT > 0:
exec("rate = 100*%s/(%s*1.0)" % (sum, TOTAL_PUT))
else:
rate = 0
print '\rOK:%s, FAIL:%s, SKIP:%s, TOTAL_DONE:%s, TOTAL_TO_DO:%s, PROCESS:%.2f%%' % (PUT_OK, PUT_FAIL, PUT_SKIP, sum, TOTAL_PUT, rate),
sys.stdout.flush()
if self.ok_num % AUTO_DUMP_FILE_NUM == 0:
if len(self.file_time_map) != 0:
dump_check_point(self.check_point_file, self.file_time_map)
self.file_time_map = {}
self.queue.task_done()
except Queue.Empty:
break
except:
PUT_FAIL += 1
print sys.exc_info()[0], sys.exc_info()[1]
self.queue.task_done()
if len(self.error_file_list) != 0:
lock.acquire()
ERROR_FILE_LIST.extend(self.error_file_list)
lock.release()
if len(self.file_time_map) != 0:
dump_check_point(self.check_point_file, self.file_time_map)
def load_check_point(check_point_file):
file_time_map = {}
if os.path.isfile(check_point_file):
f = open(check_point_file)
for line in f:
line = line.strip()
tmp_list = line.split('#')
if len(tmp_list) > 1:
time_stamp = (float)(tmp_list[0])
time_stamp = (int)(time_stamp)
#file_name = "".join(tmp_list[1:])
file_name = line[len(tmp_list[0])+1:]
file_name = format_utf8(file_name)
if file_time_map.has_key(file_name) and file_time_map[file_name] > time_stamp:
continue
file_time_map[file_name] = time_stamp
f.close()
return file_time_map
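# Editor's note (assumption based on load_check_point/dump_check_point): each
# check-point line has the form "<mtime>#<absolute file path>", e.g.
#   1429862400#/data/photos/2015/img_001.jpg
# and load_check_point keeps the newest mtime recorded for each path.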
def load_filename(filename_file):
filenames = []
if os.path.isfile(filename_file):
f = open(filename_file)
for line in f:
line = line.strip()
filenames.append(line)
return filenames
def dump_filename(filename_file, filenames=None):
if len(filename_file) == 0 or len(filenames) == 0:
return
try:
f = open(filename_file,"w")
for filename in filenames:
line = "%s\n" %(filename)
f.write(line)
except:
pass
try:
f.close()
except:
pass
def dump_check_point(check_point_file, result_map=None):
if len(check_point_file) == 0 or len(result_map) == 0:
return
lock.acquire()
old_file_time_map = {}
if os.path.isfile(check_point_file):
old_file_time_map = load_check_point(check_point_file)
try:
f = open(check_point_file,"w")
for k, v in result_map.items():
if old_file_time_map.has_key(k) and old_file_time_map[k] < v:
del old_file_time_map[k]
line = "%s#%s\n" % (v, k)
line = format_utf8(line)
f.write(line)
for k, v in old_file_time_map.items():
line = "%s#%s\n" % (v, k)
line = format_utf8(line)
f.write(line)
except:
pass
try:
f.close()
except:
pass
lock.release()
def format_object(object):
tmp_list = object.split(os.sep)
object = "/".join(x for x in tmp_list if x.strip() and x != "/")
while 1:
if object.find('//') == -1:
break
object = object.replace('//', '/')
return object
def get_object_name(filename, filepath):
filename = format_object(filename)
filepath = format_object(filepath)
file_name = os.path.basename(filename)
return file_name
def get_file_names_from_disk(path, topdown):
filenames = []
for root, dirs, files in os.walk(path, topdown):
for filespath in files:
filename = os.path.join(root, filespath)
filenames.append(filename)
return filenames
#for offline uploadfile to oss
def cmd_upload_disk(args, options):
check_args(3, args)
path = args[2]
(bucket, object) = parse_bucket_object(path)
check_bucket(bucket)
local_path = args[1]
if not os.path.isdir(local_path):
print "%s is not dir, please input localdir" % local_path
exit(-1)
if not local_path.endswith(os.sep):
local_path = "%s%s" % (local_path, os.sep)
if not options.device_id:
print "please set device id with --device_id=xxx"
exit(-1)
check_point_file = ""
is_check_point = False
file_time_map = {}
if options.check_point:
is_check_point = True
check_point_file = options.check_point
file_time_map = load_check_point(check_point_file)
filename_file = ""
filenames = []
is_filename_file = False
if options.filename_list:
filename_file = options.filename_list
if os.path.isfile(filename_file):
is_filename_file = True
filenames = load_filename(filename_file)
prefix = object
is_replace = False
if options.replace:
if options.replace.lower() == "true":
is_replace = True
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
is_check_md5 = False
if options.check_md5:
if options.check_md5.lower() == "true":
is_check_md5 = True
multipart_threshold = 100*1024*1024
if options.multipart_threshold:
multipart_threshold = (int)(options.multipart_threshold)
total_upload_num = 0
topdown = True
def process_localfile(items):
queue = Queue.Queue(0)
for local_file in items:
if os.path.isfile(local_file):
local_modify_time = 0
local_file_full_path = os.path.abspath(local_file)
local_file_full_path = format_utf8(local_file_full_path)
if is_check_point and file_time_map.has_key(local_file_full_path):
local_modify_time = (int)(os.path.getmtime(local_file))
record_modify_time = file_time_map[local_file_full_path]
if local_modify_time <= record_modify_time:
                        print 'file:%s already uploaded' % (local_file_full_path)
global FILE_NUM_OK
FILE_NUM_OK += 1
continue
if options.skip_dir and options.skip_dir.lower() == "true":
object = smart_code(os.path.basename(local_file))
else:
object = smart_code(local_file)
if options.strip_dir:
strip_dir = options.strip_dir
if not strip_dir.endswith("/"):
strip_dir = "%s/" % strip_dir
if object.startswith(strip_dir):
object = object[len(options.strip_dir):]
if options.skip_suffix and options.skip_suffix.lower() == "true":
pos = object.rfind(".")
if pos != -1:
object = object[:pos]
while 1:
if object.startswith("/"):
object = object[1:]
else:
break
if prefix:
if prefix.endswith("/"):
object = "%s%s" % (prefix, object)
else:
object = "%s/%s" % (prefix, object)
queue.put((put_object, bucket, object, local_file, local_modify_time, is_replace, is_check_md5, options.content_type, multipart_threshold))
qsize = queue.qsize()
global TOTAL_PUT
TOTAL_PUT += qsize
thread_pool = []
for i in xrange(thread_num):
current = UploadObjectWorker(check_point_file, retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
return qsize
if not is_filename_file:
filenames = get_file_names_from_disk(local_path, topdown);
dump_filename(filename_file, filenames)
global FILE_NUM_TOTAL
FILE_NUM_TOTAL += len(filenames)
total_upload_num += process_localfile(filenames);
print ""
print "DEVICEID:sn%s" % options.device_id
global PUT_OK
global PUT_SKIP
global PUT_FAIL
print "This time Total being uploaded localfiles num: %s" % (PUT_OK + PUT_SKIP + PUT_FAIL)
print "This time OK num:%s, SKIP num:%s, FAIL num:%s" % (PUT_OK, PUT_SKIP, PUT_FAIL)
print "Total file num:%s, OK file num:%s" %(FILE_NUM_TOTAL, FILE_NUM_OK)
if PUT_FAIL != 0:
print "FailUploadList:"
for i in set(ERROR_FILE_LIST):
print i
if options.out:
try:
f = open(options.out, "w")
for i in set(ERROR_FILE_LIST):
f.write("%s\n" % i.strip())
f.close()
print "FailUploadList is written into %s" % options.out
except:
print "write upload failed file exception"
print sys.exc_info()[0], sys.exc_info()[1]
exit(-1)
def cmd_upload_object_from_localdir(args, options):
check_args(3, args)
path = args[2]
(bucket, object) = parse_bucket_object(path)
check_bucket(bucket)
local_path = args[1]
if not os.path.isdir(local_path):
print "%s is not dir, please input localdir" % local_path
exit(-1)
if not local_path.endswith(os.sep):
local_path = "%s%s" % (local_path, os.sep)
is_check_point = False
check_point_file = ""
file_time_map = {}
if options.check_point:
is_check_point = True
check_point_file = options.check_point
file_time_map = load_check_point(check_point_file)
prefix = object
is_replace = False
if options.replace:
if options.replace.lower() == "true":
is_replace = True
is_check_md5 = False
if options.check_md5:
if options.check_md5.lower() == "true":
is_check_md5 = True
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
multipart_threshold = 100*1024*1024
if options.multipart_threshold:
multipart_threshold = (int)(options.multipart_threshold)
total_upload_num = 0
topdown = True
def process_localfile(items):
queue = Queue.Queue(0)
for item in items:
local_file = os.path.join(root, item)
if os.path.isfile(local_file):
local_file_full_path = os.path.abspath(local_file)
local_file_full_path = format_utf8(local_file_full_path)
local_modify_time = 0
if is_check_point and file_time_map.has_key(local_file_full_path):
local_modify_time = (int)(os.path.getmtime(local_file))
record_modify_time = file_time_map[local_file_full_path]
if local_modify_time <= record_modify_time:
continue
object = get_object_name(smart_code(local_file), smart_code(local_path))
if prefix:
if prefix.endswith("/"):
object = "%s%s" % (prefix, object)
else:
object = "%s/%s" % (prefix, object)
content_type = ''
queue.put((put_object, bucket, object, local_file, local_modify_time, is_replace, is_check_md5, content_type, multipart_threshold))
qsize = queue.qsize()
thread_pool = []
global TOTAL_PUT
TOTAL_PUT += qsize
for i in xrange(thread_num):
current = UploadObjectWorker(check_point_file, retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
return qsize
for root, dirs, files in os.walk(local_path, topdown):
total_upload_num += process_localfile(files)
total_upload_num += process_localfile(dirs)
global PUT_OK
global PUT_SKIP
global PUT_FAIL
print ""
print "Total being uploaded localfiles num: %s" % (PUT_OK + PUT_SKIP + PUT_FAIL)
print "OK num:%s, SKIP num:%s, FAIL num:%s" % (PUT_OK, PUT_SKIP, PUT_FAIL)
if PUT_FAIL != 0:
exit(-1)
def get_object_list_marker_from_xml(body):
#return ([(object, object_length, last_modify_time)...], marker)
object_meta_list = []
next_marker = ""
hh = GetBucketXml(body)
(fl, pl) = hh.list()
if len(fl) != 0:
for i in fl:
object = convert_utf8(i[0])
last_modify_time = i[1]
length = i[3]
object_meta_list.append((object, length, last_modify_time))
if hh.is_truncated:
next_marker = hh.nextmarker
return (object_meta_list, next_marker)
def cmd_deleteallobject(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
path = args[1]
(bucket, object) = parse_bucket_object(path)
if len(bucket) == 0:
return cmd_getallbucket(args, options)
force_delete = False
if options.force and options.force.lower() == "true":
force_delete = True
if not force_delete:
ans = raw_input("DELETE all objects? Y/N, default is N: ")
if ans.lower() != "y":
print "quit."
exit(-1)
prefix = object
marker = ''
delimiter = ''
maxkeys = '1000'
if options.marker:
marker = options.marker
if options.delimiter:
delimiter = options.delimiter
if options.maxkeys:
maxkeys = options.maxkeys
debug = True
if not delete_all_objects(get_oss(), bucket, prefix, delimiter, marker, maxkeys, debug):
exit(-1)
def cmd_getallbucket(args, options):
width = 20
print "%s %s %s" % ("CreateTime".ljust(width), "BucketLocation".ljust(width), "BucketName".ljust(width))
marker = ""
prefix = ""
headers = None
total_num = 0
while 1:
res = get_oss().get_service(headers, prefix, marker)
if (res.status / 100) == 2:
body = res.read()
(bucket_meta_list, marker) = get_bucket_meta_list_marker_from_xml(body)
for i in bucket_meta_list:
print "%s %s %s" % (str(convert_to_localtime(i.creation_date)).ljust(width), i.location.ljust(width), i.name)
total_num += 1
else:
break
if not marker:
break
print "\nBucket Number is: %s" % total_num
return res
def cmd_createbucket(args, options):
check_args(2, args)
if options.acl is not None and options.acl not in ACL_LIST:
print "acl invalid, SHOULD be one of %s" % (ACL_LIST)
sys.exit(1)
acl = ''
if options.acl:
acl = options.acl
bucket = parse_bucket(args[1])
if options.location is not None:
location = options.location
return get_oss().put_bucket_with_location(bucket, acl, location)
return get_oss().put_bucket(bucket, acl)
def cmd_getbucketlocation(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_bucket_location(bucket)
if res.status / 100 == 2:
body = res.read()
h = GetBucketLocationXml(body)
print h.location
return res
def cmd_deletebucket(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
return get_oss().delete_bucket(bucket)
def cmd_deletewholebucket(args, options):
check_args(2, args)
ans = raw_input("DELETE whole bucket? Y/N, default is N: ")
if ans.lower() != "y":
print "quit."
exit(-1)
bucket = parse_bucket(args[1])
debug = True
delete_marker = ""
delete_upload_id_marker = ""
if options.marker:
delete_marker = options.marker
if options.upload_id:
delete_upload_id_marker = options.upload_id
if not clear_all_objects_in_bucket(get_oss(), bucket, delete_marker, delete_upload_id_marker, debug):
exit(-1)
def delete_object(bucket, object, retry_times=2):
object = smart_code(object)
global DELETE_OK
ret = False
for i in xrange(retry_times):
try:
oss = get_oss()
res = oss.delete_object(bucket, object)
if 2 == res.status / 100:
ret = True
if ret:
DELETE_OK += 1
print "delete %s/%s OK" % (bucket, object)
return ret
else:
print "delete %s/%s FAIL, status:%s, request-id:%s" % (bucket, object, res.status, res.getheader("x-oss-request-id"))
except:
print "delete %s/%s exception" % (bucket, object)
print sys.exc_info()[0], sys.exc_info()[1]
return False
class DeleteObjectWorker(threading.Thread):
def __init__(self, retry_times, queue):
threading.Thread.__init__(self)
self.queue = queue
self.retry_times = retry_times
def run(self):
while 1:
try:
(delete_object, bucket, object) = self.queue.get(block=False)
delete_object(bucket, object, self.retry_times)
self.queue.task_done()
except Queue.Empty:
break
except:
self.queue.task_done()
def cmd_deletebyfile(args, options):
check_args(2, args)
localfile = args[1]
check_localfile(localfile)
queue = Queue.Queue(0)
f = open(localfile)
for line in f:
line = line.strip()
(bucket, object) = parse_bucket_object(line)
if len(bucket) != 0 and len(object) != 0:
queue.put((delete_object, bucket, object))
f.close()
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
thread_pool = []
for i in xrange(thread_num):
current = DeleteObjectWorker(retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
def cmd_setacl(args, options):
check_args(2, args)
if options.acl is None or options.acl not in ACL_LIST:
print "acl invalid, SHOULD be one of %s" % (ACL_LIST)
sys.exit(1)
bucket = parse_bucket(args[1])
return get_oss().put_bucket(bucket, options.acl)
def cmd_getacl(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_bucket_acl(bucket)
if (res.status / 100) == 2:
body = res.read()
h = GetBucketAclXml(body)
print h.grant
return res
def to_http_headers(string):
headers_map = {}
for i in string.split('#'):
key_value_list = i.strip().split(':')
if len(key_value_list) >= 2:
headers_map[key_value_list[0]] = ":".join(key_value_list[1:])
return headers_map
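# Editor's note (illustrative): the --headers option uses '#' between pairs
# and ':' inside a pair, e.g.
#   to_http_headers("Content-Type:text/plain#x-oss-meta-foo:bar")
#   -> {'Content-Type': 'text/plain', 'x-oss-meta-foo': 'bar'}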
def cmd_mkdir(args, options):
check_args(2, args)
if not args[1].endswith('/'):
args[1] += '/'
(bucket, object) = parse_bucket_object(args[1])
res = get_oss().put_object_from_string(bucket, object, "")
return res
def handler(signum, frame):
print 'Signal handler called with signal', signum
raise Exception("timeout")
try:
signal.signal(signal.SIGALRM, handler)
except:
pass
def cmd_put(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
if os.path.getsize(localfile) > MAX_OBJECT_SIZE:
print "locafile:%s is bigger than %s, it is not support by put, please use multiupload instead." % (localfile, MAX_OBJECT_SIZE)
exit(-1)
#user specified objectname oss://bucket/[path]/object
(bucket, object) = parse_bucket_object(args[2])
if len(object) == 0:
# e.g. upload to oss://bucket/
object = os.path.basename(localfile)
elif object.endswith("/"):
        #e.g. upload to oss://bucket/a/b/
object += os.path.basename(localfile)
content_type = ""
headers = {}
if options.content_type:
content_type = options.content_type
if options.headers:
headers = to_http_headers(options.headers)
if options.check_md5:
if options.check_md5.lower() == "true":
md5string, base64md5 = get_file_md5(localfile)
headers["Content-MD5"] = base64md5
headers["x-oss-meta-md5"] = md5string
timeout = 0
if options.timeout:
timeout = (int)(options.timeout)
print "timeout", timeout
try:
signal.alarm(timeout)
except:
pass
res = get_oss().put_object_from_file(bucket, object, localfile, content_type, headers)
try:
signal.alarm(0) # Disable the signal
except:
pass
if res.status == 200:
print_url(OSS_HOST, bucket, object, res)
return res
def print_url(host, bucket, object, res):
print ""
second_level_domain = OSS_HOST
    original_object = object
object = oss_quote(object)
if check_bucket_valid(bucket) and not is_ip(second_level_domain):
if is_oss_host(second_level_domain):
print "Object URL is: http://%s.%s/%s" % (bucket, second_level_domain, object)
else:
print "Object URL is: http://%s/%s" % (second_level_domain, object)
else:
print "Object URL is: http://%s/%s/%s" % (second_level_domain, bucket, object)
print "Object abstract path is: oss://%s/%s" % (bucket, orginal_object)
header_map = convert_header2map(res.getheaders())
print "ETag is %s " % safe_get_element("etag", header_map)
def cmd_upload(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
multipart_threshold = 100*1024*1024
if options.multipart_threshold:
multipart_threshold = (int)(options.multipart_threshold)
localfile_size = os.path.getsize(localfile)
if localfile_size > multipart_threshold or localfile_size > MAX_OBJECT_SIZE:
return cmd_multi_upload(args, options)
return cmd_put(args, options)
def cmd_upload_group(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
#user specified objectname oss://bucket/[path]/object
(bucket, object) = parse_bucket_object(args[2])
if len(object) == 0:
# e.g. upload to oss://bucket/
object = os.path.basename(localfile)
elif object.endswith("/"):
        #e.g. upload to oss://bucket/a/b/
object += os.path.basename(localfile)
headers = {}
content_type = ''
if options.headers:
headers = to_http_headers(options.headers)
if options.content_type:
content_type = options.content_type
headers['Content-Type'] = content_type
thread_num = 10
if options.thread_num:
thread_num = (int)(options.thread_num)
max_part_num = 1000
if options.max_part_num:
max_part_num = (int)(options.max_part_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
oss = get_oss()
oss.set_retry_times(retry_times)
res = oss.upload_large_file(bucket, object, localfile, thread_num, max_part_num, headers)
if res.status == 200:
print_url(OSS_HOST, bucket, object, res)
return res
def cmd_multi_upload(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
#user specified objectname oss://bucket/[path]/object
(bucket, object) = parse_bucket_object(args[2])
is_check_md5 = False
if len(object) == 0:
# e.g. upload to oss://bucket/
object = os.path.basename(localfile)
elif object.endswith("/"):
        #e.g. upload to oss://bucket/a/b/
object += os.path.basename(localfile)
headers = {}
if options.headers:
headers = to_http_headers(options.headers)
thread_num = 10
if options.thread_num:
thread_num = (int)(options.thread_num)
max_part_num = 1000
if options.max_part_num:
max_part_num = (int)(options.max_part_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
if options.check_md5:
if options.check_md5.lower() == "true":
is_check_md5 = True
md5string, base64md5 = get_file_md5(localfile)
headers["x-oss-meta-md5"] = md5string
oss = get_oss()
oss.set_retry_times(retry_times)
upload_id = ""
if options.upload_id:
upload_id = options.upload_id
res = oss.get_all_parts(bucket, object, upload_id, max_parts=1)
if res.status != 200:
return res
if not upload_id:
upload_ids = []
upload_ids = get_upload_id_list(oss, bucket, object)
if upload_ids:
upload_ids = sorted(upload_ids)
upload_id = upload_ids[0]
res = oss.multi_upload_file(bucket, object, localfile, upload_id, thread_num, max_part_num, headers, debug=True, check_md5=is_check_md5)
if res.status == 200:
print_url(OSS_HOST, bucket, object, res)
return res
def cmd_copy(args, options):
check_args(3, args)
(bucket_source, object_source) = parse_bucket_object(args[1])
check_bucket_object(bucket_source, object_source)
(bucket, object) = parse_bucket_object(args[2])
check_bucket_object(bucket, object)
content_type = ""
headers = {}
if options.headers:
headers = to_http_headers(options.headers)
if options.content_type:
content_type = options.content_type
headers['Content-Type'] = content_type
res = get_oss().copy_object(bucket_source, object_source, bucket, object, headers)
if res.status == 200:
print_url(OSS_HOST, bucket, object, res)
return res
def cmd_upload_part_copy(args, options):
check_args(3, args)
(bucket_source, object_source) = parse_bucket_object(args[1])
check_bucket_object(bucket_source, object_source)
(bucket, object) = parse_bucket_object(args[2])
check_bucket_object(bucket, object)
#head object to get object size
headers = {}
res = get_oss().head_object(bucket_source, object_source, headers = headers)
if res.status != 200:
print 'copy large file fail because head object fail, status:%s' %(res.status)
sys.exit(-1)
content_len = (int)(res.getheader('Content-Length'))
etag = res.getheader('ETag')
#get part size
default_part_size = 10 * 1024 * 1024
part_size = default_part_size
max_part_num=10000
min_part_size = 5 * 1024 * 1024
if options.part_size:
part_size = (int)(eval(options.part_size))
if part_size < min_part_size:
print 'part size too small, change part size to %s' %(default_part_size)
part_size = default_part_size
if part_size * max_part_num < content_len:
part_size = (content_len + max_part_num - content_len % max_part_num) / max_part_num
            print 'part count exceeds max part num %s, changing part size to %s' % (max_part_num, part_size)
if content_len % part_size:
part_size_list = [part_size] * (content_len / part_size) + [ content_len % part_size]
else:
part_size_list = [part_size] * (content_len / part_size)
#get upload id
if options.upload_id:
upload_id = options.upload_id
else:
res = get_oss().init_multi_upload(bucket, object)
if res.status != 200:
print 'copy large file fail because init multipart upload fail, status:%s' %(res.status)
sys.exit(-1)
upload_id = GetInitUploadIdXml(res.read()).upload_id
#upload part copy
start = 0
part_number = 1
for part_size in part_size_list:
headers = {'x-oss-copy-source-range': ('bytes=%d-%d' % (start, start + part_size-1))}
headers['x-oss-copy-source-if-match'] = etag
res = get_oss().copy_object_as_part(bucket_source, object_source, bucket, object, upload_id, part_number, headers)
if res.status != 200:
print 'copy large file fail because upload part copy fail, status:%s, upload_id:%s' %(res.status, upload_id)
sys.exit(-1)
start += part_size
part_number += 1
#complete multipart upload
part_xml = get_part_xml(get_oss(), bucket, object, upload_id)
res = get_oss().complete_upload(bucket, object, upload_id, part_xml)
if res.status != 200:
print 'copy large file fail because complete multipart upload fail, status:%s, upload_id:%s' %(res.status, upload_id)
sys.exit(-1)
else:
print_url(OSS_HOST, bucket, object, res)
return res
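# Editor's worked example (illustrative): for a 25 MiB source object and the
# default 10 MiB part size, cmd_upload_part_copy above issues three ranged
# copies, bytes=0-10485759, bytes=10485760-20971519 and bytes=20971520-26214399,
# i.e. part_size_list == [10485760, 10485760, 5242880].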
def copy_object(src_bucket, src_object, des_bucket, des_object, headers, replace, retry_times = 3):
global COPY_OK
if COPY_OK > 0 and COPY_OK % 100 == 0:
print "%s objects are copied OK, marker is:%s" % (COPY_OK, src_object)
for i in xrange(retry_times):
tmp_headers = headers.copy()
try:
if replace:
res = get_oss().copy_object(src_bucket, src_object, des_bucket, des_object, tmp_headers)
if res.status == 200:
COPY_OK += 1
return True
else:
print "copy /%s/%s to /%s/%s FAIL, status:%s, request-id:%s" % \
(src_bucket, src_object, des_bucket, des_object, res.status, res.getheader("x-oss-request-id"))
else:
res = get_oss().head_object(des_bucket, des_object)
if res.status == 200:
COPY_OK += 1
return True
elif res.status == 404:
res = get_oss().copy_object(src_bucket, src_object, des_bucket, des_object, tmp_headers)
if res.status == 200:
COPY_OK += 1
return True
else:
print "copy /%s/%s to /%s/%s FAIL, status:%s, request-id:%s" % \
(src_bucket, src_object, des_bucket, des_object, res.status, res.getheader("x-oss-request-id"))
except:
print "copy /%s/%s to /%s/%s exception" % (src_bucket, src_object, des_bucket, des_object)
print sys.exc_info()[0], sys.exc_info()[1]
try:
res = get_oss().head_object(src_bucket, src_object)
if res.status == 200:
length = (int)(res.getheader('content-length'))
max_length = 1*1024*1024*1024
if length > max_length:
print "/%s/%s is bigger than %s, copy may fail. skip this one." \
% (src_bucket, src_object, max_length)
print "please use get command to download the object and then use multiupload command to upload the object."
return False
except:
print sys.exc_info()[0], sys.exc_info()[1]
pass
sleep_time = 300
print "sleep %s" % sleep_time
time.sleep(sleep_time)
print "copy /%s/%s to /%s/%s FAIL" % (src_bucket, src_object, des_bucket, des_object)
return False
class CopyObjectWorker(threading.Thread):
def __init__(self, retry_times, queue):
threading.Thread.__init__(self)
self.queue = queue
self.retry_times = retry_times
def run(self):
while 1:
try:
(copy_object, src_bucket, src_object, des_bucket, des_object, replace, headers) = self.queue.get(block=False)
copy_object(src_bucket, src_object, des_bucket, des_object, headers, replace, self.retry_times)
self.queue.task_done()
except Queue.Empty:
break
except:
self.queue.task_done()
def cmd_copy_bucket(args, options):
check_args(3, args)
(src_bucket, src_prefix) = parse_bucket_object(args[1])
(des_bucket, des_prefix) = parse_bucket_object(args[2])
if des_prefix and not des_prefix.endswith("/"):
des_prefix = "%s/" % des_prefix
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
replace = False
if options.replace is not None and options.replace.lower() == "true":
replace = True
marker = ""
if options.marker:
marker = options.marker
headers = {}
if options.headers:
headers = to_http_headers(options.headers)
delimiter = ''
maxkeys = '1000'
handled_obj_num = 0
while 1:
queue = Queue.Queue(0)
res = get_oss().get_bucket(src_bucket, src_prefix, marker, delimiter, maxkeys)
if res.status != 200:
return res
body = res.read()
(tmp_object_list, marker) = get_object_list_marker_from_xml(body)
for i in tmp_object_list:
object = i[0]
length = i[1]
last_modify_time = i[2]
if str(length) == "0" and object.endswith("/"):
continue
handled_obj_num += 1
src_object = smart_code(object)
tmp_object = src_object
if src_prefix.endswith("/"):
if src_prefix == object[:len(src_prefix)]:
tmp_object = object[len(src_prefix):]
while 1:
if not tmp_object.startswith("/"):
break
tmp_object = tmp_object[1:]
if des_prefix:
des_object = "%s%s" % (des_prefix, tmp_object)
else:
des_object = tmp_object
queue.put((copy_object, src_bucket, src_object, des_bucket, des_object, replace, headers))
#copy_object(src_bucket, src_object, des_bucket, des_object, replace)
thread_pool = []
for i in xrange(thread_num):
current = CopyObjectWorker(retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
if len(marker) == 0:
break
print "Total being copied objects num: %s, from /%s/%s to /%s/%s" % \
(handled_obj_num, src_bucket, src_prefix, des_bucket, des_prefix)
global COPY_OK
print "OK num:%s" % COPY_OK
print "FAIL num:%s" % (handled_obj_num - COPY_OK)
def continue_get(bucket, object, localfile, headers=None, retry_times=3):
length = -1
local_length = -2
tmp_headers = {}
header_map = {}
if headers:
tmp_headers = headers.copy()
try:
res = get_oss().head_object(bucket, object, tmp_headers)
if 200 == res.status:
length = (int)(res.getheader('content-length'))
header_map = convert_header2map(res.getheaders())
else:
print "can not get the length of object:", object
return False
except:
print sys.exc_info()[0], sys.exc_info()[1]
return False
endpos = length - 1
for i in xrange(retry_times):
curpos = 0
range_info = 'bytes=%d-%d' % (curpos, endpos)
if os.path.isfile(localfile):
local_length = os.path.getsize(localfile)
if i == 0 and header_map.has_key('x-oss-meta-md5'):
oss_md5_string = header_map['x-oss-meta-md5']
local_md5_string, base64_md5 = get_file_md5(localfile)
if local_md5_string.lower() == oss_md5_string.lower():
return True
else:
os.remove(localfile)
elif local_length == length:
#print "localfile:%s exists and length is equal. please check if it is ok. you can remove it first and download again." % localfile
return True
elif local_length < length:
if i == 0:
os.remove(localfile)
else:
curpos = local_length
range_info = 'bytes=%d-%d' % (curpos, endpos)
print "localfile:%s exists and length is:%s, continue to download. range:%s." % (localfile, local_length, range_info)
else:
os.remove(localfile)
file = open(localfile, "ab+")
tmp_headers = {}
if headers:
tmp_headers = headers.copy()
tmp_headers['Range'] = range_info
file.seek(curpos)
is_read_ok = False
oss_md5_string = ''
try:
res = get_oss().get_object(bucket, object, tmp_headers)
if res.status/100 == 2:
header_map = convert_header2map(res.getheaders())
if header_map.has_key('x-oss-meta-md5'):
oss_md5_string = header_map['x-oss-meta-md5']
while True:
content = res.read(RECV_BUF_SIZE)
if content:
file.write(content)
curpos += len(content)
else:
break
is_read_ok = True
else:
print "range get /%s/%s [%s] ret:%s, request-id:%s" % (bucket, object, range_info, res.status, res.getheader("x-oss-request-id"))
except:
print "range get /%s/%s [%s] exception" % (bucket, object, range_info)
print sys.exc_info()[0], sys.exc_info()[1]
file.flush()
file.close()
file_opened = False
continue
file.flush()
file.close()
if os.path.isfile(localfile):
local_length = os.path.getsize(localfile)
if is_read_ok and length == local_length:
if oss_md5_string != '':
md5string, base64md5 = get_file_md5(localfile)
if md5string.lower() != oss_md5_string.lower():
print "The object %s is download to %s failed. file md5 is incorrect." % (object, localfile)
return False
return True
else:
print "The object %s is download to %s failed. file length is incorrect.length is:%s local_length:%s" % (object, localfile, length, local_length)
return False
def cmd_get(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
localfile = args[2]
localfile = smart_code(localfile)
headers = {}
if options.headers:
headers = to_http_headers(options.headers)
if options.continue_download:
retry_times = 3
res = continue_get(bucket, object, localfile, headers, retry_times)
else:
tmp_headers = {}
tmp_headers = headers.copy()
res = get_oss().get_object_to_file(bucket, object, localfile, headers=tmp_headers)
if res.status/100 == 2:
header_map = convert_header2map(res.getheaders())
if header_map.has_key('x-oss-meta-md5'):
oss_md5string = header_map['x-oss-meta-md5']
md5string, base64md5 = get_file_md5(localfile)
if md5string.lower() != oss_md5string.lower():
print "The object %s is download to %s failed. file md5 is incorrect." % (object, localfile)
sys.exit(1)
else:
content_length = int(header_map['content-length'])
local_length = os.path.getsize(localfile)
if content_length != local_length:
print "The object %s is download to %s failed. file length is incorrect." % (object, localfile)
sys.exit(1)
else:
return res
if res:
print "The object %s is downloaded to %s, please check." % (object, localfile)
return res
def cmd_multi_get(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
localfile = args[2]
localfile = smart_code(localfile)
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
show_bar = False
oss = get_oss(show_bar)
ret = multi_get(oss, bucket, object, localfile, thread_num, retry_times)
if ret:
print "The object %s is downloaded to %s, please check." % (object, localfile)
else:
print "Download object:%s failed!" % (object)
exit(-1)
def cmd_cat(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
res = get_oss().get_object(bucket, object)
if res.status == 200:
        while True:
            data = res.read(10240)
            if not data:
                break
            # write chunks as-is so the object content is not broken up by the
            # extra newlines a bare print statement would add
            sys.stdout.write(data)
return res
def cmd_meta(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
headers = {}
res = get_oss().head_object(bucket, object, headers = headers)
if res.status == 200:
header_map = convert_header2map(res.getheaders())
width = 16
print "%s: %s" % ("objectname".ljust(width), object)
for key, value in header_map.items():
print "%s: %s" % (key.ljust(width), value)
return res
def cmd_info(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
res = get_oss().get_object_info(bucket, object)
if res.status == 200:
print res.read()
return res
def cmd_delete(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
return get_oss().delete_object(bucket, object)
def cmd_cancel(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
if options.upload_id is None:
print "upload_id invalid, please set with --upload_id=xxx"
sys.exit(1)
check_bucket_object(bucket, object)
return get_oss().cancel_upload(bucket, object, options.upload_id)
def cmd_sign_url(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
if options.timeout:
timeout = options.timeout
else:
timeout = "600"
print "timeout is %s seconds." % timeout
check_bucket_object(bucket, object)
method = 'GET'
print get_oss().sign_url(method, bucket, object, int(timeout))
def cmd_configure(args, options):
if options.accessid is None or options.accesskey is None:
print "%s miss parameters, use --accessid=[accessid] --accesskey=[accesskey] to specify id/key pair" % args[0]
sys.exit(-1)
config = ConfigParser.RawConfigParser()
config.add_section(CONFIGSECTION)
if options.host is not None:
config.set(CONFIGSECTION, 'host', options.host)
config.set(CONFIGSECTION, 'accessid', options.accessid)
config.set(CONFIGSECTION, 'accesskey', options.accesskey)
if options.sts_token:
config.set(CONFIGSECTION, 'sts_token', options.sts_token)
cfgfile = open(CONFIGFILE, 'w+')
config.write(cfgfile)
print "Your configuration is saved into %s ." % CONFIGFILE
cfgfile.close()
import stat
os.chmod(CONFIGFILE, stat.S_IREAD | stat.S_IWRITE)
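# Illustrative sketch of what cmd_configure() above writes: a plain ConfigParser INI
# file at CONFIGFILE. The section name and values below are placeholders (the real
# section name is whatever CONFIGSECTION is set to elsewhere in this module):
#
#   [OSSCredentials]
#   host = oss.example.com
#   accessid = your-access-id
#   accesskey = your-access-key
#   sts_token = optional-sts-token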
def cmd_help(args, options):
print HELP
def cmd_create_link(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
object_list = args[2:]
return get_oss().create_link_from_list(bucket, object, object_list)
def cmd_create_link_from_file(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
local_file = args[2]
if not os.path.isfile(local_file):
print "no such file:%s" % local_file
exit(-1)
f = open(local_file)
object_list = f.readlines()
f.close()
return get_oss().create_link_from_list(bucket, object, object_list)
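# Illustrative only: the local file consumed above (and by cmd_create_group_from_file
# below) is read line by line, one object name per line, e.g.
#
#   part-00000
#   part-00001
#   part-00002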
def cmd_get_link_index(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
res = get_oss().get_link_index(bucket, object)
if res.status == 200:
print res.read()
return res
def cmd_create_group_from_file(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
local_file = args[2]
if not os.path.isfile(local_file):
print "no such file:%s" % local_file
exit(-1)
f = open(local_file)
object_list = f.readlines()
f.close()
part_msg_list = []
for i in range(len(object_list)):
object_list[i] = object_list[i].rstrip('\n')
res = get_oss().head_object(bucket, object_list[i])
if res.status != 200:
print "head object: ", object_list[i], ", ", res.status
print 'Create Group Fail!'
return res
header_map = convert_header2map(res.getheaders())
etag = safe_get_element("etag", header_map)
etag = etag.replace("\"", "")
list = [str(i), object_list[i], etag]
part_msg_list.append(list)
object_group_msg_xml = create_object_group_msg_xml(part_msg_list)
return get_oss().post_object_group(bucket, object, object_group_msg_xml)
def cmd_create_group(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
object_list = args[2:]
part_msg_list = []
for i in range(len(object_list)):
res = get_oss().head_object(bucket, object_list[i])
if res.status != 200:
print "head object: ", object_list[i], ", ", res.status
print 'Create Group Fail!'
return res
header_map = convert_header2map(res.getheaders())
etag = safe_get_element("etag", header_map)
etag = etag.replace("\"", "")
list = [str(i), object_list[i], etag]
part_msg_list.append(list)
object_group_msg_xml = create_object_group_msg_xml(part_msg_list)
return get_oss().post_object_group(bucket, object, object_group_msg_xml)
def cmd_get_group_index(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
res = get_oss().get_object_group_index(bucket, object)
if res.status == 200:
print res.read()
return res
def cmd_put_bucket_logging(args, options):
source_bucket = ''
target_bucket = ''
prefix = ''
check_args(2, args)
    source_bucket = parse_bucket(args[1])
    # only parse the target bucket (and optional log prefix) when a third
    # argument is actually given, otherwise an empty target is passed through
    if len(args) >= 3:
        (target_bucket, prefix) = parse_bucket_object(args[2])
        target_bucket = parse_bucket(args[2])
res = get_oss().put_logging(source_bucket, target_bucket, prefix)
return res
def cmd_get_bucket_logging(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_logging(bucket)
if res.status == 200:
print res.read()
return res
def cmd_put_bucket_website(args, options):
bucket = ''
indexfile = ''
errorfile = ''
check_args(3, args)
if len(args) >= 3:
indexfile = args[2]
if len(args) >= 4:
errorfile = args[3]
bucket = parse_bucket(args[1])
res = get_oss().put_website(bucket, indexfile, errorfile)
return res
def cmd_get_bucket_website(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_website(bucket)
if res.status == 200:
print res.read()
return res
def cmd_delete_bucket_website(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().delete_website(bucket)
return res
def cmd_delete_bucket_logging(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().delete_logging(bucket)
return res
def cmd_put_bucket_cors(args, options):
check_args(3, args)
bucket = parse_bucket(args[1])
local_file = args[2]
if not os.path.isfile(local_file):
print "no such file:%s" % local_file
exit(-1)
f = open(local_file)
content = f.read()
f.close()
return get_oss().put_cors(bucket, content)
def cmd_get_bucket_cors(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_cors(bucket)
if res.status == 200:
print res.read()
return res
def cmd_delete_bucket_cors(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().delete_cors(bucket)
return res
def cmd_options(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
headers = {}
is_ok = True
if options.origin:
headers['Origin'] = options.origin
else:
is_ok = False
method_list = ["GET", "PUT", "DELETE", "HEAD", "POST"]
if options.method:
if options.method not in method_list:
is_ok = False
else:
headers['Access-Control-Request-Method'] = options.method
else:
is_ok = False
if not is_ok:
print "please set origin and method with --origin=xxx --method=xxx, the value of --method SHOULD be one of %s" % (" ".join(method_list))
exit(-1)
res = get_oss().options(bucket, object, headers)
return res
def cmd_put_bucket_lifecycle(args, options):
check_args(3, args)
bucket = parse_bucket(args[1])
local_file = args[2]
if not os.path.isfile(local_file):
print "no such file:%s" % local_file
exit(-1)
f = open(local_file)
lifecycle_config = f.read()
f.close()
res = get_oss().put_lifecycle(bucket, lifecycle_config)
return res
def cmd_get_bucket_lifecycle(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_lifecycle(bucket)
if res.status == 200:
print res.read()
return res
def cmd_put_bucket_referer(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
allow_empty_referer = True
if options.allow_empty_referer and options.allow_empty_referer.lower() == "false":
allow_empty_referer = False
referer_list = []
if options.referer:
referer_list = options.referer.split(",")
res = get_oss().put_referer(bucket, allow_empty_referer, referer_list)
return res
def cmd_get_bucket_referer(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_referer(bucket)
if res.status == 200:
print res.read()
return res
def cmd_delete_bucket_lifecycle(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().delete_lifecycle(bucket)
return res
def get_oss(show_bar = True):
oss = OssAPI(OSS_HOST, ID, KEY, sts_token=STS_TOKEN)
oss.show_bar = show_bar
oss.set_send_buf_size(SEND_BUF_SIZE)
oss.set_recv_buf_size(RECV_BUF_SIZE)
oss.set_debug(IS_DEBUG)
return oss
def setup_credentials(options):
config = ConfigParser.ConfigParser()
try:
config.read(CONFIGFILE)
global OSS_HOST
global ID
global KEY
global STS_TOKEN
try:
OSS_HOST = config.get(CONFIGSECTION, 'host')
except Exception:
OSS_HOST = DEFAUL_HOST
ID = config.get(CONFIGSECTION, 'accessid')
KEY = config.get(CONFIGSECTION, 'accesskey')
try:
STS_TOKEN = config.get(CONFIGSECTION, 'sts_token')
except:
pass
if options.accessid is not None:
ID = options.accessid
if options.accesskey is not None:
KEY = options.accesskey
if options.sts_token is not None:
STS_TOKEN = options.sts_token
if options.host is not None:
OSS_HOST = options.host
except Exception:
if options.accessid is not None:
ID = options.accessid
if options.accesskey is not None:
KEY = options.accesskey
if options.sts_token is not None:
STS_TOKEN = options.sts_token
if options.host is not None:
OSS_HOST = options.host
if len(ID) == 0 or len(KEY) == 0:
print "can't get accessid/accesskey, setup use : config --accessid=accessid --accesskey=accesskey"
sys.exit(1)
def setup_cmdlist():
CMD_LIST['GetAllBucket'] = cmd_getallbucket
CMD_LIST['CreateBucket'] = cmd_createbucket
CMD_LIST['DeleteBucket'] = cmd_deletebucket
CMD_LIST['DeleteWholeBucket'] = cmd_deletewholebucket
CMD_LIST['DeleteByFile'] = cmd_deletebyfile
CMD_LIST['GetBucketLocation'] = cmd_getbucketlocation
CMD_LIST['GetAcl'] = cmd_getacl
CMD_LIST['SetAcl'] = cmd_setacl
CMD_LIST['List'] = cmd_listing
CMD_LIST['MkDir'] = cmd_mkdir
CMD_LIST['Init'] = cmd_init_upload
CMD_LIST['UploadPartFromString'] = cmd_upload_part_from_string
CMD_LIST['UploadPartFromFile'] = cmd_upload_part_from_file
CMD_LIST['ListPart'] = cmd_listpart
CMD_LIST['ListParts'] = cmd_listparts
CMD_LIST['GetAllPartSize'] = cmd_getallpartsize
CMD_LIST['ListAllObject'] = cmd_listallobject
CMD_LIST['ListAllDir'] = cmd_listalldir
CMD_LIST['DownloadAllObject'] = cmd_downloadallobject
CMD_LIST['UploadObjectFromLocalDir'] = cmd_upload_object_from_localdir
CMD_LIST['UploadDisk'] = cmd_upload_disk
CMD_LIST['DeleteAllObject'] = cmd_deleteallobject
CMD_LIST['Put'] = cmd_put
CMD_LIST['Copy'] = cmd_copy
CMD_LIST['CopyLargeFile'] = cmd_upload_part_copy
CMD_LIST['CopyBucket'] = cmd_copy_bucket
CMD_LIST['Upload'] = cmd_upload
CMD_LIST['UploadGroup'] = cmd_upload_group
CMD_LIST['MultiUpload'] = cmd_multi_upload
CMD_LIST['Get'] = cmd_get
CMD_LIST['MultiGet'] = cmd_multi_get
CMD_LIST['Cat'] = cmd_cat
CMD_LIST['Meta'] = cmd_meta
CMD_LIST['Info'] = cmd_info
CMD_LIST['Delete'] = cmd_delete
CMD_LIST['Cancel'] = cmd_cancel
CMD_LIST['Config'] = cmd_configure
CMD_LIST['Help'] = cmd_help
CMD_LIST['SignUrl'] = cmd_sign_url
CMD_LIST['CreateLink'] = cmd_create_link
CMD_LIST['CreateLinkFromFile'] = cmd_create_link_from_file
CMD_LIST['GetLinkIndex'] = cmd_get_link_index
CMD_LIST['CreateGroup'] = cmd_create_group
CMD_LIST['CreateGroupFromFile'] = cmd_create_group_from_file
CMD_LIST['GetGroupIndex'] = cmd_get_group_index
CMD_LIST['PutBucketLogging'] = cmd_put_bucket_logging
CMD_LIST['GetBucketLogging'] = cmd_get_bucket_logging
CMD_LIST['DeleteBucketLogging'] = cmd_delete_bucket_logging
CMD_LIST['PutBucketWebsite'] = cmd_put_bucket_website
CMD_LIST['GetBucketWebsite'] = cmd_get_bucket_website
CMD_LIST['DeleteBucketWebsite'] = cmd_delete_bucket_website
CMD_LIST['PutBucketCors'] = cmd_put_bucket_cors
CMD_LIST['GetBucketCors'] = cmd_get_bucket_cors
CMD_LIST['DeleteBucketCors'] = cmd_delete_bucket_cors
CMD_LIST['Options'] = cmd_options
CMD_LIST['PutBucketLifeCycle'] = cmd_put_bucket_lifecycle
CMD_LIST['GetBucketLifeCycle'] = cmd_get_bucket_lifecycle
CMD_LIST['DeleteBucketLifeCycle'] = cmd_delete_bucket_lifecycle
CMD_LIST['PutBucketReferer'] = cmd_put_bucket_referer
CMD_LIST['GetBucketReferer'] = cmd_get_bucket_referer
def getSuitableKeyValues(keyValues):
newMap = dict()
if keyValues is not None and isinstance(keyValues,dict):
keys = keyValues.keys()
for key in keys:
value = keyValues.get(key)
if value is not None and isinstance(value,list) and len(value)>0:
value = value[0]
newMap[key] = value
return newMap
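# Illustrative only: option values may arrive wrapped in single-item lists, and
# getSuitableKeyValues() unwraps them, e.g.
#
#   getSuitableKeyValues({'thread_num': ['5'], 'force': ['true']})
#   # -> {'thread_num': '5', 'force': 'true'}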
def getParameterList():
parametersList = ['origin','sts_token', 'force', 'recv_buf_size', 'accesskey', 'part_size', 'retry_times',\
'replace', 'thread_num', 'marker', 'exclude','skip_dir', 'out', 'check_point', 'strip_dir',\
'check_md5','delimiter', 'skip_suffix', 'maxkeys', 'filename_list', 'location', 'temp_dir', \
'method', 'config_file', 'accessid', 'continue_download', 'allow_empty_referer','host',\
'referer', 'content_type', 'data', 'device_id', 'max_part_num', 'acl','headers',\
'part_number', 'upload_id', 'send_buf_size', 'timeout', 'debug', 'multipart_threshold']
return parametersList
def initKeyValues(parametersList):
newMap = dict.fromkeys(parametersList)
return newMap
def getParametersKV(keyValues,parameters):
if isinstance(keyValues,dict) and isinstance(parameters,dict):
keys = parameters.keys()
for item in keyValues:
if item in keys:
parameters[item] = keyValues[item]
return parameters
def getOptionsFromDict(parameters):
if isinstance(parameters,dict):
options = Values(parameters)
return options
def getOperations(operation):
list = []
if operation is not None:
list.append(operation)
return list
def getAvailableOperations():
setup_cmdlist()
return CMD_LIST.keys()
def handleOss():
    # declared global so the option handling below actually overrides the
    # module-level settings read by setup_credentials() and get_oss()
    global CONFIGFILE, IS_DEBUG, SEND_BUF_SIZE, RECV_BUF_SIZE
    parser = aliyunCliParser()
operation = parser._getOperations()
keyValues = parser._getKeyValues()
keyValues = parser.getOpenApiKeyValues(keyValues)
keyValues = getSuitableKeyValues(keyValues)
parameterList = getParameterList()
parameters = initKeyValues(parameterList)
parameters = getParametersKV(keyValues,parameters)
options = getOptionsFromDict(parameters)
args = operation
setup_cmdlist()
if args is None or len(args) < 1 or args[0] in HELP_CMD_LIST:
print HELP
sys.exit(1)
if args[0] not in CMD_LIST.keys():
print "unsupported command : %s " % args[0]
print HELP
sys.exit(1)
if options.config_file is not None:
CONFIGFILE = options.config_file
if options.debug is not None:
debug = options.debug
if debug.lower() == "true":
IS_DEBUG = True
else:
IS_DEBUG = False
if options.send_buf_size is not None:
try:
SEND_BUF_SIZE = (int)(options.send_buf_size)
except ValueError:
pass
if options.recv_buf_size is not None:
try:
RECV_BUF_SIZE = (int)(options.recv_buf_size)
except ValueError:
pass
if options.upload_id is not None:
check_upload_id(options.upload_id)
if args[0] != 'Config':
setup_credentials(options)
else:
CMD_LIST['Config'](args, options)
sys.exit(0)
cmd = args[0]
begin = time.time()
try:
res = CMD_LIST[cmd](args, options)
print_result(cmd, res)
except socket.timeout:
print "Socket timeout, please try again later."
sys.exit(1)
except socket.error, args:
print "Connect to oss failed: %s.\nplease check the host name you provided could be reached.\ne.g:" % (args)
print "\tcurl %s\nor\n\tping %s\n" % (OSS_HOST, OSS_HOST)
sys.exit(1)
end = time.time()
sys.stderr.write("%.3f(s) elapsed\n" % (end - begin))
if __name__ == '__main__':
handleOss()
|
lufornpy/aliyun-cli
|
aliyuncli/ossadp/ossHandler.py
|
Python
|
apache-2.0
| 88,246
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run the sequence memory tests on the ApicalDependentTemporalMemory
"""
import random
import unittest
import numpy as np
from nupic.research.frameworks.columns import TripleMemory
from nupic.research.frameworks.columns.shared_tests import SequenceMemoryTestBase
class ApicalDependentTM_BasalSequenceMemoryTests(SequenceMemoryTestBase,
unittest.TestCase):
"""
Run the sequence memory tests on the ApicalDependentTemporalMemory,
passing the sequences in through basal input.
"""
def constructTM(self, columnCount, cellsPerColumn, initialPermanence,
connectedPermanence, minThreshold, sampleSize,
permanenceIncrement, permanenceDecrement,
predictedSegmentDecrement, activationThreshold, seed):
# Use the same apical input on every compute. This is like running the whole
# experiment in one "world" or on one "object". It makes the
# ApicalDependentTemporalMemory behave like traditional sequence memory.
apicalInputSize = 1024
self.constantApicalInput = np.array(
sorted(random.sample(list(range(apicalInputSize)), 40)),
dtype="uint32")
params = {
"columnCount": columnCount,
"cellsPerColumn": cellsPerColumn,
"initialPermanence": initialPermanence,
"connectedPermanence": connectedPermanence,
"minThreshold": minThreshold,
"sampleSize": sampleSize,
"permanenceIncrement": permanenceIncrement,
"permanenceDecrement": permanenceDecrement,
"basalPredictedSegmentDecrement": predictedSegmentDecrement,
# This parameter wreaks havoc if we're holding the apical input constant.
"apicalPredictedSegmentDecrement": 0.0,
"activationThreshold": activationThreshold,
"seed": seed,
"basalInputSize": columnCount * cellsPerColumn,
"apicalInputSize": apicalInputSize,
}
self.tm = TripleMemory(**params)
def compute(self, activeColumns, learn):
activeColumns = np.array(sorted(activeColumns), dtype="uint32")
self.tm.compute(activeColumns,
basalInput=self.tm.getActiveCells(),
basalGrowthCandidates=self.tm.getWinnerCells(),
apicalInput=self.constantApicalInput,
apicalGrowthCandidates=self.constantApicalInput,
learn=learn)
def reset(self):
self.tm.reset()
def getActiveCells(self):
return self.tm.getActiveCells()
def getPredictedCells(self):
return self.tm.getPredictedCells()
class ApicalDependentTM_ApicalSequenceMemoryTests(SequenceMemoryTestBase,
unittest.TestCase):
"""
Run the sequence memory tests on the ApicalDependentTemporalMemory,
passing the sequences in through apical input.
"""
def constructTM(self, columnCount, cellsPerColumn, initialPermanence,
connectedPermanence, minThreshold, sampleSize,
permanenceIncrement, permanenceDecrement,
predictedSegmentDecrement, activationThreshold, seed):
# Use the same basal input on every compute. With this algorithm, basal and
# apical segments are treated equally, so you can do sequence memory on the
# apical segments. There might not be a good reason to do this, but it's
# worth testing that the code is working as expected.
basalInputSize = 1024
self.constantBasalInput = np.array(
sorted(random.sample(list(range(basalInputSize)), 40)),
dtype="uint32")
params = {
"columnCount": columnCount,
"cellsPerColumn": cellsPerColumn,
"initialPermanence": initialPermanence,
"connectedPermanence": connectedPermanence,
"minThreshold": minThreshold,
"sampleSize": sampleSize,
"permanenceIncrement": permanenceIncrement,
"permanenceDecrement": permanenceDecrement,
# This parameter wreaks havoc if we're holding the basal input constant.
"basalPredictedSegmentDecrement": 0.0,
"apicalPredictedSegmentDecrement": predictedSegmentDecrement,
"activationThreshold": activationThreshold,
"seed": seed,
"basalInputSize": basalInputSize,
"apicalInputSize": columnCount * cellsPerColumn,
}
self.tm = TripleMemory(**params)
def compute(self, activeColumns, learn):
activeColumns = np.array(sorted(activeColumns), dtype="uint32")
self.tm.compute(activeColumns,
basalInput=self.constantBasalInput,
basalGrowthCandidates=self.constantBasalInput,
apicalInput=self.tm.getActiveCells(),
apicalGrowthCandidates=self.tm.getWinnerCells(),
learn=learn)
def reset(self):
self.tm.reset()
def getActiveCells(self):
return self.tm.getActiveCells()
def getPredictedCells(self):
return self.tm.getPredictedCells()
|
mrcslws/nupic.research
|
packages/columns/tests/apical_dependent_temporal_memory/adtm_sequence_memory_test.py
|
Python
|
agpl-3.0
| 6,293
|
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ctypes.util
import errno
import fcntl
import getpass
import locale
import logging
import os
import random
import subprocess
import sys
import textwrap
import time
from struct import unpack, pack
from termios import TIOCGWINSZ
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.six import with_metaclass, text_type
from ansible.utils.color import stringc
from ansible.utils.singleton import Singleton
from ansible.utils.unsafe_proxy import wrap_var
try:
# Python 2
input = raw_input
except NameError:
    # Python 3: input() already behaves like raw_input, nothing to rebind
pass
_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
# Set argtypes, to avoid segfault if the wrong type is provided,
# restype is assumed to be c_int
_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
# Max for c_int
_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
_LOCALE_INITIALIZED = False
_LOCALE_INITIALIZATION_ERR = None
def initialize_locale():
"""Set the locale to the users default setting
and set ``_LOCALE_INITIALIZED`` to indicate whether
``get_text_width`` may run into trouble
"""
global _LOCALE_INITIALIZED, _LOCALE_INITIALIZATION_ERR
if _LOCALE_INITIALIZED is False:
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
_LOCALE_INITIALIZATION_ERR = e
else:
_LOCALE_INITIALIZED = True
def get_text_width(text):
"""Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
number of columns used to display a text string.
We try first with ``wcswidth``, and fallback to iterating each
character and using wcwidth individually, falling back to a value of 0
for non-printable wide characters
On Py2, this depends on ``locale.setlocale(locale.LC_ALL, '')``,
that in the case of Ansible is done in ``bin/ansible``
"""
if not isinstance(text, text_type):
raise TypeError('get_text_width requires text, not %s' % type(text))
if _LOCALE_INITIALIZATION_ERR:
Display().warning(
'An error occurred while calling ansible.utils.display.initialize_locale '
'(%s). This may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths' % _LOCALE_INITIALIZATION_ERR
)
elif not _LOCALE_INITIALIZED:
Display().warning(
'ansible.utils.display.initialize_locale has not been called, '
'this may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths'
)
try:
width = _LIBC.wcswidth(text, _MAX_INT)
except ctypes.ArgumentError:
width = -1
if width != -1:
return width
width = 0
counter = 0
for c in text:
counter += 1
if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
# A few characters result in a subtraction of length:
# BS, DEL, CCH, ESC
# ESC is slightly different in that it's part of an escape sequence, and
# while ESC is non printable, it's part of an escape sequence, which results
# in a single non printable length
width -= 1
counter -= 1
continue
try:
w = _LIBC.wcwidth(c)
except ctypes.ArgumentError:
w = -1
if w == -1:
# -1 signifies a non-printable character
# use 0 here as a best effort
w = 0
width += w
if width == 0 and counter and not _LOCALE_INITIALIZED:
raise EnvironmentError(
'ansible.utils.display.initialize_locale has not been called, '
'and get_text_width could not calculate text width of %r' % text
)
# It doesn't make sense to have a negative printable width
return width if width >= 0 else 0
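# Illustrative expectations for get_text_width() (assuming a UTF-8 locale and a libc
# whose wcwidth/wcswidth follow the usual East Asian Width rules):
#
#   get_text_width(u'ansible')    # -> 7, one column per ASCII character
#   get_text_width(u'コンニチハ')   # -> 10, wide characters occupy two columns each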
class FilterBlackList(logging.Filter):
def __init__(self, blacklist):
self.blacklist = [logging.Filter(name) for name in blacklist]
def filter(self, record):
return not any(f.filter(record) for f in self.blacklist)
class FilterUserInjector(logging.Filter):
"""
This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
"""
try:
username = getpass.getuser()
except KeyError:
# people like to make containers w/o actual valid passwd/shadow and use host uids
username = 'uid=%s' % os.getuid()
def filter(self, record):
record.user = FilterUserInjector.username
return True
logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
path = C.DEFAULT_LOG_PATH
if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
# NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s | %(message)s')
logger = logging.getLogger('ansible')
for handler in logging.root.handlers:
handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
handler.addFilter(FilterUserInjector())
else:
print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
# map color to log levels
color_to_log_level = {C.COLOR_ERROR: logging.ERROR,
C.COLOR_WARN: logging.WARNING,
C.COLOR_OK: logging.INFO,
C.COLOR_SKIP: logging.WARNING,
C.COLOR_UNREACHABLE: logging.ERROR,
C.COLOR_DEBUG: logging.DEBUG,
C.COLOR_CHANGED: logging.INFO,
C.COLOR_DEPRECATE: logging.WARNING,
C.COLOR_VERBOSE: logging.INFO}
b_COW_PATHS = (
b"/usr/bin/cowsay",
b"/usr/games/cowsay",
b"/usr/local/bin/cowsay", # BSD path for cowsay
b"/opt/local/bin/cowsay", # MacPorts path for cowsay
)
class Display(with_metaclass(Singleton, object)):
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.b_cowsay = None
self.noncow = C.ANSIBLE_COW_SELECTION
self.set_cowsay_info()
if self.b_cowsay:
try:
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.cows_available = set([to_text(c) for c in out.split()])
if C.ANSIBLE_COW_ACCEPTLIST and any(C.ANSIBLE_COW_ACCEPTLIST):
self.cows_available = set(C.ANSIBLE_COW_ACCEPTLIST).intersection(self.cows_available)
except Exception:
# could not execute cowsay for some reason
self.b_cowsay = False
self._set_column_width()
def set_cowsay_info(self):
if C.ANSIBLE_NOCOWS:
return
if C.ANSIBLE_COW_PATH:
self.b_cowsay = C.ANSIBLE_COW_PATH
else:
for b_cow_path in b_COW_PATHS:
if os.path.exists(b_cow_path):
self.b_cowsay = b_cow_path
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
nocolor = msg
if not log_only:
has_newline = msg.endswith(u'\n')
if has_newline:
msg2 = msg[:-1]
else:
msg2 = msg
if color:
msg2 = stringc(msg2, color)
if has_newline or newline:
msg2 = msg2 + u'\n'
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')
# Note: After Display() class is refactored need to update the log capture
# code in 'bin/ansible-connection' (and other relevant places).
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
fileobj.write(msg2)
try:
fileobj.flush()
except IOError as e:
# Ignore EPIPE in case fileobj has been prematurely closed, eg.
# when piping to "head -n1"
if e.errno != errno.EPIPE:
raise
if logger and not screen_only:
# We first convert to a byte string so that we get rid of
# color and characters that are invalid in the user's locale
msg2 = to_bytes(nocolor.lstrip(u'\n'))
if sys.version_info >= (3,):
# Convert back to text string on python3
msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
lvl = logging.INFO
if color:
# set logger level based on color (not great)
try:
lvl = color_to_log_level[color]
except KeyError:
# this should not happen, but JIC
raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)
# actually log
logger.log(lvl, msg2)
def v(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=0)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg, host=None):
if C.DEFAULT_DEBUG:
if host is None:
self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
else:
self.display("%6d %0.5f [%s]: %s" % (os.getpid(), time.time(), host, msg), color=C.COLOR_DEBUG)
def verbose(self, msg, host=None, caplevel=2):
to_stderr = C.VERBOSE_TO_STDERR
if self.verbosity > caplevel:
if host is None:
self.display(msg, color=C.COLOR_VERBOSE, stderr=to_stderr)
else:
self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, stderr=to_stderr)
def get_deprecation_message(self, msg, version=None, removed=False, date=None, collection_name=None):
''' used to print out a deprecation message.'''
msg = msg.strip()
if msg and msg[-1] not in ['!', '?', '.']:
msg += '.'
if collection_name == 'ansible.builtin':
collection_name = 'ansible-core'
if removed:
header = '[DEPRECATED]: {0}'.format(msg)
removal_fragment = 'This feature was removed'
help_text = 'Please update your playbooks.'
else:
header = '[DEPRECATION WARNING]: {0}'.format(msg)
removal_fragment = 'This feature will be removed'
# FUTURE: make this a standalone warning so it only shows up once?
help_text = 'Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.'
if collection_name:
from_fragment = 'from {0}'.format(collection_name)
else:
from_fragment = ''
if date:
when = 'in a release after {0}.'.format(date)
elif version:
when = 'in version {0}.'.format(version)
else:
when = 'in a future release.'
message_text = ' '.join(f for f in [header, removal_fragment, from_fragment, when, help_text] if f)
return message_text
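    # Illustrative only -- an example of the message composed above (the arguments
    # below are made up for demonstration):
    #
    #   get_deprecation_message("Use of 'foo'", version='2.14',
    #                           collection_name='community.general')
    #   -> "[DEPRECATION WARNING]: Use of 'foo'. This feature will be removed from
    #       community.general in version 2.14. Deprecation warnings can be disabled ..."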
def deprecated(self, msg, version=None, removed=False, date=None, collection_name=None):
if not removed and not C.DEPRECATION_WARNINGS:
return
message_text = self.get_deprecation_message(msg, version=version, removed=removed, date=date, collection_name=collection_name)
if removed:
raise AnsibleError(message_text)
wrapped = textwrap.wrap(message_text, self.columns, drop_whitespace=False)
message_text = "\n".join(wrapped) + "\n"
if message_text not in self._deprecations:
self.display(message_text.strip(), color=C.COLOR_DEPRECATE, stderr=True)
self._deprecations[message_text] = 1
def warning(self, msg, formatted=False):
if not formatted:
new_msg = "[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
else:
new_msg = "\n[WARNING]: \n%s" % msg
if new_msg not in self._warns:
self.display(new_msg, color=C.COLOR_WARN, stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None, cows=True):
'''
Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum)
'''
msg = to_text(msg)
if self.b_cowsay and cows:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
msg = msg.strip()
try:
star_len = self.columns - get_text_width(msg)
except EnvironmentError:
star_len = self.columns - len(msg)
if star_len <= 3:
star_len = 3
stars = u"*" * star_len
self.display(u"\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if u": [" in msg:
msg = msg.replace(u"[", u"")
if msg.endswith(u"]"):
msg = msg[:-1]
runcmd = [self.b_cowsay, b"-W", b"60"]
if self.noncow:
thecow = self.noncow
if thecow == 'random':
thecow = random.choice(list(self.cows_available))
runcmd.append(b'-f')
runcmd.append(to_bytes(thecow))
runcmd.append(to_bytes(msg))
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display(u"%s\n" % to_text(out), color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = u"ERROR! %s" % msg
if new_msg not in self._errors:
self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
self._errors[new_msg] = 1
@staticmethod
def prompt(msg, private=False):
prompt_string = to_bytes(msg, encoding=Display._output_encoding())
if sys.version_info >= (3,):
# Convert back into text on python3. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_text(prompt_string)
if private:
return getpass.getpass(prompt_string)
else:
return input(prompt_string)
def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
result = None
if sys.__stdin__.isatty():
do_prompt = self.prompt
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
self.display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
else:
result = None
self.warning("Not prompting as we are not in interactive mode")
# if result is false and default is not None
if not result and default is not None:
result = default
if encrypt:
# Circular import because encrypt needs a display class
from ansible.utils.encrypt import do_encrypt
result = do_encrypt(result, encrypt, salt_size, salt)
# handle utf-8 chars
result = to_text(result, errors='surrogate_or_strict')
if unsafe:
result = wrap_var(result)
return result
@staticmethod
def _output_encoding(stderr=False):
encoding = locale.getpreferredencoding()
# https://bugs.python.org/issue6202
# Python2 hardcodes an obsolete value on Mac. Use MacOSX defaults
# instead.
if encoding in ('mac-roman',):
encoding = 'utf-8'
return encoding
def _set_column_width(self):
if os.isatty(1):
tty_size = unpack('HHHH', fcntl.ioctl(1, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size - 1)
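# Illustrative usage sketch of the Display singleton defined above (the calls and
# colors shown are examples, not an exhaustive API):
#
#   display = Display(verbosity=2)
#   display.display(u"processed 3 hosts", color=C.COLOR_OK)
#   display.warning(u"disk space is low")
#   display.vv(u"only shown at -vv and above")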
|
s-hertel/ansible
|
lib/ansible/utils/display.py
|
Python
|
gpl-3.0
| 19,334
|
from os import environ
from uwsgiconf.presets.nice import Section, PythonSection
def test_nice_section(assert_lines):
assert_lines([
'env = LANG=en_US.UTF-8',
'workers = %k',
'die-on-term = true',
'vacuum = true',
'threads = 4',
], Section(threads=4))
assert_lines([
'logto',
], Section(), assert_in=False)
assert_lines([
'enable-threads = true',
'uid = www-data',
'gid = www-data',
'logto = /a/b.log',
], Section(threads=True, log_into='/a/b.log').configure_owner())
assert_lines([
'workers = 13',
'touch-reload', 'test_nice.py',
], Section(workers=13, touch_reload=__file__))
assert_lines([
'disable-write-exception = true',
'ignore-write-errors = true',
'ignore-sigpipe = true',
'log-master = true',
'threaded-logger = true',
], Section(log_dedicated=True, ignore_write_errors=True))
assert '%(headers) headers in %(hsize) bytes' in Section().get_log_format_default()
def test_get_bundled_static_path(assert_lines):
path = Section.get_bundled_static_path('503.html')
assert path.endswith('uwsgiconf/contrib/django/uwsgify/static/uwsgify/503.html')
def test_configure_https_redirect(assert_lines):
section = Section()
section.configure_https_redirect()
assert_lines(
'route-if-not = eq:${HTTPS};on redirect-301:https://${HTTP_HOST}${REQUEST_URI}',
section
)
def test_configure_maintenance_mode(assert_lines, tmpdir):
section = Section()
section.configure_maintenance_mode('/watch/that/file', '/serve/this/file')
section.configure_maintenance_mode('/watch/that/file/also', 'http://pythonz.net')
assert_lines([
'route-if = exists:/watch/that/file static:/serve/this/file',
'route-if = exists:/watch/that/file/also redirect-302:http://pythonz.net',
], section)
afile = tmpdir.join('maintenance_file')
section = Section()
section.configure_maintenance_mode(f'{afile}', 'app')
assert_lines([
f'env = UWSGICONF_MAINTENANCE={afile}',
f'touch-reload = {afile}',
], section)
assert_lines([
'wsgi = uwsgiconf.maintenance:app_maintenance',
], section, assert_in=False)
# Create file
afile.write('')
section = Section()
section.configure_maintenance_mode(f'{afile}', 'app')
assert_lines([
f'env = UWSGICONF_MAINTENANCE={afile}',
f'touch-reload = {afile}',
'env = UWSGICONF_MAINTENANCE_INPLACE=1',
'wsgi = uwsgiconf.maintenance:app_maintenance',
], section)
assert environ['UWSGICONF_MAINTENANCE'] == f'{afile}'
assert environ['UWSGICONF_MAINTENANCE_INPLACE'] == '1'
section.configure_maintenance_mode(f'{afile}', 'app::mypack.here.there:myfunc')
assert_lines([
'wsgi = mypack.here.there:myfunc',
], section)
def test_configure_logging_json(assert_lines):
section = Section()
section.configure_logging_json()
assert_lines([
'logger-req = stdio:',
'log-format = %(method) %(uri) -> %(status)',
'log-req-encoder = json {"dt": "${strftime:%%Y-%%m-%%dT%%H:%%M:%%S%%z}", "src": "uwsgi.req"',
'log-req-encoder = nl',
'"src": "uwsgi.out"',
], section)
def test_configure_certbot_https(assert_lines, monkeypatch):
monkeypatch.setattr('pathlib.Path.exists', lambda self: True)
section = Section()
section.configure_certbot_https('mydomain.org', '/var/www/', address=':4443')
assert_lines([
'static-map2 = /.well-known/=/var/www/',
'https-socket = :4443,/etc/letsencrypt/live/mydomain.org/fullchain.pem,'
'/etc/letsencrypt/live/mydomain.org/privkey.pem',
], section)
section = Section.bootstrap(['http://:80'])
section.configure_certbot_https('mydomain.org', '/var/www/', http_redirect=True)
assert_lines([
'shared-socket = :80',
'shared-socket = :443',
'http-socket = =0',
'https-socket = =1,/etc/letsencrypt/live/mydomain.org/fullchain.pem,'
'/etc/letsencrypt/live/mydomain.org/privkey.pem',
'route-if-not = eq:${HTTPS};on redirect-301:https://${HTTP_HOST}${REQUEST_URI}',
], section)
def test_nice_python(assert_lines):
assert_lines([
'plugin = python',
'pyhome = /home/idle/venv/\npythonpath = /home/idle/apps/',
'wsgi = somepackage.module',
'need-app = true',
], PythonSection(
params_python=dict(
# We'll run our app using virtualenv.
python_home='/home/idle/venv/',
search_path='/home/idle/apps/',
),
wsgi_module='somepackage.module',
embedded_plugins=None
))
# Embedded plugins = True
assert_lines('plugin = python', PythonSection(wsgi_module='somepackage.module'), assert_in=False)
|
idlesign/uwsgiconf
|
tests/presets/test_nice.py
|
Python
|
bsd-3-clause
| 4,893
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Notification.silent'
db.add_column(u'ios_notifications_notification', 'silent',
self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Notification.silent'
db.delete_column(u'ios_notifications_notification', 'silent')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ios_notifications.apnservice': {
'Meta': {'unique_together': "(('name', 'hostname'),)", 'object_name': 'APNService'},
'certificate': ('django.db.models.fields.TextField', [], {}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'passphrase': ('django_fields.fields.EncryptedCharField', [], {'max_length': '110', 'null': 'True', 'block_type': "'MODE_CBC'", 'cipher': "'AES'", 'blank': 'True'}),
'private_key': ('django.db.models.fields.TextField', [], {})
},
u'ios_notifications.device': {
'Meta': {'unique_together': "(('token', 'service'),)", 'object_name': 'Device'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deactivated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'display': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_notified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'os_version': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ios_notifications.APNService']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'ios_devices'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"})
},
u'ios_notifications.feedbackservice': {
'Meta': {'unique_together': "(('name', 'hostname'),)", 'object_name': 'FeedbackService'},
'apn_service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ios_notifications.APNService']"}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'ios_notifications.notification': {
'Meta': {'object_name': 'Notification'},
'badge': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_payload': ('django.db.models.fields.CharField', [], {'max_length': '240', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_sent_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'loc_payload': ('django.db.models.fields.CharField', [], {'max_length': '240', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ios_notifications.APNService']"}),
'silent': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sound': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
}
}
complete_apps = ['ios_notifications']
|
stephenmuss/django-ios-notifications
|
ios_notifications/south_migrations/0004_auto__add_field_notification_silent.py
|
Python
|
bsd-3-clause
| 7,719
|
# -*- coding: utf-8 -*-
"""Count number of features in all building dataset"""
import codecs
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
import catatom2osm
import download
import layer
import setup
from osmxml import etree
baseurl = "http://www.catastro.minhap.es/INSPIRE/Buildings/"
fh = codecs.open('count_buildings.csv', 'w', 'utf-8')
ns = {
'atom': 'http://www.w3.org/2005/Atom',
'georss': 'http://www.georss.org/georss',
'gco': 'http://www.isotc211.org/2005/gco',
'gmd': 'http://www.isotc211.org/2005/gmd'
}
def run():
qgs = catatom2osm.QgsSingleton()
for prov_code in setup.valid_provinces:
url = setup.prov_url['BU'].format(code=prov_code)
response = download.get_response(url)
root = etree.fromstring(response.content)
for entry in root.findall("atom:entry[atom:title]", namespaces=ns):
title = entry.find('atom:title', ns).text
zip_code = title[1:6]
mun = title.replace('buildings', '').strip()[6:]
url = u"{0}{1}/{2}-{3}/A.ES.SDGC.BU.{2}.zip".format(baseurl, prov_code, zip_code, mun)
gml_fn = ".".join((setup.fn_prefix, 'BU', zip_code, 'building.gml'))
download.wget(url, 'temp.zip')
gml = layer.BaseLayer('/vsizip/temp.zip/'+gml_fn, 'temp', 'ogr')
sys.stdout.write(' '*70+'\r')
c = gml.featureCount()
print zip_code, mun, c
fh.write(u'{}\t{}\t{}\n'.format(zip_code, mun, c))
if os.path.exists('temp'):
os.remove('temp')
if __name__ == "__main__":
run()
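# Illustrative only: each row written to count_buildings.csv is tab separated as
# <zip_code>\t<municipality>\t<feature count>; the values below are hypothetical:
#
#   28900    Getafe    12345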
|
javiersanp/CatAtom2Osm
|
test/count_buildings.py
|
Python
|
bsd-2-clause
| 1,594
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
# pylint: disable=E1102
"""WebSearch services regression tests."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
import time
import os
import traceback
from invenio.testutils import make_test_suite, \
run_test_suite, \
test_web_page_content, \
merge_error_messages
from invenio.webuser import collect_user_info, get_uid_from_email
from invenio.pluginutils import PluginContainer
from invenio.search_engine import create_basic_search_units
from invenio.config import CFG_SITE_NAME, \
CFG_SITE_SECURE_URL
from invenio.websearch_services import \
get_search_services, \
CFG_SEARCH_SERVICES_PATH, \
__required_plugin_API_version__, \
SearchService
class WebSearchServicesLoading(InvenioTestCase):
"""Check loading of WebSearch services"""
def test_search_services_loading_time(self):
"""websearch - speed of loading all search services"""
# Load search services, maybe first time
t1 = time.time()
services = get_search_services()
t2 = time.time()
search_services_loading_time = t2 - t1
# We expect search services on the demo site to be loaded
# in any case under 10 seconds
max_seconds_services_loading_time_first_time = 10
if search_services_loading_time > max_seconds_services_loading_time_first_time:
self.fail("""Loading Search services (from scratch) took too much time:
%s seconds.
Limit: %s seconds""" % (search_services_loading_time,
max_seconds_services_loading_time_first_time))
# Load search services, hopefully from cache
t1 = time.time()
services = get_search_services()
t2 = time.time()
search_services_loading_time = t2 - t1
# We expect search services on the demo site to be loaded
# under 1 second, i.e. retrieved from cache
max_seconds_services_loading_time_from_cache = 1
if search_services_loading_time > max_seconds_services_loading_time_from_cache:
self.fail("""Loading Search services from cache took too much time:
%s seconds.
Limit: %s second""" % (search_services_loading_time,
max_seconds_services_loading_time_from_cache))
def test_search_services_loading(self):
"""websearch - loading default search services"""
error_messages = []
services_classes = [str(service.__class__) for service in get_search_services()]
for expected_service_class in ('CollectionNameSearchService.CollectionNameSearchService',
'FAQKBService.FAQKBService',
'SubmissionNameSearchService.SubmissionNameSearchService'):
if not expected_service_class in services_classes:
error_messages.append('%s not found in list of loaded services.' % expected_service_class)
if error_messages:
self.fail(merge_error_messages(error_messages) + \
'\nList of loaded services: \n%s' % repr(services_classes))
def test_no_broken_search_services_(self):
"""websearch - no broken search services"""
error_messages = []
search_service_plugins = PluginContainer(os.path.join(CFG_SEARCH_SERVICES_PATH, '*Service.py'),
api_version=__required_plugin_API_version__,
plugin_signature=SearchService)
for name, error in search_service_plugins.get_broken_plugins().iteritems():
error_messages.append("Service '%s' could not be loaded:\n%s" % \
(name, repr(error[0]) + " " + repr(error[1]) + "\n" + "\n".join(traceback.format_tb(error[2]))))
if error_messages:
self.fail(merge_error_messages(error_messages))
class WebSearchServicesCollectionNameSearch(InvenioTestCase):
"""Check CollectionNameSearchService plugin"""
def setUp(self):
"""Load plugin"""
search_service_plugins = PluginContainer(os.path.join(CFG_SEARCH_SERVICES_PATH, '*Service.py'),
api_version=__required_plugin_API_version__,
plugin_signature=SearchService)
self.plugin = search_service_plugins.get('CollectionNameSearchService')()
def test_search_restricted_collection_as_guest(self):
"""websearch - search for restricted collection as guest, with CollectionNameSearchService"""
user_info = collect_user_info(0)
pattern = 'Theses'
search_units = create_basic_search_units(None, pattern, '')
response = self.plugin.answer(req=user_info, user_info=user_info, of='hb',
cc=CFG_SITE_NAME, colls_to_search='', p=pattern,
f='', search_units=search_units, ln='en')
self.assertEqual(response,
(0, ''))
def test_search_restricted_collection(self):
"""websearch - search for restricted collection, with CollectionNameSearchService"""
user_info = collect_user_info(get_uid_from_email('jekyll@cds.cern.ch'))
pattern = 'Theses'
search_units = create_basic_search_units(None, pattern, '')
response = self.plugin.answer(req=user_info, user_info=user_info, of='hb',
cc=CFG_SITE_NAME, colls_to_search='', p=pattern,
f='', search_units=search_units, ln='en')
self.assert_(response[0] > 75)
self.assert_('collection/Theses' in response[1])
def test_search_public_collection_as_guest(self):
"""websearch - search for public collection as guest, from search page"""
user_info = collect_user_info(0)
pattern = 'Atlantis Times Arts'
search_units = create_basic_search_units(None, pattern, '')
response = self.plugin.answer(req=user_info, user_info=user_info, of='hb',
cc=CFG_SITE_NAME, colls_to_search='', p=pattern,
f='', search_units=search_units, ln='en')
self.assert_(response[0] > 50)
self.assert_('collection/Atlantis%20Times%20Arts' in response[1])
def test_web_search_restricted_collection_as_guest(self):
"""websearch - search for restricted collection as guest, from search page"""
url = CFG_SITE_SECURE_URL + '/search?p=Theses&ln=en'
error_messages = test_web_page_content(url,
unexpected_text=['Looking for a particular collection?'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_web_search_restricted_collection(self):
"""websearch - search for restricted collection, from search page"""
url = CFG_SITE_SECURE_URL + '/search?p=Theses&ln=en'
error_messages = test_web_page_content(url,
username='jekyll',
password='j123ekyll',
expected_text='Looking for a particular collection?',
expected_link_label='Theses',
expected_link_target=CFG_SITE_SECURE_URL + '/collection/Theses?ln=en')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_web_search_public_collection_as_guest(self):
"""websearch - search for public collection as guest, with CollectionNameSearchService"""
url = CFG_SITE_SECURE_URL + '/search?p=Atlantis%20Times%20Arts&ln=en'
error_messages = test_web_page_content(url,
expected_text='Looking for a particular collection?',
expected_link_label='Atlantis Times Arts',
expected_link_target=CFG_SITE_SECURE_URL + '/collection/Atlantis%20Times%20Arts?ln=en')
if error_messages:
self.fail(merge_error_messages(error_messages))
class WebSearchServicesSubmissionNameSearch(InvenioTestCase):
"""Check SubmissionNameSearchService plugin"""
def setUp(self):
"""Load plugin"""
search_service_plugins = PluginContainer(os.path.join(CFG_SEARCH_SERVICES_PATH, '*Service.py'),
api_version=__required_plugin_API_version__,
plugin_signature=SearchService)
self.plugin = search_service_plugins.get('SubmissionNameSearchService')()
def test_search_restricted_submission_as_guest(self):
"""websearch - search for restricted submission as guest, with SubmissionNameSearchService"""
user_info = collect_user_info(0)
pattern = 'submit thesis'
search_units = create_basic_search_units(None, pattern, '')
response = self.plugin.answer(req=user_info, user_info=user_info, of='hb',
cc=CFG_SITE_NAME, colls_to_search='', p=pattern,
f='', search_units=search_units, ln='en')
self.assertEqual(response,
(0, ''))
def test_search_restricted_submission(self):
"""websearch - search for restricted submission, with SubmissionNameSearchService"""
user_info = collect_user_info(get_uid_from_email('jekyll@cds.cern.ch'))
pattern = 'submit thesis'
search_units = create_basic_search_units(None, pattern, '')
response = self.plugin.answer(req=user_info, user_info=user_info, of='hb',
cc=CFG_SITE_NAME, colls_to_search='', p=pattern,
f='', search_units=search_units, ln='en')
        self.assert_(response[0] >= 50)
self.assert_('doctype=DEMOTHE' in response[1])
    def test_search_restricted_submission_category(self):
        """websearch - search for restricted submission category, with SubmissionNameSearchService"""
user_info = collect_user_info(1)
pattern = 'submit news'
search_units = create_basic_search_units(None, pattern, '')
response = self.plugin.answer(req=user_info, user_info=user_info, of='hb',
cc=CFG_SITE_NAME, colls_to_search='', p=pattern,
f='', search_units=search_units, ln='en')
        self.assert_(response[0] >= 50)
self.assert_('doctype=DEMOJRN' in response[1])
def test_search_public_submission_as_guest(self):
"""websearch - search for public submission as guest, with SubmissionNameSearchService"""
user_info = collect_user_info(0)
pattern = 'submit article'
search_units = create_basic_search_units(None, pattern, '')
response = self.plugin.answer(req=user_info, user_info=user_info, of='hb',
cc=CFG_SITE_NAME, colls_to_search='', p=pattern,
f='', search_units=search_units, ln='en')
self.assert_(response[0] >= 50)
self.assert_('doctype=DEMOART' in response[1])
def test_web_search_restricted_submission_as_guest(self):
"""websearch - search for restricted submission as guest, from search page"""
url = CFG_SITE_SECURE_URL + '/search?p=submit%20thesis&ln=en'
error_messages = test_web_page_content(url,
unexpected_text=['Demo Thesis Submission',
'Looking for a particular submission?'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_web_search_restricted_submission(self):
"""websearch - search for restricted submission, from search page"""
url = CFG_SITE_SECURE_URL + '/search?p=submit%20thesis&ln=en'
error_messages = test_web_page_content(url,
username='jekyll',
password='j123ekyll',
expected_text='Looking for a particular submission?',
expected_link_label='Demo Thesis Submission',
expected_link_target=CFG_SITE_SECURE_URL + '/submit?doctype=DEMOTHE&ln=en')
if error_messages:
self.fail(merge_error_messages(error_messages))
    def test_web_search_restricted_submission_category(self):
        """websearch - search for restricted submission category, from search page"""
url = CFG_SITE_SECURE_URL + '/search?p=submit%20news&ln=en'
error_messages = test_web_page_content(url,
username='juliet',
password='j123uliet',
expected_text='Looking for a particular submission?',
expected_link_label='News (Demo Journal Submission)',
expected_link_target=CFG_SITE_SECURE_URL + '/submit?doctype=DEMOJRN&ln=en')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_web_search_public_submission_as_guest(self):
"""websearch - search for public submission as guest, from search page"""
url = CFG_SITE_SECURE_URL + '/search?p=submit%20article&ln=en'
error_messages = test_web_page_content(url,
expected_text='Looking for a particular submission?',
expected_link_label='Demo Article Submission',
expected_link_target=CFG_SITE_SECURE_URL + '/submit?doctype=DEMOART&ln=en')
if error_messages:
self.fail(merge_error_messages(error_messages))
TEST_SUITE = make_test_suite(WebSearchServicesLoading,
WebSearchServicesCollectionNameSearch,
WebSearchServicesSubmissionNameSearch)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
|
jmartinm/invenio
|
modules/websearch/lib/websearch_services_regression_tests.py
|
Python
|
gpl-2.0
| 15,343
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Fixtures for users, roles and actions."""
from __future__ import absolute_import, division, print_function
from flask import current_app
from flask_security.utils import hash_password
from invenio_access.models import ActionRoles
from invenio_accounts.models import Role
from invenio_db import db
def init_roles():
ds = current_app.extensions['invenio-accounts'].datastore
with db.session.begin_nested():
ds.create_role(
name='superuser',
description='admin with no restrictions'
)
ds.create_role(
name='cataloger',
description='users with editing capabilities'
)
ds.create_role(
name='hermescurator',
description='curator for HERMES Internal Notes'
)
ds.create_role(
name='hermescoll',
description='HERMES Collaboration access to Internal Notes'
)
db.session.commit()
def init_users():
"""Sample users, not to be used in production."""
ds = current_app.extensions['invenio-accounts'].datastore
superuser = Role.query.filter_by(name='superuser').one()
cataloger = Role.query.filter_by(name='cataloger').one()
hermes_curator = Role.query.filter_by(name='hermescurator').one()
hermes_collections = Role.query.filter_by(name='hermescoll').one()
with db.session.begin_nested():
ds.create_user(
email='admin@inspirehep.net',
password=hash_password("123456"),
active=True,
roles=[superuser]
)
ds.create_user(
email='cataloger@inspirehep.net',
password=hash_password("123456"),
active=True,
roles=[cataloger]
)
ds.create_user(
email='hermescataloger@inspirehep.net',
password=hash_password("123456"),
active=True,
roles=[hermes_curator, hermes_collections]
)
ds.create_user(
email='johndoe@inspirehep.net',
password=hash_password("123456"),
active=True
)
db.session.commit()
def init_permissions():
superuser = Role.query.filter_by(name='superuser').one()
cataloger = Role.query.filter_by(name='cataloger').one()
hermes_collections = Role.query.filter_by(name='hermescoll').one()
hermes_curator = Role.query.filter_by(name='hermescurator').one()
db.session.add(ActionRoles(
action='superuser-access',
role=superuser)
)
db.session.add(ActionRoles(
action='admin-access',
role=superuser)
)
db.session.add(ActionRoles(
action='workflows-ui-admin-access',
role=cataloger)
)
db.session.add(ActionRoles(
action='admin-holdingpen-authors',
role=cataloger)
)
db.session.add(ActionRoles(
action='view-restricted-collection',
argument='HERMES Internal Notes',
role=hermes_collections)
)
db.session.add(ActionRoles(
action='update-collection',
role=cataloger)
)
db.session.add(ActionRoles(
action='editor-use-api',
role=cataloger)
)
db.session.add(ActionRoles(
action='update-collection',
argument='HERMES Internal Notes',
role=hermes_curator)
)
db.session.commit()
def init_users_and_permissions():
init_roles()
init_users()
init_permissions()
|
zzacharo/inspire-next
|
inspirehep/modules/fixtures/users.py
|
Python
|
gpl-3.0
| 4,337
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debug the tf-learn iris example, based on the tf-learn tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
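# Illustrative invocations (assumed usage; both flags are defined by the
# argparse setup at the bottom of this file):
#   python debug_tflearn_iris.py --debug
#   python debug_tflearn_iris.py --tensorboard_debug_address=localhost:6064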
import argparse
import os
import sys
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python import debug as tf_debug
# URLs to download data sets from, if necessary.
IRIS_TRAINING_DATA_URL = "https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/monitors/iris_training.csv"
IRIS_TEST_DATA_URL = "https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/monitors/iris_test.csv"
def maybe_download_data(data_dir):
"""Download data sets if necessary.
Args:
data_dir: Path to where data should be downloaded.
Returns:
Paths to the training and test data files.
"""
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
training_data_path = os.path.join(data_dir,
os.path.basename(IRIS_TRAINING_DATA_URL))
if not os.path.isfile(training_data_path):
train_file = open(training_data_path, "wt")
urllib.request.urlretrieve(IRIS_TRAINING_DATA_URL, train_file.name)
train_file.close()
print("Training data are downloaded to %s" % train_file.name)
test_data_path = os.path.join(data_dir, os.path.basename(IRIS_TEST_DATA_URL))
if not os.path.isfile(test_data_path):
test_file = open(test_data_path, "wt")
urllib.request.urlretrieve(IRIS_TEST_DATA_URL, test_file.name)
test_file.close()
print("Test data are downloaded to %s" % test_file.name)
return training_data_path, test_data_path
_IRIS_INPUT_DIM = 4
def iris_input_fn():
iris = base.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
def main(_):
# Load datasets.
if FLAGS.fake_data:
training_set = tf.contrib.learn.datasets.base.Dataset(
np.random.random([120, 4]),
np.random.random_integers(3, size=[120]) - 1)
test_set = tf.contrib.learn.datasets.base.Dataset(
np.random.random([30, 4]),
np.random.random_integers(3, size=[30]) - 1)
else:
training_data_path, test_data_path = maybe_download_data(FLAGS.data_dir)
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=training_data_path,
target_dtype=np.int,
features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=test_data_path, target_dtype=np.int, features_dtype=np.float32)
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
# Build 3 layer DNN with 10, 20, 10 units respectively.
model_dir = FLAGS.model_dir or tempfile.mkdtemp(prefix="debug_tflearn_iris_")
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
hooks = None
if FLAGS.debug and FLAGS.tensorboard_debug_address:
raise ValueError(
"The --debug and --tensorboard_debug_address flags are mutually "
"exclusive.")
if FLAGS.debug:
debug_hook = tf_debug.LocalCLIDebugHook(ui_type=FLAGS.ui_type,
dump_root=FLAGS.dump_root)
elif FLAGS.tensorboard_debug_address:
debug_hook = tf_debug.TensorBoardDebugHook(FLAGS.tensorboard_debug_address)
hooks = [debug_hook]
if not FLAGS.use_experiment:
# Fit model.
classifier.fit(x=training_set.data,
y=training_set.target,
steps=FLAGS.train_steps,
monitors=hooks)
# Evaluate accuracy.
accuracy_score = classifier.evaluate(x=test_set.data,
y=test_set.target,
hooks=hooks)["accuracy"]
else:
ex = experiment.Experiment(classifier,
train_input_fn=iris_input_fn,
eval_input_fn=iris_input_fn,
train_steps=FLAGS.train_steps,
eval_delay_secs=0,
eval_steps=1,
train_monitors=hooks,
eval_hooks=hooks)
ex.train()
accuracy_score = ex.evaluate()["accuracy"]
print("After training %d steps, Accuracy = %f" %
(FLAGS.train_steps, accuracy_score))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/iris_data",
help="Directory to save the training and test data in.")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Directory to save the trained model in.")
parser.add_argument(
"--train_steps",
type=int,
default=10,
help="Number of steps to run trainer.")
parser.add_argument(
"--use_experiment",
type="bool",
nargs="?",
const=True,
default=False,
help="Use tf.contrib.learn Experiment to run training and evaluation")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--fake_data",
type="bool",
nargs="?",
const=True,
default=False,
help="Use fake MNIST data for unit testing")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training. "
"Mutually exclusive with the --tensorboard_debug_address flag.")
parser.add_argument(
"--dump_root",
type=str,
default="",
help="Optional custom root directory for temporary debug dump data")
parser.add_argument(
"--tensorboard_debug_address",
type=str,
default=None,
help="Connect to the TensorBoard Debugger Plugin backend specified by "
"the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
"--debug flag.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
rabipanda/tensorflow
|
tensorflow/python/debug/examples/debug_tflearn_iris.py
|
Python
|
apache-2.0
| 7,267
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from otdistfunc import wrapper_data
import os
print('== test WrapperDataCoreIn')
# write
wd = wrapper_data.WrapperDataCoreIn()
wd.point = [1, 2]
wd.write()
# read
wd_r = wrapper_data.WrapperDataCoreIn()
wd_r.read()
print('read ' + str(wd_r.point))
# get data
print(wrapper_data.get_data(wd.get_fullname()))
# clean
os.remove(wd.get_fullname())
print('== test WrapperDataCoreOut')
# write
wd = wrapper_data.WrapperDataCoreOut()
wd.point = [2, 3]
wd.err_msg = 'an error'
wd.write()
# read
wd_r = wrapper_data.WrapperDataCoreOut()
wd_r.read()
print('read ' + str(wd_r.point))
print('read ' + str(wd_r.err_msg))
# get data
print(wrapper_data.get_data(wd_r.get_fullname()))
# clean
os.remove(wd.get_fullname())
print('== test WrapperDataHostIn')
# write
wd = wrapper_data.WrapperDataHostIn()
wd.hostname = "tata"
wd.sample = [[2, 3, 4], [5, 6, 2]]
wd.first_id = 2
wd.workdir_basename = 'temp132'
wd.workdir = '/temp/tmp56'
wd.tmpdir = '/temp'
wd.remote_tmpdir = '/temp'
wd.n_cores = 2
wd.files_to_send = ['us.py']
wd.separate_workdir = False
wd.cleanup = 'all'
wd.write()
# read
wd_r = wrapper_data.WrapperDataHostIn()
wd_r.hostname = wd.hostname
wd_r.read()
print(wd_r.hostname)
print(wd_r.sample)
print(wd_r.first_id)
print(wd_r.workdir_basename)
print(wd_r.workdir)
print(wd_r.tmpdir)
print(wd_r.remote_tmpdir)
print(wd_r.n_cores)
print(wd_r.files_to_send)
print(wd_r.separate_workdir)
print(wd_r.cleanup)
# get data
print(wrapper_data.get_data(wd.get_fullname()))
# clean
os.remove(wd.get_fullname())
print('== test WrapperDataHostOut')
# write
wd = wrapper_data.WrapperDataHostOut(remote=True)
wd.hostname = "tata"
wd.sample = [[4, 3, 2], [8, 6, 2]]
wd.add_debug("start point")
wd.add_error(2, "error on point")
wd.add_point(3, None, 45)
wd.add_warn("toto", "warning")
wd.write_sample()
# read
wd_r = wrapper_data.WrapperDataHostOut()
wd_r.hostname = wd.hostname
wd_r.read()
print(wd_r.sample)
print('log')
log = wd_r.get_next_log()
while log:
flag = log[0]
data = log[2]
print(' ' + flag + ':' + str(data))
log = wd_r.get_next_log()
# get data
print(wrapper_data.get_data(wd.get_fullname()))
# clean
os.remove(wd.get_fullname())
print('== test uncomplete WrapperDataHostOut')
# write
wd = wrapper_data.WrapperDataHostOut(remote=True)
wd.hostname = "toto"
wd.add_point(1, [8, 5, 6], 45)
wd.add_point(3, [5, 3, 2], 42)
# read
wd_r = wrapper_data.WrapperDataHostOut()
wd_r.hostname = wd.hostname
wd_r.read()
# get data
print(wrapper_data.get_data(wd.get_fullname()))
# clean
#os.remove(wd.get_fullname())
# print '== test WrapperDataHostsIn'
# print '== test WrapperDataHostsOut'
|
openturns/otdistfunc
|
python/test/t_wrapper_data.py
|
Python
|
lgpl-3.0
| 2,706
|
import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.plugin.api.utils import parse_query
from livestreamer.stream import RTMPStream
BALANCER_URL = "http://www.mips.tv:1935/loadbalancer"
PLAYER_URL = "http://mips.tv/embedplayer/{0}/1/500/400"
SWF_URL = "http://mips.tv/content/scripts/eplayer.swf"
_url_re = re.compile("http(s)?://(\w+.)?mips.tv/(?P<channel>[^/&?]+)")
_flashvars_re = re.compile("'FlashVars', '([^']+)'")
_rtmp_re = re.compile("redirect=(.+)")
_schema = validate.Schema(
validate.transform(_flashvars_re.search),
validate.any(
None,
validate.all(
validate.get(1),
validate.transform(parse_query),
{
"id": validate.transform(int),
validate.optional("s"): validate.text
}
)
)
)
_rtmp_schema = validate.Schema(
validate.transform(_rtmp_re.search),
validate.get(1),
)
class Mips(Plugin):
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
def _get_streams(self):
match = _url_re.match(self.url)
channel = match.group("channel")
headers = {"Referer": self.url}
url = PLAYER_URL.format(channel)
res = http.get(url, headers=headers, schema=_schema)
if not res or "s" not in res:
return
streams = {}
server = http.get(BALANCER_URL, headers=headers, schema=_rtmp_schema)
playpath = "{0}?{1}".format(res["s"], res["id"])
streams["live"] = RTMPStream(self.session, {
"rtmp": "rtmp://{0}/live/{1}".format(server, playpath),
"pageUrl": self.url,
"swfVfy": SWF_URL,
"conn": "S:OK",
"live": True
})
return streams
__plugin__ = Mips
|
Masaz-/livestreamer
|
src/livestreamer/plugins/mips.py
|
Python
|
bsd-2-clause
| 1,840
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('servo', '0022_auto_20150612_0803'),
]
operations = [
migrations.AlterField(
model_name='device',
name='password',
field=models.CharField(default=b'', max_length=32, verbose_name='password', blank=True),
),
migrations.AlterField(
model_name='device',
name='product_line',
field=models.CharField(default=b'OTHER', max_length=16, verbose_name='Product Line', choices=[(b'IPODCLASSIC', b'iPod Classic'), (b'POWERMAC', b'Power Mac'), (b'APPLETV', b'Apple TV'), (b'IMAC', b'iMac'), (b'OTHER', b'Other Products'), (b'MACBOOKAIR', b'MacBook Air'), (b'DISPLAYS', b'Display'), (b'IPODTOUCH', b'iPod Touch'), (b'MACPRO', b'Mac Pro'), (b'IPODNANO', b'iPod nano'), (b'IPAD', b'iPad'), (b'MACBOOK', b'MacBook'), (b'MACACCESSORY', b'Mac Accessory'), (b'MACMINI', b'Mac mini'), (b'SERVER', b'Server'), (b'BEATS', b'Beats Products'), (b'IPHONE', b'iPhone'), (b'IPHONEACCESSORY', b'iPhone Accessory'), (b'IPODSHUFFLE', b'iPod Shuffle'), (b'MACBOOKPRO', b'MacBook Pro')]),
),
]
|
filipp/Servo
|
servo/migrations/0023_auto_20150612_0822.py
|
Python
|
bsd-2-clause
| 1,255
|
# Django settings for example project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
SITE_ID = 1
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'a2j43z$c+s#v@9^7$zlqjty)s--v655_8*lkkvuz7&d-6(9d=v'
ROOT_URLCONF = 'example.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
|
Charnelx/django-split-settings
|
tests/settings/components/base.py
|
Python
|
bsd-3-clause
| 610
|
import tests.exog.test_random_exogenous as testrandexog
testrandexog.test_random_exogenous( 8,160);
|
antoinecarme/pyaf
|
tests/exog/random/random_exog_8_160.py
|
Python
|
bsd-3-clause
| 101
|
"""Example using Sigopt's multi-objective functionality."""
import sys
import time
import numpy as np
from ray import tune
from ray.tune.suggest.sigopt import SigOptSearch
np.random.seed(0)
vector1 = np.random.normal(0, 0.1, 100)
vector2 = np.random.normal(0, 0.1, 100)
def evaluate(w1, w2):
total = w1 * vector1 + w2 * vector2
return total.mean(), total.std()
def easy_objective(config):
# Hyperparameters
w1 = config["w1"]
w2 = config["total_weight"] - w1
average, std = evaluate(w1, w2)
tune.report(average=average, std=std, sharpe=average / std)
time.sleep(0.1)
if __name__ == "__main__":
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
args, _ = parser.parse_known_args()
if "SIGOPT_KEY" not in os.environ:
if args.smoke_test:
print("SigOpt API Key not found. Skipping smoke test.")
sys.exit(0)
else:
raise ValueError(
"SigOpt API Key not found. Please set the SIGOPT_KEY "
"environment variable."
)
space = [
{
"name": "w1",
"type": "double",
"bounds": {"min": 0, "max": 1},
},
]
algo = SigOptSearch(
space,
name="SigOpt Example Multi Objective Experiment",
observation_budget=4 if args.smoke_test else 100,
max_concurrent=1,
metric=["average", "std", "sharpe"],
mode=["max", "min", "obs"],
)
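    # Note: metric and mode are paired positionally above, so "average" is
    # maximized, "std" is minimized, and "sharpe" (mode "obs") is presumably
    # only recorded as an observation rather than optimized.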
analysis = tune.run(
easy_objective,
name="my_exp",
search_alg=algo,
num_samples=4 if args.smoke_test else 100,
config={"total_weight": 1},
)
print(
"Best hyperparameters found were: ", analysis.get_best_config("average", "min")
)
|
ray-project/ray
|
python/ray/tune/examples/sigopt_multi_objective_example.py
|
Python
|
apache-2.0
| 1,895
|
# -- encoding: utf-8 --
from __future__ import with_statement
from stackspy.utils import url_replace
def detect_robots(context):
robots = context.get_url(url_replace(context.base_url, path="/robots.txt"))
if robots.exists:
robots.add_result("Robots.txt", "Robots.txt exists")
def detect_humans(context):
humans = context.get_url(url_replace(context.base_url, path="/humans.txt"))
if humans.exists:
humans.add_result("Humans.txt", "Humans.txt exists")
def detect_without_headers_quirks(context):
without_headers = context.get_url("/?without-headers")
without_headers.request.headers.clear()
if without_headers.status_code != 200:
without_headers.add_result("Quirk", "Server acts oddly when sent request without headers")
|
akx/stackspy
|
stackspy/detectors/misc.py
|
Python
|
mit
| 738
|
import pandas as pd
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},
index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
'B': ['B8', 'B9', 'B10', 'B11'],
'C': ['C8', 'C9', 'C10', 'C11'],
'D': ['D8', 'D9', 'D10', 'D11']},
index=[8, 9, 10, 11])
frames = [df1, df2, df3]
# simple concat
result1 = pd.concat(frames)
print(result1)
# outer
#print(pd.concat(objs, axis=0, join='outer', ignore_index=False, keys=None,
# levels=None, names=None, verify_integrity=False, copy=True))
#
result2 = pd.concat(frames, keys=['x', 'y', 'z'])
print(result2)
print(result2.loc['y'])
df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'],
'D': ['D2', 'D3', 'D6', 'D7'],
'F': ['F2', 'F3', 'F6', 'F7']},
index=[2, 3, 6, 7])
result = pd.concat([df1, df4], axis=1, sort=False)
print(result)
result = pd.concat([df1, df4], axis=1, join='inner')
print(result)
result = df1.append(df2)
print(result)
result = df1.append(df4, sort=False)
print(result)
result = df1.append([df2, df3])
print(result)
result = pd.concat([df1, df4], ignore_index=True, sort=False)
print(result)
result = df1.append(df4, ignore_index=True, sort=False)
print(result)
s1 = pd.Series(['X0', 'X1', 'X2', 'X3'], name='X')
result = pd.concat([df1, s1], axis=1)
print(result)
s2 = pd.Series(['_0', '_1', '_2', '_3'])
result = pd.concat([df1, s2, s2, s2], axis=1)
print(result)
result = pd.concat([df1, s1], axis=1, ignore_index=True)
print(result)
s3 = pd.Series([0, 1, 2, 3], name='foo')
s4 = pd.Series([0, 1, 2, 3])
s5 = pd.Series([0, 1, 4, 5])
print(pd.concat([s3, s4, s5], axis=1))
print(pd.concat([s3, s4, s5], axis=1, keys=['red', 'blue', 'yellow']))
result = pd.concat(frames, keys=['x', 'y', 'z'])
print(result)
pieces = {'x': df1, 'y': df2, 'z': df3}
result = pd.concat(pieces)
print(result)
result = pd.concat(pieces, keys=['z', 'y'])
print(result.index.levels)
result = pd.concat(pieces, keys=['x', 'y', 'z'],
levels=[['z', 'y', 'x', 'w']],
names=['group_key'])
print(result)
print(result.index.levels)
s2 = pd.Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D'])
result = df1.append(s2, ignore_index=True)
print(result)
dicts = [{'A': 1, 'B': 2, 'C': 3, 'X': 4},
{'A': 5, 'B': 6, 'C': 7, 'Y': 8}]
result = df1.append(dicts, ignore_index=True, sort=False)
print(result)
|
davidam/python-examples
|
pandas/pandas-concat.py
|
Python
|
gpl-3.0
| 2,930
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# longer_weather.py - Collect GIFs, split into frames, collect frames, then
# merge them together to create one long animation.
# Originally used to create long weather radar animations
# but may work with other hosted GIFs.
#
# Copyright (C) 2015 Kyle T. Gabriel
#
# Mycodo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycodo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycodo. If not, see <http://www.gnu.org/licenses/>.
#
# Contact at kylegabriel.com
#
# Raspberry Pi:
# sudo apt-get install python-numpy imagemagick
# sudo pip install pillow imagehash
#
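# Illustrative invocations (assumed usage; the --delete flag is defined in
# parseargs() below):
#   python longer_weather.py            # start collecting and merging frames
#   python longer_weather.py --delete   # remove saved images before starting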
import argparse
import hashlib
import imagehash
import logging
import os
import subprocess
import sys
import time
import urllib
from PIL import Image
from datetime import datetime
from datetime import timedelta
from threading import Thread
from config import GIF_HTTP_FILES
from config import LOG_PATH
from config import FRAME_PATH
logFormatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
logger = logging.getLogger()
fileHandler = logging.FileHandler(LOG_PATH)
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
class LongerWeather(Thread):
"""Longer Weather"""
def __init__(self):
Thread.__init__(self)
logger.info('Daemon Started')
self.start_time = datetime.now()
self.timer = {}
self.file_time = {}
for gif_address in GIF_HTTP_FILES:
self.timer[gif_address['file_prefix']] = datetime.now()
self.file_time[gif_address['file_prefix']] = (
datetime.utcnow().replace(minute=8) - timedelta(hours=4))
self.running = True
def run(self):
while self.running:
present = datetime.now()
# Download and save the original GIFs
for gif_address in GIF_HTTP_FILES:
unique_name = '{base}_{prefix}'.format(
base=gif_address['base_address'].split('/')[-1],
prefix=gif_address['file_prefix'])
path_frames = os.path.join(FRAME_PATH, unique_name)
assure_path_exists(path_frames)
if present > self.timer[gif_address['file_prefix']]:
self.timer[gif_address['file_prefix']] += timedelta(minutes=gif_address['update_min'])
frame_good = True
new_frames = False
while frame_good:
filename = self.file_time[gif_address['file_prefix']].strftime(
'{prefix}_%Y%m%d_%H%M.gif'.format(
prefix=gif_address['file_prefix']))
save_file = os.path.join(path_frames, filename)
dl_address = '{address}/{filename}'.format(
address=gif_address['base_address'],
filename=filename)
if self.get_gif(dl_address, save_file):
new_frames = True
self.file_time[gif_address['file_prefix']] += timedelta(minutes=10)
else:
frame_good = False
if new_frames:
# Create GIF from frames
gif_save = os.path.join(
FRAME_PATH, '{name}.gif'.format(name=unique_name))
self.create_gif(path_frames, gif_save, gif_address)
# Create webm from GIF
# webm_save = os.path.join(
# FRAME_PATH, '{name}.webm'.format(name=name_root))
# self.create_webm(gif_save, webm_save)
logger.info('Next Grab: {next:.2f} minutes'.format(
next=(self.timer[gif_address['file_prefix']] - present).total_seconds() / 60))
time.sleep(1)
@staticmethod
def create_gif(path_frames, gif_save, gif_settings):
""" Use images in each frames subdirectory to create a GIF """
# Delete any frames greater than the max limit
files_all = sorted(next(os.walk(path_frames))[2])
while len(files_all) > gif_settings['frames_max']:
os.remove(os.path.join(path_frames, files_all[0]))
files_all = sorted(next(os.walk(path_frames))[2])
logger.info('Generating {f}'.format(f=gif_save))
logger.info('from {f}/*.png'.format(f=path_frames))
cmd = "ffmpeg -y -framerate {frate} -pattern_type glob -i '{path}/*.png' {gif}".format(
frate=gif_settings['animation_speed'],
path=path_frames,
gif=gif_save)
logger.info('CMD: {f}'.format(f=cmd))
subprocess.call(cmd, shell=True)
# cmd = 'convert {gif} -fuzz 10% -layers Optimize {gif}.opt.gif'.format(
# gif=gif_save)
# logger.info('CMD: {f}'.format(f=cmd))
# subprocess.call(cmd, shell=True)
# cmd = 'gifsicle -O3 {gif} -o {gif}.opt.gif'.format(
# gif=gif_save)
# logger.info('CMD: {f}'.format(f=cmd))
# subprocess.call(cmd, shell=True)
# cmd = "ffmpeg -y -framerate 10 -pattern_type glob -i '{path}/*.png' -f image2pipe -vcodec ppm - | " \
# "convert -layers Optimize - {gif}".format(
# path=path_frames,
# gif=gif_save)
# logger.info('CMD: {f}'.format(f=cmd))
# subprocess.call(cmd, shell=True)
# last_file = sorted(next(os.walk(path_frames))[2])[-1]
# cmd = 'convert -delay 10 -loop 0 {path}/*.gif -delay 200 {path}/{last_file} {gif}'.format(
# path=path_frames,
# last_file=last_file,
# gif=gif_save)
# Get last file to pause GIF
# last_file = os.path.join(
# gif['path_frames'],
# sorted(next(os.walk(gif['path_frames']))[2])[-1])
# cmd = 'convert -delay {speed} {path}/*.png -delay 200 {last} -loop 0 {spath}'.format(
# speed=gif_settings['animation_speed'],
# path=gif['path_frames'],
# last=last_file,
# spath=gif['new_gif'])
# logger.info('CMD: {f}'.format(f=cmd))
# subprocess.call(cmd, shell=True)
@staticmethod
def create_webm(gif_save, webm_save):
logger.info("Creating {webm}".format(webm=webm_save))
cmd = 'ffmpeg -y -i {gif}.opt.gif -c:v libvpx -crf 50 -b:v 256K -auto-alt-ref 0 {webm}'.format(
gif=gif_save,
webm=webm_save)
logger.info('CMD: {f}'.format(f=cmd))
subprocess.call(cmd, shell=True)
@staticmethod
def get_gif(gif_address, gif_saved):
""" Download a GIF """
logger.info('Get {f}'.format(f=gif_address))
urllib.urlretrieve(gif_address, gif_saved)
logger.info('Saved {f}'.format(f=gif_saved))
if '404 Not Found' in open(gif_saved).read():
os.remove(gif_saved)
return False
file_png = '{name}.png'.format(name=gif_saved.split('.')[0])
cmd = 'convert {gif} {png}'.format(
gif=gif_saved,
png=file_png)
logger.info('CMD: {f}'.format(f=cmd))
subprocess.call(cmd, shell=True)
os.remove(gif_saved)
cmd = 'pngquant --quality 2-10 64 {png}'.format(
png=file_png)
logger.info('CMD: {f}'.format(f=cmd))
subprocess.call(cmd, shell=True)
os.remove(file_png)
return True
@staticmethod
def hash_generate(frame, size):
return str(imagehash.dhash(Image.open(frame), hash_size=size))
@staticmethod
def remove_duplicates(dir):
unique = []
root_path, _, file_names = next(os.walk(dir))
for filename in sorted(file_names):
file_path = os.path.join(root_path, filename)
if os.path.isfile(file_path):
filehash = hashlib.md5(file(file_path).read()).hexdigest()
if filehash not in unique:
unique.append(filehash)
else:
os.remove(file_path)
def terminate(self):
self.running = False
def assure_path_exists(dir_path):
""" Create path if it doesn't exist """
if not os.path.exists(dir_path):
os.makedirs(dir_path)
os.chmod(dir_path, 0774)
return dir_path
def delete_images(dir_path):
    """ Delete all GIFs and PNGs in the given directory """
file_list = [f for f in os.listdir(dir_path) if f.endswith('.gif') or f.endswith('.png')]
for f in file_list:
file_del = os.path.join(dir_path, f)
os.remove(file_del)
def parseargs(parse):
parse.add_argument('-d', '--delete', action='store_true',
help='Delete all current images')
return parse.parse_args()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Longer Weather')
args = parseargs(parser)
if args.delete:
for dir_name, dir_names, file_names in os.walk(FRAME_PATH):
for each_directory in dir_names:
path = os.path.join(dir_name, each_directory)
logger.info('Deleting {f}'.format(f=path))
delete_images(path)
longer_weather = LongerWeather()
longer_weather.daemon = True
longer_weather.start()
try:
while longer_weather.is_alive():
time.sleep(1)
except KeyboardInterrupt:
logger.error('Keyboard exit')
longer_weather.terminate()
sys.exit(1)
|
kizniche/longer-weather
|
longer_weather.py
|
Python
|
gpl-3.0
| 10,250
|
#
# The Python Imaging Library.
# $Id$
#
# SGI image file handling
#
# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli.
# <ftp://ftp.sgi.com/graphics/SGIIMAGESPEC>
#
# History:
# 1995-09-10 fl Created
#
# Copyright (c) 2008 by Karsten Hiddemann.
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1995 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.2"
import Image, ImageFile
def i16(c):
return ord(c[1]) + (ord(c[0])<<8)
def i32(c):
return ord(c[3]) + (ord(c[2])<<8) + (ord(c[1])<<16) + (ord(c[0])<<24)
def _accept(prefix):
return i16(prefix) == 474
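# Worked example: the SGI magic number 474 is 0x01DA, so
# i16("\x01\xda") == (0x01 << 8) + 0xda == 474, which is the value _accept()
# compares against the first two header bytes.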
##
# Image plugin for SGI images.
class SgiImageFile(ImageFile.ImageFile):
format = "SGI"
format_description = "SGI Image File Format"
def _open(self):
# HEAD
s = self.fp.read(512)
if i16(s) != 474:
raise SyntaxError("not an SGI image file")
# relevant header entries
compression = ord(s[2])
# bytes, dimension, zsize
layout = ord(s[3]), i16(s[4:]), i16(s[10:])
# determine mode from bytes/zsize
if layout == (1, 2, 1) or layout == (1, 1, 1):
self.mode = "L"
elif layout == (1, 3, 3):
self.mode = "RGB"
elif layout == (1, 3, 4):
self.mode = "RGBA"
else:
raise SyntaxError("unsupported SGI image mode")
# size
self.size = i16(s[6:]), i16(s[8:])
# decoder info
if compression == 0:
offset = 512
pagesize = self.size[0]*self.size[1]*layout[0]
self.tile = []
for layer in self.mode:
self.tile.append(("raw", (0,0)+self.size, offset, (layer,0,-1)))
offset = offset + pagesize
elif compression == 1:
self.tile = [("sgi_rle", (0,0)+self.size, 512, (self.mode, 0, -1))]
#
# registry
Image.register_open("SGI", SgiImageFile, _accept)
Image.register_extension("SGI", ".bw")
Image.register_extension("SGI", ".rgb")
Image.register_extension("SGI", ".rgba")
Image.register_extension("SGI", ".sgi") # really?
|
zhaochl/python-utils
|
verify_code/Imaging-1.1.7/build/lib.linux-x86_64-2.7/SgiImagePlugin.py
|
Python
|
apache-2.0
| 2,168
|
import pkg_resources
from .vggish_params import *
MODEL_PARAMS = pkg_resources.resource_filename(
__name__, '.model/vggish_model.cpkt')
PCA_PARAMS = pkg_resources.resource_filename(
__name__, '.model/vggish_pca_params.npz')
|
cosmir/dev-set-builder
|
audioset/__init__.py
|
Python
|
mit
| 235
|
from django.db import models
class Empresa(models.Model):
nombre = models.CharField(max_length=100)
ciudad = models.CharField(max_length=50)
sector = models.CharField(max_length=200)
def __str__(self):
return self.nombre
class Calificacion(models.Model):
alumno = models.CharField(max_length=100)
calificacion = models.IntegerField(default=0)
empresa = models.ForeignKey(Empresa)
def __str__(self):
return self.alumno
|
JesGor/test_rest
|
apprest/models.py
|
Python
|
gpl-2.0
| 432
|
import os
def get_files_list(path_dir):
return os.listdir(path_dir)
def absolute_path_file(path_file):
return os.path.abspath(path_file)
def extract_file_from_path_file(path_file):
path, file = os.path.split(path_file)
return file
def extract_path_from_file(file):
path_file = os.path.abspath(os.path.dirname(file))
path, file = os.path.split(path_file)
return path
def join_paths(path1, path2):
return os.path.join(path1, path2)
def file_exists(path_file):
return os.path.isfile(path_file)
def dir_exists(dirname):
return os.path.isdir(dirname)
|
paulinus/OpenDroneMap
|
opendm/io.py
|
Python
|
gpl-3.0
| 600
|
#coding=utf-8
'''
Created on 2015-4-3
@author: Devuser
'''
from gateside.autotesting.settings import GlobalConfig
from gateside.autotesting.Gat.manager.istepparametermanager import IStepParameterManager
from gateside.autotesting.Gat.util.stepvaluepool import StepValuePool
class TestStep(object):
    '''
    Test step methods.
    '''
def __init__(self):
'''
Constructor
'''
pass
def setpmethod1(self,parameterID):
istepparametermanager=IStepParameterManager()
parameter=istepparametermanager.get_item("InlandHotelListHotelFilterCheck")
print(parameter.Parameters["locationFilter"])
valuepool=StepValuePool()
valuepool.put_value("demo","12345678")
def setpmethod2(self,parameterID):
valuepool=StepValuePool()
print(valuepool.get_value("demo"))
|
skyzhang2006/PyGAT
|
PyGAT/gateside/autotesting/iat_stepgroups/teststep.py
|
Python
|
gpl-2.0
| 884
|
import numpy as np
class Mesh(object):
"""
Mesh design and storage. Currently the left edge is fixed at 0.0
"""
def __init__(self, delta_x=(0.1 * np.ones(10))):
"""
Sum the spacings to get cell centers,
but the cell center is at the halfway point, so we have to
subtract half of delta_x again.
"""
self.delta_x = delta_x
right_edges = np.cumsum(self.delta_x)
self.centers = right_edges - self.delta_x / 2.0
self.edges = np.insert(right_edges, 0, 0.0)
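        # Worked example: delta_x = [0.5, 1.0] gives right_edges = [0.5, 1.5],
        # so centers = [0.25, 1.0] and edges = [0.0, 0.5, 1.5]
        # (test_mesh_simple below exercises a longer case).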
@property
def x(self):
return self.centers
@property
def left_edge(self):
return 0.0
@property
def right_edge(self):
return self.domain_width
@property
def domain_width(self):
return self.edges[-1]
def extend_edges(self, extension):
delta_xi = self.delta_x[0]
delta_xf = self.delta_x[-1]
padded_edges = self.edges
for i in range(1, extension + 1):
padded_edges = np.insert(padded_edges, 0,
self.edges[0] - i * delta_xi)
padded_edges = np.append(padded_edges,
self.edges[-1] + i * delta_xf)
return padded_edges
def test_extend_edges():
delta_x = np.array([0.5, 1.0, 0.1, 2.0])
m = Mesh(delta_x)
extended = m.extend_edges(2)
assert(extended[0] == extended[2] - 1.0)
assert(extended[1] == extended[2] - 0.5)
assert(extended[-2] == extended[-3] + 2.0)
assert(extended[-1] == extended[-3] + 4.0)
def test_mesh_simple():
delta_x = np.array([0.5, 1.0, 0.1, 2.0])
correct_cen = np.array([0.25, 1.0, 1.55, 2.6])
correct_edge = np.array([0.0, 0.5, 1.5, 1.6, 3.6])
m = Mesh(delta_x)
assert((m.x == correct_cen).all())
assert((m.edges == correct_edge).all())
assert(m.domain_width == 3.6)
assert(m.right_edge == 3.6)
assert(m.left_edge == 0.0)
|
tbenthompson/rupturotops
|
source/core/mesh.py
|
Python
|
lgpl-3.0
| 1,954
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote
import logging
import feedparser
from requests import RequestException
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.search import torrent_availability, normalize_unicode, clean_title
log = logging.getLogger('kat')
class SearchKAT(object):
"""KAT search plugin.
should accept:
kat:
category: <category>
verified: yes/no
categories:
all
movies
tv
music
books
xxx
other
"""
schema = {
'type': 'object',
'properties': {
'category': {'type': 'string', 'enum': ['all', 'movies', 'tv', 'music', 'books', 'xxx', 'other']},
'verified': {'type': 'boolean'}
},
'additionalProperties': False
}
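    # Illustrative FlexGet task snippet (assumed layout; it simply restates
    # the docstring and schema above):
    #
    #   kat:
    #     category: tv
    #     verified: yes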
def search(self, task, entry, config):
search_strings = [normalize_unicode(s).lower() for s in entry.get('search_strings', [entry['title']])]
entries = set()
for search_string in search_strings:
search_string = clean_title(search_string)
search_string_url_fragment = search_string
params = {'rss': 1}
if config.get('verified'):
search_string_url_fragment += ' verified:1'
url = 'https://kat.cr/usearch/%s/' % quote(search_string_url_fragment.encode('utf-8'))
if config.get('category', 'all') != 'all':
params['category'] = config['category']
sorters = [{'field': 'time_add', 'sorder': 'desc'},
{'field': 'seeders', 'sorder': 'desc'}]
for sort in sorters:
params.update(sort)
log.debug('requesting: %s' % url)
try:
r = task.requests.get(url, params=params, raise_status=False)
except RequestException as e:
log.warning('Search resulted in: %s' % e)
continue
if not r.content:
log.debug('No content returned from search.')
continue
elif r.status_code != 200:
log.warning('Search returned %s response code' % r.status_code)
continue
rss = feedparser.parse(r.content)
ex = rss.get('bozo_exception', False)
if ex:
log.warning('Got bozo_exception (bad feed)')
continue
for item in rss.entries:
entry = Entry()
entry['title'] = item.title
if not item.get('enclosures'):
                        log.warning('Could not get url for entry from KAT. Maybe the plugin needs updating?')
continue
entry['url'] = item.enclosures[0]['url']
entry['torrent_seeds'] = int(item.torrent_seeds)
entry['torrent_leeches'] = int(item.torrent_peers)
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
entry['content_size'] = int(item.torrent_contentlength) / 1024 / 1024
entry['torrent_info_hash'] = item.torrent_infohash
entries.add(entry)
if len(rss.entries) < 25:
break
return entries
@event('plugin.register')
def register_plugin():
plugin.register(SearchKAT, 'kat', groups=['search'], api_ver=2)
|
qvazzler/Flexget
|
flexget/plugins/search/kat.py
|
Python
|
mit
| 3,694
|
# -*- coding: utf-8 -*-
#
import codecs
import datetime
import os
import abstractRenderer
import books
#
# Simplest renderer. Ignores everything except ascii text.
#
STANDARD_SUFFIX = '.html'
class Renderer(abstractRenderer.AbstractRenderer):
def __init__(self, inputDir, outputDir, outputName, config):
self.identity = 'single page html renderer'
self.outputDescription = os.path.join(outputDir, outputName + '.html')
abstractRenderer.AbstractRenderer.__init__(self, inputDir, outputDir, outputName, config)
# Unset
self.f = None # output file stream
# IO
self.outputFilename = os.path.join(outputDir, outputName + '.html')
self.inputDir = inputDir
# Position
self.cb = '' # Current Book
self.cc = '001' # Current Chapter
        self.cv = '001' # Current Verse
self.indentFlag = False
        self.bookname = '' # Current Book name
def render(self, order="normal"):
self.loadUSFM(self.inputDir)
self.f = open(self.outputFilename, 'w')
h = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>Bible</title>
<style media="all" type="text/css">
.indent-0 {
margin-left:0em;
margin-bottom:0em;
margin-top:0em;
}
.indent-1 {
margin-left:0em;
margin-bottom:0em;
margin-top:0em;
}
.indent-2 {
margin-left:1em;
margin-bottom:0em;
margin-top:0em;
}
.indent-3 {
margin-left:2em;
margin-bottom:0em;
margin-top:0em;
}
.c-num {
color:gray;
}
.v-num {
color:gray;
}
.tetragrammaton {
font-variant: small-caps;
}
</style>
</head>
<body>
<h1>Table of Contents</h1>
<p><b>Old Testament</b></p>
{{{otlinks}}}
<p><b>New Testament</b></p>
{{{ntlinks}}}
"""
h = h.replace('{{{otlinks}}}', self.bookList(1, 39))
h = h.replace('{{{ntlinks}}}', self.bookList(40, 66))
self.f.write(h)
self.f.write('<p>Document rendered ' + datetime.date.today().strftime("%A, %d %B %Y") + '</p>\n\n')
self.run(order)
self.f.write('</body></html>')
self.f.close()
def bookList(self, s, f):
h = ''
for b in range(s, f):
if books.silNames[b] in self.booksUsfm:
h = h + '\n<p class="indent-1"><a href="#' + str(b).zfill(3) + '">' + books.bookNames[b - 1].replace(' ', ' ') + '</a></p>'
else:
h = h + '\n' + books.bookNames[b - 1] + '<p class="indent-1">'
return h
def escape(self, s):
return s.replace('~',' ')
def write(self, unicodeString):
self.f.write(unicodeString.replace('~', ' '))
def writeIndent(self, level):
self.write('\n\n')
if level == 0:
self.indentFlag = False
self.write('<p class="indent-0">')
return
if not self.indentFlag:
self.indentFlag = True
self.write('<p>')
self.write('<p class="indent-' + str(level) + '">')
def render_id(self, token):
self.cb = books.bookKeyForIdValue(token.value)
self.indentFlag = False
self.write('\n\n<h1 id="' + self.cb + '"></h1>\n')
def render_h(self, token): self.bookname = token.value
def render_mt1(self, token): self.write('\n\n<h1>' + token.value + '</h1>')
def render_mt2(self, token): self.write('\n\n<h2>' + token.value + '</h2>')
def render_mt3(self, token): self.write('\n\n<h2>' + token.value + '</h2>')
def render_ms1(self, token): self.write('\n\n<h3>' + token.value + '</h3>')
def render_ms2(self, token): self.write('\n\n<h4>' + token.value + '</h4>')
def render_p(self, token):
self.indentFlag = False
self.write('\n\n<p>')
def render_pi(self, token):
self.indentFlag = False
        self.write('\n\n<p class="indent-2">')
def render_m(self, token):
self.indentFlag = False
self.write('\n\n<p>')
def render_s1(self, token):
self.indentFlag = False
self.write('\n\n<h5>' + token.getValue() + '</h5>')
def render_s2(self, token):
self.indentFlag = False
self.write('\n\n<p align="center">----</p>')
def render_c(self, token):
self.cc = token.value.zfill(3)
self.write('\n\n<p class="c-num">[' + self.bookname + ' ' + token.value + ']</p>')
def render_v(self, token):
self.cv = token.value.zfill(3)
self.write(' <span class="v-num">[' + token.value + ']</span> ')
def render_wj_s(self, token): self.write('<span class="woc">')
def render_wj_e(self, token): self.write('</span>')
def render_text(self, token): self.write(" " + self.escape(token.value) + " ")
def render_q(self, token): self.writeIndent(1)
def render_q1(self, token): self.writeIndent(1)
def render_q2(self, token): self.writeIndent(2)
def render_q3(self, token): self.writeIndent(3)
def render_nb(self, token): self.writeIndent(0)
def render_b(self, token): self.write('\n\n<p class="indent-0"> </p>')
def render_i_s(self, token): self.write('<i>')
def render_i_e(self, token): self.write('</i>')
def render_nd_s(self, token): self.write('<span class="tetragrammaton">')
def render_nd_e(self, token): self.write('</span>')
def render_pbr(self, token): self.write('<br />')
def render_sc_s(self, token): self.write('<b>')
def render_sc_e(self, token): self.write('</b>')
def render_f_s(self, token): self.write('[Note: ')
def render_f_e(self, token): self.write(' ]')
def render_qs_s(self, token): self.write('<i>')
def render_qs_e(self, token): self.write('</i>')
def render_em_s(self, token): self.write('<i>')
def render_em_e(self, token): self.write('</i>')
def render_d(self, token):
self.indentFlag = False
self.write('\n\n<p>' + token.value + '</p>')
def render_pb(self, token): pass
def render_periph(self, token): pass
|
openenglishbible/USFM-Tools
|
transform/support/singlehtmlRenderer.py
|
Python
|
mit
| 6,765
|
from edc_sync.site_sync_models import site_sync_models
site_sync_models.register_for_app('edc_identifier')
|
botswana-harvard/edc-identifier
|
edc_identifier/sync_models.py
|
Python
|
gpl-2.0
| 108
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from application import RIDE, Project
|
caio2k/RIDE
|
src/robotide/application/__init__.py
|
Python
|
apache-2.0
| 646
|
"""
The points P (x1, y1) and Q (x2, y2) are plotted at integer co-ordinates and are joined to the origin, O(0,0), to form triangle OPQ.
There are exactly fourteen triangles containing a right angle that can be formed when each co-ordinate lies between 0 and 2 inclusive; that is,
0 ≤ x1, y1, x2, y2 ≤ 2.
Given that 0 ≤ x1, y1, x2, y2 ≤ 50, how many right triangles can be formed?
"""
from itertools import product
from itertools import combinations
def dot(u, v):
return u[0]*v[0] + u[1]*v[1]
N = 50
points = product(range(N+1), repeat=2)
count = 0
for op, oq in combinations(points, 2):
if (0, 0) in (op, oq): continue
pq = (oq[0]-op[0], oq[1]-op[1])
if dot(op, oq) * dot(op, pq) * dot(oq, pq) == 0:
count += 1
print count
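# Worked example for the test above: with P = (1, 0) and Q = (0, 1), the right
# angle is at O, dot(op, oq) == 1*0 + 0*1 == 0, so the product of the three
# dot products is zero and triangle OPQ is counted.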
|
peterstace/project-euler
|
OLD_PY_CODE/project_euler_old_old/91/91.py
|
Python
|
unlicense
| 755
|
#from eden.converter.fasta import fasta_to_sequence
from eden_rna.io.fasta import load
import itertools
def rfam_uri(family_id):
return 'http://rfam.xfam.org/family/%s/alignment?acc=%s&format=fastau&download=0'%(family_id,family_id)
def rfam_uri(family_id):
return '%s.fa'%(family_id)
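# Note: the second rfam_uri definition above overrides the first, so sequences
# are read from a local '<family_id>.fa' file rather than fetched from the
# Rfam URL.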
def get_sequences(size=9999,withoutnames=False):
sequences = itertools.islice( load("../../toolsdata/RF00005.fa"), size)
if withoutnames:
return [ b for (a,b) in sequences ]
return sequences
import graphlearn.minor.rna.infernal as infernal
from graphlearn.feasibility import FeasibilityChecker as Checker
import graphlearn.estimate as estimate
from graphlearn.minor.rna import forgitransform as forgitransform
#from graphlearn.minor.decompose import MinorDecomposer
from graphlearn.minor.rna.rnadecomposer import RnaDecomposer
# not really needed since after refolding we get an RNA
#feasibility=Checker()
#feasibility.checklist.append(rna.is_rna)
graphs = get_sequences(size=100)
estimator=estimate.OneClassEstimator( nu=.33, cv=2, n_jobs=-1)
sampler=infernal.AbstractSampler(
#radius_list=[0,1],
#thickness_list=[2],
#min_cip_count=1,
#min_interface_count=2,
graphtransformer=forgitransform.GraphTransformerForgi(),
decomposer=RnaDecomposer(output_sequence=True,
pre_vectorizer_rm_f=True)
#estimator=estimator
#feasibility_checker=feasibility
)
sampler.fit(graphs)
graphs = get_sequences(size=5,withoutnames=True)
r= list( sampler.transform(graphs))
|
fabriziocosta/GraphLearn_examples
|
notebooks/Abstract/forgi_revival.py
|
Python
|
gpl-2.0
| 1,781
|
# -*- coding: utf-8 -*-
'''\
autoremove command
'''
#*****************************************************************************
# Copyright (C) 2009 Giuseppe Ottaviano <giuott@gmail.com>
#
# Distributed under the terms of the GPL License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import re
from optparse import make_option
from bpt import UserError, log
from bpt.ui.command import Command
from bpt.box import require_box
class autoremove(Command):
'''Remove disabled packages not matching the given regexps'''
usage_args = ''
def __init__(self):
options = [make_option('-e', '--exclude', action='append',
dest='exclude',
default=[],
help='Do not remove packages matching REGEX. Can be specified multiple times.',
metavar='REGEX')
]
Command.__init__(self, options)
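    # Illustrative invocation (the 'bpt' front-end name is assumed; --exclude
    # is the option defined above):
    #   bpt autoremove -e 'python.*' -e 'gcc.*'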
def _run(self, config, cmd_options, cmd_args):
require_box(config)
box = config.box
regexps = [re.compile(pattern + '$', re.IGNORECASE) for pattern in cmd_options.exclude]
to_remove = []
for pkg in box.packages():
if pkg.enabled:
continue
for regexp in regexps:
if regexp.match(pkg.name):
break
else: # no exclude regexp matched
to_remove.append(pkg)
if not to_remove:
log.info('No packages to remove')
return 0
print 'The following packages will be removed:'
for pkg in to_remove:
print '\t%s' % pkg.name
answer = raw_input('Are you sure [y/N]? ')
if answer.lower() != 'y':
return 0
for pkg in to_remove:
box.disable_package(pkg, remove=True)
|
ot/bpt
|
bpt/ui/commands/autoremove.py
|
Python
|
gpl-2.0
| 2,003
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.utils import cint, formatdate
@frappe.whitelist(allow_guest=True)
def send_message(subject="Website Query", message="", sender="", status="Open"):
from frappe.www.contact import send_message as website_send_message
lead = customer = None
website_send_message(subject, message, sender)
customer = frappe.db.sql("""select distinct dl.link_name from `tabDynamic Link` dl
left join `tabContact` c on dl.parent=c.name where dl.link_doctype='Customer'
and c.email_id='{email_id}'""".format(email_id=sender))
if not customer:
lead = frappe.db.get_value('Lead', dict(email_id=sender))
if not lead:
new_lead = frappe.get_doc(dict(
doctype='Lead',
email_id = sender,
lead_name = sender.split('@')[0].title()
)).insert(ignore_permissions=True)
opportunity = frappe.get_doc(dict(
doctype='Opportunity',
enquiry_from = 'Customer' if customer else 'Lead',
status = 'Open',
title = subject,
to_discuss=message
))
if customer:
opportunity.customer = customer[0][0]
elif lead:
opportunity.lead = lead
else:
opportunity.lead = new_lead.name
opportunity.insert(ignore_permissions=True)
comm = frappe.get_doc({
"doctype":"Communication",
"subject": subject,
"content": message,
"sender": sender,
"sent_or_received": "Received",
'reference_doctype': 'Opportunity',
'reference_name': opportunity.name
})
comm.insert(ignore_permissions=True)
return "okay"
|
hassanibi/erpnext
|
erpnext/templates/utils.py
|
Python
|
gpl-3.0
| 1,754
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test.ui'
#
# Created by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.pushButton = QtGui.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(330, 110, 100, 31))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 27))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.pushButton.setText(_translate("MainWindow", "TEst!", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
patta42/pySICM
|
pySICMgui/test.py
|
Python
|
gpl-3.0
| 2,073
|
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver library for NetApp C-mode block storage systems.
"""
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(volume_utils.TraceWrapperMetaclass)
class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
data_motion.DataMotionMixin):
"""NetApp block storage library for Data ONTAP (Cluster-mode).
Version history:
.. code-block:: none
1.0.0 - Driver development before Wallaby
2.0.0 - Add support for QoS minimums specs
Add support for dynamic Adaptive QoS policy group creation
3.0.0 - Add support for Intra-cluster Storage assisted volume migration
Add support for revert to snapshot
"""
VERSION = "3.0.0"
REQUIRED_CMODE_FLAGS = ['netapp_vserver']
def __init__(self, driver_name, driver_protocol, **kwargs):
super(NetAppBlockStorageCmodeLibrary, self).__init__(driver_name,
driver_protocol,
**kwargs)
self.configuration.append_config_values(na_opts.netapp_cluster_opts)
self.driver_mode = 'cluster'
self.failed_over_backend_name = kwargs.get('active_backend_id')
self.failed_over = self.failed_over_backend_name is not None
self.replication_enabled = (
True if self.get_replication_backend_names(
self.configuration) else False)
def do_setup(self, context):
super(NetAppBlockStorageCmodeLibrary, self).do_setup(context)
na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)
# cDOT API client
self.zapi_client = dot_utils.get_client_for_backend(
self.failed_over_backend_name or self.backend_name)
self.vserver = self.zapi_client.vserver
# Storage service catalog
self.ssc_library = capabilities.CapabilitiesLibrary(
self.driver_protocol, self.vserver, self.zapi_client,
self.configuration)
self.ssc_library.check_api_permissions()
self.using_cluster_credentials = (
self.ssc_library.cluster_user_supported())
# Performance monitoring library
self.perf_library = perf_cmode.PerformanceCmodeLibrary(
self.zapi_client)
def _update_zapi_client(self, backend_name):
"""Set cDOT API client for the specified config backend stanza name."""
self.zapi_client = dot_utils.get_client_for_backend(backend_name)
self.vserver = self.zapi_client.vserver
self.ssc_library._update_for_failover(self.zapi_client,
self._get_flexvol_to_pool_map())
ssc = self.ssc_library.get_ssc()
self.perf_library._update_for_failover(self.zapi_client, ssc)
# Clear LUN table cache
self.lun_table = {}
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
if not self._get_flexvol_to_pool_map():
msg = _('No pools are available for provisioning volumes. '
'Ensure that the configuration option '
'netapp_pool_name_search_pattern is set correctly.')
raise na_utils.NetAppDriverException(msg)
self._add_looping_tasks()
super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval."""
# Note(cknight): Run the update once in the current thread to prevent a
# race with the first invocation of _update_volume_stats.
self._update_ssc()
# Add the task that updates the slow-changing storage service catalog
self.loopingcalls.add_task(self._update_ssc,
loopingcalls.ONE_HOUR,
loopingcalls.ONE_HOUR)
self.loopingcalls.add_task(
self._handle_housekeeping_tasks,
loopingcalls.TEN_MINUTES,
0)
super(NetAppBlockStorageCmodeLibrary, self)._add_looping_tasks()
def _handle_housekeeping_tasks(self):
"""Handle various cleanup activities."""
active_backend = self.failed_over_backend_name or self.backend_name
# Add the task that harvests soft-deleted QoS policy groups.
if self.using_cluster_credentials:
self.zapi_client.remove_unused_qos_policy_groups()
LOG.debug("Current service state: Replication enabled: %("
"replication)s. Failed-Over: %(failed)s. Active Backend "
"ID: %(active)s",
{
'replication': self.replication_enabled,
'failed': self.failed_over,
'active': active_backend,
})
# Create pool mirrors if whole-backend replication configured
if self.replication_enabled and not self.failed_over:
self.ensure_snapmirrors(
self.configuration, self.backend_name,
self.ssc_library.get_ssc_flexvol_names())
def _handle_ems_logging(self):
"""Log autosupport messages."""
base_ems_message = dot_utils.build_ems_log_message_0(
self.driver_name, self.app_version)
self.zapi_client.send_ems_log_message(base_ems_message)
pool_ems_message = dot_utils.build_ems_log_message_1(
self.driver_name, self.app_version, self.vserver,
self.ssc_library.get_ssc_flexvol_names(), [])
self.zapi_client.send_ems_log_message(pool_ems_message)
def _create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group_name=None,
qos_policy_group_is_adaptive=False):
"""Creates a LUN, handling Data ONTAP differences as needed."""
self.zapi_client.create_lun(
volume_name, lun_name, size, metadata, qos_policy_group_name,
qos_policy_group_is_adaptive)
def _create_lun_handle(self, metadata, vserver=None):
"""Returns LUN handle based on filer type."""
vserver = vserver or self.vserver
return '%s:%s' % (vserver, metadata['Path'])
def _find_mapped_lun_igroup(self, path, initiator_list):
"""Find an igroup for a LUN mapped to the given initiator(s)."""
initiator_igroups = self.zapi_client.get_igroup_by_initiators(
initiator_list)
lun_maps = self.zapi_client.get_lun_map(path)
if initiator_igroups and lun_maps:
for igroup in initiator_igroups:
igroup_name = igroup['initiator-group-name']
if igroup_name.startswith(na_utils.OPENSTACK_PREFIX):
for lun_map in lun_maps:
if lun_map['initiator-group'] == igroup_name:
return igroup_name, lun_map['lun-id']
return None, None
def _clone_lun(self, name, new_name, space_reserved=None,
qos_policy_group_name=None, src_block=0, dest_block=0,
block_count=0, source_snapshot=None, is_snapshot=False,
qos_policy_group_is_adaptive=False):
"""Clone LUN with the given handle to the new name."""
if not space_reserved:
space_reserved = self.lun_space_reservation
metadata = self._get_lun_attr(name, 'metadata')
volume = metadata['Volume']
self.zapi_client.clone_lun(
volume, name, new_name, space_reserved,
qos_policy_group_name=qos_policy_group_name,
src_block=src_block, dest_block=dest_block,
block_count=block_count,
source_snapshot=source_snapshot,
is_snapshot=is_snapshot,
qos_policy_group_is_adaptive=qos_policy_group_is_adaptive)
LOG.debug("Cloned LUN with new name %s", new_name)
lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
path='/vol/%s/%s'
% (volume, new_name))
if len(lun) == 0:
msg = _("No cloned LUN named %s found on the filer")
raise exception.VolumeBackendAPIException(data=msg % new_name)
clone_meta = self._create_lun_meta(lun[0])
self._add_lun_to_table(
block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'],
clone_meta['Path']),
new_name,
lun[0].get_child_content('size'),
clone_meta))
def _create_lun_meta(self, lun):
"""Creates LUN metadata dictionary."""
self.zapi_client.check_is_naelement(lun)
meta_dict = {}
meta_dict['Vserver'] = lun.get_child_content('vserver')
meta_dict['Volume'] = lun.get_child_content('volume')
meta_dict['Qtree'] = lun.get_child_content('qtree')
meta_dict['Path'] = lun.get_child_content('path')
meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
meta_dict['SpaceReserved'] = \
lun.get_child_content('is-space-reservation-enabled')
meta_dict['UUID'] = lun.get_child_content('uuid')
return meta_dict
def _get_fc_target_wwpns(self, include_partner=True):
return self.zapi_client.get_fc_target_wwpns()
def _update_volume_stats(self, filter_function=None,
goodness_function=None):
"""Retrieve backend stats."""
LOG.debug('Updating volume stats')
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.driver_name
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = self.driver_protocol
data['pools'] = self._get_pool_stats(
filter_function=filter_function,
goodness_function=goodness_function)
data['sparse_copy_volume'] = True
# Used for service state report
data['replication_enabled'] = self.replication_enabled
self._stats = data
def _get_pool_stats(self, filter_function=None, goodness_function=None):
"""Retrieve pool (Data ONTAP flexvol) stats.
Pool statistics are assembled from static driver capabilities, the
Storage Service Catalog of flexvol attributes, and real-time capacity
and controller utilization metrics. The pool name is the flexvol name.
"""
pools = []
ssc = self.ssc_library.get_ssc()
if not ssc:
return pools
# Utilization and performance metrics require cluster-scoped
# credentials
if self.using_cluster_credentials:
# Get up-to-date node utilization metrics just once
self.perf_library.update_performance_cache(ssc)
# Get up-to-date aggregate capacities just once
aggregates = self.ssc_library.get_ssc_aggregates()
aggr_capacities = self.zapi_client.get_aggregate_capacities(
aggregates)
else:
aggr_capacities = {}
for ssc_vol_name, ssc_vol_info in ssc.items():
pool = dict()
# Add storage service catalog data
pool.update(ssc_vol_info)
# Add driver capabilities and config info
pool['QoS_support'] = self.using_cluster_credentials
pool['multiattach'] = True
pool['online_extend_support'] = True
pool['consistencygroup_support'] = True
pool['consistent_group_snapshot_enabled'] = True
pool['reserved_percentage'] = self.reserved_percentage
pool['max_over_subscription_ratio'] = (
self.max_over_subscription_ratio)
# Add up-to-date capacity info
capacity = self.zapi_client.get_flexvol_capacity(
flexvol_name=ssc_vol_name)
size_total_gb = capacity['size-total'] / units.Gi
pool['total_capacity_gb'] = na_utils.round_down(size_total_gb)
size_available_gb = capacity['size-available'] / units.Gi
pool['free_capacity_gb'] = na_utils.round_down(size_available_gb)
if self.configuration.netapp_driver_reports_provisioned_capacity:
luns = self.zapi_client.get_lun_sizes_by_volume(
ssc_vol_name)
provisioned_cap = 0
for lun in luns:
lun_name = lun['path'].split('/')[-1]
# Filter LUNs that match the volume name template in order
# to exclude snapshots
if volume_utils.extract_id_from_volume_name(lun_name):
provisioned_cap = provisioned_cap + lun['size']
pool['provisioned_capacity_gb'] = na_utils.round_down(
float(provisioned_cap) / units.Gi)
if self.using_cluster_credentials:
dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent(
ssc_vol_name)
else:
dedupe_used = 0.0
pool['netapp_dedupe_used_percent'] = na_utils.round_down(
dedupe_used)
aggregate_name = ssc_vol_info.get('netapp_aggregate')
aggr_capacity = aggr_capacities.get(aggregate_name, {})
pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
'percent-used', 0)
# Add utilization data
utilization = self.perf_library.get_node_utilization_for_pool(
ssc_vol_name)
pool['utilization'] = na_utils.round_down(utilization)
pool['filter_function'] = filter_function
pool['goodness_function'] = goodness_function
# Add replication capabilities/stats
pool.update(
self.get_replication_backend_stats(self.configuration))
pools.append(pool)
return pools
def _update_ssc(self):
"""Refresh the storage service catalog with the latest set of pools."""
self.ssc_library.update_ssc(self._get_flexvol_to_pool_map())
def _get_flexvol_to_pool_map(self):
"""Get the flexvols that match the pool name search pattern.
The map is of the format suitable for seeding the storage service
catalog: {<flexvol_name> : {'pool_name': <flexvol_name>}}
"""
pool_regex = na_utils.get_pool_name_filter_regex(self.configuration)
pools = {}
flexvol_names = self.zapi_client.list_flexvols()
for flexvol_name in flexvol_names:
msg_args = {
'flexvol': flexvol_name,
'vol_pattern': pool_regex.pattern,
}
if pool_regex.match(flexvol_name):
msg = "Volume '%(flexvol)s' matches %(vol_pattern)s"
LOG.debug(msg, msg_args)
pools[flexvol_name] = {'pool_name': flexvol_name}
else:
msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s"
LOG.debug(msg, msg_args)
return pools
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume)
try:
qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
volume)
except exception.Invalid:
# Delete even if there was invalid qos policy specified for the
# volume.
qos_policy_group_info = None
self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s'
LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info})
def _setup_qos_for_volume(self, volume, extra_specs):
try:
qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
volume, extra_specs)
except exception.Invalid:
msg = _('Invalid QoS specification detected while getting QoS '
'policy for volume %s') % volume['id']
raise exception.VolumeBackendAPIException(data=msg)
pool = volume_utils.extract_host(volume['host'], level='pool')
qos_min_support = self.ssc_library.is_qos_min_supported(pool)
self.zapi_client.provision_qos_policy_group(qos_policy_group_info,
qos_min_support)
return qos_policy_group_info
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
if self.replication_enabled:
return {'replication_status': fields.ReplicationStatus.ENABLED}
def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
is_adaptive = na_utils.is_qos_policy_group_spec_adaptive(
qos_policy_group_info)
self.zapi_client.mark_qos_policy_group_for_deletion(
qos_policy_group_info, is_adaptive)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
"""
try:
qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
volume)
except exception.Invalid:
# Unmanage even if there was invalid qos policy specified for the
# volume.
qos_policy_group_info = None
self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume)
def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover a backend to a secondary replication target."""
return self._failover_host(volumes, secondary_id=secondary_id)
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
ssc = self.ssc_library.get_ssc()
return list(ssc.keys())
def create_group(self, group):
"""Driver entry point for creating a generic volume group.
ONTAP does not maintain an actual Group construct. As a result, no
communication to the backend is necessary for generic volume group
creation.
:returns: Hard-coded model update for generic volume group model.
"""
model_update = {'status': fields.GroupStatus.AVAILABLE}
return model_update
def delete_group(self, group, volumes):
"""Driver entry point for deleting a group.
:returns: Updated group model and list of volume models
for the volumes that were deleted.
"""
model_update = {'status': fields.GroupStatus.DELETED}
volumes_model_update = []
for volume in volumes:
try:
self._delete_lun(volume['name'])
volumes_model_update.append(
{'id': volume['id'], 'status': 'deleted'})
except Exception:
volumes_model_update.append(
{'id': volume['id'],
'status': 'error_deleting'})
LOG.exception("Volume %(vol)s in the group could not be "
"deleted.", {'vol': volume})
return model_update, volumes_model_update
def update_group(self, group, add_volumes=None, remove_volumes=None):
"""Driver entry point for updating a generic volume group.
Since no actual group construct is ever created in ONTAP, it is not
necessary to update any metadata on the backend. Since this is a NO-OP,
there is guaranteed to be no change in any of the volumes' statuses.
"""
return None, None, None
def create_group_snapshot(self, group_snapshot, snapshots):
"""Creates a Cinder group snapshot object.
The Cinder group snapshot object is created by making use of an
ephemeral ONTAP consistency group snapshot in order to provide
write-order consistency for a set of flexvol snapshots. First, a list
of the flexvols backing the given Cinder group must be gathered. An
ONTAP group-snapshot of these flexvols will create a snapshot copy of
all the Cinder volumes in the generic volume group. For each Cinder
volume in the group, it is then necessary to clone its backing LUN from
the ONTAP cg-snapshot. The naming convention used for the clones is
what indicates the clone's role as a Cinder snapshot and its inclusion
in a Cinder group. The ONTAP cg-snapshot of the flexvols is no longer
required after having cloned the LUNs backing the Cinder volumes in
the Cinder group.
:returns: An implicit update for group snapshot and snapshots models
that is interpreted by the manager to set their models to
available.
"""
try:
if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
self._create_consistent_group_snapshot(group_snapshot,
snapshots)
else:
for snapshot in snapshots:
self._create_snapshot(snapshot)
except Exception as ex:
err_msg = (_("Create group snapshot failed (%s).") % ex)
LOG.exception(err_msg, resource=group_snapshot)
raise na_utils.NetAppDriverException(err_msg)
return None, None
def _create_consistent_group_snapshot(self, group_snapshot, snapshots):
flexvols = set()
for snapshot in snapshots:
flexvols.add(volume_utils.extract_host(
snapshot['volume']['host'], level='pool'))
self.zapi_client.create_cg_snapshot(flexvols, group_snapshot['id'])
for snapshot in snapshots:
self._clone_lun(snapshot['volume']['name'], snapshot['name'],
source_snapshot=group_snapshot['id'])
for flexvol in flexvols:
try:
self.zapi_client.wait_for_busy_snapshot(
flexvol, group_snapshot['id'])
self.zapi_client.delete_snapshot(
flexvol, group_snapshot['id'])
except exception.SnapshotIsBusy:
self.zapi_client.mark_snapshot_for_deletion(
flexvol, group_snapshot['id'])
def delete_group_snapshot(self, group_snapshot, snapshots):
"""Delete LUNs backing each snapshot in the group snapshot.
:returns: An implicit update for snapshots models that is interpreted
by the manager to set their models to deleted.
"""
for snapshot in snapshots:
self._delete_lun(snapshot['name'])
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
return None, None
def create_group_from_src(self, group, volumes, group_snapshot=None,
snapshots=None, source_group=None,
source_vols=None):
"""Creates a group from a group snapshot or a group of cinder vols.
:returns: An implicit update for the volumes model that is
interpreted by the manager as a successful operation.
"""
LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes]))
volume_model_updates = []
if group_snapshot:
vols = zip(volumes, snapshots)
for volume, snapshot in vols:
source = {
'name': snapshot['name'],
'size': snapshot['volume_size'],
}
volume_model_update = self._clone_source_to_destination(
source, volume)
if volume_model_update is not None:
volume_model_update['id'] = volume['id']
volume_model_updates.append(volume_model_update)
else:
vols = zip(volumes, source_vols)
for volume, old_src_vref in vols:
src_lun = self._get_lun_from_table(old_src_vref['name'])
source = {'name': src_lun.name, 'size': old_src_vref['size']}
volume_model_update = self._clone_source_to_destination(
source, volume)
if volume_model_update is not None:
volume_model_update['id'] = volume['id']
volume_model_updates.append(volume_model_update)
return None, volume_model_updates
def _move_lun(self, volume, src_ontap_volume, dest_ontap_volume,
dest_lun_name=None):
"""Moves LUN from an ONTAP volume to another."""
job_uuid = self.zapi_client.start_lun_move(
volume.name, dest_ontap_volume, src_ontap_volume=src_ontap_volume,
dest_lun_name=dest_lun_name)
LOG.debug('Start moving LUN %s from %s to %s. '
'Job UUID is %s.', volume.name, src_ontap_volume,
dest_ontap_volume, job_uuid)
def _wait_lun_move_complete():
move_status = self.zapi_client.get_lun_move_status(job_uuid)
LOG.debug('Waiting for LUN move job %s to complete. '
'Current status is: %s.', job_uuid,
move_status['job-status'])
if not move_status:
status_error_msg = (_("Error moving LUN %s. The "
"corresponding Job UUID % doesn't "
"exist."))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id, job_uuid))
elif move_status['job-status'] == 'destroyed':
status_error_msg = (_('Error moving LUN %s. %s.'))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id,
move_status['last-failure-reason']))
elif move_status['job-status'] == 'complete':
raise loopingcall.LoopingCallDone()
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_lun_move_complete)
timer.start(
interval=15,
timeout=self.configuration.netapp_migrate_volume_timeout
).wait()
except loopingcall.LoopingCallTimeOut:
msg = (_('Timeout waiting to complete move operation of LUN %s.'))
raise na_utils.NetAppDriverTimeout(msg % volume.id)
def _cancel_lun_copy(self, job_uuid, volume, dest_pool, dest_backend_name):
"""Cancel an on-going lun copy operation."""
try:
# NOTE(sfernand): Another approach would be to first check whether
# the copy operation is already in the `destroying` or `destroyed`
# state before issuing the cancel.
self.zapi_client.cancel_lun_copy(job_uuid)
except na_utils.NetAppDriverException:
dest_client = dot_utils.get_client_for_backend(dest_backend_name)
lun_path = '/vol/%s/%s' % (dest_pool, volume.name)
try:
dest_client.destroy_lun(lun_path)
except Exception:
LOG.warning('Error cleaning up LUN %s in destination volume. '
'Verify if destination volume still exists in '
'pool %s and delete it manually to avoid unused '
'resources.', lun_path, dest_pool)
def _copy_lun(self, volume, src_ontap_volume, src_vserver,
dest_ontap_volume, dest_vserver, dest_lun_name=None,
dest_backend_name=None, cancel_on_error=False):
"""Copies LUN from an ONTAP volume to another."""
job_uuid = self.zapi_client.start_lun_copy(
volume.name, dest_ontap_volume, dest_vserver,
src_ontap_volume=src_ontap_volume, src_vserver=src_vserver,
dest_lun_name=dest_lun_name)
LOG.debug('Start copying LUN %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s to '
'%(dest_vserver)s:%(dest_ontap_vol)s. Job UUID is %(job)s.',
{'vol': volume.name, 'src_vserver': src_vserver,
'src_ontap_vol': src_ontap_volume,
'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_ontap_volume,
'job': job_uuid})
def _wait_lun_copy_complete():
copy_status = self.zapi_client.get_lun_copy_status(job_uuid)
LOG.debug('Waiting for LUN copy job %s to complete. Current '
'status is: %s.', job_uuid, copy_status['job-status'])
if not copy_status:
status_error_msg = (_("Error copying LUN %s. The "
"corresponding Job UUID % doesn't "
"exist."))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id, job_uuid))
elif copy_status['job-status'] == 'destroyed':
status_error_msg = (_('Error copying LUN %s. %s.'))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id,
copy_status['last-failure-reason']))
elif copy_status['job-status'] == 'complete':
raise loopingcall.LoopingCallDone()
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_lun_copy_complete)
timer.start(
interval=10,
timeout=self.configuration.netapp_migrate_volume_timeout
).wait()
except Exception as e:
with excutils.save_and_reraise_exception() as ctxt:
if cancel_on_error:
self._cancel_lun_copy(job_uuid, volume, dest_ontap_volume,
dest_backend_name=dest_backend_name)
if isinstance(e, loopingcall.LoopingCallTimeOut):
ctxt.reraise = False
msg = (_('Timeout waiting volume %s to complete '
'migration.'))
raise na_utils.NetAppDriverTimeout(msg % volume.id)
def _finish_migrate_volume_to_vserver(self, src_volume):
"""Finish volume migration to another vserver within the cluster."""
# The source volume can be safely deleted after a successful migration.
self.delete_volume(src_volume)
# LUN cache for current backend can be deleted after migration.
self._delete_lun_from_table(src_volume.name)
def _migrate_volume_to_vserver(self, volume, src_pool, src_vserver,
dest_pool, dest_vserver, dest_backend_name):
"""Migrate volume to a another vserver within the same cluster."""
LOG.info('Migrating volume %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s to '
'%(dest_vserver)s:%(dest_ontap_vol)s.',
{'vol': volume.id, 'src_vserver': src_vserver,
'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_pool})
# NOTE(sfernand): Migrating to a different vserver relies on copy
# operations, which are always disruptive as they require the
# destination volume to be added as a new block device to the Nova
# instance. This differs from migrating volumes within the same
# vserver, where a LUN move operation can be used without the need
# to change the iSCSI target.
if volume.status != fields.VolumeStatus.AVAILABLE:
msg = _("Volume status must be 'available' in order to "
"migrate volume to another vserver.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
vserver_peer_application = 'lun_copy'
self.create_vserver_peer(src_vserver, self.backend_name, dest_vserver,
[vserver_peer_application])
self._copy_lun(volume, src_pool, src_vserver, dest_pool,
dest_vserver, dest_backend_name=dest_backend_name,
cancel_on_error=True)
self._finish_migrate_volume_to_vserver(volume)
LOG.info('Successfully migrated volume %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s '
'to %(dest_vserver)s:%(dest_ontap_vol)s.',
{'vol': volume.id, 'src_vserver': src_vserver,
'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_pool})
# No model updates are necessary, so return empty dict
return {}
def _finish_migrate_volume_to_pool(self, src_volume, dest_pool):
"""Finish volume migration to another pool within the same vserver."""
# LUN cache must be updated with new path and volume information.
lun = self._get_lun_from_table(src_volume.name)
new_lun_path = '/vol/%s/%s' % (dest_pool, src_volume.name)
lun.metadata['Path'] = new_lun_path
lun.metadata['Volume'] = dest_pool
def _migrate_volume_to_pool(self, volume, src_pool, dest_pool, vserver,
dest_backend_name):
"""Migrate volume to another Cinder Pool within the same vserver."""
LOG.info('Migrating volume %(vol)s from pool %(src)s to '
'%(dest)s within vserver %(vserver)s.',
{'vol': volume.id, 'src': src_pool, 'dest': dest_pool,
'vserver': vserver})
updates = {}
try:
self._move_lun(volume, src_pool, dest_pool)
except na_utils.NetAppDriverTimeout:
error_msg = (_('Timeout waiting for volume %s to complete migration. '
'Volume status is set to maintenance to prevent '
'performing operations with this volume. Check the '
'migration status on the storage side and set the '
'volume status manually if migration succeeded.'))
LOG.warning(error_msg, volume.id)
updates['status'] = fields.VolumeStatus.MAINTENANCE
except na_utils.NetAppDriverException as e:
error_msg = (_('Failed to migrate volume %(vol)s from pool '
'%(src)s to %(dest)s. %(err)s'))
raise na_utils.NetAppDriverException(
error_msg % {'vol': volume.id, 'src': src_pool,
'dest': dest_pool, 'err': e})
self._finish_migrate_volume_to_pool(volume, dest_pool)
LOG.info('Successfully migrated volume %(vol)s from pool %(src)s '
'to %(dest)s within vserver %(vserver)s.',
{'vol': volume.id, 'src': src_pool, 'dest': dest_pool,
'vserver': vserver})
return updates
def migrate_volume(self, context, volume, host):
"""Migrate Cinder volume to the specified pool or vserver."""
return self.migrate_volume_ontap_assisted(
volume, host, self.backend_name, self.configuration.netapp_vserver)
def revert_to_snapshot(self, volume, snapshot):
"""Driver entry point for reverting volume to snapshot."""
try:
self._revert_to_snapshot(volume, snapshot)
except Exception:
raise exception.VolumeBackendAPIException(
"Revert snapshot failed.")
def _revert_to_snapshot(self, volume, snapshot):
"""Sets up all required resources for _swap_luns.
If _swap_luns fails, the cloned LUN is destroyed.
"""
new_lun_name = self._clone_snapshot(snapshot["name"])
LOG.debug("Cloned from snapshot: %s.", new_lun_name)
lun = self._get_lun_from_table(volume["name"])
volume_path = lun.metadata["Path"]
seg = volume_path.split("/")
lun_name = seg[-1]
flexvol_name = seg[2]
try:
self._swap_luns(lun_name, new_lun_name, flexvol_name)
except Exception:
LOG.error("Swapping LUN from %s to %s failed.", lun_name,
new_lun_name)
with excutils.save_and_reraise_exception():
try:
LOG.debug("Deleting temporary reverted LUN %s.",
new_lun_name)
new_lun_path = "/vol/%s/%s" % (flexvol_name, new_lun_name)
self.zapi_client.destroy_lun(new_lun_path)
except Exception:
LOG.error("Failure deleting temporary reverted LUN %s. "
"A manual deletion is required.", new_lun_name)
def _clone_snapshot(self, snapshot_name):
"""Returns the name of the LUN cloned from snapshot.
Creates a LUN with same metadata as original LUN and then clones
from snapshot. If clone operation fails, the new LUN is deleted.
"""
snapshot_lun = self._get_lun_from_table(snapshot_name)
snapshot_path = snapshot_lun.metadata["Path"]
lun_name = snapshot_path.split("/")[-1]
flexvol_name = snapshot_path.split("/")[2]
LOG.info("Cloning LUN %s from snapshot %s in volume %s.", lun_name,
snapshot_name, flexvol_name)
metadata = snapshot_lun.metadata
block_count = self._get_lun_block_count(snapshot_path)
if block_count == 0:
msg = _("%s cannot be reverted using clone operation"
" as it contains no blocks.")
raise exception.VolumeBackendAPIException(data=msg % snapshot_name)
new_snap_name = "new-%s" % snapshot_name
self.zapi_client.create_lun(
flexvol_name, new_snap_name,
six.text_type(snapshot_lun.size), metadata)
try:
self._clone_lun(snapshot_name, new_snap_name,
block_count=block_count)
return new_snap_name
except Exception:
with excutils.save_and_reraise_exception():
try:
new_lun_path = "/vol/%s/%s" % (flexvol_name, new_snap_name)
self.zapi_client.destroy_lun(new_lun_path)
except Exception:
LOG.error("Failure deleting temporary reverted LUN %s. "
"A manual deletion is required.", new_snap_name)
def _swap_luns(self, original_lun, new_lun, flexvol_name):
"""Swaps cloned and original LUNs using a temporary LUN.
Moves the original LUN to a temporary path, then moves the cloned LUN
to the original path (if this fails, moves the temporary LUN back as
original LUN) and finally destroys the LUN with temporary path.
"""
tmp_lun = "tmp-%s" % original_lun
original_path = "/vol/%s/%s" % (flexvol_name, original_lun)
tmp_path = "/vol/%s/%s" % (flexvol_name, tmp_lun)
new_path = "/vol/%s/%s" % (flexvol_name, new_lun)
LOG.debug("Original Path: %s.", original_path)
LOG.debug("Temporary Path: %s.", tmp_path)
LOG.debug("New Path %s.", new_path)
try:
self.zapi_client.move_lun(original_path, tmp_path)
except Exception:
msg = _("Failure moving original LUN from %s to %s." %
(original_path, tmp_path))
raise exception.VolumeBackendAPIException(data=msg)
try:
self.zapi_client.move_lun(new_path, original_path)
except Exception:
LOG.debug("Move temporary reverted LUN failed. Moving back "
"original LUN to original path.")
try:
self.zapi_client.move_lun(tmp_path, original_path)
except Exception:
LOG.error("Could not move original LUN path from %s to %s. "
"Cinder may lose the volume management. Please, you "
"should move it back manually.",
tmp_path, original_path)
msg = _("Failure moving temporary reverted LUN from %s to %s.")
raise exception.VolumeBackendAPIException(
data=msg % (new_path, original_path))
try:
self.zapi_client.destroy_lun(tmp_path)
except Exception:
LOG.error("Failure deleting old LUN %s. A manual deletion "
"is required.", tmp_lun)
|
openstack/cinder
|
cinder/volume/drivers/netapp/dataontap/block_cmode.py
|
Python
|
apache-2.0
| 42,861
|
""":mod:`libearth.tz` --- Basic timezone implementations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Almost all of this module comes from the official documentation of the
:mod:`datetime` module in the Python standard library.
.. data:: utc
(:class:`Utc`, :class:`datetime.timezone`) The :class:`~datetime.tzinfo`
instance that represents UTC. It's an instance of :class:`Utc`
in Python 2 (which provide no built-in fixed-offset
:class:`~datetime.tzinfo` implementation), and an instance of
:class:`~datetime.timezone` with zero offset in Python 3.
"""
import datetime
__all__ = 'FixedOffset', 'Utc', 'now', 'utc'
class Utc(datetime.tzinfo):
"""UTC.
In most cases, it doesn't need to be directly instantiated:
there's already the :const:`utc` value.
"""
def __init__(self):
self.zero = datetime.timedelta(0)
def utcoffset(self, dt):
return self.zero
def dst(self, dt):
return self.zero
def tzname(self, dt):
return 'UTC'
def __repr__(self):
cls = type(self)
return '{0.__module__}.{0.__name__}()'.format(cls)
class FixedOffset(datetime.tzinfo):
"""Fixed offset in minutes east from UTC.
>>> kst = FixedOffset(9 * 60, name='Asia/Seoul') # KST +09:00
>>> current = now()
>>> current
datetime.datetime(2013, 8, 15, 3, 18, 37, 404562, tzinfo=libearth.tz.Utc())
>>> current.astimezone(kst)
datetime.datetime(2013, 8, 15, 12, 18, 37, 404562,
tzinfo=<libearth.tz.FixedOffset Asia/Seoul>)
"""
def __init__(self, offset, name=None):
self.offset = datetime.timedelta(minutes=offset)
self.dst_ = datetime.timedelta(0)
self.name = name or '{h:+03d}:{m:02d}'.format(h=offset // 60,
m=offset % 60)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
return self.dst_
def tzname(self, dt):
return self.name
def __repr__(self):
cls = type(self)
return '<{0.__module__}.{0.__name__} {1}>'.format(cls, self.name)
try:
utc = datetime.timezone.utc
except AttributeError:
utc = Utc()
def now():
"""Return the current :class:`~datetime.datetime` with the proper
:class:`~datetime.tzinfo` setting.
>>> now()
datetime.datetime(2013, 8, 15, 3, 17, 11, 892272, tzinfo=libearth.tz.Utc())
>>> now()
datetime.datetime(2013, 8, 15, 3, 17, 17, 532483, tzinfo=libearth.tz.Utc())
"""
return datetime.datetime.utcnow().replace(tzinfo=utc)
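# --- Hedged example (editor addition) ---
# A quick round trip between utc and a FixedOffset zone, guarded so it only
# runs when the module is executed directly. The timestamp is arbitrary.
if __name__ == '__main__':  # pragma: no cover
    kst = FixedOffset(9 * 60, name='Asia/Seoul')
    moment = datetime.datetime(2013, 8, 15, 3, 0, 0, tzinfo=utc)
    print(moment.astimezone(kst))   # 2013-08-15 12:00:00+09:00
    print(kst.utcoffset(moment))    # 9:00:00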
|
0hoo/libearth
|
libearth/tz.py
|
Python
|
gpl-2.0
| 2,576
|
import unittest
from os.path import join
from unittest import mock
from tests.recipes.recipe_lib_test import RecipeCtx
class TestPandasRecipe(RecipeCtx, unittest.TestCase):
"""
TestCase for recipe :mod:`~pythonforandroid.recipes.pandas`
"""
recipe_name = "pandas"
@mock.patch("pythonforandroid.recipe.Recipe.check_recipe_choices")
@mock.patch("pythonforandroid.build.ensure_dir")
@mock.patch("pythonforandroid.archs.glob")
@mock.patch("pythonforandroid.archs.find_executable")
def test_get_recipe_env(
self,
mock_find_executable,
mock_glob,
mock_ensure_dir,
mock_check_recipe_choices,
):
"""
Test that method
:meth:`~pythonforandroid.recipes.pandas.PandasRecipe.get_recipe_env`
returns the expected flags
"""
mock_find_executable.return_value = (
"/opt/android/android-ndk/toolchains/"
"llvm/prebuilt/linux-x86_64/bin/clang"
)
mock_glob.return_value = ["llvm"]
mock_check_recipe_choices.return_value = sorted(
self.ctx.recipe_build_order
)
numpy_includes = join(
self.ctx.get_python_install_dir(), "numpy/core/include",
)
env = self.recipe.get_recipe_env(self.arch)
self.assertIn(numpy_includes, env["NUMPY_INCLUDES"])
self.assertIn(" -landroid", env["LDFLAGS"])
# make sure that the mocked methods are actually called
mock_glob.assert_called()
mock_ensure_dir.assert_called()
mock_find_executable.assert_called()
mock_check_recipe_choices.assert_called()
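# --- Hedged note with a self-contained example (editor addition) ---
# The parameter order in the test above mirrors how stacked mock.patch
# decorators are applied: bottom-up, so the innermost (lowest) decorator
# supplies the first mock argument. The minimal test below demonstrates the
# same rule with standard-library targets only.
class DecoratorOrderExample(unittest.TestCase):
    @mock.patch("os.path.exists")   # applied second -> second mock argument
    @mock.patch("os.getcwd")        # applied first  -> first mock argument
    def test_patch_order(self, mock_getcwd, mock_exists):
        mock_getcwd.return_value = "/tmp"
        mock_exists.return_value = True
        import os
        self.assertEqual(os.getcwd(), "/tmp")
        self.assertTrue(os.path.exists("/anything"))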
|
germn/python-for-android
|
tests/recipes/test_pandas.py
|
Python
|
mit
| 1,651
|
import json
from google.appengine.ext import ndb
from datastore.models import WebResource, Indexer
from handlers.basehandler import JSONBaseHandler
__author__ = 'Lorenzo'
class ArticlesJSONv1(JSONBaseHandler):
"""
Articles JSON API Handlers.
Extended from JSONBaseHandler.
See http://hypermedia.projectchronos.eu/docs
"""
def __init__(self, *args, **kwargs):
super(ArticlesJSONv1, self).__init__(*args, **kwargs)
self._VERSION = 'v04'
self._BASEPATH = '/articles/' + self._VERSION + '/'
def get(self, obj=None):
"""
GET /articles/v04/<name>
Serve the Articles API.
:param obj: defines the namespace of the request (articles or keywords); may be an empty string
"""
self.response.headers.add_header("Access-Control-Allow-Origin", "*")
self.response.headers.add_header("Content-Type", "application/json")
# if path = / --> Base
# if path = /by --> ?type or ?keyword
# if path = /keywords/by ---> ?url
if self.request.path == self._BASEPATH:
# serve Articles Base
setattr(self, '_query_type', 'ALL')
setattr(self,
'_query',
self._query.order(-WebResource.published))
return self.response.out.write(
json.dumps(
self.return_paginated_articles(),
indent=2
)
)
elif self.request.path == self._BASEPATH + 'by':
if self.request.get('type'):
# serve Articles by type
if self.request.get('type') in tuple(WebResource.type_of._choices):
# set global handler attributes for the handler instance
setattr(self, '_query_type', 'TYPE_OF')
setattr(self,
'_query',
self._query.filter(WebResource.type_of == self.request.get('type'))
.order(WebResource.type_of, -WebResource.published))
print self._query_type
response = self.return_paginated_articles()
return self.response.out.write(
json.dumps(
response,
indent=2
)
)
else:
return self.response.out.write(
self.json_error_handler(404, 'incorrect "type" parameter value')
)
elif self.request.get('keyword'):
# serve articles by keyword
return self.response.out.write(
json.dumps(
self.return_articles_by_keyword(self.request.get('keyword')),
indent=2
)
)
else:
return self.response.out.write(
self.json_error_handler(404, 'need to define a ?type or a ?keyword')
)
elif self.request.path == self._BASEPATH + 'keywords/by':
if self.request.get('url'):
# serve keywords for a given article's url
response = self.memcache_keywords(self.request.get('url'))
return self.response.out.write(
json.dumps(
response,
indent=2
)
)
elif self.request.get('wikislug'):
# return a mapping between a label from wikipedia and keywords
pass
else:
return self.response.out.write(
self.json_error_handler(404, 'need to define a ?url')
)
elif self.request.path == self._BASEPATH + 'indexer':
# return all the terms (keywords) used by the index
try:
results = self.memcache_indexer_keywords_distinct()
except ValueError as e:
return self.response.out.write(
self.json_error_handler(404, 'wrong term')
)
return self.response.out.write(
json.dumps(results)
)
elif self.request.path == self._BASEPATH + 'indexer/by':
if self.request.get('term'):
# find the genealogy of a term in the Taxonomy
try:
results = self.memcache_indexer_keywords_distinct(self.request.get('term'))
except ValueError as e:
return self.response.out.write(
self.json_error_handler(404, 'wrong term')
)
return self.response.out.write(
json.dumps(results)
)
elif self.request.get('subject'):
pass
elif self.request.get('division'):
pass
else:
return self.response.out.write(
self.json_error_handler(404, 'need to define a ?term, ?subject or ?division')
)
elif self.request.path == self._BASEPATH + 'resources/by':
if self.request.get('type'):
# serve keywords for a given type: wikislugs, missions, events ...
pass
elif self.request.get('id'):
# serve a given resource from its id
pass
else:
return self.response.out.write(
self.json_error_handler(404, 'need to define a ?url')
)
elif isinstance(obj, int):
# serve a single article object by id
self.response.set_status(404)
response = {
"error": "not implemented",
"status": "404"
}
return self.response.out.write(
json.dumps(
response,
indent=2
)
)
else:
return self.response.out.write(
self.json_error_handler(404, 'wrong url')
)
def return_paginated_articles(self):
"""
Fetch and return a page of WebResource results using ndb.Query.fetch_page().
The query to paginate is self._query; the page boundary comes from the
optional 'bookmark' request parameter.
:return: a dict() ready for JSON serialization
"""
# Forked from https://github.com/GoogleCloudPlatform/appengine-paging-python
page_size = 25
cursor = None
next_bookmark = None
bookmark = self.request.get('bookmark')
if bookmark:
# if a bookmark is given, resume the query cursor from it and fetch the next page
cursor = ndb.Cursor.from_websafe_string(bookmark)
articles, next_cursor, more = self._query.fetch_page(page_size, start_cursor=cursor)
# assign the key for the next cursor
if more:
next_bookmark = next_cursor.to_websafe_string()
# serve the data with the link to the next bookmark
response = self.memcache_articles_pagination(articles, next_bookmark)
return response
def return_articles_by_keyword(self, kwd):
# fetch entities
webresources = self.memcache_articles_by_keyword(kwd)
response = {
"keyword": kwd,
"articles": [
w.dump_to_json()
for w in webresources
]
} if webresources else {"keyword": kwd, "articles_by_keyword": []}
return response
def build_response(self, query, bookmark):
"""
Extends super().build_response
"""
from config.config import articles_api_version
# define the right url for the endpoint
if self._query_type == 'ALL':
url = articles_api_version(self._API_VERSION) + '?bookmark='
elif self._query_type == 'TYPE_OF':
url = articles_api_version(self._API_VERSION) + 'by?type=' +\
self.request.get('type') + '&bookmark='
else:
raise ValueError('JSONBaseHandler.build_response(): self._query_type value error')
# return the dictionary output
return {
'articles': [
webres.dump_to_json()
for webres in query
],
'next': url + bookmark if bookmark else None
}
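# --- Hedged client-side sketch (editor addition) ---
# The handler pages results by returning a 'next' link built from the current
# bookmark (see build_response above), so a consumer can simply follow that
# link until it is None. The host below is a placeholder, and 'next' is
# assumed to be relative when it does not carry a scheme.
import requests

def demo_fetch_all_articles(base_url="https://example-chronos.appspot.com"):
    url = base_url + "/articles/v04/"
    articles = []
    while url:
        page = requests.get(url).json()
        articles.extend(page.get("articles", []))
        nxt = page.get("next")
        if nxt and not nxt.startswith("http"):
            nxt = base_url + nxt
        url = nxt
    return articles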
|
SpaceAppsXploration/rdfendpoints
|
handlers/articlesjsonapi.py
|
Python
|
apache-2.0
| 8,493
|
import ltask,json
from ltask import pset
class Task(ltask.Task):
#def __init__(self,params):
#super(ltask.Task,self).__init__(self,params)
def transform_params(self,p): #called in ltask.Task processed_params
return p
def out_dir(self):
return './examples/out/'
def kill(self):
pass
def run(self):
p = self.processed_params()
open(p['output_dir']+'/params.json','w').write(json.dumps(self.params,indent=5))
print(p)
def scan():
P = pset('a',[1,2,3,4,5,6,7])*pset('b',['a','b','c','d','e']) + pset('x',[1000,10001])
P.name = 'dummy1'
return P
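# --- Hedged illustration (editor addition) ---
# Assuming pset('a', values) describes a named parameter axis, '*' a Cartesian
# product and '+' a union of parameter sets (this reading of ltask's pset
# algebra is an assumption, not taken from its documentation), scan() above
# would describe 7*5 + 2 = 37 parameter dicts. The itertools sketch below
# reproduces that presumed expansion without ltask.
import itertools

def demo_expand():
    product = [dict(a=a, b=b) for a, b in
               itertools.product([1, 2, 3, 4, 5, 6, 7],
                                 ['a', 'b', 'c', 'd', 'e'])]
    extra = [dict(x=x) for x in [1000, 10001]]
    combos = product + extra
    assert len(combos) == 37
    return combos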
|
redsh/Tasks-In-A-Bottle
|
examples/dummy.py
|
Python
|
mit
| 641
|
"""
gstation-edit GrpParameter definition
"""
# this file is part of gstation-edit
# Copyright (C) F LAIGNEL 2009-2017 <fengalin@free.fr>
#
# gstation-edit is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gstation-edit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from gstation_edit.ui_core.parameter import Parameter
class GrpParameter(Parameter):
def __init__(self, parent, name, is_active=1):
Parameter.__init__(self, parent, name, cc_nb=-1, parameter_nb=-1,
is_sensitive=is_active)
self.parameters = dict()
def get_widget_name(self):
# no widget here, just a group
pass
def get_widget_label_name(self):
# no widget here, just a group
pass
def init_widget(self, gtk_builder):
for parameter in self.parameters.values():
parameter.init_widget(gtk_builder)
def set_sensitive(self, is_sensitive):
for parameter in self.parameters.values():
parameter.set_sensitive(is_sensitive)
self.is_sensitive = is_sensitive
def send_parameter_value(self, parameter):
self.set_value(parameter.value)
def update_widget(self):
for parameter in self.parameters.values():
if self.value == parameter.value:
parameter.set_active(True)
else:
parameter.set_active(False)
def add_parameter(self, parameter):
if len(self.parameters) == 0:
self.parameter_nb = parameter.parameter_nb
self.cc_nb = parameter.cc_nb
self.value = parameter.value
self.parameters[parameter.name] = parameter
|
fengalin/gstation-edit
|
gstation_edit/ui_core/grp_parameter.py
|
Python
|
lgpl-3.0
| 2,162
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the DBCore database abstraction.
"""
from __future__ import division, absolute_import, print_function
import os
import shutil
import sqlite3
from six import assertRaisesRegex
from test import _common
from test._common import unittest
from beets import dbcore
from tempfile import mkstemp
import six
# Fixture: concrete database and model classes. For migration tests, we
# have multiple models with different numbers of fields.
class TestSort(dbcore.query.FieldSort):
pass
class TestModel1(dbcore.Model):
_table = 'test'
_flex_table = 'testflex'
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
}
_types = {
'some_float_field': dbcore.types.FLOAT,
}
_sorts = {
'some_sort': TestSort,
}
@classmethod
def _getters(cls):
return {}
def _template_funcs(self):
return {}
class TestDatabase1(dbcore.Database):
_models = (TestModel1,)
pass
class TestModel2(TestModel1):
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.INTEGER,
}
class TestDatabase2(dbcore.Database):
_models = (TestModel2,)
pass
class TestModel3(TestModel1):
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.INTEGER,
'field_three': dbcore.types.INTEGER,
}
class TestDatabase3(dbcore.Database):
_models = (TestModel3,)
pass
class TestModel4(TestModel1):
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.INTEGER,
'field_three': dbcore.types.INTEGER,
'field_four': dbcore.types.INTEGER,
}
class TestDatabase4(dbcore.Database):
_models = (TestModel4,)
pass
class AnotherTestModel(TestModel1):
_table = 'another'
_flex_table = 'anotherflex'
_fields = {
'id': dbcore.types.PRIMARY_ID,
'foo': dbcore.types.INTEGER,
}
class TestDatabaseTwoModels(dbcore.Database):
_models = (TestModel2, AnotherTestModel)
pass
class TestModelWithGetters(dbcore.Model):
@classmethod
def _getters(cls):
return {'aComputedField': (lambda s: 'thing')}
def _template_funcs(self):
return {}
@_common.slow_test()
class MigrationTest(unittest.TestCase):
"""Tests the ability to change the database schema between
versions.
"""
@classmethod
def setUpClass(cls):
handle, cls.orig_libfile = mkstemp('orig_db')
os.close(handle)
# Set up a database with the two-field schema.
old_lib = TestDatabase2(cls.orig_libfile)
# Add an item to the old library.
old_lib._connection().execute(
'insert into test (field_one, field_two) values (4, 2)'
)
old_lib._connection().commit()
del old_lib
@classmethod
def tearDownClass(cls):
os.remove(cls.orig_libfile)
def setUp(self):
handle, self.libfile = mkstemp('db')
os.close(handle)
shutil.copyfile(self.orig_libfile, self.libfile)
def tearDown(self):
os.remove(self.libfile)
def test_open_with_same_fields_leaves_untouched(self):
new_lib = TestDatabase2(self.libfile)
c = new_lib._connection().cursor()
c.execute("select * from test")
row = c.fetchone()
self.assertEqual(len(row.keys()), len(TestModel2._fields))
def test_open_with_new_field_adds_column(self):
new_lib = TestDatabase3(self.libfile)
c = new_lib._connection().cursor()
c.execute("select * from test")
row = c.fetchone()
self.assertEqual(len(row.keys()), len(TestModel3._fields))
def test_open_with_fewer_fields_leaves_untouched(self):
new_lib = TestDatabase1(self.libfile)
c = new_lib._connection().cursor()
c.execute("select * from test")
row = c.fetchone()
self.assertEqual(len(row.keys()), len(TestModel2._fields))
def test_open_with_multiple_new_fields(self):
new_lib = TestDatabase4(self.libfile)
c = new_lib._connection().cursor()
c.execute("select * from test")
row = c.fetchone()
self.assertEqual(len(row.keys()), len(TestModel4._fields))
def test_extra_model_adds_table(self):
new_lib = TestDatabaseTwoModels(self.libfile)
try:
new_lib._connection().execute("select * from another")
except sqlite3.OperationalError:
self.fail("select failed")
class ModelTest(unittest.TestCase):
def setUp(self):
self.db = TestDatabase1(':memory:')
def tearDown(self):
self.db._connection().close()
def test_add_model(self):
model = TestModel1()
model.add(self.db)
rows = self.db._connection().execute('select * from test').fetchall()
self.assertEqual(len(rows), 1)
def test_store_fixed_field(self):
model = TestModel1()
model.add(self.db)
model.field_one = 123
model.store()
row = self.db._connection().execute('select * from test').fetchone()
self.assertEqual(row['field_one'], 123)
def test_retrieve_by_id(self):
model = TestModel1()
model.add(self.db)
other_model = self.db._get(TestModel1, model.id)
self.assertEqual(model.id, other_model.id)
def test_store_and_retrieve_flexattr(self):
model = TestModel1()
model.add(self.db)
model.foo = 'bar'
model.store()
other_model = self.db._get(TestModel1, model.id)
self.assertEqual(other_model.foo, 'bar')
def test_delete_flexattr(self):
model = TestModel1()
model['foo'] = 'bar'
self.assertTrue('foo' in model)
del model['foo']
self.assertFalse('foo' in model)
def test_delete_flexattr_via_dot(self):
model = TestModel1()
model['foo'] = 'bar'
self.assertTrue('foo' in model)
del model.foo
self.assertFalse('foo' in model)
def test_delete_flexattr_persists(self):
model = TestModel1()
model.add(self.db)
model.foo = 'bar'
model.store()
model = self.db._get(TestModel1, model.id)
del model['foo']
model.store()
model = self.db._get(TestModel1, model.id)
self.assertFalse('foo' in model)
def test_delete_non_existent_attribute(self):
model = TestModel1()
with self.assertRaises(KeyError):
del model['foo']
def test_delete_fixed_attribute(self):
model = TestModel1()
with self.assertRaises(KeyError):
del model['field_one']
def test_null_value_normalization_by_type(self):
model = TestModel1()
model.field_one = None
self.assertEqual(model.field_one, 0)
def test_null_value_stays_none_for_untyped_field(self):
model = TestModel1()
model.foo = None
self.assertEqual(model.foo, None)
def test_normalization_for_typed_flex_fields(self):
model = TestModel1()
model.some_float_field = None
self.assertEqual(model.some_float_field, 0.0)
def test_load_deleted_flex_field(self):
model1 = TestModel1()
model1['flex_field'] = True
model1.add(self.db)
model2 = self.db._get(TestModel1, model1.id)
self.assertIn('flex_field', model2)
del model1['flex_field']
model1.store()
model2.load()
self.assertNotIn('flex_field', model2)
def test_check_db_fails(self):
with assertRaisesRegex(self, ValueError, 'no database'):
dbcore.Model()._check_db()
with assertRaisesRegex(self, ValueError, 'no id'):
TestModel1(self.db)._check_db()
dbcore.Model(self.db)._check_db(need_id=False)
def test_missing_field(self):
with self.assertRaises(AttributeError):
TestModel1(self.db).nonExistingKey
def test_computed_field(self):
model = TestModelWithGetters()
self.assertEqual(model.aComputedField, 'thing')
with assertRaisesRegex(self, KeyError, u'computed field .+ deleted'):
del model.aComputedField
def test_items(self):
model = TestModel1(self.db)
model.id = 5
self.assertEqual({('id', 5), ('field_one', None)},
set(model.items()))
def test_delete_internal_field(self):
model = dbcore.Model()
del model._db
with self.assertRaises(AttributeError):
model._db
def test_parse_nonstring(self):
with assertRaisesRegex(self, TypeError, u"must be a string"):
dbcore.Model._parse(None, 42)
class FormatTest(unittest.TestCase):
def test_format_fixed_field(self):
model = TestModel1()
model.field_one = u'caf\xe9'
value = model.formatted().get('field_one')
self.assertEqual(value, u'caf\xe9')
def test_format_flex_field(self):
model = TestModel1()
model.other_field = u'caf\xe9'
value = model.formatted().get('other_field')
self.assertEqual(value, u'caf\xe9')
def test_format_flex_field_bytes(self):
model = TestModel1()
model.other_field = u'caf\xe9'.encode('utf8')
value = model.formatted().get('other_field')
self.assertTrue(isinstance(value, six.text_type))
self.assertEqual(value, u'caf\xe9')
def test_format_unset_field(self):
model = TestModel1()
value = model.formatted().get('other_field')
self.assertEqual(value, u'')
def test_format_typed_flex_field(self):
model = TestModel1()
model.some_float_field = 3.14159265358979
value = model.formatted().get('some_float_field')
self.assertEqual(value, u'3.1')
class FormattedMappingTest(unittest.TestCase):
def test_keys_equal_model_keys(self):
model = TestModel1()
formatted = model.formatted()
self.assertEqual(set(model.keys(True)), set(formatted.keys()))
def test_get_unset_field(self):
model = TestModel1()
formatted = model.formatted()
with self.assertRaises(KeyError):
formatted['other_field']
def test_get_method_with_default(self):
model = TestModel1()
formatted = model.formatted()
self.assertEqual(formatted.get('other_field'), u'')
def test_get_method_with_specified_default(self):
model = TestModel1()
formatted = model.formatted()
self.assertEqual(formatted.get('other_field', 'default'), 'default')
class ParseTest(unittest.TestCase):
def test_parse_fixed_field(self):
value = TestModel1._parse('field_one', u'2')
self.assertIsInstance(value, int)
self.assertEqual(value, 2)
def test_parse_flex_field(self):
value = TestModel1._parse('some_float_field', u'2')
self.assertIsInstance(value, float)
self.assertEqual(value, 2.0)
def test_parse_untyped_field(self):
value = TestModel1._parse('field_nine', u'2')
self.assertEqual(value, u'2')
class QueryParseTest(unittest.TestCase):
def pqp(self, part):
return dbcore.queryparse.parse_query_part(
part,
{'year': dbcore.query.NumericQuery},
{':': dbcore.query.RegexpQuery},
)[:-1] # remove the negate flag
def test_one_basic_term(self):
q = 'test'
r = (None, 'test', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
def test_one_keyed_term(self):
q = 'test:val'
r = ('test', 'val', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
def test_colon_at_end(self):
q = 'test:'
r = ('test', '', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
def test_one_basic_regexp(self):
q = r':regexp'
r = (None, 'regexp', dbcore.query.RegexpQuery)
self.assertEqual(self.pqp(q), r)
def test_keyed_regexp(self):
q = r'test::regexp'
r = ('test', 'regexp', dbcore.query.RegexpQuery)
self.assertEqual(self.pqp(q), r)
def test_escaped_colon(self):
q = r'test\:val'
r = (None, 'test:val', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
def test_escaped_colon_in_regexp(self):
q = r':test\:regexp'
r = (None, 'test:regexp', dbcore.query.RegexpQuery)
self.assertEqual(self.pqp(q), r)
def test_single_year(self):
q = 'year:1999'
r = ('year', '1999', dbcore.query.NumericQuery)
self.assertEqual(self.pqp(q), r)
def test_multiple_years(self):
q = 'year:1999..2010'
r = ('year', '1999..2010', dbcore.query.NumericQuery)
self.assertEqual(self.pqp(q), r)
def test_empty_query_part(self):
q = ''
r = (None, '', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
class QueryFromStringsTest(unittest.TestCase):
def qfs(self, strings):
return dbcore.queryparse.query_from_strings(
dbcore.query.AndQuery,
TestModel1,
{':': dbcore.query.RegexpQuery},
strings,
)
def test_zero_parts(self):
q = self.qfs([])
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertEqual(len(q.subqueries), 1)
self.assertIsInstance(q.subqueries[0], dbcore.query.TrueQuery)
def test_two_parts(self):
q = self.qfs(['foo', 'bar:baz'])
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertEqual(len(q.subqueries), 2)
self.assertIsInstance(q.subqueries[0], dbcore.query.AnyFieldQuery)
self.assertIsInstance(q.subqueries[1], dbcore.query.SubstringQuery)
def test_parse_fixed_type_query(self):
q = self.qfs(['field_one:2..3'])
self.assertIsInstance(q.subqueries[0], dbcore.query.NumericQuery)
def test_parse_flex_type_query(self):
q = self.qfs(['some_float_field:2..3'])
self.assertIsInstance(q.subqueries[0], dbcore.query.NumericQuery)
def test_empty_query_part(self):
q = self.qfs([''])
self.assertIsInstance(q.subqueries[0], dbcore.query.TrueQuery)
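# sort_from_strings() turns 'field+' / 'field-' parts into Sort objects:
# NullSort for no parts, MultipleSort for several, FixedFieldSort for fixed
# fields, SlowFieldSort for flexible ones, and the model's own special sorts
# (TestSort here).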
class SortFromStringsTest(unittest.TestCase):
def sfs(self, strings):
return dbcore.queryparse.sort_from_strings(
TestModel1,
strings,
)
def test_zero_parts(self):
s = self.sfs([])
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(s, dbcore.query.NullSort())
def test_one_parts(self):
s = self.sfs(['field+'])
self.assertIsInstance(s, dbcore.query.Sort)
def test_two_parts(self):
s = self.sfs(['field+', 'another_field-'])
self.assertIsInstance(s, dbcore.query.MultipleSort)
self.assertEqual(len(s.sorts), 2)
def test_fixed_field_sort(self):
s = self.sfs(['field_one+'])
self.assertIsInstance(s, dbcore.query.FixedFieldSort)
self.assertEqual(s, dbcore.query.FixedFieldSort('field_one'))
def test_flex_field_sort(self):
s = self.sfs(['flex_field+'])
self.assertIsInstance(s, dbcore.query.SlowFieldSort)
self.assertEqual(s, dbcore.query.SlowFieldSort('flex_field'))
def test_special_sort(self):
s = self.sfs(['some_sort+'])
self.assertIsInstance(s, TestSort)
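# parse_sorted_query() splits the parts into a (query, sort) pair.
# Comma-separated groups become an OrQuery, but a comma with no surrounding
# whitespace ('foo,bar') stays inside a single term.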
class ParseSortedQueryTest(unittest.TestCase):
def psq(self, parts):
return dbcore.parse_sorted_query(
TestModel1,
parts.split(),
)
def test_and_query(self):
q, s = self.psq('foo bar')
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 2)
def test_or_query(self):
q, s = self.psq('foo , bar')
self.assertIsInstance(q, dbcore.query.OrQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 2)
def test_no_space_before_comma_or_query(self):
q, s = self.psq('foo, bar')
self.assertIsInstance(q, dbcore.query.OrQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 2)
def test_no_spaces_or_query(self):
q, s = self.psq('foo,bar')
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 1)
def test_trailing_comma_or_query(self):
q, s = self.psq('foo , bar ,')
self.assertIsInstance(q, dbcore.query.OrQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 3)
def test_leading_comma_or_query(self):
q, s = self.psq(', foo , bar')
self.assertIsInstance(q, dbcore.query.OrQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 3)
def test_only_direction(self):
q, s = self.psq('-')
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 1)
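# The Results object returned by _fetch() should support repeated and
# concurrent iteration, Python-side ("slow") queries and sorts,
# subscripting, len(), and get(), which returns None when nothing matches.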
class ResultsIteratorTest(unittest.TestCase):
def setUp(self):
self.db = TestDatabase1(':memory:')
model = TestModel1()
model['foo'] = 'baz'
model.add(self.db)
model = TestModel1()
model['foo'] = 'bar'
model.add(self.db)
def tearDown(self):
self.db._connection().close()
def test_iterate_once(self):
objs = self.db._fetch(TestModel1)
self.assertEqual(len(list(objs)), 2)
def test_iterate_twice(self):
objs = self.db._fetch(TestModel1)
list(objs)
self.assertEqual(len(list(objs)), 2)
def test_concurrent_iterators(self):
results = self.db._fetch(TestModel1)
it1 = iter(results)
it2 = iter(results)
next(it1)
list(it2)
self.assertEqual(len(list(it1)), 1)
def test_slow_query(self):
q = dbcore.query.SubstringQuery('foo', 'ba', False)
objs = self.db._fetch(TestModel1, q)
self.assertEqual(len(list(objs)), 2)
def test_slow_query_negative(self):
q = dbcore.query.SubstringQuery('foo', 'qux', False)
objs = self.db._fetch(TestModel1, q)
self.assertEqual(len(list(objs)), 0)
def test_iterate_slow_sort(self):
s = dbcore.query.SlowFieldSort('foo')
res = self.db._fetch(TestModel1, sort=s)
objs = list(res)
self.assertEqual(objs[0].foo, 'bar')
self.assertEqual(objs[1].foo, 'baz')
def test_unsorted_subscript(self):
objs = self.db._fetch(TestModel1)
self.assertEqual(objs[0].foo, 'baz')
self.assertEqual(objs[1].foo, 'bar')
def test_slow_sort_subscript(self):
s = dbcore.query.SlowFieldSort('foo')
objs = self.db._fetch(TestModel1, sort=s)
self.assertEqual(objs[0].foo, 'bar')
self.assertEqual(objs[1].foo, 'baz')
def test_length(self):
objs = self.db._fetch(TestModel1)
self.assertEqual(len(objs), 2)
def test_out_of_range(self):
objs = self.db._fetch(TestModel1)
with self.assertRaises(IndexError):
objs[100]
def test_no_results(self):
self.assertIsNone(self.db._fetch(
TestModel1, dbcore.query.FalseQuery()).get())
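# Standard unittest entry points: build this module's suite and run it when
# the file is executed directly.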
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
jcoady9/beets
|
test/test_dbcore.py
|
Python
|
mit
| 20,402
|
# Generated by Django 2.2.5 on 2019-09-26 12:18
from django.db import migrations, models
import weblate.utils.backup
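# Adds paper key and passphrase fields to BackupService and updates the
# BackupLog event and BackupService repository field definitions.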
class Migration(migrations.Migration):
dependencies = [("wladmin", "0005_auto_20190926_1332")]
operations = [
migrations.AddField(
model_name="backupservice",
name="paperkey",
field=models.TextField(default=""),
preserve_default=False,
),
migrations.AddField(
model_name="backupservice",
name="passphrase",
field=models.CharField(
default=weblate.utils.backup.make_password, max_length=100
),
),
migrations.AlterField(
model_name="backuplog",
name="event",
field=models.CharField(
choices=[
("backup", "Backup performed"),
("prune", "Deleted the oldest backups"),
("init", "Repository initialization"),
],
max_length=100,
),
),
migrations.AlterField(
model_name="backupservice",
name="repository",
field=models.CharField(
default="", max_length=500, verbose_name="Backup repository"
),
),
]
|
dontnod/weblate
|
weblate/wladmin/migrations/0006_auto_20190926_1218.py
|
Python
|
gpl-3.0
| 1,322
|