| text (stringlengths 6–947k) | repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) |
|---|---|---|---|---|---|---|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as n_constants
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
target = oslo_messaging.Target(version='1.1')
def __init__(self, context, agent, sg_agent):
super(SriovNicSwitchRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def port_update(self, context, **kwargs):
LOG.debug("port_update received")
port = kwargs.get('port')
# Put the port mac address in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
mac = port['mac_address']
pci_slot = None
if port.get('binding:profile'):
pci_slot = port['binding:profile'].get('pci_slot')
if pci_slot:
self.agent.updated_devices.add((mac, pci_slot))
LOG.debug("port_update RPC received for port: %(id)s with MAC "
"%(mac)s and PCI slot %(pci_slot)s slot",
{'id': port['id'], 'mac': mac, 'pci_slot': pci_slot})
else:
LOG.debug("No PCI Slot for port %(id)s with MAC %(mac)s; "
"skipping", {'id': port['id'], 'mac': mac,
'pci_slot': pci_slot})
class SriovNicSwitchAgent(object):
def __init__(self, physical_devices_mappings, exclude_devices,
polling_interval):
self.polling_interval = polling_interval
self.conf = cfg.CONF
self.setup_eswitch_mgr(physical_devices_mappings,
exclude_devices)
configurations = {'device_mappings': physical_devices_mappings}
self.agent_state = {
'binary': 'neutron-sriov-nic-agent',
'host': self.conf.host,
'topic': n_constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
'start_flag': True}
# Stores port update notifications for processing in the main loop
self.updated_devices = set()
self.mac_to_port_id_mapping = {}
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc)
self._setup_rpc()
self.ext_manager = self._create_agent_extension_manager(
self.connection)
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
# Initialize iteration counter
self.iter_num = 0
def _setup_rpc(self):
self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
# Handle updates from service
self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers,
start_listening=False)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
devices = len(self.eswitch_mgr.get_assigned_devices_info())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def _create_agent_extension_manager(self, connection):
ext_manager.register_opts(self.conf)
mgr = ext_manager.AgentExtensionsManager(self.conf)
mgr.initialize(connection, 'sriov')
return mgr
def setup_eswitch_mgr(self, device_mappings, exclude_devices={}):
self.eswitch_mgr = esm.ESwitchManager()
self.eswitch_mgr.discover_devices(device_mappings, exclude_devices)
def scan_devices(self, registered_devices, updated_devices):
curr_devices = self.eswitch_mgr.get_assigned_devices_info()
device_info = {}
device_info['current'] = curr_devices
device_info['added'] = curr_devices - registered_devices
# we don't want to process updates for devices that don't exist
device_info['updated'] = updated_devices & curr_devices
# we need to clean up after devices are removed
device_info['removed'] = registered_devices - curr_devices
return device_info
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.prepare_devices_filter(device_info.get('added'))
if device_info.get('updated'):
self.sg_agent.refresh_firewall()
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
if self.eswitch_mgr.device_exists(device, pci_slot):
try:
self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
spoofcheck)
except Exception:
LOG.warning(_LW("Failed to set spoofcheck for device %s"),
device)
LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
{"device": device, "spoofcheck": spoofcheck})
try:
self.eswitch_mgr.set_device_state(device, pci_slot,
admin_state_up)
except exc.SriovNicError:
LOG.exception(_LE("Failed to set device %s state"), device)
return
if admin_state_up:
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
LOG.info(_LI("No device with MAC %s defined on agent."), device)
def treat_devices_added_updated(self, devices_info):
try:
macs_list = set([device_info[0] for device_info in devices_info])
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context, macs_list, self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for devices "
"with MAC addresses %(devices)s: %(e)s",
{'devices': macs_list, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.debug("Port with MAC address %s is added", device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': device_details})
port_id = device_details['port_id']
self.mac_to_port_id_mapping[device] = port_id
profile = device_details['profile']
spoofcheck = device_details.get('port_security_enabled', True)
self.treat_device(device,
profile.get('pci_slot'),
device_details['admin_state_up'],
spoofcheck)
self.ext_manager.handle_port(self.context, device_details)
else:
LOG.info(_LI("Device with MAC %s not defined on plugin"),
device)
return False
def treat_devices_removed(self, devices):
resync = False
for device in devices:
mac, pci_slot = device
LOG.info(_LI("Removing device with MAC address %(mac)s and "
"PCI slot %(pci_slot)s"),
{'mac': mac, 'pci_slot': pci_slot})
try:
port_id = self.mac_to_port_id_mapping.get(mac)
if port_id:
profile = {'pci_slot': pci_slot}
port = {'port_id': port_id,
'device': mac,
'profile': profile}
self.ext_manager.delete_port(self.context, port)
del self.mac_to_port_id_mapping[mac]
else:
LOG.warning(_LW("port_id to device with MAC "
"%s not found"), mac)
dev_details = self.plugin_rpc.update_device_down(self.context,
mac,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("Removing port failed for device with MAC address "
"%(mac)s and PCI slot %(pci_slot)s due to %(exc)s",
{'mac': mac, 'pci_slot': pci_slot, 'exc': e})
resync = True
continue
if dev_details['exists']:
LOG.info(_LI("Port with MAC %(mac)s and PCI slot "
"%(pci_slot)s updated."),
{'mac': mac, 'pci_slot': pci_slot})
else:
LOG.debug("Device with MAC %(mac)s and PCI slot "
"%(pci_slot)s not defined on plugin",
{'mac': mac, 'pci_slot': pci_slot})
return resync
def daemon_loop(self):
sync = True
devices = set()
LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!"))
while True:
start = time.time()
LOG.debug("Agent rpc_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
devices.clear()
sync = False
device_info = {}
# Save a copy of the updated devices set to allow rollback in case
# a resync is needed, and then clear self.updated_devices.
# As the greenthread should not yield between these
# two statements, this should be thread-safe.
updated_devices_copy = self.updated_devices
self.updated_devices = set()
try:
device_info = self.scan_devices(devices, updated_devices_copy)
if self._device_info_has_changes(device_info):
LOG.debug("Agent loop found changes! %s", device_info)
# If treat devices fails - indicates must resync with
# plugin
sync = self.process_network_devices(device_info)
devices = device_info['current']
except Exception:
LOG.exception(_LE("Error in agent loop. Devices info: %s"),
device_info)
sync = True
# Restore devices that were removed from this set earlier
# without overwriting ones that may have arrived since.
self.updated_devices |= updated_devices_copy
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
class SriovNicAgentConfigParser(object):
def __init__(self):
self.device_mappings = {}
self.exclude_devices = {}
def parse(self):
"""Parses device_mappings and exclude_devices.
Parse and validate the consistency in both mappings
"""
self.device_mappings = n_utils.parse_mappings(
cfg.CONF.SRIOV_NIC.physical_device_mappings)
self.exclude_devices = config.parse_exclude_devices(
cfg.CONF.SRIOV_NIC.exclude_devices)
self._validate()
def _validate(self):
"""Validate configuration.
Validate that network_device in excluded_device
exists in device mappings
"""
dev_net_set = set(self.device_mappings.values())
for dev_name in self.exclude_devices.keys():
if dev_name not in dev_net_set:
raise ValueError(_("Device name %(dev_name)s is missing from "
"physical_device_mappings") % {'dev_name':
dev_name})
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
config_parser = SriovNicAgentConfigParser()
config_parser.parse()
device_mappings = config_parser.device_mappings
exclude_devices = config_parser.exclude_devices
except ValueError:
LOG.exception(_LE("Failed on Agent configuration parse. "
"Agent terminated!"))
raise SystemExit(1)
LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
LOG.info(_LI("Exclude Devices: %s"), exclude_devices)
polling_interval = cfg.CONF.AGENT.polling_interval
try:
agent = SriovNicSwitchAgent(device_mappings,
exclude_devices,
polling_interval)
except exc.SriovNicError:
LOG.exception(_LE("Agent Initialization Failed"))
raise SystemExit(1)
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
agent.daemon_loop()
if __name__ == '__main__':
main()
| silenci/neutron | neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py | Python | apache-2.0 | 17,661 | 0.001019 |
from conn import Connection
import dispatch
import socket
class Acceptor(Connection):
def __init__(self, port):
self.dispatcher = dispatch.Dispatch(1)
self.dispatcher.start()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.sock.bind(("127.0.0.1", port))
self.sock.listen(1024)
def handleRead(self):
cli, addr = self.sock.accept()
# cli.setblocking(0)
self.dispatcher.dispatch(cli)
| chimmu/hailuo | acceptor.py | Python | gpl-2.0 | 474 | 0.006329 |
from flask import Flask
from flask.ext.script import Manager
app = Flask(__name__)
manager = Manager(app)
@app.route('/')
def index():
return '<h1>Hello World!</h1>'
@app.route('/user/<name>')
def user(name):
return '<h1>Hello, {name}!</h1>'.format(**locals())
if __name__ == '__main__':
manager.run()
| xuehao/stickpython | Flask_Web_Development/chapter_02/2c/hello_2c.py | Python | mit | 320 | 0.003125 |
#
# Copyright (c) 2017 Sugimoto Takaaki
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import urllib
import json
from collections import OrderedDict
# dictionary of api url
d = OrderedDict()
d['btc']='https://api.cryptonator.com/api/ticker/btc-usd'
d['ltc']='https://api.cryptonator.com/api/ticker/ltc-usd'
d['doge']='https://api.cryptonator.com/api/ticker/doge-usd'
d['xrp']='https://api.cryptonator.com/api/ticker/xrp-usd'
d['eth']='https://api.cryptonator.com/api/ticker/eth-usd'
d['mona']='https://api.cryptonator.com/api/ticker/mona-usd'
outputString = ""
for url in d.values():
sock = urllib.urlopen(url)
jsonString = sock.read()
sock.close()
jsonCurrency = json.loads(jsonString)
price = jsonCurrency['ticker']['price']
outputString = outputString + price + " "
print outputString
| sugimotokun/VirtualCurrencySplunk | bin/scripts/vc_usd_nt.py | Python | apache-2.0 | 1,321 | 0.004542 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import mock
import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova import db
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
FAKE_INST_TYPE = {'id': 1,
'vcpus': VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'memory_mb': MEMORY_MB,
'name': 'fakeflavor',
'flavorid': 'foo',
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'swap': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'disabled': False,
'is_public': True,
'extra_specs': {'foo': 'bar'}}
def get_fake_db_instance(start, end, instance_id, tenant_id,
vm_state=vm_states.ACTIVE):
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, FAKE_INST_TYPE))
# NOTE(mriedem): We use fakes.stub_instance since it sets the fields
# needed on the db instance for converting it to an object, but we still
# need to override system_metadata to use our fake flavor.
inst = fakes.stub_instance(
id=instance_id,
uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
image_ref='1',
project_id=tenant_id,
user_id='fakeuser',
display_name='name',
flavor_id=FAKE_INST_TYPE['id'],
launched_at=start,
terminated_at=end,
vm_state=vm_state,
memory_mb=MEMORY_MB,
vcpus=VCPUS,
root_gb=ROOT_GB,
ephemeral_gb=EPHEMERAL_GB,)
inst['system_metadata'] = sys_meta
return inst
def fake_instance_get_active_by_window_joined(context, begin, end,
project_id, host):
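# Build SERVERS fake instances for each of TENANTS tenants; x / SERVERS
# (integer division under Python 2) picks the owning tenant for instance x.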
return [get_fake_db_instance(START,
STOP,
x,
"faketenant_%s" % (x / SERVERS))
for x in xrange(TENANTS * SERVERS)]
@mock.patch.object(db, 'instance_get_active_by_window_joined',
fake_instance_get_active_by_window_joined)
class SimpleTenantUsageTest(test.TestCase):
def setUp(self):
super(SimpleTenantUsageTest, self).setUp()
self.admin_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=True)
self.user_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=False)
self.alt_user_context = context.RequestContext('fakeadmin_0',
'faketenant_1',
is_admin=False)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Simple_tenant_usage'])
def _test_verify_index(self, start, stop):
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage?start=%s&end=%s' %
(start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.admin_context,
init_only=('os-simple-tenant-usage',)))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usages = res_dict['tenant_usages']
for i in xrange(TENANTS):
self.assertEqual(int(usages[i]['total_hours']),
SERVERS * HOURS)
self.assertEqual(int(usages[i]['total_local_gb_usage']),
SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
self.assertEqual(int(usages[i]['total_memory_mb_usage']),
SERVERS * MEMORY_MB * HOURS)
self.assertEqual(int(usages[i]['total_vcpus_usage']),
SERVERS * VCPUS * HOURS)
self.assertFalse(usages[i].get('server_usages'))
def test_verify_index(self):
self._test_verify_index(START, STOP)
def test_verify_index_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_index(START, future)
def test_verify_show(self):
self._test_verify_show(START, STOP)
def test_verify_show_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_show(START, future)
def _get_tenant_usages(self, detailed=''):
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage?'
'detailed=%s&start=%s&end=%s' %
(detailed, START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.admin_context,
init_only=('os-simple-tenant-usage',)))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
return res_dict['tenant_usages']
def test_verify_detailed_index(self):
usages = self._get_tenant_usages('1')
for i in xrange(TENANTS):
servers = usages[i]['server_usages']
for j in xrange(SERVERS):
self.assertEqual(int(servers[j]['hours']), HOURS)
def test_verify_simple_index(self):
usages = self._get_tenant_usages(detailed='0')
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def test_verify_simple_index_empty_param(self):
# NOTE(lzyeval): 'detailed=&start=..&end=..'
usages = self._get_tenant_usages()
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def _test_verify_show(self, start, stop):
tenant_id = 0
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_context,
init_only=('os-simple-tenant-usage',)))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usage = res_dict['tenant_usage']
servers = usage['server_usages']
self.assertEqual(len(usage['server_usages']), SERVERS)
uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
(x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
for j in xrange(SERVERS):
delta = STOP - START
uptime = delta.days * 24 * 3600 + delta.seconds
self.assertEqual(int(servers[j]['uptime']), uptime)
self.assertEqual(int(servers[j]['hours']), HOURS)
self.assertIn(servers[j]['instance_id'], uuids)
def test_verify_show_cant_view_other_tenant(self):
req = webob.Request.blank(
'/v2/faketenant_1/os-simple-tenant-usage/'
'faketenant_0?start=%s&end=%s' %
(START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
rules = {
"compute_extension:simple_tenant_usage:show":
common_policy.parse_rule([
["role:admin"], ["project_id:%(project_id)s"]
])
}
common_policy.set_rules(common_policy.Rules(rules))
try:
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.alt_user_context,
init_only=('os-simple-tenant-usage',)))
self.assertEqual(res.status_int, 403)
finally:
policy.reset()
def test_get_tenants_usage_with_bad_start_date(self):
future = NOW + datetime.timedelta(hours=HOURS)
tenant_id = 0
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, future.isoformat(), NOW.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_context,
init_only=('os-simple-tenant-usage',)))
self.assertEqual(res.status_int, 400)
class SimpleTenantUsageSerializerTest(test.TestCase):
def _verify_server_usage(self, raw_usage, tree):
self.assertEqual('server_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertIn(child.tag, not_seen)
not_seen.remove(child.tag)
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def _verify_tenant_usage(self, raw_usage, tree):
self.assertEqual('tenant_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertIn(child.tag, not_seen)
not_seen.remove(child.tag)
if child.tag == 'server_usages':
for idx, gr_child in enumerate(child):
self._verify_server_usage(raw_usage['server_usages'][idx],
gr_child)
else:
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def test_serializer_show(self):
serializer = simple_tenant_usage.SimpleTenantUsageTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usage = dict(
tenant_id='tenant',
total_local_gb_usage=789,
total_vcpus_usage=456,
total_memory_mb_usage=123,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000000',
name='test',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=1,
tenant_id='tenant',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=12,
memory_mb=512,
local_gb=25,
vcpus=2,
tenant_id='tenant',
flavor='m1.tiny',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=43200),
],
)
tenant_usage = dict(tenant_usage=raw_usage)
text = serializer.serialize(tenant_usage)
tree = etree.fromstring(text)
self._verify_tenant_usage(raw_usage, tree)
def test_serializer_index(self):
serializer = simple_tenant_usage.SimpleTenantUsagesTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usages = [dict(
tenant_id='tenant1',
total_local_gb_usage=1024,
total_vcpus_usage=23,
total_memory_mb_usage=512,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000001',
name='test1',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant1',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant1',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
dict(
tenant_id='tenant2',
total_local_gb_usage=512,
total_vcpus_usage=32,
total_memory_mb_usage=1024,
total_hours=42,
start=today,
stop=yesterday,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000003',
name='test3',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant2',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant4',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
]
tenant_usages = dict(tenant_usages=raw_usages)
text = serializer.serialize(tenant_usages)
tree = etree.fromstring(text)
self.assertEqual('tenant_usages', tree.tag)
self.assertEqual(len(raw_usages), len(tree))
for idx, child in enumerate(tree):
self._verify_tenant_usage(raw_usages[idx], child)
class SimpleTenantUsageControllerTest(test.TestCase):
def setUp(self):
super(SimpleTenantUsageControllerTest, self).setUp()
self.controller = simple_tenant_usage.SimpleTenantUsageController()
self.context = context.RequestContext('fakeuser', 'fake-project')
self.baseinst = get_fake_db_instance(START, STOP, instance_id=1,
tenant_id=self.context.project_id,
vm_state=vm_states.DELETED)
# convert the fake instance dict to an object
self.inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), self.baseinst)
def test_get_flavor_from_sys_meta(self):
# Non-deleted instances get their type information from their
# system_metadata
with mock.patch.object(db, 'instance_get_by_uuid',
return_value=self.baseinst):
flavor = self.controller._get_flavor(self.context,
self.inst_obj, {})
self.assertEqual(flavor_obj.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_non_deleted_with_id_fails(self):
# If an instance is not deleted and missing type information from
# system_metadata, then that's a bug
self.inst_obj.system_metadata = {}
self.assertRaises(KeyError,
self.controller._get_flavor, self.context,
self.inst_obj, {})
def test_get_flavor_from_deleted_with_id(self):
# Deleted instances may not have type info in system_metadata,
# so verify that they get their type from a lookup of their
# instance_type_id
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertEqual(flavor_obj.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_deleted_with_id_of_deleted(self):
# Verify the legacy behavior of instance_type_id pointing to a
# missing type being non-fatal
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
self.inst_obj.instance_type_id = 99
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertIsNone(flavor)
| tanglei528/nova | nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py | Python | apache-2.0 | 19,168 | 0.000313 |
#!/usr/bin/env python3
"""
This housekeeping script reads a GFF3 file and writes a new one, adding a 'gene'
row for any RNA feature which doesn't have one. The coordinates of the RNA will
be copied.
The initial use-case here was a GFF file dumped from WebApollo which had this issue.
In this particular use case, the orphan mRNAs have ID attributes but no Parent
attribute; this is also corrected.
INPUT EXAMPLE:
###
ChromosomeII_BmicrotiR1 IGS mRNA 1467897 1468187 . + . Name=ChromosomeII_BmicrotiR1:1467871-1468187;ID=101D714C468A44840D49A6FAAD27AFE5
ChromosomeII_BmicrotiR1 IGS exon 1467897 1468187 . + . Name=DE1443B2DABA5DEDBDEBE79EB433EEB8;Parent=101D714C468A44840D49A6FAAD27AFE5;ID=DE1443B2DABA5DEDBDEBE79EB433EEB8
ChromosomeII_BmicrotiR1 IGS CDS 1467897 1468187 . + 0 Name=101D714C468A44840D49A6FAAD27AFE5-CDS;Parent=101D714C468A44840D49A6FAAD27AFE5;ID=101D714C468A44840D49A6FAAD27AFE5-CDS
Author: Joshua Orvis
"""
import argparse
from biocode import gff
def main():
parser = argparse.ArgumentParser( description='Adds gene features for RNAs which lack them')
## output file to be written
parser.add_argument('-i', '--input', type=str, required=True, help='Path to the input GFF3 file' )
parser.add_argument('-o', '--output', type=str, required=True, help='Output GFF3 file to write' )
args = parser.parse_args()
infile = open(args.input)
ofh = open(args.output, 'wt')
for line in infile:
if line.startswith('#'):
ofh.write(line)
continue
line = line.rstrip()
cols = line.split("\t")
if len(cols) != 9:
ofh.write("{0}\n".format(line) )
continue
id = gff.column_9_value(cols[8], 'ID')
parent = gff.column_9_value(cols[8], 'Parent')
if cols[2].endswith('RNA') and parent is None:
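# Orphan RNA: emit a synthetic gene row with the same coordinates, then
# point the RNA's Parent attribute at the newly created gene ID.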
gene_cols = list(cols)
gene_cols[2] = 'gene'
gene_cols[8] = gff.set_column_9_value(gene_cols[8], 'ID', "{0}.gene".format(id))
ofh.write("{0}\n".format("\t".join(gene_cols)) )
cols[8] = gff.set_column_9_value(cols[8], 'Parent', "{0}.gene".format(id))
ofh.write("{0}\n".format("\t".join(cols)) )
else:
ofh.write("{0}\n".format(line) )
if __name__ == '__main__':
main()
| jorvis/biocode | sandbox/jorvis/correct_RNAs_missing_genes.py | Python | mit | 2,324 | 0.009897 |
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from scipy.stats import rankdata, tiecorrect
class TestTieCorrect(object):
def test_empty(self):
"""An empty array requires no correction, should return 1.0."""
ranks = np.array([], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_one(self):
"""A single element requires no correction, should return 1.0."""
ranks = np.array([1.0], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_no_correction(self):
"""Arrays with no ties require no correction."""
ranks = np.arange(2.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
ranks = np.arange(3.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_basic(self):
"""Check a few basic examples of the tie correction factor."""
# One tie of two elements
ranks = np.array([1.0, 2.5, 2.5])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of two elements (same as above, but tie is not at the end)
ranks = np.array([1.5, 1.5, 3.0])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of three elements
ranks = np.array([1.0, 3.0, 3.0, 3.0])
c = tiecorrect(ranks)
T = 3.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# Two ties, lengths 2 and 3.
ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
c = tiecorrect(ranks)
T1 = 2.0
T2 = 3.0
N = ranks.size
expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
assert_equal(c, expected)
def test_overflow(self):
ntie, k = 2000, 5
a = np.repeat(np.arange(k), ntie)
n = a.size # ntie * k
out = tiecorrect(rankdata(a))
assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))
class TestRankData(object):
def test_empty(self):
"""stats.rankdata([]) should return an empty array."""
a = np.array([], dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([], dtype=np.float64))
r = rankdata([])
assert_array_equal(r, np.array([], dtype=np.float64))
def test_one(self):
"""Check stats.rankdata with an array of length 1."""
data = [100]
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
r = rankdata(data)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
def test_basic(self):
"""Basic tests of stats.rankdata."""
data = [100, 10, 50]
expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [40, 10, 30, 10, 50]
expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [20, 20, 20, 10, 10, 10]
expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
# The docstring states explicitly that the argument is flattened.
a2d = a.reshape(2, 3)
r = rankdata(a2d)
assert_array_equal(r, expected)
def test_rankdata_object_string(self):
min_rank = lambda a: [1 + sum(i < j for i in a) for j in a]
max_rank = lambda a: [sum(i <= j for i in a) for j in a]
ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)])
def average_rank(a):
return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
def dense_rank(a):
b = np.unique(a)
return [1 + sum(i < j for i in b) for j in a]
rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
average=average_rank, dense=dense_rank)
def check_ranks(a):
for method in 'min', 'max', 'dense', 'ordinal', 'average':
out = rankdata(a, method=method)
assert_array_equal(out, rankf[method](a))
val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
check_ranks(np.random.choice(val, 200))
check_ranks(np.random.choice(val, 200).astype('object'))
val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
check_ranks(np.random.choice(val, 200).astype('object'))
def test_large_int(self):
data = np.array([2**60, 2**60+1], dtype=np.uint64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, 2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, -2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [2.0, 1.0])
def test_big_tie(self):
for n in [10000, 100000, 1000000]:
data = np.ones(n, dtype=int)
r = rankdata(data)
expected_rank = 0.5 * (n + 1)
assert_array_equal(r, expected_rank * data,
"test failed with n=%d" % n)
_cases = (
# values, method, expected
([], 'average', []),
([], 'min', []),
([], 'max', []),
([], 'dense', []),
([], 'ordinal', []),
#
([100], 'average', [1.0]),
([100], 'min', [1.0]),
([100], 'max', [1.0]),
([100], 'dense', [1.0]),
([100], 'ordinal', [1.0]),
#
([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
#
([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
#
([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
#
([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
#
([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
)
def test_cases():
for values, method, expected in _cases:
r = rankdata(values, method=method)
assert_array_equal(r, expected)
| aeklant/scipy | scipy/stats/tests/test_rank.py | Python | bsd-3-clause | 7,448 | 0.000403 |
import logging
from ...engines.light import SimEngineLight
from ...errors import SimEngineError
l = logging.getLogger(name=__name__)
class SimEnginePropagatorBase(SimEngineLight): # pylint:disable=abstract-method
def __init__(self, stack_pointer_tracker=None, project=None):
super().__init__()
# Used in the VEX engine
self._project = project
self.base_state = None
self._load_callback = None
# Used in the AIL engine
self._stack_pointer_tracker = stack_pointer_tracker
def process(self, state, *args, **kwargs):
self.project = kwargs.pop('project', None)
self.base_state = kwargs.pop('base_state', None)
self._load_callback = kwargs.pop('load_callback', None)
try:
self._process(state, None, block=kwargs.pop('block', None))
except SimEngineError as ex:
if kwargs.pop('fail_fast', False) is True:
raise ex
l.error(ex, exc_info=True)
return self.state
| iamahuman/angr | angr/analyses/propagator/engine_base.py | Python | bsd-2-clause | 1,026 | 0.001949 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, get_datetime
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
from erpnext.stock.doctype.stock_entry import test_stock_entry
from erpnext.projects.doctype.time_log.time_log import OverProductionLoggedError
class TestProductionOrder(unittest.TestCase):
def check_planned_qty(self):
set_perpetual_inventory(0)
planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0
pro_doc = frappe.copy_doc(test_records[0])
pro_doc.insert()
pro_doc.submit()
# add raw materials to stores
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="Stores - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="Stores - _TC", qty=100, incoming_rate=100)
# from stores to wip
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Material Transfer for Manufacture", 4))
for d in s.get("items"):
d.s_warehouse = "Stores - _TC"
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-02"
s.insert()
s.submit()
# from wip to fg
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 4))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-03"
s.insert()
s.submit()
self.assertEqual(frappe.db.get_value("Production Order", pro_doc.name,
"produced_qty"), 4)
planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
self.assertEqual(planned1 - planned0, 6)
return pro_doc
def test_over_production(self):
from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
pro_doc = self.check_planned_qty()
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-04"
s.insert()
self.assertRaises(StockOverProductionError, s.submit)
def test_make_time_log(self):
from erpnext.manufacturing.doctype.production_order.production_order import make_time_log
from frappe.utils import cstr
from frappe.utils import time_diff_in_hours
prod_order = frappe.get_doc({
"doctype": "Production Order",
"production_item": "_Test FG Item 2",
"bom_no": "BOM/_Test FG Item 2/001",
"qty": 1,
"wip_warehouse": "_Test Warehouse - _TC",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"company": "_Test Company",
"planned_start_date": "2014-11-25 00:00:00"
})
prod_order.set_production_order_operations()
prod_order.insert()
prod_order.submit()
d = prod_order.operations[0]
d.completed_qty = flt(d.completed_qty)
time_log = make_time_log(prod_order.name, cstr(d.idx) + ". " + d.operation, \
d.planned_start_time, d.planned_end_time, prod_order.qty - d.completed_qty,
operation_id=d.name)
self.assertEqual(prod_order.name, time_log.production_order)
self.assertEqual((prod_order.qty - d.completed_qty), time_log.completed_qty)
self.assertEqual(time_diff_in_hours(d.planned_end_time, d.planned_start_time),time_log.hours)
time_log.save()
time_log.submit()
manufacturing_settings = frappe.get_doc({
"doctype": "Manufacturing Settings",
"allow_production_on_holidays": 0
})
manufacturing_settings.save()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Completed")
self.assertEqual(prod_order.operations[0].completed_qty, prod_order.qty)
self.assertEqual(get_datetime(prod_order.operations[0].actual_start_time), get_datetime(time_log.from_time))
self.assertEqual(get_datetime(prod_order.operations[0].actual_end_time), get_datetime(time_log.to_time))
self.assertEqual(prod_order.operations[0].actual_operation_time, 60)
self.assertEqual(prod_order.operations[0].actual_operating_cost, 100)
time_log.cancel()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Pending")
self.assertEqual(flt(prod_order.operations[0].completed_qty), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operation_time), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operating_cost), 0)
time_log2 = frappe.copy_doc(time_log)
time_log2.update({
"completed_qty": 10,
"from_time": "2014-11-26 00:00:00",
"to_time": "2014-11-26 00:00:00",
"docstatus": 0
})
self.assertRaises(OverProductionLoggedError, time_log2.save)
test_records = frappe.get_test_records('Production Order')
| gangadhar-kadam/verve_test_erp | erpnext/manufacturing/doctype/production_order/test_production_order.py | Python | agpl-3.0 | 5,084 | 0.023013 |
import numpy as np
import cv2
from matplotlib import pylab as plt
# Ref: http://www.pyimagesearch.com/2015/07/16/where-did-sift-and-surf-go-in-opencv-3/
picNumber = 1
filename = "/home/cwu/project/stereo-calibration/calib_imgs/3/left/left_" + str(picNumber) +".jpg"
img = cv2.imread(filename)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
orb = cv2.ORB_create()
# detect keypoints with ORB
kp = orb.detect(img,None)
# compute the ORB (rotated BRIEF) descriptors for the detected keypoints
kp, des = orb.compute(img, kp)
img = cv2.drawKeypoints(img,kp,None,(0,255,0),4)
cv2.imshow('img',img)
cv2.waitKey(1000)
cv2.imwrite('orb_keypoints.jpg',img)
| chaowu2009/stereo-vo | tools/test_ORB.py | Python | mit | 621 | 0.022544 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('pages', '__first__'),
]
operations = [
migrations.CreateModel(
name='Gallery',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page', on_delete=models.CASCADE)),
('content', mezzanine.core.fields.RichTextField(verbose_name='Content')),
('zip_import', models.FileField(help_text="Upload a zip file containing images, and they'll be imported into this gallery.", upload_to='galleries', verbose_name='Zip import', blank=True)),
],
options={
'ordering': ('_order',),
'verbose_name': 'Gallery',
'verbose_name_plural': 'Galleries',
},
bases=('pages.page', models.Model),
),
migrations.CreateModel(
name='GalleryImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('_order', models.IntegerField(null=True, verbose_name='Order')),
('file', mezzanine.core.fields.FileField(max_length=200, verbose_name='File')),
('description', models.CharField(max_length=1000, verbose_name='Description', blank=True)),
('gallery', models.ForeignKey(related_name='images', to='galleries.Gallery', on_delete=models.CASCADE)),
],
options={
'ordering': ('_order',),
'verbose_name': 'Image',
'verbose_name_plural': 'Images',
},
bases=(models.Model,),
),
]
| christianwgd/mezzanine | mezzanine/galleries/migrations/0001_initial.py | Python | bsd-2-clause | 1,889 | 0.004235 |
# -*- coding: utf-8 -*-
from wikitools.api import APIRequest
from wikitools.wiki import Wiki
from wikitools.page import Page
from urllib2 import quote
pairs = [
['"', '"'],
['(', ')'],
['[', ']'],
['{', '}'],
['<!--', '-->'],
['<', '>'],
['<gallery', '</gallery>'],
['<includeonly>', '</includeonly>'],
['<noinclude>', '</noinclude>'],
['<onlyinclude>', '</onlyinclude>'],
['<small>', '</small>'],
['<table>', '</table>'],
['<td>', '</td>'],
['<tr>', '</tr>'],
]
wiki = Wiki('http://wiki.teamfortress.com/w/api.php')
# Returns a list of unmatched element indices.
def find_mismatch(text, pair):
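# Walk the text, pushing the index of each opening character onto a stack
# and popping one for each closing character; a closer with nothing left to
# pop is returned immediately, otherwise the leftover unmatched openers are returned.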
problems = []
for i, char in enumerate(text):
if char == pair[0]:
problems.append(i)
if char == pair[1]:
try:
problems.pop()
except IndexError:
return [i]
return problems
params = {
'action': 'query',
'list': 'allpages',
'apfilterredir': 'nonredirects',
'aplimit': '500',
}
titles = set()
req = APIRequest(wiki, params)
for result in req.queryGen():
for article in result['query']['allpages']:
titles.add(article['title'])
titles = list(titles)
titles.sort()
print 'Found', len(titles), 'pages'
for title in titles:
page = Page(wiki, title)
page.getWikiText()
text = page.getWikiText().lower()
printed_link = False
for pair in pairs:
if text.count(pair[0]) != text.count(pair[1]):
if not printed_link:
print '='*80
print 'https://wiki.teamfortress.com/w/index.php?action=edit&title=%s' % quote(title.encode('utf-8'))
printed_link = True
indices = find_mismatch(text, pair)
print '-'*80
print pair
for index in indices:
print '-'*80
print text[index-100:index+100]
| jbzdarkid/Random | mismatched.py | Python | apache-2.0 | 1,737 | 0.011514 |
'''
Created on Jun 6, 2012
@author: vr274
'''
import numpy as np
from generic import TakestepSlice, TakestepInterface
from pele.utils import rotations
__all__ = ["RandomDisplacement", "UniformDisplacement",
"RotationalDisplacement", "RandomCluster"]
class RandomDisplacement(TakestepSlice):
'''Random displacement on each individual coordinate
RandomDisplacement is the most basic step taking routine. It simply
displaces each coordinate by a random value.
Parameters
----------
stepsize : float
magnitude of random displacement
'''
def __init__(self, stepsize=1.0):
TakestepSlice.__init__(self, stepsize=stepsize)
def takeStep(self, coords, **kwargs):
coords[self.srange] += np.random.uniform(low=-self.stepsize, high=self.stepsize, size=coords[self.srange].shape)
class UniformDisplacement(TakestepSlice):
'''Displace each atom by a uniform random vector
The routine generates a proper uniform random unit vector to displace
atoms.
'''
def takeStep(self, coords, **kwargs):
c = coords[self.srange]
for x in c.reshape(c.size/3,3):
x += self.stepsize * rotations.vector_random_uniform_hypersphere(3)
class RotationalDisplacement(TakestepSlice):
'''Random rotation for angle axis vector
RotationalDisplacement performs a proper random rotation. If the coordinate array contains
positions and orientations, make sure to specify the correct slice for the angle axis
coordinates.
'''
def takeStep(self, coords, **kwargs):
"""
take a random orientational step
"""
c = coords[self.srange]
for x in c.reshape(c.size/3,3):
rotations.takestep_aa(x, self.stepsize)
class RandomCluster(TakestepInterface):
'''Generate a random configuration
'''
def __init__(self, volume=1.0):
self.volume = volume
def takeStep(self, coords, **kwargs):
coords[:] = np.random.random(coords.shape) * (self.volume**(1./3.))
| js850/pele | pele/takestep/displace.py | Python | gpl-3.0 | 2,126 | 0.01317 |
import _plotly_utils.basevalidators
class SizemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="sizemode", parent_name="scatterpolar.marker", **kwargs
):
super(SizemodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["diameter", "area"]),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/scatterpolar/marker/_sizemode.py | Python | mit | 537 | 0.001862 |
#!/usr/bin/python
# $Id:$
from base import Display, Screen, ScreenMode, Canvas
from pyglet.libs.win32 import _kernel32, _user32, types, constants
from pyglet.libs.win32.constants import *
from pyglet.libs.win32.types import *
class Win32Display(Display):
def get_screens(self):
screens = []
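# Callback invoked by EnumDisplayMonitors once per attached monitor, with
# the monitor handle and its bounding rectangle in virtual-screen coordinates.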
def enum_proc(hMonitor, hdcMonitor, lprcMonitor, dwData):
r = lprcMonitor.contents
width = r.right - r.left
height = r.bottom - r.top
screens.append(
Win32Screen(self, hMonitor, r.left, r.top, width, height))
return True
enum_proc_type = WINFUNCTYPE(BOOL, HMONITOR, HDC, POINTER(RECT), LPARAM)
enum_proc_ptr = enum_proc_type(enum_proc)
_user32.EnumDisplayMonitors(NULL, NULL, enum_proc_ptr, 0)
return screens
class Win32Screen(Screen):
_initial_mode = None
def __init__(self, display, handle, x, y, width, height):
super(Win32Screen, self).__init__(display, x, y, width, height)
self._handle = handle
def get_matching_configs(self, template):
canvas = Win32Canvas(self.display, 0, _user32.GetDC(0))
configs = template.match(canvas)
# XXX deprecate config's being screen-specific
for config in configs:
config.screen = self
return configs
def get_device_name(self):
info = MONITORINFOEX()
info.cbSize = sizeof(MONITORINFOEX)
_user32.GetMonitorInfoW(self._handle, byref(info))
return info.szDevice
def get_modes(self):
device_name = self.get_device_name()
i = 0
modes = []
while True:
mode = DEVMODE()
mode.dmSize = sizeof(DEVMODE)
r = _user32.EnumDisplaySettingsW(device_name, i, byref(mode))
if not r:
break
modes.append(Win32ScreenMode(self, mode))
i += 1
return modes
def get_mode(self):
mode = DEVMODE()
mode.dmSize = sizeof(DEVMODE)
_user32.EnumDisplaySettingsW(self.get_device_name(),
ENUM_CURRENT_SETTINGS,
byref(mode))
return Win32ScreenMode(self, mode)
def set_mode(self, mode):
assert mode.screen is self
if not self._initial_mode:
self._initial_mode = self.get_mode()
r = _user32.ChangeDisplaySettingsExW(self.get_device_name(),
byref(mode._mode),
None,
CDS_FULLSCREEN,
None)
if r == DISP_CHANGE_SUCCESSFUL:
self.width = mode.width
self.height = mode.height
def restore_mode(self):
if self._initial_mode:
self.set_mode(self._initial_mode)
class Win32ScreenMode(ScreenMode):
def __init__(self, screen, mode):
super(Win32ScreenMode, self).__init__(screen)
self._mode = mode
self.width = mode.dmPelsWidth
self.height = mode.dmPelsHeight
self.depth = mode.dmBitsPerPel
self.rate = mode.dmDisplayFrequency
class Win32Canvas(Canvas):
def __init__(self, display, hwnd, hdc):
super(Win32Canvas, self).__init__(display)
self.hwnd = hwnd
self.hdc = hdc
| joaormatos/anaconda | Anaconda/pyglet/canvas/win32.py | Python | gpl-3.0 | 3,404 | 0.00235 |
import re
import sys
def is_self_describing(n):
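# A number is self-describing when the digit at position i equals how many
# times the digit i occurs in the whole number, e.g. 2020: position 0 holds
# 2 and '0' occurs twice, position 1 holds 0 and '1' never occurs, and so on.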
for i in range(len(n)):
c = n[i]
if int(c) != len(re.findall(str(i), n)):
return False
return True
with open(sys.argv[1], 'r') as fh:
for line in fh.readlines():
line = line.strip()
if line == '':
continue
print 1 if is_self_describing(line) else 0
| cadyyan/codeeval | python/40_self_describing_numbers.py | Python | gpl-3.0 | 379 | 0.007916 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_rebel_brigadier_general_rodian_female_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","rodian_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| anhstudios/swganh | data/scripts/templates/object/mobile/shared_dressed_rebel_brigadier_general_rodian_female_01.py | Python | mit | 476 | 0.046218 |
from app import db
from app.model import DirectionStatistic
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
def create_range_figure2(sender_id):
fig = Figure()
axis = fig.add_subplot(1, 1, 1)
xs = range(100)
ys = [random.randint(1, 50) for x in xs]
axis.plot(xs, ys)
return fig
def create_range_figure(sender_id):
sds = db.session.query(DirectionStatistic) \
.filter(DirectionStatistic.sender_id == sender_id) \
.order_by(DirectionStatistic.directions_count.desc()) \
.limit(1) \
.one()
fig = Figure()
direction_data = sds.direction_data
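# One polar bar per direction entry: the angle comes from the stored bearing,
# the radius from the max range converted to kilometres, and the colour from
# that radius normalised by the overall maximum range.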
max_range = max([r['max_range'] / 1000.0 for r in direction_data])
theta = np.array([i['direction'] / 180 * np.pi for i in direction_data])
radii = np.array([i['max_range'] / 1000 if i['max_range'] > 0 else 0 for i in direction_data])
width = np.array([13 / 180 * np.pi for i in direction_data])
colors = plt.cm.viridis(radii / max_range)
ax = fig.add_subplot(111, projection='polar')
ax.bar(theta, radii, width=width, bottom=0.0, color=colors, edgecolor='b', alpha=0.5)
#ax.set_rticks([0, 25, 50, 75, 100, 125, 150])
ax.set_theta_zero_location("N")
ax.set_theta_direction(-1)
fig.suptitle(f"Range between sender '{sds.sender.name}' and receiver '{sds.receiver.name}'")
return fig
| glidernet/ogn-python | app/main/matplotlib_service.py | Python | agpl-3.0 | 1,400 | 0.002857 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for visualizations."""
import inspect
from extensions.visualizations import models
class Registry(object):
"""Registry of all visualizations."""
# Dict mapping visualization class names to their classes.
visualizations_dict = {}
@classmethod
def _refresh_registry(cls):
"""Clears and adds new visualization instances to the registry."""
cls.visualizations_dict.clear()
# Add new visualization instances to the registry.
for name, clazz in inspect.getmembers(
models, predicate=inspect.isclass):
if name.endswith('_test') or name == 'BaseVisualization':
continue
ancestor_names = [
base_class.__name__ for base_class in inspect.getmro(clazz)]
if 'BaseVisualization' in ancestor_names:
cls.visualizations_dict[clazz.__name__] = clazz
@classmethod
def get_visualization_class(cls, visualization_id):
"""Gets a visualization class by its id (which is also its class name).
The registry will refresh if the desired class is not found. If it's
still not found after the refresh, this method will throw an error.
"""
if visualization_id not in cls.visualizations_dict:
cls._refresh_registry()
if visualization_id not in cls.visualizations_dict:
raise TypeError(
'\'%s\' is not a valid visualization id.' % visualization_id)
return cls.visualizations_dict[visualization_id]
@classmethod
def get_all_visualization_ids(cls):
"""Gets a visualization class by its id
(which is also its class name).
"""
if not cls.visualizations_dict:
cls._refresh_registry()
return cls.visualizations_dict.keys()
| souravbadami/oppia | core/domain/visualization_registry.py | Python | apache-2.0 | 2,447 | 0 |
__all__ = [
"getMin"
]
__doc__ = "Different algorithms used for optimization"
import Optizelle.Unconstrained.State
import Optizelle.Unconstrained.Functions
from Optizelle.Utility import *
from Optizelle.Properties import *
from Optizelle.Functions import *
def getMin(X, msg, fns, state, smanip=None):
"""Solves an unconstrained optimization problem
Basic solve: getMin(X,msg,fns,state)
Solve with a state manipulator: getMin(X,msg,fns,state,smanip)
"""
if smanip is None:
smanip = StateManipulator()
# Check the arguments
checkVectorSpace("X",X)
checkMessaging("msg",msg)
Optizelle.Unconstrained.Functions.checkT("fns",fns)
Optizelle.Unconstrained.State.checkT("state",state)
checkStateManipulator("smanip",smanip)
# Call the optimization
UnconstrainedAlgorithmsGetMin(X,msg,fns,state,smanip)
|
OptimoJoe/Optizelle
|
src/python/Optizelle/Unconstrained/Algorithms.py
|
Python
|
bsd-2-clause
| 863
| 0.011587
|
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API client library."""
import logging
from . import error
from . import resource
logger = logging.getLogger('timesketch_api.user')
class User(resource.BaseResource):
"""User object."""
def __init__(self, api):
"""Initializes the user object."""
self._object_data = None
resource_uri = 'users/me/'
super().__init__(api, resource_uri)
def _get_data(self):
"""Returns dict from the first object of the resource data."""
if self._object_data:
return self._object_data
data = self.data
objects = data.get('objects')
if objects:
self._object_data = objects[0]
else:
self._object_data = {}
return self._object_data
def change_password(self, new_password):
"""Change the password for the user.
Args:
new_password (str): String with the password.
Raises:
ValueError: If there was an error.
Returns:
Boolean: Whether the password was successfully modified.
"""
if not new_password:
raise ValueError('No new password supplied.')
if not isinstance(new_password, str):
raise ValueError('Password needs to be a string value.')
data = {'password': new_password}
resource_url = f'{self.api.api_root}/{self.resource_uri}'
response = self.api.session.post(resource_url, json=data)
return error.check_return_status(response, logger)
@property
def groups(self):
"""Property that returns the groups the user belongs to."""
data = self._get_data()
groups = data.get('groups', [])
return [x.get('name', '') for x in groups]
@property
def is_active(self):
"""Property that returns bool indicating whether the user is active."""
data = self._get_data()
return data.get('active', True)
@property
def is_admin(self):
"""Property that returns bool indicating whether the user is admin."""
data = self._get_data()
return data.get('admin', False)
@property
def username(self):
"""Property that returns back the username of the current user."""
data = self._get_data()
return data.get('username', 'Unknown')
def __str__(self):
"""Returns a string representation of the username."""
user_strings = [self.username]
if self.is_active:
user_strings.append('[active]')
else:
user_strings.append('[inactive]')
if self.is_admin:
user_strings.append('<is admin>')
return ' '.join(user_strings)
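A usage sketch (illustrative, not part of the client library): given an already authenticated api object of the kind this class expects, the User resource can be inspected and the password rotated. The api_client variable below is assumed to exist.
def summarize_current_user(api_client):
    """Illustrative helper: print the current user's details via the User resource."""
    user = User(api_client)
    print(str(user))                       # e.g. "alice [active] <is admin>"
    print('Groups:', ', '.join(user.groups))
    return user
# Example (assumed client object): user = summarize_current_user(api_client)
# user.change_password('a-new-strong-password')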
|
google/timesketch
|
api_client/python/timesketch_api_client/user.py
|
Python
|
apache-2.0
| 3,300
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
class Options:
def __init__(self):
self.color = "black"
self.verbose = False
|
LaurentCabaret/pyVhdl2Sch
|
tools/tools.py
|
Python
|
bsd-2-clause
| 177
| 0
|
# -*- coding: utf-8 -*-
import csv
import datetime
import os
import shutil
import json
from django.http import Http404
from django.test.client import RequestFactory
import mock
from pyquery import PyQuery as pq
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.urlresolvers import reverse
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon, AddonUser
from olympia.api.tests.utils import APIKeyAuthTestCase
from olympia.bandwagon.models import Collection
from olympia.stats import views, tasks
from olympia.stats import search
from olympia.stats.models import (
CollectionCount, DownloadCount, GlobalStat, ThemeUserCount, UpdateCount,
ThemeUpdateCount)
from olympia.stats.management.commands import save_stats_to_file
from olympia.users.models import UserProfile
class StatsTest(TestCase):
fixtures = ['stats/test_views.json', 'stats/test_models.json']
def setUp(self):
"""Setup some reasonable testing defaults."""
super(StatsTest, self).setUp()
# Default url_args to an addon and range with data.
self.url_args = {'start': '20090601', 'end': '20090930', 'addon_id': 4}
self.url_args_theme = {'start': '20090601', 'end': '20090930',
'addon_id': 6}
# Most tests don't care about permissions.
self.login_as_admin()
def login_as_admin(self):
self.client.logout()
self.client.login(username='jbalogh@mozilla.com', password='password')
def login_as_visitor(self):
self.client.logout()
self.client.login(username='nobodyspecial@mozilla.com',
password='password')
def get_view_response(self, view, **kwargs):
view_args = self.url_args.copy()
head = kwargs.pop('head', False)
view_args.update(kwargs)
url = reverse(view, kwargs=view_args)
if head:
return self.client.head(url, follow=True)
return self.client.get(url, follow=True)
def views_gen(self, **kwargs):
# common set of views
for series in views.SERIES:
for group in views.SERIES_GROUPS:
view = 'stats.%s_series' % series
args = kwargs.copy()
args['group'] = group
yield (view, args)
def public_views_gen(self, **kwargs):
# all views are potentially public, except for contributions
for view, args in self.views_gen(**kwargs):
if not view.startswith('stats.contributions'):
yield (view, args)
def private_views_gen(self, **kwargs):
# only contributions views are always private
for view, args in self.views_gen(**kwargs):
if view.startswith('stats.contributions'):
yield (view, args)
def _check_it(self, views, status):
for view, kwargs in views:
response = self.get_view_response(view, head=True, **kwargs)
assert response.status_code == status
class TestUnlistedAddons(StatsTest):
def setUp(self):
super(TestUnlistedAddons, self).setUp()
Addon.objects.get(pk=4).update(is_listed=False)
def test_no_stats_for_unlisted_addon(self):
"""All the views for the stats return 404 for unlisted addons."""
self.login_as_visitor()
self._check_it(self.public_views_gen(format='json'), 404)
self._check_it(self.private_views_gen(format='json'), 404)
def test_stats_for_unlisted_addon_owner(self):
"""All the views for the stats return 404 for unlisted addons owner."""
self.login_as_admin()
self._check_it(self.public_views_gen(format='json'), 200)
self._check_it(self.private_views_gen(format='json'), 200)
class ESStatsTest(StatsTest, amo.tests.ESTestCase):
"""Test class with some ES setup."""
def setUp(self):
super(ESStatsTest, self).setUp()
self.empty_index('stats')
self.index()
def index(self):
updates = UpdateCount.objects.values_list('id', flat=True)
tasks.index_update_counts(list(updates))
downloads = DownloadCount.objects.values_list('id', flat=True)
tasks.index_download_counts(list(downloads))
user_counts = ThemeUserCount.objects.values_list('id', flat=True)
tasks.index_theme_user_counts(list(user_counts))
self.refresh('stats')
def csv_eq(self, response, expected):
content = csv.DictReader(
# Drop lines that are comments.
filter(lambda row: row[0] != '#', response.content.splitlines()))
expected = csv.DictReader(
# Strip any extra spaces from the expected content.
line.strip() for line in expected.splitlines())
assert tuple(content) == tuple(expected)
class TestSeriesSecurity(StatsTest):
"""Tests to make sure all restricted data remains restricted."""
mock_es = True # We're checking only headers, not content.
def test_private_addon_no_groups(self):
# Logged in but no groups
self.login_as_visitor()
self._check_it(self.views_gen(format='json'), 403)
def test_private_addon_stats_group(self):
# Logged in with stats group.
user = UserProfile.objects.get(email='nobodyspecial@mozilla.com')
group = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=user, group=group)
self.login_as_visitor()
self._check_it(self.public_views_gen(format='json'), 200)
self._check_it(self.private_views_gen(format='json'), 403)
def test_private_addon_contrib_stats_group(self):
# Logged in with stats and contrib stats group.
user = UserProfile.objects.get(email='nobodyspecial@mozilla.com')
group1 = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=user, group=group1)
group2 = Group.objects.create(name='Revenue Stats',
rules='RevenueStats:View')
GroupUser.objects.create(user=user, group=group2)
self.login_as_visitor()
self._check_it(self.public_views_gen(format='json'), 200)
self._check_it(self.private_views_gen(format='json'), 200)
def test_private_addon_anonymous(self):
# Not logged in
self.client.logout()
self._check_it(self.views_gen(format='json'), 403)
def test_public_addon_no_groups(self):
# Logged in but no groups
self.login_as_visitor()
self._check_it(self.public_views_gen(addon_id=5, format='json'), 200)
self._check_it(self.private_views_gen(addon_id=5, format='json'), 403)
def test_public_addon_stats_group(self):
# Logged in with stats group.
user = UserProfile.objects.get(email='nobodyspecial@mozilla.com')
group = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=user, group=group)
self.login_as_visitor()
self._check_it(self.public_views_gen(addon_id=5, format='json'), 200)
self._check_it(self.private_views_gen(addon_id=5, format='json'), 403)
def test_public_addon_contrib_stats_group(self):
# Logged in with stats and contrib stats group.
user = UserProfile.objects.get(email='nobodyspecial@mozilla.com')
group1 = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=user, group=group1)
group2 = Group.objects.create(name='Revenue Stats',
rules='RevenueStats:View')
GroupUser.objects.create(user=user, group=group2)
self.login_as_visitor()
self._check_it(self.public_views_gen(addon_id=5, format='json'), 200)
self._check_it(self.private_views_gen(addon_id=5, format='json'), 200)
def test_public_addon_anonymous(self):
# Not logged in
self.client.logout()
self._check_it(self.public_views_gen(addon_id=5, format='json'), 200)
self._check_it(self.private_views_gen(addon_id=5, format='json'), 403)
class TestCSVs(ESStatsTest):
"""Tests for CSV output of all known series views."""
def test_downloads_series(self):
response = self.get_view_response('stats.downloads_series',
group='month', format='csv')
assert response.status_code == 200
self.csv_eq(response, """date,count
2009-09-03,10
2009-08-03,10
2009-07-03,10
2009-06-28,10
2009-06-20,10
2009-06-12,10
2009-06-07,10
2009-06-01,10""")
def test_usage_series(self):
for url_args in [self.url_args, self.url_args_theme]:
self.url_args = url_args
response = self.get_view_response('stats.usage_series',
group='month', format='csv')
assert response.status_code == 200
self.csv_eq(response, """date,count
2009-06-02,1500
2009-06-01,1000""")
def test_contributions_series(self):
response = self.get_view_response('stats.contributions_series',
group='day', format='csv')
assert response.status_code == 200
self.csv_eq(response, """date,total,count,average
2009-06-02,4.98,2,2.49
2009-06-01,5.00,1,5.0""")
def test_sources_series(self):
response = self.get_view_response('stats.sources_series',
group='month', format='csv')
assert response.status_code == 200
self.csv_eq(response, """date,count,search,api
2009-09-03,10,3,2
2009-08-03,10,3,2
2009-07-03,10,3,2
2009-06-28,10,3,2
2009-06-20,10,3,2
2009-06-12,10,3,2
2009-06-07,10,3,2
2009-06-01,10,3,2""")
def test_os_series(self):
response = self.get_view_response('stats.os_series',
group='month', format='csv')
assert response.status_code == 200
self.csv_eq(response, """date,count,Windows,Linux
2009-06-02,1500,500,400
2009-06-01,1000,400,300""")
def test_locales_series(self):
response = self.get_view_response('stats.locales_series',
group='month', format='csv')
assert response.status_code == 200
self.csv_eq(
response,
"""date,count,English (US) (en-us),"""
"""\xce\x95\xce\xbb\xce\xbb\xce\xb7\xce\xbd\xce\xb9\xce\xba"""
"""\xce\xac (el)
2009-06-02,1500,300,400
2009-06-01,1000,300,400""")
def test_statuses_series(self):
response = self.get_view_response('stats.statuses_series',
group='month', format='csv')
assert response.status_code == 200
self.csv_eq(response, """date,count,userEnabled,userDisabled
2009-06-02,1500,1370,130
2009-06-01,1000,950,50""")
def test_versions_series(self):
response = self.get_view_response('stats.versions_series',
group='month', format='csv')
assert response.status_code == 200
self.csv_eq(response, """date,count,2.0,1.0
2009-06-02,1500,950,550
2009-06-01,1000,800,200""")
def test_apps_series(self):
response = self.get_view_response('stats.apps_series',
group='month', format='csv')
assert response.status_code == 200
self.csv_eq(response, """date,count,Firefox 4.0
2009-06-02,1500,1500
2009-06-01,1000,1000""")
def test_no_cache(self):
"""Test that the csv or json is not caching, due to lack of data."""
self.url_args = {'start': '20200101', 'end': '20200130', 'addon_id': 4}
response = self.get_view_response('stats.versions_series', head=True,
group='day', format='csv')
        assert set(response['cache-control'].split(', ')) == (
            {'max-age=0', 'no-cache', 'no-store', 'must-revalidate'})
self.url_args = {'start': '20200101', 'end': '20200130', 'addon_id': 4}
response = self.get_view_response('stats.versions_series', head=True,
group='day', format='json')
        assert set(response['cache-control'].split(', ')) == (
            {'max-age=0', 'no-cache', 'no-store', 'must-revalidate'})
def test_usage_series_no_data(self):
url_args = [
{'start': '20010101', 'end': '20010130', 'addon_id': 4},
# Also test for themes.
{'start': '20010101', 'end': '20010130', 'addon_id': 6}
]
for url_arg in url_args:
self.url_args = url_arg
response = self.get_view_response('stats.usage_series',
group='day', format='csv')
assert response.status_code == 200
self.csv_eq(response, """date,count""")
class TestCacheControl(StatsTest):
"""Tests we set cache control headers"""
def _test_cache_control(self):
response = self.get_view_response('stats.downloads_series', head=True,
group='month', format='json')
assert response.get('cache-control', '').startswith('max-age='), (
'Bad or no cache-control: %r' % response.get('cache-control', ''))
class TestLayout(StatsTest):
def test_not_public_stats(self):
r = self.client.get(reverse('stats.downloads', args=[4]))
assert r.status_code == 404
def get_public_url(self):
addon = amo.tests.addon_factory(public_stats=True)
return reverse('stats.downloads', args=[addon.slug])
def test_public_stats_page_loads(self):
r = self.client.get(self.get_public_url())
assert r.status_code == 200
def test_public_stats_stats_notes(self):
r = self.client.get(self.get_public_url())
assert pq(r.content)('#stats-note h2').length == 1
class TestResponses(ESStatsTest):
def test_usage_json(self):
for url_args in [self.url_args, self.url_args_theme]:
self.url_args = url_args
r = self.get_view_response('stats.usage_series', group='day',
format='json')
assert r.status_code == 200
self.assertListEqual(json.loads(r.content), [
{'count': 1500, 'date': '2009-06-02', 'end': '2009-06-02'},
{'count': 1000, 'date': '2009-06-01', 'end': '2009-06-01'},
])
def test_usage_csv(self):
for url_args in [self.url_args, self.url_args_theme]:
self.url_args = url_args
r = self.get_view_response('stats.usage_series', group='day',
format='csv')
assert r.status_code == 200
self.csv_eq(r,
"""date,count
2009-06-02,1500
2009-06-01,1000""")
def test_usage_by_app_json(self):
r = self.get_view_response('stats.apps_series', group='day',
format='json')
assert r.status_code == 200
self.assertListEqual(json.loads(r.content), [
{
"data": {
"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": {"4.0": 1500}
},
"count": 1500,
"date": "2009-06-02",
"end": "2009-06-02"
},
{
"data": {
"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": {"4.0": 1000}
},
"count": 1000,
"date": "2009-06-01",
"end": "2009-06-01"
}
])
def test_usage_by_app_csv(self):
r = self.get_view_response('stats.apps_series', group='day',
format='csv')
assert r.status_code == 200
self.csv_eq(r, """date,count,Firefox 4.0
2009-06-02,1500,1500
2009-06-01,1000,1000""")
def test_usage_by_locale_json(self):
r = self.get_view_response('stats.locales_series', group='day',
format='json')
assert r.status_code == 200
self.assertListEqual(json.loads(r.content), [
{
"count": 1500,
"date": "2009-06-02",
"end": "2009-06-02",
"data": {
u"Ελληνικά (el)": 400,
u"English (US) (en-us)": 300
}
},
{
"count": 1000,
"date": "2009-06-01",
"end": "2009-06-01",
"data": {
u"Ελληνικά (el)": 400,
u"English (US) (en-us)": 300
}
}
])
def test_usage_by_locale_csv(self):
r = self.get_view_response('stats.locales_series', group='day',
format='csv')
assert r.status_code == 200
self.csv_eq(r, """date,count,English (US) (en-us),Ελληνικά (el)
2009-06-02,1500,300,400
2009-06-01,1000,300,400""")
def test_usage_by_os_json(self):
r = self.get_view_response('stats.os_series', group='day',
format='json')
assert r.status_code == 200
self.assertListEqual(json.loads(r.content), [
{
"count": 1500,
"date": "2009-06-02",
"end": "2009-06-02",
"data": {
"Linux": 400,
"Windows": 500
}
},
{
"count": 1000,
"date": "2009-06-01",
"end": "2009-06-01",
"data": {
"Linux": 300,
"Windows": 400
}
}
])
def test_usage_by_os_csv(self):
r = self.get_view_response('stats.os_series', head=True, group='day',
format='csv')
assert r.status_code == 200
def test_usage_by_version_json(self):
r = self.get_view_response('stats.versions_series', group='day',
format='json')
assert r.status_code == 200
self.assertListEqual(json.loads(r.content), [
{
"count": 1500,
"date": "2009-06-02",
"end": "2009-06-02",
"data": {
"1.0": 550,
"2.0": 950
}
},
{
"count": 1000,
"date": "2009-06-01",
"end": "2009-06-01",
"data": {
"1.0": 200,
"2.0": 800
}
}
])
def test_usage_by_version_csv(self):
r = self.get_view_response('stats.versions_series', group='day',
format='csv')
assert r.status_code == 200
self.csv_eq(r, """date,count,2.0,1.0
2009-06-02,1500,950,550
2009-06-01,1000,800,200""")
def test_usage_by_status_json(self):
r = self.get_view_response('stats.statuses_series', group='day',
format='json')
assert r.status_code == 200
self.assertListEqual(json.loads(r.content), [
{
"count": 1500,
"date": "2009-06-02",
"end": "2009-06-02",
"data": {
"userDisabled": 130,
"userEnabled": 1370
}
},
{
"count": 1000,
"date": "2009-06-01",
"end": "2009-06-01",
"data": {
"userDisabled": 50,
"userEnabled": 950
}
}
])
def test_usage_by_status_csv(self):
r = self.get_view_response('stats.statuses_series', group='day',
format='csv')
assert r.status_code == 200
self.csv_eq(r, """date,count,userEnabled,userDisabled
2009-06-02,1500,1370,130
2009-06-01,1000,950,50""")
def test_overview(self):
r = self.get_view_response('stats.overview_series', group='day',
format='json')
assert r.status_code == 200
# These are the dates from the fixtures. The return value will have
# dates in between filled with zeroes.
expected_data = [
{"date": "2009-09-03",
"data": {"downloads": 10, "updates": 0}},
{"date": "2009-08-03",
"data": {"downloads": 10, "updates": 0}},
{"date": "2009-07-03",
"data": {"downloads": 10, "updates": 0}},
{"date": "2009-06-28",
"data": {"downloads": 10, "updates": 0}},
{"date": "2009-06-20",
"data": {"downloads": 10, "updates": 0}},
{"date": "2009-06-12",
"data": {"downloads": 10, "updates": 0}},
{"date": "2009-06-07",
"data": {"downloads": 10, "updates": 0}},
{"date": "2009-06-02",
"data": {"downloads": 0, "updates": 1500}},
{"date": "2009-06-01",
"data": {"downloads": 10, "updates": 1000}}
]
actual_data = json.loads(r.content)
# Make sure they match up at the front and back.
assert actual_data[0]['date'] == expected_data[0]['date']
assert actual_data[-1]['date'] == expected_data[-1]['date']
end_date = expected_data[-1]['date']
expected, actual = iter(expected_data), iter(actual_data)
next_expected, next_actual = next(expected), next(actual)
while 1:
if next_expected['date'] == next_actual['date']:
# If they match it's a date we have data for.
self.assertDictEqual(next_expected, next_actual)
if next_expected['date'] == end_date:
break
next_expected, next_actual = next(expected), next(actual)
else:
# Otherwise just check that the data is zeroes.
self.assertDictEqual(next_actual['data'],
{'downloads': 0, 'updates': 0})
next_actual = next(actual)
def test_downloads_json(self):
r = self.get_view_response('stats.downloads_series', group='day',
format='json')
assert r.status_code == 200
self.assertListEqual(json.loads(r.content), [
{"count": 10, "date": "2009-09-03", "end": "2009-09-03"},
{"count": 10, "date": "2009-08-03", "end": "2009-08-03"},
{"count": 10, "date": "2009-07-03", "end": "2009-07-03"},
{"count": 10, "date": "2009-06-28", "end": "2009-06-28"},
{"count": 10, "date": "2009-06-20", "end": "2009-06-20"},
{"count": 10, "date": "2009-06-12", "end": "2009-06-12"},
{"count": 10, "date": "2009-06-07", "end": "2009-06-07"},
{"count": 10, "date": "2009-06-01", "end": "2009-06-01"},
])
def test_downloads_csv(self):
r = self.get_view_response('stats.downloads_series', group='day',
format='csv')
assert r.status_code == 200
self.csv_eq(r, """date,count
2009-09-03,10
2009-08-03,10
2009-07-03,10
2009-06-28,10
2009-06-20,10
2009-06-12,10
2009-06-07,10
2009-06-01,10""")
def test_downloads_sources_json(self):
r = self.get_view_response('stats.sources_series', group='day',
format='json')
assert r.status_code == 200
self.assertListEqual(json.loads(r.content), [
{"count": 10,
"date": "2009-09-03",
"end": "2009-09-03",
"data": {"api": 2, "search": 3}},
{"count": 10,
"date": "2009-08-03",
"end": "2009-08-03",
"data": {"api": 2, "search": 3}},
{"count": 10,
"date": "2009-07-03",
"end": "2009-07-03",
"data": {"api": 2, "search": 3}},
{"count": 10,
"date": "2009-06-28",
"end": "2009-06-28",
"data": {"api": 2, "search": 3}},
{"count": 10,
"date": "2009-06-20",
"end": "2009-06-20",
"data": {"api": 2, "search": 3}},
{"count": 10,
"date": "2009-06-12",
"end": "2009-06-12",
"data": {"api": 2, "search": 3}},
{"count": 10,
"date": "2009-06-07",
"end": "2009-06-07",
"data": {"api": 2, "search": 3}},
{"count": 10,
"date": "2009-06-01",
"end": "2009-06-01",
"data": {"api": 2, "search": 3}}
])
def test_downloads_sources_csv(self):
r = self.get_view_response('stats.sources_series', group='day',
format='csv')
assert r.status_code == 200
self.csv_eq(r, """date,count,search,api
2009-09-03,10,3,2
2009-08-03,10,3,2
2009-07-03,10,3,2
2009-06-28,10,3,2
2009-06-20,10,3,2
2009-06-12,10,3,2
2009-06-07,10,3,2
2009-06-01,10,3,2""")
def test_contributions_series_json(self):
r = self.get_view_response('stats.contributions_series', group='day',
format='json')
assert r.status_code == 200
self.assertListEqual(json.loads(r.content), [
{
"count": 2,
"date": "2009-06-02",
"average": 2.49,
"total": 4.98,
"end": "2009-06-02"
},
{
"count": 1,
"date": "2009-06-01",
"average": 5.0,
"total": 5.0,
"end": "2009-06-01"
}
])
def test_contributions_series_csv(self):
r = self.get_view_response('stats.contributions_series', group='day',
format='csv')
assert r.status_code == 200
self.csv_eq(r, """date,count,total,average
2009-06-02,2,4.98,2.49
2009-06-01,1,5.00,5.0""")
# Test the SQL site query with known dates, grouping by day, week and month.
class TestSiteQuery(TestCase):
def setUp(self):
super(TestSiteQuery, self).setUp()
self.start = datetime.date(2012, 1, 1)
self.end = datetime.date(2012, 1, 31)
for k in xrange(0, 15):
for name in ['addon_count_new', 'version_count_new']:
date_ = self.start + datetime.timedelta(days=k)
GlobalStat.objects.create(date=date_, name=name, count=k)
def test_day_grouping(self):
res = views._site_query('date', self.start, self.end)[0]
assert len(res) == 14
assert res[0]['data']['addons_created'] == 14
# Make sure we are returning counts as integers, otherwise
# DjangoJSONSerializer will map them to strings.
assert type(res[0]['data']['addons_created']) == int
assert res[0]['date'] == '2012-01-15'
def test_week_grouping(self):
res = views._site_query('week', self.start, self.end)[0]
assert len(res) == 3
assert res[1]['data']['addons_created'] == 70
assert res[1]['date'] == '2012-01-08'
def test_month_grouping(self):
res = views._site_query('month', self.start, self.end)[0]
assert len(res) == 1
assert res[0]['data']['addons_created'] == (14 * (14 + 1)) / 2
assert res[0]['date'] == '2012-01-02'
def test_period(self):
self.assertRaises(AssertionError, views._site_query, 'not_period',
self.start, self.end)
@mock.patch('olympia.stats.views._site_query')
class TestSite(TestCase):
def tests_period(self, _site_query):
_site_query.return_value = ['.', '.']
for period in ['date', 'week', 'month']:
self.client.get(reverse('stats.site', args=['json', period]))
assert _site_query.call_args[0][0] == period
def tests_period_day(self, _site_query):
_site_query.return_value = ['.', '.']
start = (datetime.date.today() - datetime.timedelta(days=3))
end = datetime.date.today()
self.client.get(reverse('stats.site.new',
args=['day', start.strftime('%Y%m%d'),
end.strftime('%Y%m%d'), 'json']))
assert _site_query.call_args[0][0] == 'date'
assert _site_query.call_args[0][1] == start
assert _site_query.call_args[0][2] == end
def test_csv(self, _site_query):
_site_query.return_value = [[], []]
res = self.client.get(reverse('stats.site', args=['csv', 'date']))
assert res._headers['content-type'][1].startswith('text/csv')
def test_json(self, _site_query):
_site_query.return_value = [[], []]
res = self.client.get(reverse('stats.site', args=['json', 'date']))
assert res._headers['content-type'][1].startswith('text/json')
def tests_no_date(self, _site_query):
_site_query.return_value = ['.', '.']
self.client.get(reverse('stats.site', args=['json', 'date']))
assert _site_query.call_args[0][1] == (
datetime.date.today() - datetime.timedelta(days=365))
assert _site_query.call_args[0][2] == datetime.date.today()
class TestCollections(amo.tests.ESTestCase):
fixtures = ['bandwagon/test_models', 'base/users',
'base/addon_3615', 'base/addon_5369']
def setUp(self):
super(TestCollections, self).setUp()
self.today = datetime.date.today()
self.collection = Collection.objects.get(pk=512)
self.url = reverse('stats.collection',
args=[self.collection.uuid, 'json'])
for x in xrange(1, 4):
data = {'date': self.today - datetime.timedelta(days=x - 1),
'id': int(self.collection.pk), 'count': x,
'data': search.es_dict({'subscribers': x, 'votes_up': x,
'votes_down': x, 'downloads': x})}
CollectionCount.index(data, id='%s-%s' % (x, self.collection.pk))
self.refresh('stats')
def tests_collection_anon(self):
res = self.client.get(self.url)
assert res.status_code == 403
def tests_collection_user(self):
self.client.login(username='admin@mozilla.com', password='password')
res = self.client.get(self.url)
assert res.status_code == 200
def tests_collection_admin(self):
self.client.login(username='admin@mozilla.com', password='password')
self.collection.update(author=None)
res = self.client.get(self.url)
assert res.status_code == 200
def test_collection_json(self):
self.client.login(username='admin@mozilla.com', password='password')
res = self.client.get(self.url)
content = json.loads(res.content)
assert len(content) == 3
assert content[0]['count'] == 1
assert content[0]['data']['votes_down'] == 1
assert content[0]['data']['downloads'] == 1
def test_collection_csv(self):
self.client.login(username='admin@mozilla.com', password='password')
self.url = reverse('stats.collection',
args=[self.collection.uuid, 'csv'])
res = self.client.get(self.url)
date = (self.today.strftime('%Y-%m-%d'))
assert '%s,1,1,1,1,1' % date in res.content
def get_url(self, start, end):
return reverse('collections.stats.subscribers_series',
args=[self.collection.author.username,
self.collection.slug, 'day',
start.strftime('%Y%m%d'),
end.strftime('%Y%m%d'), 'json'])
def test_collection_one_day(self):
self.client.login(username='admin@mozilla.com', password='password')
url = self.get_url(self.today, self.today)
res = self.client.get(url)
content = json.loads(res.content)
assert len(content) == 1
assert content[0]['date'] == self.today.strftime('%Y-%m-%d')
def test_collection_range(self):
self.client.login(username='admin@mozilla.com', password='password')
yesterday = self.today - datetime.timedelta(days=1)
day_before = self.today - datetime.timedelta(days=2)
url = self.get_url(day_before, yesterday)
res = self.client.get(url)
content = json.loads(res.content)
assert len(content) == 2
assert content[0]['date'] == yesterday.strftime('%Y-%m-%d')
assert content[1]['date'] == day_before.strftime('%Y-%m-%d')
class TestXss(amo.tests.TestXss):
def test_stats_page(self):
url = reverse('stats.overview', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
def test_date_range_or_404_xss(self):
with self.assertRaises(Http404):
views.get_daterange_or_404(start='<alert>', end='20010101')
def test_report_view_xss(self):
req = RequestFactory().get('/', start='<alert>', end='20010101')
assert views.get_report_view(req) == {}
req = RequestFactory().get('/', last='<alert>')
assert views.get_report_view(req) == {}
class ArchiveTestCase(APIKeyAuthTestCase):
fixtures = ['base/addon_3615']
def setUp(self):
self.user = UserProfile.objects.get(email='del@icio.us')
self.api_key = self.create_api_key(self.user, str(self.user.pk) + ':f')
self.addon = Addon.objects.get(pk=3615)
self.theme_update_count = ThemeUpdateCount(
addon_id=3615, date='2016-01-18', count=123)
def tearDown(self):
self.clean_up_files()
def clean_up_files(self):
path = os.path.join(views.storage.location, '3615')
if os.path.isdir(path):
shutil.rmtree(path)
def get(self, url=None):
return self.client.get(url, HTTP_AUTHORIZATION=self.authorization())
def test_list_not_authenticated(self):
response = self.client.get(
reverse('stats.archive_list', kwargs={
'slug': 'a3615', 'year': '2016', 'month': '01'}))
assert response.status_code == 401
def test_list_does_not_own_addon(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.get(
reverse('stats.archive_list', kwargs={
'slug': 'a3615', 'year': '2016', 'month': '01'}))
assert response.status_code == 403
def test_list(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
AddonUser.objects.create(user=self.user, addon=self.addon)
save_stats_to_file(self.theme_update_count)
response = self.get(
reverse('stats.archive_list', kwargs={
'slug': 'a3615', 'year': '2016', 'month': '01'}))
assert response.status_code == 200
assert json.loads(response.content) == [
{
'date': '2016-01-18',
'addon_id': 3615,
'model_name': 'themeupdatecount',
}
]
def test_list_not_existing(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
AddonUser.objects.create(user=self.user, addon=self.addon)
response = self.get(
reverse('stats.archive_list', kwargs={
'slug': 'a3615', 'year': '2016', 'month': '01'}))
assert response.status_code == 404
def test_get(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
AddonUser.objects.create(user=self.user, addon=self.addon)
save_stats_to_file(self.theme_update_count)
response = self.get(
reverse('stats.archive', kwargs={
'slug': 'a3615', 'year': '2016', 'month': '01',
'day': '18', 'model_name': 'themeupdatecount'}))
assert response.status_code == 200
assert json.loads(response.content) == {
'count': 123,
'date': '2016-01-18',
'addon': 3615
}
def test_get_not_existing(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
AddonUser.objects.create(user=self.user, addon=self.addon)
response = self.get(
reverse('stats.archive', kwargs={
'slug': 'a3615', 'year': '2016', 'month': '01',
'day': '18', 'model_name': 'themeupdatecount'}))
assert response.status_code == 404
|
Prashant-Surya/addons-server
|
src/olympia/stats/tests/test_views.py
|
Python
|
bsd-3-clause
| 38,979
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0015_auto_20150928_0850'),
]
operations = [
migrations.CreateModel(
name='UserVote',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('bidrag', models.ForeignKey(to='core.Bidrag')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='uservote',
unique_together=set([('bidrag', 'user')]),
),
]
|
hultberg/ppinnlevering
|
core/migrations/0016_auto_20151001_0714.py
|
Python
|
apache-2.0
| 843
| 0.001186
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras preprocessing layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
INTEGER = "int"
BINARY = "binary"
class Discretization(Layer):
"""Buckets data into discrete ranges.
This layer will place each element of its input data into one of several
contiguous ranges and output either an integer index or a one-hot vector
indicating which range each element was placed in.
What happens in `adapt()`: The dataset is examined and sliced.
Input shape:
Any `tf.Tensor` or `tf.RaggedTensor` of dimension 2 or higher.
Output shape:
The same as the input shape if `output_mode` is 'int', or
`[output_shape, num_buckets]` if `output_mode` is 'binary'.
Attributes:
bins: Optional boundary specification. Bins include the left boundary and
exclude the right boundary, so `bins=[0., 1., 2.]` generates bins
`(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`.
output_mode: One of 'int', 'binary'. Defaults to 'int'.
"""
def __init__(self, bins, output_mode=INTEGER, **kwargs):
super(Discretization, self).__init__(**kwargs)
self._supports_ragged_inputs = True
self.bins = bins
self.output_mode = output_mode
def get_config(self):
config = {
"bins": self.bins,
"output_mode": self.output_mode,
}
base_config = super(Discretization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
if self.output_mode == INTEGER:
return input_shape
else:
return tensor_shape.TensorShape([dim for dim in input_shape] +
[len(self.bins)])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = dtypes.int64
if isinstance(input_spec, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=output_dtype)
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def call(self, inputs):
if ragged_tensor.is_ragged(inputs):
integer_buckets = ragged_functional_ops.map_flat_values(
math_ops._bucketize, inputs, boundaries=self.bins) # pylint: disable=protected-access
# Ragged map_flat_values doesn't touch the non-values tensors in the
      # ragged composite tensor. If this op is the only op in a Keras model,
# this can cause errors in Graph mode, so wrap the tensor in an identity.
integer_buckets = array_ops.identity(integer_buckets)
elif isinstance(inputs, sparse_tensor.SparseTensor):
integer_buckets = math_ops._bucketize( # pylint: disable=protected-access
inputs.values,
boundaries=self.bins)
else:
integer_buckets = math_ops._bucketize(inputs, boundaries=self.bins) # pylint: disable=protected-access
if self.output_mode == INTEGER:
if isinstance(inputs, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(
indices=array_ops.identity(inputs.indices),
values=integer_buckets,
dense_shape=array_ops.identity(inputs.dense_shape))
return integer_buckets
else:
if isinstance(inputs, sparse_tensor.SparseTensor):
raise ValueError("`output_mode=binary` is not supported for "
"sparse input")
# The 'bins' array is the set of boundaries between the bins. We actually
# have 'len(bins)+1' outputs.
# TODO(momernick): This will change when we have the ability to adapt().
return array_ops.one_hot(integer_buckets, depth=len(self.bins) + 1)
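A quick worked example of the bin semantics described in the docstring (illustrative only, using NumPy rather than the layer itself): with bins=[0., 1., 2.] every value is mapped to one of four ranges, (-inf, 0.), [0., 1.), [1., 2.) and [2., +inf), i.e. indices 0 through 3.
import numpy as np
bins = [0., 1., 2.]
values = np.array([-0.5, 0.0, 0.5, 1.5, 2.0, 10.0])
# Illustrative only: np.digitize with right=False reproduces the
# left-inclusive, right-exclusive bucketing documented above.
indices = np.digitize(values, bins, right=False)
print(indices)  # [0 1 1 2 3 3]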
|
gunan/tensorflow
|
tensorflow/python/keras/layers/preprocessing/discretization.py
|
Python
|
apache-2.0
| 4,879
| 0.005329
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
from sha import sha
from random import randint
#this is ugly, hopefully os.entropy will be in 2.4
try:
from entropy import entropy
except ImportError:
def entropy(n):
s = ''
for i in range(n):
s += chr(randint(0,255))
return s
def intify(hstr):
"""20 bit hash, big-endian -> long python integer"""
assert len(hstr) == 20
return long(hstr.encode('hex'), 16)
def stringify(num):
"""long int -> 20-character string"""
str = hex(num)[2:]
if str[-1] == 'L':
str = str[:-1]
if len(str) % 2 != 0:
str = '0' + str
str = str.decode('hex')
return (20 - len(str)) *'\x00' + str
def distance(a, b):
"""distance between two 160-bit hashes expressed as 20-character strings"""
return intify(a) ^ intify(b)
def newID():
"""returns a new pseudorandom globally unique ID string"""
h = sha()
h.update(entropy(20))
return h.digest()
def newIDInRange(min, max):
return stringify(randRange(min,max))
def randRange(min, max):
return min + intify(newID()) % (max - min)
def newTID():
return randRange(-2**30, 2**30)
### Test Cases ###
import unittest
class NewID(unittest.TestCase):
def testLength(self):
self.assertEqual(len(newID()), 20)
def testHundreds(self):
for x in xrange(100):
            self.testLength()
class Intify(unittest.TestCase):
known = [('\0' * 20, 0),
('\xff' * 20, 2L**160 - 1),
]
def testKnown(self):
for str, value in self.known:
self.assertEqual(intify(str), value)
def testEndianessOnce(self):
h = newID()
while h[-1] == '\xff':
h = newID()
k = h[:-1] + chr(ord(h[-1]) + 1)
self.assertEqual(intify(k) - intify(h), 1)
def testEndianessLots(self):
for x in xrange(100):
self.testEndianessOnce()
class Distance(unittest.TestCase):
known = [
(("\0" * 20, "\xff" * 20), 2**160L -1),
((sha("foo").digest(), sha("foo").digest()), 0),
((sha("bar").digest(), sha("bar").digest()), 0)
]
def testKnown(self):
for pair, dist in self.known:
self.assertEqual(distance(pair[0], pair[1]), dist)
    def testCommutative(self):
for i in xrange(100):
x, y, z = newID(), newID(), newID()
self.assertEqual(distance(x,y) ^ distance(y, z), distance(x, z))
class RandRange(unittest.TestCase):
def testOnce(self):
a = intify(newID())
b = intify(newID())
if a < b:
c = randRange(a, b)
self.assertEqual(a <= c < b, 1, "output out of range %d %d %d" % (b, c, a))
else:
c = randRange(b, a)
assert b <= c < a, "output out of range %d %d %d" % (b, c, a)
def testOneHundredTimes(self):
for i in xrange(100):
self.testOnce()
if __name__ == '__main__':
unittest.main()
|
rays/ipodderx-core
|
khashmir/khash.py
|
Python
|
mit
| 3,533
| 0.01019
|
"""
ex_compound_nomo_1.py
Compound nomograph: (A+B)/E=F/(CD)
Copyright (C) 2007-2009 Leif Roschier
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
sys.path.insert(0, "..")
from pynomo.nomographer import *
# type 1
A_params={
'u_min':0.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$A$',
'tick_levels':2,
'tick_text_levels':1,
}
B_params={
'u_min':0.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$B$',
'tick_levels':2,
'tick_text_levels':1,
}
R1a_params={
'u_min':0.0,
'u_max':10.0,
'function':lambda u:-u,
'title':'',
'tick_levels':0,
'tick_text_levels':0,
'tag':'r1'
}
block_1_params={
'block_type':'type_1',
'width':10.0,
'height':10.0,
'f1_params':A_params,
'f2_params':B_params,
'f3_params':R1a_params,
'isopleth_values':[[1,7,'x']]
}
# type 4
R1b_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$R_1$',
'tick_levels':0,
'tick_text_levels':0,
'tick_side':'right',
'title_draw_center':True,
'title_opposite_tick':False,
'tag':'r1'
}
E_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$E$',
'tick_levels':3,
'tick_text_levels':1,
'tick_side':'right',
'title_draw_center':True,
'title_opposite_tick':False,
}
F_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$F$',
'tick_levels':3,
'tick_text_levels':1,
'tick_side':'left',
'title_draw_center':True,
'title_opposite_tick':True,
}
R2a_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$R_2$',
'tick_levels':0,
'tick_text_levels':0,
'tick_side':'left',
'title_draw_center':True,
'title_opposite_tick':False,
'tag':'r2'
}
block_2_params={
'block_type':'type_4',
'f1_params':R1b_params,
'f2_params':E_params,
'f3_params':F_params,
'f4_params':R2a_params,
'mirror_x':True,
'isopleth_values':[['x',9,4,'x']]
}
# type 2 N
R2b_params={
'u_min':0.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$$',
'tick_levels':0,
'tick_text_levels':0,
'tag':'r2'
}
C_params={
'u_min':0.5,
'u_max':5.0,
'function':lambda u:u,
'title':r'$C$',
'tick_levels':3,
'tick_text_levels':1,
'tick_side':'left',
'scale_type':'linear smart',
}
D_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$D$',
'tick_levels':3,
'tick_text_levels':1,
}
block_3_params={
'block_type':'type_2',
'width':10.0,
'height':10.0,
'f1_params':R2b_params,
'f2_params':C_params,
'f3_params':D_params,
'mirror_y':True,
'isopleth_values':[['x',1,'x']]
}
main_params={
'filename':'ex_compound_nomo_1.pdf',
'paper_height':10.0,
'paper_width':10.0,
'block_params':[block_1_params,block_2_params,block_3_params],
'transformations':[('rotate',0.01),('scale paper',)],
}
Nomographer(main_params)
|
dbaynard/pynomo
|
examples/ex_compound_nomo_1.py
|
Python
|
gpl-3.0
| 4,466
| 0.031572
|
from django.contrib import admin
from general.models import StaticPage
admin.site.register(StaticPage)
|
Gargamel1989/Seasoning-old
|
Seasoning/general/admin.py
|
Python
|
gpl-3.0
| 103
| 0.009709
|
import sys
from Bio import SeqIO
SNPTOPEAKFILENAME = sys.argv[1]
GENOMEFILENAME = sys.argv[2]
DISTANCE = int(sys.argv[3])
BINDALLELESEQFILENAME = sys.argv[4]
NONBINDALLELEFILENAME = sys.argv[5]
FIRSTPEAKCOL = int(sys.argv[6]) # 0-INDEXED
def getSNPInfo(SNPToPeakLine):
# Get the SNP and peak location from the current line
if SNPToPeakLine == "":
# At the end of the SNP to peak file, so stop
return [("", 0), ("", ""), ("", 0)]
SNPToPeakLineElements = SNPToPeakLine.split("\t")
return [(SNPToPeakLineElements[0], int(SNPToPeakLineElements[1])), (SNPToPeakLineElements[2], SNPToPeakLineElements[3]), (SNPToPeakLineElements[FIRSTPEAKCOL], int(SNPToPeakLineElements[FIRSTPEAKCOL+1]))]
def getSequencesForSNPs():
# For each SNP, get the sequence of its peak +/- distances with the binding and non-binding alleles
SNPToPeakFile = open(SNPTOPEAKFILENAME)
[SNPLocation, SNPAlleles, peakLocation] = getSNPInfo(SNPToPeakFile.readline().strip())
lastPeakLocation = ("", 0)
bindAlleleSeq = ""
nonBindAlleleSeq = ""
bindAlleleSeqFile = open(BINDALLELESEQFILENAME, 'w+')
nonBindAlleleSeqFile = open(NONBINDALLELEFILENAME, 'w+')
numSharingPeak = 0
for seqRecord in SeqIO.parse(GENOMEFILENAME, "fasta"):
# Iterate through the chromosomes and get the sequences surrounding each SNP in each chromosome
# Combine SNPs that are in the same peak, and ASSUME THAT THEY ARE IN LD AND THE BINDING ALLELES CORRESPOND TO EACH OTHER
while seqRecord.id == SNPLocation[0]:
# Iterate through all SNPs on the current chromosome
if peakLocation != lastPeakLocation:
# At a new peak
if lastPeakLocation[0] != "":
# Record the last peak
bindAlleleSeqFile.write("".join(bindAlleleSeq).upper() + "\n")
nonBindAlleleSeqFile.write("".join(nonBindAlleleSeq).upper() + "\n")
bindAlleleSeq = list(str(seqRecord.seq[peakLocation[1] - DISTANCE:peakLocation[1] + DISTANCE - 1]))
nonBindAlleleSeq = list(str(seqRecord.seq[peakLocation[1] - DISTANCE:peakLocation[1] + DISTANCE - 1]))
else:
numSharingPeak = numSharingPeak + 1
SNPLocationInSeq = DISTANCE - (peakLocation[1] - SNPLocation[1]) - 1
bindAlleleSeq[SNPLocationInSeq] = SNPAlleles[0]
nonBindAlleleSeq[SNPLocationInSeq] = SNPAlleles[1]
lastPeakLocation = peakLocation
[SNPLocation, SNPAlleles, peakLocation] = getSNPInfo(SNPToPeakFile.readline().strip())
print numSharingPeak
bindAlleleSeqFile.write("".join(bindAlleleSeq).upper() + "\n")
nonBindAlleleSeqFile.write("".join(nonBindAlleleSeq).upper() + "\n")
SNPToPeakFile.close()
bindAlleleSeqFile.close()
nonBindAlleleSeqFile.close()
if __name__=="__main__":
getSequencesForSNPs()
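A worked example of the window arithmetic above (illustrative only, with made-up coordinates): the extracted window is seq[peak - DISTANCE : peak + DISTANCE - 1], i.e. 2*DISTANCE - 1 bases starting at peak - DISTANCE, and the SNP's offset inside that window is computed as DISTANCE - (peak - SNP) - 1.
# Illustrative coordinates (made up), not read from any input file.
DISTANCE_EXAMPLE = 500
PEAK_POS = 10000       # peak position from the SNP-to-peak file
SNP_POS = 9800         # SNP position falling inside that peak
WINDOW_LENGTH = 2 * DISTANCE_EXAMPLE - 1                  # 999 bases extracted
SNP_OFFSET = DISTANCE_EXAMPLE - (PEAK_POS - SNP_POS) - 1  # 500 - 200 - 1 = 299
assert (WINDOW_LENGTH, SNP_OFFSET) == (999, 299)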
|
imk1/IMKTFBindingCode
|
getSequencesForSNPs.py
|
Python
|
mit
| 2,698
| 0.021497
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
This documentation was automatically generated using original comments in
Doxygen format. As some C types and data structures cannot be directly mapped
into Python types, some non-trivial type conversion may take place. Basically,
a type is replaced with the type that has the closest match, and sometimes one
argument of a generated function comprises several arguments of the original
function (usually two).
Functions that return an error code and pass the effective value back through
one of their arguments are transformed so that the effective value is returned
directly and a run-time exception is raised when the error code is negative.
"""
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_pocketsphinx', [dirname(__file__)])
except ImportError:
import _pocketsphinx
return _pocketsphinx
if fp is not None:
try:
_mod = imp.load_module('_pocketsphinx', fp, pathname, description)
finally:
fp.close()
return _mod
_pocketsphinx = swig_import_helper()
del swig_import_helper
else:
import _pocketsphinx
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import sphinxbase
class Hypothesis(object):
"""Proxy of C Hypothesis struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
hypstr = _swig_property(_pocketsphinx.Hypothesis_hypstr_get, _pocketsphinx.Hypothesis_hypstr_set)
best_score = _swig_property(_pocketsphinx.Hypothesis_best_score_get, _pocketsphinx.Hypothesis_best_score_set)
prob = _swig_property(_pocketsphinx.Hypothesis_prob_get, _pocketsphinx.Hypothesis_prob_set)
def __init__(self, *args):
"""__init__(Hypothesis self, char const * hypstr, int best_score, int prob) -> Hypothesis"""
this = _pocketsphinx.new_Hypothesis(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pocketsphinx.delete_Hypothesis
__del__ = lambda self : None;
Hypothesis_swigregister = _pocketsphinx.Hypothesis_swigregister
Hypothesis_swigregister(Hypothesis)
class Segment(object):
"""Proxy of C Segment struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
word = _swig_property(_pocketsphinx.Segment_word_get, _pocketsphinx.Segment_word_set)
ascore = _swig_property(_pocketsphinx.Segment_ascore_get, _pocketsphinx.Segment_ascore_set)
lscore = _swig_property(_pocketsphinx.Segment_lscore_get, _pocketsphinx.Segment_lscore_set)
lback = _swig_property(_pocketsphinx.Segment_lback_get, _pocketsphinx.Segment_lback_set)
prob = _swig_property(_pocketsphinx.Segment_prob_get, _pocketsphinx.Segment_prob_set)
start_frame = _swig_property(_pocketsphinx.Segment_start_frame_get, _pocketsphinx.Segment_start_frame_set)
end_frame = _swig_property(_pocketsphinx.Segment_end_frame_get, _pocketsphinx.Segment_end_frame_set)
def fromIter(*args):
"""fromIter(ps_seg_t * itor) -> Segment"""
return _pocketsphinx.Segment_fromIter(*args)
fromIter = staticmethod(fromIter)
__swig_destroy__ = _pocketsphinx.delete_Segment
__del__ = lambda self : None;
def __init__(self):
"""__init__(Segment self) -> Segment"""
this = _pocketsphinx.new_Segment()
try: self.this.append(this)
except: self.this = this
Segment_swigregister = _pocketsphinx.Segment_swigregister
Segment_swigregister(Segment)
def Segment_fromIter(*args):
"""Segment_fromIter(ps_seg_t * itor) -> Segment"""
return _pocketsphinx.Segment_fromIter(*args)
class NBest(object):
"""Proxy of C NBest struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
hypstr = _swig_property(_pocketsphinx.NBest_hypstr_get, _pocketsphinx.NBest_hypstr_set)
score = _swig_property(_pocketsphinx.NBest_score_get, _pocketsphinx.NBest_score_set)
def fromIter(*args):
"""fromIter(ps_nbest_t * itor) -> NBest"""
return _pocketsphinx.NBest_fromIter(*args)
fromIter = staticmethod(fromIter)
def hyp(self):
"""hyp(NBest self) -> Hypothesis"""
return _pocketsphinx.NBest_hyp(self)
__swig_destroy__ = _pocketsphinx.delete_NBest
__del__ = lambda self : None;
def __init__(self):
"""__init__(NBest self) -> NBest"""
this = _pocketsphinx.new_NBest()
try: self.this.append(this)
except: self.this = this
NBest_swigregister = _pocketsphinx.NBest_swigregister
NBest_swigregister(NBest)
def NBest_fromIter(*args):
"""NBest_fromIter(ps_nbest_t * itor) -> NBest"""
return _pocketsphinx.NBest_fromIter(*args)
class SegmentIterator(object):
"""Proxy of C SegmentIterator struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
ptr = _swig_property(_pocketsphinx.SegmentIterator_ptr_get, _pocketsphinx.SegmentIterator_ptr_set)
def __init__(self, *args):
"""__init__(SegmentIterator self, ps_seg_t * ptr) -> SegmentIterator"""
this = _pocketsphinx.new_SegmentIterator(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pocketsphinx.delete_SegmentIterator
__del__ = lambda self : None;
def next(self):
"""next(SegmentIterator self) -> Segment"""
return _pocketsphinx.SegmentIterator_next(self)
def __next__(self):
"""__next__(SegmentIterator self) -> Segment"""
return _pocketsphinx.SegmentIterator___next__(self)
SegmentIterator_swigregister = _pocketsphinx.SegmentIterator_swigregister
SegmentIterator_swigregister(SegmentIterator)
class NBestIterator(object):
"""Proxy of C NBestIterator struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
ptr = _swig_property(_pocketsphinx.NBestIterator_ptr_get, _pocketsphinx.NBestIterator_ptr_set)
def __init__(self, *args):
"""__init__(NBestIterator self, ps_nbest_t * ptr) -> NBestIterator"""
this = _pocketsphinx.new_NBestIterator(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pocketsphinx.delete_NBestIterator
__del__ = lambda self : None;
def next(self):
"""next(NBestIterator self) -> NBest"""
return _pocketsphinx.NBestIterator_next(self)
def __next__(self):
"""__next__(NBestIterator self) -> NBest"""
return _pocketsphinx.NBestIterator___next__(self)
NBestIterator_swigregister = _pocketsphinx.NBestIterator_swigregister
NBestIterator_swigregister(NBestIterator)
class Decoder(object):
"""Proxy of C Decoder struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(Decoder self) -> Decoder
__init__(Decoder self, Config config) -> Decoder
"""
this = _pocketsphinx.new_Decoder(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pocketsphinx.delete_Decoder
__del__ = lambda self : None;
def reinit(self, *args):
"""reinit(Decoder self, Config config)"""
return _pocketsphinx.Decoder_reinit(self, *args)
def load_dict(self, *args):
"""load_dict(Decoder self, char const * fdict, char const * ffilter, char const * format)"""
return _pocketsphinx.Decoder_load_dict(self, *args)
def save_dict(self, *args):
"""save_dict(Decoder self, char const * dictfile, char const * format)"""
return _pocketsphinx.Decoder_save_dict(self, *args)
def add_word(self, *args):
"""add_word(Decoder self, char const * word, char const * phones, int update)"""
return _pocketsphinx.Decoder_add_word(self, *args)
def lookup_word(self, *args):
"""lookup_word(Decoder self, char const * word) -> char *"""
return _pocketsphinx.Decoder_lookup_word(self, *args)
def get_lattice(self):
"""get_lattice(Decoder self) -> Lattice"""
return _pocketsphinx.Decoder_get_lattice(self)
def get_config(self):
"""get_config(Decoder self) -> Config"""
return _pocketsphinx.Decoder_get_config(self)
def default_config():
"""default_config() -> Config"""
return _pocketsphinx.Decoder_default_config()
default_config = staticmethod(default_config)
def file_config(*args):
"""file_config(char const * path) -> Config"""
return _pocketsphinx.Decoder_file_config(*args)
file_config = staticmethod(file_config)
def start_stream(self):
"""start_stream(Decoder self)"""
return _pocketsphinx.Decoder_start_stream(self)
def start_utt(self):
"""start_utt(Decoder self)"""
return _pocketsphinx.Decoder_start_utt(self)
def end_utt(self):
"""end_utt(Decoder self)"""
return _pocketsphinx.Decoder_end_utt(self)
def process_raw(self, *args):
"""process_raw(Decoder self, char const * SDATA, bool no_search, bool full_utt) -> int"""
return _pocketsphinx.Decoder_process_raw(self, *args)
def process_cep(self, *args):
"""process_cep(Decoder self, char const * SDATA, bool no_search, bool full_utt) -> int"""
return _pocketsphinx.Decoder_process_cep(self, *args)
def hyp(self):
"""hyp(Decoder self) -> Hypothesis"""
return _pocketsphinx.Decoder_hyp(self)
def get_fe(self):
"""get_fe(Decoder self) -> FrontEnd"""
return _pocketsphinx.Decoder_get_fe(self)
def get_feat(self):
"""get_feat(Decoder self) -> Feature"""
return _pocketsphinx.Decoder_get_feat(self)
def get_in_speech(self):
"""get_in_speech(Decoder self) -> bool"""
return _pocketsphinx.Decoder_get_in_speech(self)
def get_fsg(self, *args):
"""get_fsg(Decoder self, char const * name) -> FsgModel"""
return _pocketsphinx.Decoder_get_fsg(self, *args)
def set_fsg(self, *args):
"""set_fsg(Decoder self, char const * name, FsgModel fsg)"""
return _pocketsphinx.Decoder_set_fsg(self, *args)
def set_jsgf_file(self, *args):
"""set_jsgf_file(Decoder self, char const * name, char const * path)"""
return _pocketsphinx.Decoder_set_jsgf_file(self, *args)
def set_jsgf_string(self, *args):
"""set_jsgf_string(Decoder self, char const * name, char const * jsgf_string)"""
return _pocketsphinx.Decoder_set_jsgf_string(self, *args)
def get_kws(self, *args):
"""get_kws(Decoder self, char const * name) -> char const *"""
return _pocketsphinx.Decoder_get_kws(self, *args)
def set_kws(self, *args):
"""set_kws(Decoder self, char const * name, char const * keyfile)"""
return _pocketsphinx.Decoder_set_kws(self, *args)
def set_keyphrase(self, *args):
"""set_keyphrase(Decoder self, char const * name, char const * keyphrase)"""
return _pocketsphinx.Decoder_set_keyphrase(self, *args)
def set_allphone_file(self, *args):
"""set_allphone_file(Decoder self, char const * name, char const * lmfile)"""
return _pocketsphinx.Decoder_set_allphone_file(self, *args)
def get_lm(self, *args):
"""get_lm(Decoder self, char const * name) -> NGramModel"""
return _pocketsphinx.Decoder_get_lm(self, *args)
def set_lm(self, *args):
"""set_lm(Decoder self, char const * name, NGramModel lm)"""
return _pocketsphinx.Decoder_set_lm(self, *args)
def set_lm_file(self, *args):
"""set_lm_file(Decoder self, char const * name, char const * path)"""
return _pocketsphinx.Decoder_set_lm_file(self, *args)
def get_logmath(self):
"""get_logmath(Decoder self) -> LogMath"""
return _pocketsphinx.Decoder_get_logmath(self)
def set_search(self, *args):
"""set_search(Decoder self, char const * search_name)"""
return _pocketsphinx.Decoder_set_search(self, *args)
def get_search(self):
"""get_search(Decoder self) -> char const *"""
return _pocketsphinx.Decoder_get_search(self)
def n_frames(self):
"""n_frames(Decoder self) -> int"""
return _pocketsphinx.Decoder_n_frames(self)
def seg(self):
"""seg(Decoder self) -> SegmentList"""
return _pocketsphinx.Decoder_seg(self)
def nbest(self):
"""nbest(Decoder self) -> NBestList"""
return _pocketsphinx.Decoder_nbest(self)
Decoder_swigregister = _pocketsphinx.Decoder_swigregister
Decoder_swigregister(Decoder)
def Decoder_default_config():
"""Decoder_default_config() -> Config"""
return _pocketsphinx.Decoder_default_config()
def Decoder_file_config(*args):
"""Decoder_file_config(char const * path) -> Config"""
return _pocketsphinx.Decoder_file_config(*args)
class Lattice(object):
"""Proxy of C Lattice struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(Lattice self, char const * path) -> Lattice
__init__(Lattice self, Decoder decoder, char * path) -> Lattice
"""
this = _pocketsphinx.new_Lattice(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pocketsphinx.delete_Lattice
__del__ = lambda self : None;
def write(self, *args):
"""write(Lattice self, char const * path)"""
return _pocketsphinx.Lattice_write(self, *args)
def write_htk(self, *args):
"""write_htk(Lattice self, char const * path)"""
return _pocketsphinx.Lattice_write_htk(self, *args)
Lattice_swigregister = _pocketsphinx.Lattice_swigregister
Lattice_swigregister(Lattice)
class NBestList(object):
"""Proxy of C NBestList struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __iter__(self):
"""__iter__(NBestList self) -> NBestIterator"""
return _pocketsphinx.NBestList___iter__(self)
__swig_destroy__ = _pocketsphinx.delete_NBestList
__del__ = lambda self : None;
NBestList_swigregister = _pocketsphinx.NBestList_swigregister
NBestList_swigregister(NBestList)
class SegmentList(object):
"""Proxy of C SegmentList struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __iter__(self):
"""__iter__(SegmentList self) -> SegmentIterator"""
return _pocketsphinx.SegmentList___iter__(self)
__swig_destroy__ = _pocketsphinx.delete_SegmentList
__del__ = lambda self : None;
SegmentList_swigregister = _pocketsphinx.SegmentList_swigregister
SegmentList_swigregister(SegmentList)
|
matbra/radio_fearit
|
build/lib/python3.3/site-packages/pocketsphinx-0.0.9-py3.3-linux-x86_64.egg/pocketsphinx/pocketsphinx.py
|
Python
|
gpl-3.0
| 17,246
| 0.010495
|
from pagarme import card
from pagarme import plan
from tests.resources import pagarme_test
from tests.resources.dictionaries import card_dictionary
from tests.resources.dictionaries import customer_dictionary
from tests.resources.dictionaries import plan_dictionary
from tests.resources.dictionaries import transaction_dictionary
CARD = card.create(card_dictionary.VALID_CARD)
NO_TRIAL_PLAN = plan.create(plan_dictionary.NO_TRIAL_PLAN)
POSTBACK_URL = pagarme_test.create_postback_url()
BOLETO_PERCENTAGE_SPLIT_RULE_SUBSCRIPTION = {
"plan_id": NO_TRIAL_PLAN['id'],
"customer": customer_dictionary.CUSTOMER,
"payment_method": "boleto",
"postback_url": POSTBACK_URL,
"split_rules": transaction_dictionary.SPLIT_RULE_PERCENTAGE
}
BOLETO_SUBSCRIPTION = {
"plan_id": NO_TRIAL_PLAN['id'],
"customer": customer_dictionary.CUSTOMER,
"payment_method": "boleto",
"postback_url": POSTBACK_URL
}
CHARGES = {
"charges": "1"
}
CREDIT_CARD_PERCENTAGE_SPLIT_RULE_SUBSCRIPTION = {
"plan_id": NO_TRIAL_PLAN['id'],
"customer": customer_dictionary.CUSTOMER,
"card_id": CARD['id'],
"payment_method": "credit_card",
"postback_url": POSTBACK_URL,
"split_rules": transaction_dictionary.SPLIT_RULE_PERCENTAGE
}
CREDIT_CARD_SUBSCRIPTION = {
"plan_id": NO_TRIAL_PLAN['id'],
"customer": customer_dictionary.CUSTOMER,
"card_id": CARD['id'],
"payment_method": "credit_card",
"postback_url": POSTBACK_URL
}
UPDATE = {
"payment_method": "boleto"
}
|
pagarme/pagarme-python
|
tests/resources/dictionaries/subscription_dictionary.py
|
Python
|
mit
| 1,513
| 0
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc7292
class PKCS12TestCase(unittest.TestCase):
pfx_pem_text = """\
MIIJ0wIBAzCCCY8GCSqGSIb3DQEHAaCCCYAEggl8MIIJeDCCBggGCSqGSIb3DQEHAaCCBfkE
ggX1MIIF8TCCBe0GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAjuq0/+
0pyutQICB9AEggTYZe/mYBpmkDvKsve4EwIVwo1TNv4ldyx1qHZW2Ih6qQCY+Nv1Mnv9we0z
UTl4p3tQzCPWXnrSA82IgOdotLIez4YwXrgiKhcIkSSL+2yCmAoM+qkjiAIKq+l3UJ6Xhafe
2Kg4Ek/0RkHpe6GwjTtdefkpXpZgccMEopOtKQMLJWsDM7p77x/amn6yIk2tpskKqUY/4n8Y
xEiTWcRtTthYqZQIt+q94nKLYpt0o880SVOfvdEqp5KII7cTg60GJL+n6oN6hmP0bsAMvnk9
1f8/lFKMi9tsNU/KnUhbDVpjJwBQkhgbqBx6GdtoqSLSlYNPVM0wlntwm1JhH4ybiQ5sNzqO
7FlWC5bcYwkvOlx1gGrshY5jK/WjbA4paBpxSkgobJReirY9BeqITnvokXlub4tehHhM20Ik
42pKa3kGaHmowvzflxqE+oysW5Oa9XbZxBCfkOMJ70o4hqa+n66+E/uKcN9NbKbTo3zt3xdt
6ypOwHb74t5OcWaGx3EZsw0n0/V+WoLSpXOBwpx08+1yh7LV29aNQ0oEzVVkF6YYRQZtdIMe
s3xB2i6sjLal21ntk7iBzMJwVoi524SAZ/oW8SuDAn1c93AWWwKZLALv5V3FZ2pDiQXArcfz
DH2d5HJyNx7OlvKzNgEngwSyEC1XbjnOsZVUqGFENuDTa/brH4oEJHEkyWTyDudrz8iCEO80
e1PE4qqJ5CllN0CSVWqz4CxGDFIQXzR6ohn8f3dR3+DAaLYvAjBVMLJjk7+nfnB2L0HpanhT
Fz9AxPPIDf5pBQQwM14l8wKjEHIyfqclupeKNokBUr1ykioPyCr3nf4Rqe0Z4EKIY4OCpW6n
hrkWHmvF7OKR+bnuSk3jnBxjSN0Ivy5q9q3fntYrhscMGGR73umfi8Z29tM1vSP9jBZvirAo
geGf/sfOI0ewRvJf/5abnNg/78Zyk8WmlAHVFzNGcM3u3vhnNpTIVRuUyVkdSmOdbzeSfmqQ
2HPCEdC9HNm25KJt1pD6v6aP3Tw7qGl+tZyps7VB2i+a+UGcwQcClcoXcPSdG7Z1gBTzSr84
MuVPYlePuo1x+UwppSK3rM8ET6KqhGmESH5lKadvs8vdT6c407PfLcfxyAGzjH091prk2oRJ
xB3oQAYcKvkuMcM6FSLJC263Dj+pe1GGEexk1AoysYe67tK0sB66hvbd92HcyWhW8/vI2/PM
bX+OeEb7q+ugnsP+BmF/btWXn9AxfUqNWstyInKTn+XpqFViMIOG4e2xC4u/IvzG3VrTWUHF
4pspH3k7GB/EOLvtbsR0uacBFlsColJy0FaWT9rrdueU3YEiIRCC8LGi1XpUa8f5adeBKWN+
eRTrrF4o7uoNeGlnwZ7ebnb7k18Q0GRzzzTZPoMM4L703svfE/eNYWFHLY4NDQKSYgeum365
WAfZpHOX7YOc6oRGrGB+QuGoyikTTDO8xpcEmb8vDz4ZwHhN0PS056LNJeMoI0A/5DJb3e10
i1txlM48sbZBuIEIeixr52nwG4LuxqXGqShKaTfOrFxHjx4kI4/dp9dN/k8TGFsLWjuIgMJI
6nRHbWrxB3F0XKXagtLLep1MDwDwAuCyiW2YC0JzRvsJViIgjDA+eiHX0O6/8xiK9dzMQpIz
TVHSEqFlhORp0DGB2zATBgkqhkiG9w0BCRUxBgQEAQAAADBXBgkqhkiG9w0BCRQxSh5IADMA
ZgA3ADEAYQBmADYANQAtADEANgA4ADcALQA0ADQANABhAC0AOQBmADQANgAtAGMAOABiAGUA
MQA5ADQAYwAzAGUAOABlMGsGCSsGAQQBgjcRATFeHlwATQBpAGMAcgBvAHMAbwBmAHQAIABF
AG4AaABhAG4AYwBlAGQAIABDAHIAeQBwAHQAbwBnAHIAYQBwAGgAaQBjACAAUAByAG8AdgBp
AGQAZQByACAAdgAxAC4AMDCCA2gGCSqGSIb3DQEHAaCCA1kEggNVMIIDUTCCA00GCyqGSIb3
DQEMCgEDoIIDJTCCAyEGCiqGSIb3DQEJFgGgggMRBIIDDTCCAwkwggHxoAMCAQICEDbt9oc6
oQinRwE1826MiBEwDQYJKoZIhvcNAQEFBQAwFDESMBAGA1UEAxMJYW5vbnltb3VzMCAXDTE2
MDcxOTIyMDAwMVoYDzIxMTYwNjI1MjIwMDAxWjAUMRIwEAYDVQQDEwlhbm9ueW1vdXMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC8trBCTBjXXA4OgSO5nRTOU5T86ObCgc71
J2oCuUigSddcTDzebaD0wcyAgf101hAdwMKQ9DvrK0nGvm7FAMnnUuVeATafKgshLuUTUUfK
jx4Xif4LoS0/ev4BiOI5a1MlIRZ7T5Cyjg8bvuympzMuinQ/j1RPLIV0VGU2HuDxuuP3O898
GqZ3+F6Al5CUcwmOX9zCs91JdN/ZFZ05SXIpHQuyPSPUX5Vy8F1ZeJ8VG3nkbemfFlVkuKQq
vteL9mlT7z95rVZgGB3nUZL0tOB68eMcffA9zUksOmeTi5M6jnBcNeX2Jh9jS3YYd+IEliZm
mggQG7kPta8f+NqezL77AgMBAAGjVTBTMBUGA1UdJQQOMAwGCisGAQQBgjcKAwQwLwYDVR0R
BCgwJqAkBgorBgEEAYI3FAIDoBYMFGFub255bW91c0B3aW5kb3dzLXgAMAkGA1UdEwQCMAAw
DQYJKoZIhvcNAQEFBQADggEBALh+4qmNPzC6M8BW9/SC2ACQxxPh06GQUGx0D+GLYnp61ErZ
OtKyKdFh+uZWpu5vyYYAHCLXP7VdS/JhJy677ynAPjXiC/LAzrTNvGs74HDotD966Hiyy0Qr
ospFGiplHGRA5vXA2CiKSX+0HrVkN7rhk5PYkc6R+/cdosd+QZ8lkEa9yDWc5l//vWEbzwVy
mJf/PRf8NTkWAK6SPV7Y37j1mhkJjOH9VkRxNrd6kcihRa4u0ImXaXEsec77ER0so31DKCrP
m+rqZPj9NZSIYP3sMGJ4Bmm/n2YRdeaUzTdocfD3TRnKxs65DSgpiSq1gmtsXM7jAPs/Egrg
tbWEypgxFTATBgkqhkiG9w0BCRUxBgQEAQAAADA7MB8wBwYFKw4DAhoEFKVgj/32UdEyuQcB
rqr03dPnboinBBSU7mxdpB5LTCvorCI8Tk5OMiUzjgICB9A=
"""
def setUp(self):
self.asn1Spec = rfc7292.PFX()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pfx_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertEqual(3, asn1Object['version'])
oid = asn1Object['macData']['mac']['digestAlgorithm']['algorithm']
self.assertEqual(univ.ObjectIdentifier('1.3.14.3.2.26'), oid)
md_hex = asn1Object['macData']['mac']['digest'].prettyPrint()
self.assertEqual('0xa5608ffdf651d132b90701aeaaf4ddd3e76e88a7', md_hex)
self.assertEqual(
rfc5652.id_data, asn1Object['authSafe']['contentType'])
data, rest = der_decoder(
asn1Object['authSafe']['content'], asn1Spec=univ.OctetString())
self.assertFalse(rest)
authsafe, rest = der_decoder(data, asn1Spec=rfc7292.AuthenticatedSafe())
self.assertFalse(rest)
self.assertTrue(authsafe.prettyPrint())
self.assertEqual(data, der_encoder(authsafe))
for ci in authsafe:
self.assertEqual(rfc5652.id_data, ci['contentType'])
data, rest = der_decoder(ci['content'], asn1Spec=univ.OctetString())
self.assertFalse(rest)
sc, rest = der_decoder(data, asn1Spec=rfc7292.SafeContents())
self.assertFalse(rest)
self.assertTrue(sc.prettyPrint())
self.assertEqual(data, der_encoder(sc))
for sb in sc:
if sb['bagId'] in rfc7292.pkcs12BagTypeMap:
bv, rest = der_decoder(
sb['bagValue'],
asn1Spec=rfc7292.pkcs12BagTypeMap[sb['bagId']])
self.assertFalse(rest)
self.assertTrue(bv.prettyPrint())
self.assertEqual(sb['bagValue'], der_encoder(bv))
for attr in sb['bagAttributes']:
if attr['attrType'] in rfc5652.cmsAttributesMap:
av, rest = der_decoder(
attr['attrValues'][0],
asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
self.assertFalse(rest)
self.assertTrue(av.prettyPrint())
self.assertEqual(
attr['attrValues'][0], der_encoder(av))
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.pfx_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
digest_alg = asn1Object['macData']['mac']['digestAlgorithm']
self.assertFalse(digest_alg['parameters'].hasValue())
authsafe, rest = der_decoder(
asn1Object['authSafe']['content'],
asn1Spec=rfc7292.AuthenticatedSafe(),
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(authsafe.prettyPrint())
self.assertEqual(
asn1Object['authSafe']['content'], der_encoder(authsafe))
for ci in authsafe:
self.assertEqual(rfc5652.id_data, ci['contentType'])
sc, rest = der_decoder(
ci['content'], asn1Spec=rfc7292.SafeContents(),
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(sc.prettyPrint())
self.assertEqual(ci['content'], der_encoder(sc))
for sb in sc:
if sb['bagId'] == rfc7292.id_pkcs8ShroudedKeyBag:
bv = sb['bagValue']
enc_alg = bv['encryptionAlgorithm']['algorithm']
self.assertEqual(
rfc7292.pbeWithSHAAnd3_KeyTripleDES_CBC, enc_alg)
enc_alg_param = bv['encryptionAlgorithm']['parameters']
self.assertEqual(2000, enc_alg_param['iterations'])
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
etingof/pyasn1-modules
|
tests/test_rfc7292.py
|
Python
|
bsd-2-clause
| 8,295
| 0.000362
|
# You are climbing a staircase. It takes n steps to reach the top.
#
# Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
#
# Note: Given n will be a positive integer.
#
# Example 1:
#
# Input: 2
# Output: 2
# Explanation: There are two ways to climb to the top.
# 1. 1 step + 1 step
# 2. 2 steps
#
# Example 2:
#
# Input: 3
# Output: 3
# Explanation: There are three ways to climb to the top.
# 1. 1 step + 1 step + 1 step
# 2. 1 step + 2 steps
# 3. 2 steps + 1 step
class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
table = [1, 2]
i = 2
while i < n:
table.append(table[i-1] + table[i-2])
i += 1
return table[n-1]
# Note:
# Alternatively, recurse: branch on taking 1 step or 2 steps and add the counts
# from both branches (see the sketch below).
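# A minimal sketch of that recursive idea with memoization (hypothetical helper,
# not part of the original solution above):
def climb_stairs_recursive(n, memo=None):
    if memo is None:
        memo = {}
    if n <= 2:
        return n
    if n not in memo:
        memo[n] = (climb_stairs_recursive(n - 1, memo)
                   + climb_stairs_recursive(n - 2, memo))
    return memo[n]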
|
jigarkb/CTCI
|
LeetCode/070-E-ClimbingStairs.py
|
Python
|
mit
| 877
| 0.00114
|
from django.http import HttpResponse
def hello_world(request):
return HttpResponse("Hello, world.")
|
xyloeric/pi
|
piExp/pi/views.py
|
Python
|
bsd-3-clause
| 101
| 0.029703
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import sys
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf-8')
from . import config
from . import parsers
def main():
if len(sys.argv) == 2:
filename = sys.argv[1]
filename = parsers.to_unicode(filename)
parsers.run(filename)
else:
msg = 'Usage: {} <metadata>'.format(sys.argv[0])
print(msg)
print('\nPredefined Variables')
for k, v in config.PREDEFINED_VARIABLE_TABLE.items():
print('{}\t: {}'.format(k, v))
if __name__ == '__main__':
main()
|
if1live/easylinker
|
easylinker/cli.py
|
Python
|
mit
| 658
| 0.004559
|
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):
@decorators.related_bug('1420008')
@decorators.idempotent_id('646a6eaa-135f-4493-a0af-12583021224e')
def test_create_image_owner_param(self):
        # NOTE: Creating an image with an owner different from the tenant
        # owner by using the "owner" parameter requires admin privileges.
random_id = data_utils.rand_uuid_hex()
image = self.admin_client.create_image(
container_format='bare', disk_format='raw', owner=random_id)
self.addCleanup(self.admin_client.delete_image, image['id'])
image_info = self.admin_client.show_image(image['id'])
self.assertEqual(random_id, image_info['owner'])
@decorators.related_bug('1420008')
@decorators.idempotent_id('525ba546-10ef-4aad-bba1-1858095ce553')
def test_update_image_owner_param(self):
random_id_1 = data_utils.rand_uuid_hex()
image = self.admin_client.create_image(
container_format='bare', disk_format='raw', owner=random_id_1)
self.addCleanup(self.admin_client.delete_image, image['id'])
created_image_info = self.admin_client.show_image(image['id'])
random_id_2 = data_utils.rand_uuid_hex()
self.admin_client.update_image(
image['id'], [dict(replace="/owner", value=random_id_2)])
updated_image_info = self.admin_client.show_image(image['id'])
self.assertEqual(random_id_2, updated_image_info['owner'])
self.assertNotEqual(created_image_info['owner'],
updated_image_info['owner'])
|
masayukig/tempest
|
tempest/api/image/v2/admin/test_images.py
|
Python
|
apache-2.0
| 2,341
| 0
|
"""
File: DaqDevDiscovery01.py
Library Call Demonstrated: mcculw.ul.get_daq_device_inventory()
mcculw.ul.create_daq_device()
mcculw.ul.release_daq_device()
Purpose: Discovers DAQ devices and assigns board number to
the detected devices.
Demonstration: Displays the detected DAQ devices and flashes the
LED of the selected device.
Other Library Calls: mcculw.ul.ignore_instacal()
mcculw.ul.flash_led()
"""
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
import tkinter as tk
from tkinter import StringVar
from tkinter.ttk import Combobox # @UnresolvedImport
from mcculw import ul
from mcculw.enums import InterfaceType
from mcculw.ul import ULError
try:
from ui_examples_util import UIExample, show_ul_error
except ImportError:
from .ui_examples_util import UIExample, show_ul_error
class DaqDevDiscovery01(UIExample):
def __init__(self, master):
super(DaqDevDiscovery01, self).__init__(master)
self.board_num = 0
self.device_created = False
# Tell the UL to ignore any boards configured in InstaCal
ul.ignore_instacal()
self.create_widgets()
def discover_devices(self):
self.inventory = ul.get_daq_device_inventory(InterfaceType.ANY)
if len(self.inventory) > 0:
combobox_values = []
for device in self.inventory:
combobox_values.append(str(device))
self.devices_combobox["values"] = combobox_values
self.devices_combobox.current(0)
self.status_label["text"] = (str(len(self.inventory))
+ " DAQ Device(s) Discovered")
self.devices_combobox["state"] = "readonly"
self.flash_led_button["state"] = "normal"
else:
self.devices_combobox["values"] = [""]
self.devices_combobox.current(0)
self.status_label["text"] = "No Devices Discovered"
self.devices_combobox["state"] = "disabled"
self.flash_led_button["state"] = "disabled"
def flash_led(self):
try:
# Flash the device LED
ul.flash_led(self.board_num)
except ULError as e:
show_ul_error(e)
def selected_device_changed(self, *args): # @UnusedVariable
selected_index = self.devices_combobox.current()
inventory_count = len(self.inventory)
if self.device_created:
# Release any previously configured DAQ device from the UL.
ul.release_daq_device(self.board_num)
self.device_created = False
if inventory_count > 0 and selected_index < inventory_count:
descriptor = self.inventory[selected_index]
# Update the device ID label
self.device_id_label["text"] = descriptor.unique_id
# Create the DAQ device from the descriptor
# For performance reasons, it is not recommended to create
# and release the device every time hardware communication is
# required. Instead, create the device once and do not release
# it until no additional library calls will be made for this
# device
ul.create_daq_device(self.board_num, descriptor)
self.device_created = True
def create_widgets(self):
'''Create the tkinter UI'''
main_frame = tk.Frame(self)
main_frame.pack(fill=tk.X, anchor=tk.NW)
discover_button = tk.Button(main_frame)
discover_button["text"] = "Discover DAQ Devices"
discover_button["command"] = self.discover_devices
discover_button.pack(padx=3, pady=3)
self.status_label = tk.Label(main_frame)
self.status_label["text"] = "Status"
self.status_label.pack(anchor=tk.NW, padx=3, pady=3)
results_group = tk.LabelFrame(self, text="Discovered Devices")
results_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3)
self.selected_device_textvar = StringVar()
self.selected_device_textvar.trace('w', self.selected_device_changed)
self.devices_combobox = Combobox(
results_group, textvariable=self.selected_device_textvar)
self.devices_combobox["state"] = "disabled"
self.devices_combobox.pack(fill=tk.X, padx=3, pady=3)
device_id_frame = tk.Frame(results_group)
device_id_frame.pack(anchor=tk.NW)
device_id_left_label = tk.Label(device_id_frame)
device_id_left_label["text"] = "Device Identifier:"
device_id_left_label.grid(row=0, column=0, sticky=tk.W, padx=3, pady=3)
self.device_id_label = tk.Label(device_id_frame)
self.device_id_label.grid(row=0, column=1, sticky=tk.W, padx=3, pady=3)
self.flash_led_button = tk.Button(results_group)
self.flash_led_button["text"] = "Flash LED"
self.flash_led_button["command"] = self.flash_led
self.flash_led_button["state"] = "disabled"
self.flash_led_button.pack(padx=3, pady=3)
button_frame = tk.Frame(self)
button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)
quit_button = tk.Button(button_frame)
quit_button["text"] = "Quit"
quit_button["command"] = self.master.destroy
quit_button.grid(row=0, column=1, padx=3, pady=3)
# Start the example if this module is being run
if __name__ == "__main__":
# Start the example
DaqDevDiscovery01(master=tk.Tk()).mainloop()
|
mccdaq/mcculw
|
examples/ui/DaqDevDiscovery01.py
|
Python
|
mit
| 5,694
| 0
|
from django.db.models import Q
from django_filters import rest_framework as filters
from adesao.models import SistemaCultura, UFS
from planotrabalho.models import Componente
class SistemaCulturaFilter(filters.FilterSet):
ente_federado = filters.CharFilter(
field_name='ente_federado__nome__unaccent', lookup_expr='icontains')
estado_sigla = filters.CharFilter(method='sigla_filter')
cnpj_prefeitura = filters.CharFilter(
field_name='sede__cnpj', lookup_expr='contains')
situacao_adesao = filters.CharFilter(
field_name='estado_processo', lookup_expr='exact')
data_adesao = filters.DateFilter(
field_name='data_publicacao_acordo')
data_adesao_min = filters.DateFilter(
field_name='data_publicacao_acordo', lookup_expr=('gte'))
data_adesao_max = filters.DateFilter(
field_name='data_publicacao_acordo', lookup_expr=('lte'))
data_componente_min = filters.DateFilter(
field_name='data_componente_acordo', lookup_expr=('gte'),
method='data_componente_min')
data_componente_max = filters.DateFilter(
field_name='data_componente_acordo', lookup_expr=('lte'),
method='data_componente_max')
data_lei_min = filters.DateFilter(
field_name='legislacao__data_publicacao', lookup_expr=('gte'))
data_lei_max = filters.DateFilter(
field_name='legislacao__data_publicacao', lookup_expr=('lte'))
data_orgao_gestor_min = filters.DateFilter(
field_name='orgao_gestor__data_publicacao', lookup_expr=('gte'))
data_orgao_gestor_max = filters.DateFilter(
field_name='orgao_gestor__data_publicacao', lookup_expr=('lte'))
data_orgao_gestor_cnpj_min = filters.DateFilter(
field_name='orgao_gestor__comprovante_cnpj__data_envio', lookup_expr=('gte'))
data_orgao_gestor_cnpj_max = filters.DateFilter(
field_name='orgao_gestor__comprovante_cnpj__data_envio', lookup_expr=('lte'))
data_conselho_min = filters.DateFilter(
field_name='conselho__data_publicacao', lookup_expr=('gte'))
data_conselho_max = filters.DateFilter(
field_name='conselho__data_publicacao', lookup_expr=('lte'))
data_conselho_lei_min = filters.DateFilter(
field_name='conselho__lei__data_publicacao', lookup_expr=('gte'))
data_conselho_lei_max = filters.DateFilter(
field_name='conselho__lei__data_publicacao', lookup_expr=('lte'))
data_fundo_cultura_min = filters.DateFilter(
field_name='legislacao__data_publicacao', lookup_expr=('gte'))
data_fundo_cultura_max = filters.DateFilter(
field_name='legislacao__data_publicacao', lookup_expr=('lte'))
data_fundo_cultura_cnpj_min = filters.DateFilter(
field_name='fundo_cultura__comprovante_cnpj__data_envio', lookup_expr=('gte'))
data_fundo_cultura_cnpj_max = filters.DateFilter(
field_name='fundo_cultura__comprovante_cnpj__data_envio', lookup_expr=('lte'))
data_plano_min = filters.DateFilter(
field_name='plano__data_publicacao', lookup_expr=('gte'))
data_plano_max = filters.DateFilter(
field_name='plano__data_publicacao', lookup_expr=('lte'))
data_plano_meta_min = filters.DateFilter(
field_name='plano__metas__data_envio', lookup_expr=('gte'))
data_plano_meta_max = filters.DateFilter(
field_name='plano__metas__data_envio', lookup_expr=('lte'))
orgao_gestor_dados_bancarios = filters.BooleanFilter(method='gestor_dados_bancarios_filter')
fundo_cultura_dados_bancarios = filters.BooleanFilter(method='fundo_cultura_dados_bancarios_filter')
situacao_lei_sistema = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='legislacao__situacao',
to_field_name='situacao'
)
situacao_orgao_gestor = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='orgao_gestor__situacao',
to_field_name='situacao'
)
situacao_conselho_cultural = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='conselho__situacao',
to_field_name='situacao'
)
situacao_fundo_cultura = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='fundo_cultura__situacao',
to_field_name='situacao'
)
situacao_plano_cultura = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='plano__situacao',
to_field_name='situacao'
)
municipal = filters.BooleanFilter(method='municipal_filter')
estadual = filters.BooleanFilter(method='estadual_filter')
class Meta:
model = SistemaCultura
exclude = (
'oficio_cadastrador',
'oficio_prorrogacao_prazo',)
def gestor_dados_bancarios_filter(self, queryset, name, value):
queryset = queryset.exclude(orgao_gestor__banco='').exclude(orgao_gestor__agencia='').exclude(orgao_gestor__conta='').exclude(orgao_gestor__banco__isnull=True).exclude(orgao_gestor__agencia__isnull=True).exclude(orgao_gestor__conta__isnull=True)
return queryset
def fundo_cultura_dados_bancarios_filter(self, queryset, name, value):
queryset = queryset.exclude(fundo_cultura__banco='').exclude(fundo_cultura__agencia='').exclude(fundo_cultura__conta='').exclude(fundo_cultura__banco__isnull=True).exclude(fundo_cultura__agencia__isnull=True).exclude(fundo_cultura__conta__isnull=True)
return queryset
def sigla_filter(self, queryset, name, value):
try:
inverseUf = {value: key for key, value in UFS.items()}
cod_ibge = inverseUf[value.upper()]
except Exception:
cod_ibge = value
return queryset.filter(Q(ente_federado__cod_ibge__startswith=cod_ibge))
def estadual_filter(self, queryset, name, value):
pular_filtro = self.checar_filtro_municipal_estadual_ativos()
if(pular_filtro):
return queryset
if value:
queryset = queryset.filter(ente_federado__cod_ibge__lte=100)
return queryset
def municipal_filter(self, queryset, name, value):
pular_filtro = self.checar_filtro_municipal_estadual_ativos()
if(pular_filtro):
return queryset
if value:
queryset = queryset.filter(ente_federado__cod_ibge__gt=100)
return queryset
def checar_filtro_municipal_estadual_ativos(self):
try:
estadual_filter = self.data.getlist('estadual')[0]
municipal_filter = self.data.getlist('municipal')[0]
except IndexError:
return False
if(estadual_filter == 'true' and municipal_filter == 'true'):
return True
return False
class PlanoTrabalhoFilter(SistemaCulturaFilter):
class Meta:
model = SistemaCultura
exclude = (
'oficio_cadastrador',
'oficio_prorrogacao_prazo',)
|
culturagovbr/sistema-nacional-cultura
|
apiv2/filters.py
|
Python
|
agpl-3.0
| 6,975
| 0.001434
|
"""
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
from __future__ import unicode_literals
import inspect
from collections import namedtuple
from django.core.exceptions import FieldDoesNotExist
from django.db.backends import utils
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation. The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2m direct')
class InvalidQuery(Exception):
"""
The query passed to raw isn't a safe query to use with raw.
"""
pass
class QueryWrapper(object):
"""
    A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
contains_aggregate = False
def __init__(self, sql, params):
self.data = sql, list(params)
def as_sql(self, compiler=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + list(kwargs.items()))
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
def clone(self):
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(self, reuse, allow_joins=allow_joins, split_subq=False)
query.promote_joins(joins)
return clause
@classmethod
def _refs_aggregate(cls, obj, existing_aggregates):
if not isinstance(obj, tree.Node):
aggregate, aggregate_lookups = refs_aggregate(obj[0].split(LOOKUP_SEP), existing_aggregates)
if not aggregate and hasattr(obj[1], 'refs_aggregate'):
return obj[1].refs_aggregate(existing_aggregates)
return aggregate, aggregate_lookups
for c in obj.children:
aggregate, aggregate_lookups = cls._refs_aggregate(c, existing_aggregates)
if aggregate:
return aggregate, aggregate_lookups
return False, ()
def refs_aggregate(self, existing_aggregates):
if not existing_aggregates:
return False
return self._refs_aggregate(self, existing_aggregates)
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
def __get__(self, instance, cls=None):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
non_deferred_model = instance._meta.proxy_for_model
opts = non_deferred_model._meta
assert instance is not None
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
f = opts.get_field(self.field_name)
except FieldDoesNotExist:
f = [f for f in opts.fields if f.attname == self.field_name][0]
name = f.name
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance, name)
if val is None:
instance.refresh_from_db(fields=[self.field_name])
val = getattr(instance, self.field_name)
data[self.field_name] = val
return data[self.field_name]
def __set__(self, instance, value):
"""
Deferred loading attributes can be set normally (which means there will
        never be a database lookup involved).
"""
instance.__dict__[self.field_name] = value
def _check_parent_chain(self, instance, name):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be fetched
field is a primary key field.
"""
opts = instance._meta
f = opts.get_field(name)
link_field = opts.get_ancestor_link(f.model)
if f.primary_key and f != link_field:
return getattr(instance, link_field.attname)
return None
class RegisterLookupMixin(object):
def _get_lookup(self, lookup_name):
try:
return self.class_lookups[lookup_name]
except KeyError:
# To allow for inheritance, check parent class' class_lookups.
for parent in inspect.getmro(self.__class__):
if 'class_lookups' not in parent.__dict__:
continue
if lookup_name in parent.class_lookups:
return parent.class_lookups[lookup_name]
except AttributeError:
# This class didn't have any class_lookups
pass
return None
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@classmethod
def register_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if 'class_lookups' not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
return lookup
@classmethod
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Returns True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
* restricted - a boolean field, indicating if the field list has been
      manually restricted using a requested clause
* requested - The select_related() dictionary.
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.attname not in load_fields:
if restricted and field.name in requested:
raise InvalidQuery("Field %s.%s cannot be both deferred"
" and traversed using select_related"
" at the same time." %
(field.model._meta.object_name, field.name))
return False
return True
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.
def deferred_class_factory(model, attrs):
"""
Returns a class object that is a copy of "model" with the specified "attrs"
being replaced with DeferredAttribute objects. The "pk_value" ties the
deferred attributes to a particular instance of the model.
"""
if not attrs:
return model
opts = model._meta
# Never create deferred models based on deferred model
if model._deferred:
# Deferred models are proxies for the non-deferred model. We never
# create chains of defers => proxy_for_model is the non-deferred
# model.
model = opts.proxy_for_model
# The app registry wants a unique name for each model, otherwise the new
# class won't be created (we get an exception). Therefore, we generate
# the name using the passed in attrs. It's OK to reuse an existing class
# object if the attrs are identical.
name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(attrs)))
name = utils.truncate_name(name, 80, 32)
try:
return opts.apps.get_model(model._meta.app_label, name)
except LookupError:
class Meta:
proxy = True
apps = opts.apps
app_label = opts.app_label
overrides = {attr: DeferredAttribute(attr, model) for attr in attrs}
overrides["Meta"] = Meta
overrides["__module__"] = model.__module__
overrides["_deferred"] = True
return type(str(name), (model,), overrides)
def refs_aggregate(lookup_parts, aggregates):
"""
A helper method to check if the lookup_parts contains references
to the given aggregates set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in aggregates and aggregates[level_n_lookup].contains_aggregate:
return aggregates[level_n_lookup], lookup_parts[n:]
return False, ()
def refs_expression(lookup_parts, annotations):
"""
A helper method to check if the lookup_parts contains references
to the given annotations set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
def check_rel_lookup_compatibility(model, target_opts, field):
"""
Check that self.model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model or
opts.concrete_model in model._meta.get_parent_list() or
model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
# place = OnetoOneField(Place, primary_key=True):
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return (
check(target_opts) or
(getattr(field, 'primary_key', False) and check(field.model._meta))
)
|
yephper/django
|
django/db/models/query_utils.py
|
Python
|
bsd-3-clause
| 13,827
| 0.000579
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training Script for SpecGAN on a waveform dataset.
This follows the original SpecGAN training,
where the magnitude spectra are normalized
to sit between -1 and 1.
"""
import os
import tensorflow as tf
import numpy as np
from tensorflow.keras import activations, utils
from audio_synthesis.structures import spec_gan
from audio_synthesis.models import wgan
from audio_synthesis.datasets import waveform_dataset
from audio_synthesis.utils import waveform_save_helper as save_helper
# Setup Paramaters
D_UPDATES_PER_G = 5
Z_DIM = 64
BATCH_SIZE = 64
EPOCHS = 1800
SAMPLING_RATE = 16000
GRIFFIN_LIM_ITERATIONS = 16
FFT_FRAME_LENGTH = 512
FFT_FRAME_STEP = 128
LOG_MAGNITUDE = True
Z_IN_SHAPE = [4, 8, 1024]
SPECTOGRAM_IMAGE_SHAPE = [-1, 128, 256, 1]
CHECKPOINT_DIR = '_results/representation_study/SpeechMNIST/SpecGAN_HR/training_checkpoints/'
RESULT_DIR = '_results/representation_study/SpeechMNIST/SpecGAN_HR/audio/'
DATASET_PATH = 'data/SpeechMNIST_1850.npz'
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
print('Num GPUs Available: ', len(tf.config.experimental.list_physical_devices('GPU')))
raw_dataset, magnitude_stats, _ =\
waveform_dataset.get_magnitude_phase_dataset(
DATASET_PATH, FFT_FRAME_LENGTH, FFT_FRAME_STEP, LOG_MAGNITUDE
)
raw_dataset = raw_dataset[:, :, :, 0] # Remove the phase information
normalized_raw_dataset = []
pb_i = utils.Progbar(len(raw_dataset))
for data_point in raw_dataset:
normalized_raw_dataset.append(waveform_dataset.normalize(
data_point, *magnitude_stats
))
pb_i.add(1)
normalized_raw_dataset = np.array(normalized_raw_dataset)
generator = spec_gan.Generator(activation=activations.tanh, in_shape=Z_IN_SHAPE)
discriminator = spec_gan.Discriminator(input_shape=SPECTOGRAM_IMAGE_SHAPE)
generator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9)
get_waveform = lambda magnitude:\
save_helper.get_waveform_from_normalized_magnitude(
magnitude, magnitude_stats, GRIFFIN_LIM_ITERATIONS, FFT_FRAME_LENGTH,
FFT_FRAME_STEP, LOG_MAGNITUDE
)
save_examples = lambda epoch, real, generated:\
save_helper.save_wav_data(
epoch, real, generated, SAMPLING_RATE, RESULT_DIR, get_waveform
)
spec_gan_model = wgan.WGAN(
normalized_raw_dataset, generator, [discriminator], Z_DIM, generator_optimizer,
discriminator_optimizer, discriminator_training_ratio=D_UPDATES_PER_G,
batch_size=BATCH_SIZE, epochs=EPOCHS, checkpoint_dir=CHECKPOINT_DIR,
fn_save_examples=save_examples
)
spec_gan_model.restore('ckpt-129', 1290)
spec_gan_model.train()
if __name__ == '__main__':
main()
|
googleinterns/audio_synthesis
|
experiments/representation_study/train_spec_gan.py
|
Python
|
apache-2.0
| 3,424
| 0.003505
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Search Ads operators.
"""
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Optional
from airflow import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.marketing_platform.hooks.search_ads import GoogleSearchAdsHook
from airflow.utils.decorators import apply_defaults
class GoogleSearchAdsInsertReportOperator(BaseOperator):
"""
Inserts a report request into the reporting system.
    .. seealso::
For API documentation check:
https://developers.google.com/search-ads/v2/reference/reports/request
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsInsertReportOperator`
:param report: Report to be generated
:type report: Dict[str, Any]
    :param api_version: The version of the API that will be requested, for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report",)
template_ext = (".json",)
@apply_defaults
def __init__(
self,
report: Dict[str, Any],
api_version: str = "v2",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.report = report
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
def execute(self, context: Dict):
hook = GoogleSearchAdsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
)
self.log.info("Generating Search Ads report")
response = hook.insert_report(report=self.report)
report_id = response.get("id")
self.xcom_push(context, key="report_id", value=report_id)
self.log.info("Report generated, id: %s", report_id)
return response
class GoogleSearchAdsDownloadReportOperator(BaseOperator):
"""
Downloads a report to GCS bucket.
    .. seealso::
For API documentation check:
https://developers.google.com/search-ads/v2/reference/reports/getFile
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsGetfileReportOperator`
:param report_id: ID of the report.
:type report_id: str
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param report_name: The report name to set when uploading the local file. If not provided then
report_id is used.
:type report_name: str
:param gzip: Option to compress local file or file data for upload
:type gzip: bool
    :param api_version: The version of the API that will be requested, for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report_name", "report_id", "bucket_name")
@apply_defaults
def __init__(
self,
report_id: str,
bucket_name: str,
report_name: Optional[str] = None,
gzip: bool = True,
chunk_size: int = 10 * 1024 * 1024,
api_version: str = "v2",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.report_id = report_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.report_id = report_id
self.chunk_size = chunk_size
self.gzip = gzip
self.bucket_name = self._set_bucket_name(bucket_name)
self.report_name = report_name
def _resolve_file_name(self, name: str) -> str:
csv = ".csv"
gzip = ".gz"
if not name.endswith(csv):
name += csv
if self.gzip:
name += gzip
return name
@staticmethod
def _set_bucket_name(name: str) -> str:
bucket = name if not name.startswith("gs://") else name[5:]
return bucket.strip("/")
@staticmethod
def _handle_report_fragment(fragment: bytes) -> bytes:
fragment_records = fragment.split(b"\n", 1)
if len(fragment_records) > 1:
return fragment_records[1]
return b""
def execute(self, context: Dict):
hook = GoogleSearchAdsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to
)
# Resolve file name of the report
report_name = self.report_name or self.report_id
report_name = self._resolve_file_name(report_name)
response = hook.get(report_id=self.report_id)
if not response['isReportReady']:
raise AirflowException('Report {} is not ready yet'.format(self.report_id))
# Resolve report fragments
fragments_count = len(response["files"])
# Download chunks of report's data
self.log.info("Downloading Search Ads report %s", self.report_id)
with NamedTemporaryFile() as temp_file:
for i in range(fragments_count):
byte_content = hook.get_file(
report_fragment=i, report_id=self.report_id
)
fragment = (
byte_content
if i == 0
else self._handle_report_fragment(byte_content)
)
temp_file.write(fragment)
temp_file.flush()
gcs_hook.upload(
bucket_name=self.bucket_name,
object_name=report_name,
gzip=self.gzip,
filename=temp_file.name,
)
self.xcom_push(context, key="file_name", value=report_name)
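# Hedged usage sketch (not part of this module): one way to chain the two
# operators in a DAG. The dag id, bucket name and report body below are
# illustrative placeholders only.
def _build_example_search_ads_dag():
    from airflow import DAG
    from airflow.utils.dates import days_ago

    with DAG("example_search_ads", start_date=days_ago(1), schedule_interval=None) as dag:
        insert_report = GoogleSearchAdsInsertReportOperator(
            task_id="insert_report",
            report={"reportScope": {"agencyId": "AGENCY_ID"}, "reportType": "account"},
        )
        download_report = GoogleSearchAdsDownloadReportOperator(
            task_id="download_report",
            report_id="{{ task_instance.xcom_pull('insert_report', key='report_id') }}",
            bucket_name="example-bucket",
        )
        insert_report >> download_report
    return dag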
|
spektom/incubator-airflow
|
airflow/providers/google/marketing_platform/operators/search_ads.py
|
Python
|
apache-2.0
| 7,440
| 0.00121
|
from fruits import validate_fruit
fruits = ["banana", "lemon", "apple", "orange", "batman"]
print fruits
def list_fruits(fruits, byName=True):
if byName:
# WARNING: this won't make a copy of the list and return it. It will change the list FOREVER
fruits.sort()
for index, fruit in enumerate(fruits):
if validate_fruit(fruit):
print "Fruit nr %d is %s" % (index, fruit)
else:
print "This %s is no fruit!" % (fruit)
list_fruits(fruits)
print fruits
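# A hedged sketch of a non-mutating variant (illustrative only, matching this
# file's Python 2 style): sorted() returns a new list, so the caller's list
# keeps its original order.
def list_fruits_copy(fruits):
    for index, fruit in enumerate(sorted(fruits)):
        if validate_fruit(fruit):
            print "Fruit nr %d is %s" % (index, fruit)
        else:
            print "This %s is no fruit!" % (fruit)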
|
Painatalman/python101
|
sources/101_test.py
|
Python
|
apache-2.0
| 519
| 0.003854
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from kazoo.client import KazooClient
from libzookeeper.conf import PRINCIPAL_NAME
def get_children_data(ensemble, namespace, read_only=True):
zk = KazooClient(hosts=ensemble, read_only=read_only, sasl_server_principal=PRINCIPAL_NAME.get())
zk.start()
children_data = []
children = zk.get_children(namespace)
for node in children:
data, stat = zk.get("%s/%s" % (namespace, node))
children_data.append(data)
zk.stop()
return children_data
|
epssy/hue
|
desktop/libs/libzookeeper/src/libzookeeper/models.py
|
Python
|
apache-2.0
| 1,274
| 0.007064
|
#!/usr/bin/env python
# Copyright (c) 2010-2013 by Yaco Sistemas <goinnn@gmail.com> or <pmartin@yaco.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this programe. If not, see <http://www.gnu.org/licenses/>.
import django
import os
import sys
from django.conf import ENVIRONMENT_VARIABLE
from django.core import management
if len(sys.argv) == 1:
os.environ[ENVIRONMENT_VARIABLE] = 'testing.settings'
else:
os.environ[ENVIRONMENT_VARIABLE] = sys.argv[1]
if django.VERSION[0] == 1 and django.VERSION[1] <= 5:
management.call_command('test', 'unit_tests')
else:
management.call_command('test', 'testing.unit_tests')
|
tlevine/django-inplaceedit
|
testing/run_tests.py
|
Python
|
lgpl-3.0
| 1,189
| 0.000841
|
# Each person has a preference ordering; produce an overall ordering that does not violate
# any individual's preferences. Solved with topological sort (https://instant.1point3acres.com/thread/207601)
import itertools
import collections
def preferenceList1(prefList):  # topological sort 1
    # Collect ordering constraints as (earlier, later) pairs.
    pairs = []
    for lis in prefList:
        for left, right in zip(lis, lis[1:]):
            pairs.append((left, right))
    allItems, res = set(itertools.chain(*pairs)), []
    while pairs:
        # Items that never appear on the right-hand side have no remaining predecessors.
        free = allItems - {right for _, right in pairs}
        if not free:
            break  # cycle detected; whatever is left is appended unordered below
        res += list(free)
        pairs = [pair for pair in pairs if free.isdisjoint(pair)]
        allItems -= free
    return res + list(allItems)
print(preferenceList1([[1, 2, 3, 4], ['a', 'b', 'c', 'd'], ['a', 1, 8], [2, 'b', 'e'], [3, 'c']]))
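# A second sketch of the same idea (illustrative only) using the imported
# collections module: explicit Kahn's algorithm with an indegree map and a
# deque. On a cycle the result simply omits the items stuck in it.
def preferenceList2(prefList):
    graph = collections.defaultdict(set)
    indegree = collections.defaultdict(int)
    items = set()
    for lis in prefList:
        items.update(lis)
        for left, right in zip(lis, lis[1:]):
            if right not in graph[left]:
                graph[left].add(right)
                indegree[right] += 1
    queue = collections.deque(item for item in items if indegree[item] == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for successor in graph[node]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                queue.append(successor)
    return order

print(preferenceList2([[1, 2, 3, 4], ['a', 'b', 'c', 'd'], ['a', 1, 8], [2, 'b', 'e'], [3, 'c']]))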
|
seanxwzhang/LeetCode
|
Airbnb/preference_list.py
|
Python
|
mit
| 788
| 0.007003
|
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
PrivateLinkFactory,
)
from osf.utils import permissions
@pytest.fixture()
def admin():
return AuthUserFactory()
@pytest.fixture()
def base_url():
return '/{}nodes/'.format(API_BASE)
@pytest.fixture()
def read_contrib():
return AuthUserFactory()
@pytest.fixture()
def write_contrib():
return AuthUserFactory()
@pytest.fixture()
def valid_contributors(admin, read_contrib, write_contrib):
return [
admin._id,
read_contrib._id,
write_contrib._id,
]
@pytest.fixture()
def private_node_one(admin, read_contrib, write_contrib):
private_node_one = ProjectFactory(
is_public=False,
creator=admin,
title='Private One')
private_node_one.add_contributor(
read_contrib, permissions=[
permissions.READ], save=True)
private_node_one.add_contributor(
write_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
save=True)
return private_node_one
@pytest.fixture()
def private_node_one_anonymous_link(private_node_one):
private_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
private_node_one_anonymous_link.nodes.add(private_node_one)
private_node_one_anonymous_link.save()
return private_node_one_anonymous_link
@pytest.fixture()
def private_node_one_private_link(private_node_one):
private_node_one_private_link = PrivateLinkFactory(anonymous=False)
private_node_one_private_link.nodes.add(private_node_one)
private_node_one_private_link.save()
return private_node_one_private_link
@pytest.fixture()
def private_node_one_url(private_node_one):
return '/{}nodes/{}/'.format(API_BASE, private_node_one._id)
@pytest.fixture()
def private_node_two(admin, read_contrib, write_contrib):
private_node_two = ProjectFactory(
is_public=False,
creator=admin,
title='Private Two')
private_node_two.add_contributor(
read_contrib, permissions=[permissions.READ], save=True)
private_node_two.add_contributor(
write_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
save=True)
return private_node_two
@pytest.fixture()
def private_node_two_url(private_node_two):
return '/{}nodes/{}/'.format(API_BASE, private_node_two._id)
@pytest.fixture()
def public_node_one(admin, read_contrib, write_contrib):
public_node_one = ProjectFactory(
is_public=True, creator=admin, title='Public One')
public_node_one.add_contributor(
read_contrib, permissions=[permissions.READ], save=True)
public_node_one.add_contributor(
write_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
save=True)
return public_node_one
@pytest.fixture()
def public_node_one_anonymous_link(public_node_one):
public_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
public_node_one_anonymous_link.nodes.add(public_node_one)
public_node_one_anonymous_link.save()
return public_node_one_anonymous_link
@pytest.fixture()
def public_node_one_private_link(public_node_one):
public_node_one_private_link = PrivateLinkFactory(anonymous=False)
public_node_one_private_link.nodes.add(public_node_one)
public_node_one_private_link.save()
return public_node_one_private_link
@pytest.fixture()
def public_node_one_url(public_node_one):
return '/{}nodes/{}/'.format(API_BASE, public_node_one._id)
@pytest.fixture()
def public_node_two(admin, read_contrib, write_contrib):
public_node_two = ProjectFactory(
is_public=True, creator=admin, title='Public Two')
public_node_two.add_contributor(
read_contrib, permissions=[permissions.READ], save=True)
public_node_two.add_contributor(
write_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
save=True)
return public_node_two
@pytest.fixture()
def public_node_two_url(public_node_two):
return '/{}nodes/{}/'.format(API_BASE, public_node_two._id)
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
@pytest.mark.usefixtures(
'admin',
'read_contrib',
'write_contrib',
'valid_contributors',
'private_node_one',
'private_node_one_anonymous_link',
'private_node_one_private_link',
'private_node_one_url',
'private_node_two',
'private_node_two_url',
'public_node_one',
'public_node_one_anonymous_link',
'public_node_one_private_link',
'public_node_one_url',
'public_node_two',
'public_node_two_url')
class TestNodeDetailViewOnlyLinks:
def test_private_node(
self, app, admin, read_contrib, valid_contributors,
private_node_one, private_node_one_url,
private_node_one_private_link,
private_node_one_anonymous_link,
public_node_one_url,
public_node_one_private_link,
public_node_one_anonymous_link):
# test_private_node_with_link_works_when_using_link
res_normal = app.get(private_node_one_url, auth=read_contrib.auth)
assert res_normal.status_code == 200
res_linked = app.get(
private_node_one_url,
{'view_only': private_node_one_private_link.key})
assert res_linked.status_code == 200
assert res_linked.json['data']['attributes']['current_user_permissions'] == [
'read']
# Remove any keys that will be different for view-only responses
res_normal_json = res_normal.json
res_linked_json = res_linked.json
user_can_comment = res_normal_json['data']['attributes'].pop(
'current_user_can_comment')
view_only_can_comment = res_linked_json['data']['attributes'].pop(
'current_user_can_comment')
assert user_can_comment
assert not view_only_can_comment
# test_private_node_with_link_unauthorized_when_not_using_link
res = app.get(private_node_one_url, expect_errors=True)
assert res.status_code == 401
# test_private_node_with_link_anonymous_does_not_expose_contributor_id
res = app.get(private_node_one_url, {
'view_only': private_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
embeds = res.json['data'].get('embeds', None)
assert embeds is None or 'contributors' not in embeds
# test_private_node_with_link_non_anonymous_does_expose_contributor_id
res = app.get(private_node_one_url, {
'view_only': private_node_one_private_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
# test_private_node_logged_in_with_anonymous_link_does_not_expose_contributor_id
res = app.get(private_node_one_url, {
'view_only': private_node_one_private_link.key,
'embed': 'contributors',
}, auth=admin.auth)
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
# test_public_node_with_link_anonymous_does_not_expose_user_id
res = app.get(public_node_one_url, {
'view_only': public_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
embeds = res.json['data'].get('embeds', None)
assert embeds is None or 'contributors' not in embeds
# test_public_node_with_link_non_anonymous_does_expose_contributor_id
res = app.get(public_node_one_url, {
'view_only': public_node_one_private_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
# test_public_node_with_link_unused_does_expose_contributor_id
res = app.get(public_node_one_url, {
'embed': 'contributors',
})
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
# test_view_only_link_does_not_grant_write_permission
payload = {
'data': {
'attributes': {
'title': 'Cannot touch this'},
'id': private_node_one._id,
'type': 'nodes',
}
}
res = app.patch_json_api(private_node_one_url, payload, {
'view_only': private_node_one_private_link.key,
}, expect_errors=True)
assert res.status_code == 401
        # test_view_only_link_from_another_project_does_not_grant_view_permission
res = app.get(private_node_one_url, {
'view_only': public_node_one_private_link.key,
}, expect_errors=True)
assert res.status_code == 401
# test_private_project_logs_with_anonymous_link_does_not_expose_user_id
res = app.get(private_node_one_url + 'logs/', {
'view_only': str(private_node_one_anonymous_link.key),
})
assert res.status_code == 200
body = res.body
for id in valid_contributors:
assert id not in body
# test_private_project_with_anonymous_link_does_not_expose_registrations_or_forks
res = app.get(private_node_one_url, {
'view_only': private_node_one_anonymous_link.key,
})
assert res.status_code == 200
attributes = res.json['data']['attributes']
relationships = res.json['data']['relationships']
if 'embeds' in res.json['data']:
embeds = res.json['data']['embeds']
else:
embeds = {}
assert 'current_user_can_comment' not in attributes
assert 'citation' not in relationships
assert 'custom_citation' not in attributes
assert 'node_license' not in attributes
assert 'registrations' not in relationships
assert 'forks' not in relationships
assert 'registrations' not in embeds
assert 'forks' not in embeds
# test_deleted_anonymous_VOL_gives_401_for_unauthorized
private_node_one_anonymous_link.is_deleted = True
private_node_one_anonymous_link.save()
res = app.get(private_node_one_url, {
'view_only': private_node_one_anonymous_link.key,
}, expect_errors=True)
assert res.status_code == 401
# test_deleted_anonymous_VOL_does_not_anonymize_data_for_authorized
res = app.get(private_node_one_url, {
'view_only': private_node_one_anonymous_link.key,
}, auth=admin.auth)
assert res.status_code == 200
assert 'anonymous' not in res.json['meta']
attributes = res.json['data']['attributes']
relationships = res.json['data']['relationships']
assert 'current_user_can_comment' in attributes
assert 'citation' in relationships
assert 'custom_citation' in attributes
assert 'node_license' in attributes
assert 'forks' in relationships
# test_bad_view_only_link_does_not_modify_permissions
res = app.get(private_node_one_url + 'logs/', {
'view_only': 'thisisnotarealprivatekey',
}, expect_errors=True)
assert res.status_code == 401
res = app.get(private_node_one_url + 'logs/', {
'view_only': 'thisisnotarealprivatekey',
}, auth=admin.auth)
assert res.status_code == 200
# test_view_only_key_in_relationships_links
res = app.get(
private_node_one_url,
{'view_only': private_node_one_private_link.key})
assert res.status_code == 200
res_relationships = res.json['data']['relationships']
for key, value in res_relationships.items():
if isinstance(value, list):
for relationship in value:
links = relationship.get('links', {})
if links.get('related', False):
assert private_node_one_private_link.key in links['related']['href']
if links.get('self', False):
assert private_node_one_private_link.key in links['self']['href']
else:
links = value.get('links', {})
if links.get('related', False):
assert private_node_one_private_link.key in links['related']['href']
if links.get('self', False):
assert private_node_one_private_link.key in links['self']['href']
# test_view_only_key_in_self_and_html_links
res = app.get(
private_node_one_url,
{'view_only': private_node_one_private_link.key})
assert res.status_code == 200
links = res.json['data']['links']
assert private_node_one_private_link.key in links['self']
assert private_node_one_private_link.key in links['html']
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
@pytest.mark.usefixtures(
'admin',
'read_contrib',
'write_contrib',
'valid_contributors',
'private_node_one',
'private_node_one_anonymous_link',
'private_node_one_private_link',
'private_node_one_url',
'private_node_two',
'private_node_two_url',
'public_node_one',
'public_node_one_anonymous_link',
'public_node_one_private_link',
'public_node_one_url',
'public_node_two',
'public_node_two_url')
class TestNodeListViewOnlyLinks:
def test_node_list_view_only_links(
self, app, valid_contributors,
private_node_one,
private_node_one_private_link,
private_node_one_anonymous_link,
base_url):
# test_private_link_does_not_show_node_in_list
res = app.get(base_url, {
'view_only': private_node_one_private_link.key,
})
assert res.status_code == 200
nodes = res.json['data']
node_ids = []
for node in nodes:
node_ids.append(node['id'])
assert private_node_one._id not in node_ids
# test_anonymous_link_does_not_show_contributor_id_in_node_list
res = app.get(base_url, {
'view_only': private_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
nodes = res.json['data']
assertions = 0
for node in nodes:
embeds = node.get('embeds', None)
assert embeds is None or 'contributors' not in embeds
assertions += 1
assert assertions != 0
# test_non_anonymous_link_does_show_contributor_id_in_node_list
res = app.get(base_url, {
'view_only': private_node_one_private_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
nodes = res.json['data']
assertions = 0
for node in nodes:
contributors = node['embeds']['contributors']['data']
for contributor in contributors:
assertions += 1
assert contributor['id'].split('-')[1] in valid_contributors
assert assertions != 0
|
pattisdr/osf.io
|
api_tests/nodes/views/test_view_only_query_parameter.py
|
Python
|
apache-2.0
| 15,854
| 0.000442
|
from io import BytesIO
import sys
from mitmproxy.net import wsgi
from mitmproxy.net.http import Headers
def tflow():
headers = Headers(test=b"value")
req = wsgi.Request("http", "GET", "/", "HTTP/1.1", headers, "")
return wsgi.Flow(("127.0.0.1", 8888), req)
class ExampleApp:
def __init__(self):
self.called = False
def __call__(self, environ, start_response):
self.called = True
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
return [b'Hello', b' world!\n']
class TestWSGI:
def test_make_environ(self):
w = wsgi.WSGIAdaptor(None, "foo", 80, "version")
tf = tflow()
assert w.make_environ(tf, None)
tf.request.path = "/foo?bar=voing"
r = w.make_environ(tf, None)
assert r["QUERY_STRING"] == "bar=voing"
def test_serve(self):
ta = ExampleApp()
w = wsgi.WSGIAdaptor(ta, "foo", 80, "version")
f = tflow()
f.request.host = "foo"
f.request.port = 80
wfile = BytesIO()
err = w.serve(f, wfile)
assert ta.called
assert not err
val = wfile.getvalue()
assert b"Hello world" in val
assert b"Server:" in val
def _serve(self, app):
w = wsgi.WSGIAdaptor(app, "foo", 80, "version")
f = tflow()
f.request.host = "foo"
f.request.port = 80
wfile = BytesIO()
w.serve(f, wfile)
return wfile.getvalue()
def test_serve_empty_body(self):
def app(environ, start_response):
status = '200 OK'
response_headers = [('Foo', 'bar')]
start_response(status, response_headers)
return []
assert self._serve(app)
def test_serve_double_start(self):
def app(environ, start_response):
try:
raise ValueError("foo")
except:
sys.exc_info()
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
start_response(status, response_headers)
assert b"Internal Server Error" in self._serve(app)
def test_serve_single_err(self):
def app(environ, start_response):
try:
raise ValueError("foo")
except:
ei = sys.exc_info()
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers, ei)
yield b""
assert b"Internal Server Error" in self._serve(app)
def test_serve_double_err(self):
def app(environ, start_response):
try:
raise ValueError("foo")
except:
ei = sys.exc_info()
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
yield b"aaa"
start_response(status, response_headers, ei)
yield b"bbb"
assert b"Internal Server Error" in self._serve(app)
|
mosajjal/mitmproxy
|
test/mitmproxy/net/test_wsgi.py
|
Python
|
mit
| 3,186
| 0.000942
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import hashlib
import os
from fabric.api import env
from fabric.api import run
from fabric.api import settings
from oslo_config import cfg
from cloudferrylib.base.action import action
from cloudferrylib.os.actions import task_transfer
from cloudferrylib.utils.utils import forward_agent
from cloudferrylib.utils import utils as utl
from cloudferrylib.utils import qemu_img as qemu_img_util
CONF = cfg.CONF
CLOUD = 'cloud'
BACKEND = 'backend'
CEPH = 'ceph'
ISCSI = 'iscsi'
COMPUTE = 'compute'
INSTANCES = 'instances'
INSTANCE_BODY = 'instance'
INSTANCE = 'instance'
DIFF = 'diff'
EPHEMERAL = 'ephemeral'
DIFF_OLD = 'diff_old'
EPHEMERAL_OLD = 'ephemeral_old'
PATH_DST = 'path_dst'
HOST_DST = 'host_dst'
PATH_SRC = 'path_src'
HOST_SRC = 'host_src'
BACKING_FILE_DST = 'backing_file_dst'
TEMP = 'temp'
FLAVORS = 'flavors'
TRANSPORTER_MAP = {CEPH: {CEPH: 'SSHCephToCeph',
ISCSI: 'SSHCephToFile'},
ISCSI: {CEPH: 'SSHFileToCeph',
ISCSI: 'SSHFileToFile'}}
class TransportEphemeral(action.Action):
# TODO constants
def run(self, info=None, **kwargs):
info = copy.deepcopy(info)
# Init before run
new_info = {
utl.INSTANCES_TYPE: {
}
}
# Get next one instance
for instance_id, instance in info[utl.INSTANCES_TYPE].iteritems():
is_ephemeral = instance[utl.INSTANCE_BODY]['is_ephemeral']
one_instance = {
utl.INSTANCES_TYPE: {
instance_id: instance
}
}
if is_ephemeral:
self.copy_ephemeral(self.src_cloud,
self.dst_cloud,
one_instance)
new_info[utl.INSTANCES_TYPE].update(
one_instance[utl.INSTANCES_TYPE])
return {
'info': new_info
}
@staticmethod
def delete_remote_file_on_compute(path_file, host_cloud,
host_instance):
with settings(host_string=host_cloud,
connection_attempts=env.connection_attempts):
with forward_agent(env.key_filename):
run("ssh -oStrictHostKeyChecking=no %s 'rm -rf %s'" %
(host_instance, path_file))
def copy_data_via_ssh(self, src_cloud, dst_cloud, info, body, resources,
types):
dst_storage = dst_cloud.resources[resources]
src_compute = src_cloud.resources[resources]
src_backend = src_compute.config.compute.backend
dst_backend = dst_storage.config.compute.backend
ssh_driver = (CONF.migrate.copy_backend
if CONF.migrate.direct_compute_transfer
else TRANSPORTER_MAP[src_backend][dst_backend])
transporter = task_transfer.TaskTransfer(
self.init,
ssh_driver,
resource_name=types,
resource_root_name=body)
transporter.run(info=info)
def copy_ephemeral(self, src_cloud, dst_cloud, info):
dst_storage = dst_cloud.resources[utl.COMPUTE_RESOURCE]
src_compute = src_cloud.resources[utl.COMPUTE_RESOURCE]
src_backend = src_compute.config.compute.backend
dst_backend = dst_storage.config.compute.backend
if (src_backend == CEPH) and (dst_backend == ISCSI):
self.copy_ephemeral_ceph_to_iscsi(src_cloud, dst_cloud, info)
elif (src_backend == ISCSI) and (dst_backend == CEPH):
self.copy_ephemeral_iscsi_to_ceph(src_cloud, info)
else:
self.copy_data_via_ssh(src_cloud,
dst_cloud,
info,
utl.EPHEMERAL_BODY,
utl.COMPUTE_RESOURCE,
utl.INSTANCES_TYPE)
self.rebase_diff(dst_cloud, info)
def copy_ephemeral_ceph_to_iscsi(self, src_cloud, dst_cloud, info):
transporter = task_transfer.TaskTransfer(
self.init,
TRANSPORTER_MAP[ISCSI][ISCSI],
resource_name=utl.INSTANCES_TYPE,
resource_root_name=utl.EPHEMERAL_BODY)
instances = info[utl.INSTANCES_TYPE]
temp_src = src_cloud.cloud_config.cloud.temp
host_dst = dst_cloud.cloud_config.cloud.ssh_host
qemu_img_dst = dst_cloud.qemu_img
qemu_img_src = src_cloud.qemu_img
temp_path_src = temp_src + "/%s" + utl.DISK_EPHEM
for inst_id, inst in instances.iteritems():
path_src_id_temp = temp_path_src % inst_id
host_compute_dst = inst[EPHEMERAL][HOST_DST]
inst[EPHEMERAL][
BACKING_FILE_DST] = qemu_img_dst.detect_backing_file(
inst[EPHEMERAL][PATH_DST], host_compute_dst)
self.delete_remote_file_on_compute(inst[EPHEMERAL][PATH_DST],
host_dst,
host_compute_dst)
qemu_img_src.convert(
utl.QCOW2,
'rbd:%s' % inst[EPHEMERAL][PATH_SRC], path_src_id_temp)
inst[EPHEMERAL][PATH_SRC] = path_src_id_temp
transporter.run(info=info)
for inst_id, inst in instances.iteritems():
host_compute_dst = inst[EPHEMERAL][HOST_DST]
qemu_img_dst.diff_rebase(inst[EPHEMERAL][BACKING_FILE_DST],
inst[EPHEMERAL][PATH_DST],
host_compute_dst)
def copy_ephemeral_iscsi_to_ceph(self, src_cloud, info):
instances = info[utl.INSTANCES_TYPE]
qemu_img_src = src_cloud.qemu_img
transporter = task_transfer.TaskTransfer(
self.init,
TRANSPORTER_MAP[ISCSI][CEPH],
resource_name=utl.INSTANCES_TYPE,
resource_root_name=utl.EPHEMERAL_BODY)
for inst_id, inst in instances.iteritems():
path_src = inst[EPHEMERAL][PATH_SRC]
path_src_temp_raw = path_src + "." + utl.RAW
host_src = inst[EPHEMERAL][HOST_SRC]
qemu_img_src.convert(utl.RAW,
path_src,
path_src_temp_raw,
host_src)
inst[EPHEMERAL][PATH_SRC] = path_src_temp_raw
transporter.run(info=info)
@staticmethod
def rebase_diff(dst_cloud, info):
for instance_id, obj in info[utl.INSTANCES_TYPE].items():
image_id = obj['instance']['image_id']
new_backing_file = hashlib.sha1(image_id).hexdigest()
diff = obj['diff']
host = diff['host_dst']
qemu_img = qemu_img_util.QemuImg(dst_cloud.config.dst,
dst_cloud.config.migrate,
host)
diff_path = diff['path_dst']
backing_path = qemu_img.detect_backing_file(diff_path, None)
backing_dir = os.path.dirname(backing_path)
new_backing_path = os.path.join(backing_dir, new_backing_file)
qemu_img.diff_rebase(new_backing_path, diff_path)
|
mgrygoriev/CloudFerry
|
cloudferrylib/os/actions/transport_ephemeral.py
|
Python
|
apache-2.0
| 7,851
| 0
|
"""
Weather Underground PWS Metadata Scraping Module
Code to scrape PWS network metadata
"""
import pandas as pd
import urllib3
from bs4 import BeautifulSoup as BS
import numpy as np
import requests
# import time
def scrape_station_info(state="WA"):
"""
A script to scrape the station information published at the following URL:
https://www.wunderground.com/weatherstation/ListStations.asp?
selectedState=WA&selectedCountry=United+States&MR=1
:param state: US State by which to subset WU Station table
:return: numpy array with station info
"""
url = "https://www.wunderground.com/" \
"weatherstation/ListStations.asp?selectedState=" \
+ state + "&selectedCountry=United+States&MR=1"
raw_site_content = requests.get(url).content
soup = BS(raw_site_content, 'html.parser')
list_stations_info = soup.find_all("tr")
all_station_info = np.array(['id', 'neighborhood', 'city', 'type', 'lat',
'lon', 'elevation'])
for i in range(1, len(list_stations_info)): # start at 1 to omit headers
station_info = str(list_stations_info[i]).splitlines()
# pull out station info
station_id = station_info[1].split('ID=')[1].split('"')[0]
station_neighborhood = station_info[2].split('<td>')[1]
station_neighborhood = station_neighborhood.split('\xa0')[0]
station_city = station_info[3].split('<td>')[1].split('\xa0')[0]
station_type = station_info[4].split('station-type">')[1]
station_type = station_type.split('\xa0')[0]
station_id = station_id.strip()
station_neighborhood = station_neighborhood.strip()
station_city = station_city.strip()
station_type = station_type.strip()
# grab the latitude, longitude, and elevation metadata
lat, lon, elev = scrape_lat_lon_fly(station_id)
# put all data into an array
header = [station_id, station_neighborhood, station_city, station_type,
lat, lon, elev]
head_len = len(header)
all_station_info = np.vstack([all_station_info, header])
all_station_info = pd.DataFrame(all_station_info)
all_station_info.columns = all_station_info.ix[0, :]
# do some dataframe editing
all_station_info = all_station_info.drop(all_station_info
.index[0]).reset_index()
all_station_info = all_station_info.drop(all_station_info.columns[0],
axis=1)
return(all_station_info.to_csv('./data/station_data_from_FUN.csv'))
def scrape_lat_lon_fly(stationID):
"""
Add latitude, longitude and elevation data to the stationID that is
inputted as the argument to the function. Boom.
:param stationID: str
a unique identifier for the weather underground personal
weather station
:return: (latitude,longitude,elevation) as a tuple. Double Boom.
"""
http = urllib3.PoolManager(maxsize=10, block=True,
cert_reqs='CERT_REQUIRED')
try:
url = 'https://api.wunderground.com/weatherstation/' \
'WXDailyHistory.asp?ID={0}&format=XML'.format(stationID)
r = http.request('GET', url, preload_content=False)
soup = BS(r, 'xml')
lat = soup.find_all('latitude')[0].get_text()
long = soup.find_all('longitude')[0].get_text()
elev = soup.find_all('elevation')[0].get_text()
return(lat, long, elev)
except Exception as err:
lat = 'NA'
long = 'NA'
elev = 'NA'
return(lat, long, elev)
def subset_stations_by_coords(station_data, lat_range, lon_range):
"""
Subset station metadata by latitude and longitude
:param station_data_csv: str or Pandas.DataFrame
filename of csv with station metadata (from scrape_lat_lon)
or
Pandas.DataFrame with station metadata (from scrape_lat_lon)
:param lat_range: 2-element list
min and max latitude range, e.g. [47.4, 47.8]
:param lon_range: 2-element list
min and max longitude range, e.g. [-122.5, -122.2]
:return: pandas.DataFrame with station metadata subset by lat/lon bounds
"""
lat_range.sort()
lon_range.sort()
if isinstance(station_data, str):
df = pd.read_csv(station_data, index_col=1)
df = df.dropna(subset=["Latitude", "Longitude"])
elif isinstance(station_data, pd.DataFrame):
df = station_data
else:
pass
# TODO: add exception here if type not supported
df = df[(df["Latitude"] >= lat_range[0]) &
(df["Latitude"] <= lat_range[1]) &
(df["Longitude"] >= lon_range[0]) &
(df["Longitude"] <= lon_range[1])]
return df
def get_station_ids_by_coords(station_data_csv, lat_range, lon_range):
"""
Wrapper around subset_stations_by_coords; returns just the IDs of the
stations in a box
:param station_data_csv: str
filename of csv with station metadata (from scrape_lat_lon)
:param lat_range: 2-element list
min and max latitude range, e.g. [47.4, 47.8]
:param lon_range: 2-element list
min and max longitude range, e.g. [-122.5, -122.2]
:return: list of station IDs (strings)
"""
df = subset_stations_by_coords(station_data_csv, lat_range, lon_range)
return list(df.index)
# TESTING
# station_data_csv = "data/station_data.csv"
# lat_range = [47.4, 47.8]
# lon_range = [-122.5, -122.2]
# print(get_station_ids_by_coords(station_data_csv, lat_range, lon_range))
|
rexthompson/axwx
|
axwx/wu_metadata_scraping.py
|
Python
|
mit
| 5,613
| 0
|
"""
This page is in the table of contents.
Export is a craft tool to pick an export plugin, add information to the file name, and delete comments.
The export manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Export
==Operation==
The default 'Activate Export' checkbox is on. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Add Descriptive Extension===
Default is off.
When selected, key profile values will be added as an extension to the gcode file. For example:
test.04hx06w_03fill_2cx2r_33EL.gcode
would mean:
* . (Carve section.)
* 04h = 'Layer Height (mm):' 0.4
* x
* 06w = 0.6 width i.e. 0.4 times 'Edge Width over Height (ratio):' 1.5
* _ (Fill section.)
* 03fill = 'Infill Solidity (ratio):' 0.3
* _ (Multiply section; if there is one column and one row then this section is not shown.)
* 2c = 'Number of Columns (integer):' 2
* x
* 2r = 'Number of Rows (integer):' 2.
* _ (Speed section.)
* 33EL = 'Feed Rate (mm/s):' 33.0 and 'Flow Rate Setting (float):' 33.0. If either value has a positive value after the decimal place then this is also shown, but if it is zero it is hidden. Also, if the values differ (which they shouldn't with 5D volumetrics) then each should be displayed separately. For example, 35.2E30L = 'Feed Rate (mm/s):' 35.2 and 'Flow Rate Setting (float):' 30.0.
===Add Profile Extension===
Default is off.
When selected, the current profile will be added to the file extension. For example:
test.my_profile_name.gcode
===Add Timestamp Extension===
Default is off.
When selected, the current date and time is added as an extension in format YYYYmmdd_HHMMSS (so it is sortable if one has many files). For example:
test.my_profile_name.20110613_220113.gcode
===Also Send Output To===
Default is empty.
Defines the output name for sending to a file or pipe. A common choice is stdout to print the output in the shell screen. Another common choice is stderr. With the empty default, nothing will be done. If the value is anything else, the output will be written to that file name.
===Analyze Gcode===
Default is on.
When selected, the penultimate gcode will be sent to the analyze plugins to be analyzed and viewed.
===Comment Choice===
Default is 'Delete All Comments'.
====Do Not Delete Comments====
When selected, export will not delete comments. Crafting comments slow down the processing in many firmware types, which leads to pauses and therefore a lower quality print.
====Delete Crafting Comments====
When selected, export will delete the time consuming crafting comments, but leave the initialization comments. Since the crafting comments are deleted, there are no pauses during extrusion. The remaining initialization comments provide some useful information for the analyze tools.
====Delete All Comments====
When selected, export will delete all comments. The comments are not necessary to run a fabricator. Some printers do not support comments at all, so the safest way is to choose this option.
===Export Operations===
Export presents the user with a choice of the export plugins in the export_plugins folder. The chosen plugin will then modify the gcode or translate it into another format. There is also the "Do Not Change Output" choice, which will not change the output. An export plugin is a script in the export_plugins folder which has the getOutput function, the globalIsReplaceable variable and, if its output is not replaceable, the writeOutput function.
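For instance, a minimal replaceable plugin (an illustrative sketch, not one shipped with this tool) would only need to set globalIsReplaceable = True and define a getOutput(gcodeText) function that returns the gcode text, possibly transformed.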
===File Extension===
Default is gcode.
Defines the file extension added to the name of the output file. The output file will be named as originalname_export.extension so if you are processing XYZ.stl the output will by default be XYZ_export.gcode
===Name of Replace File===
Default is replace.csv.
When export is exporting the code, if there is a tab separated file with the name of the "Name of Replace File" setting, it will replace the string in the first column by its replacement in the second column. If there is nothing in the second column, the first column string will be deleted, if this leads to an empty line, the line will be deleted. If there are replacement columns after the second, they will be added as extra lines of text. There is an example file replace_example.csv to demonstrate the tab separated format, which can be edited in a text editor or a spreadsheet.
Export looks for the alteration file in the alterations folder in the .skeinforge folder in the home directory. Export does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder.
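For example, a hypothetical replace file (illustrative only, not shipped with this tool) could contain the two tab separated rows below; the first row replaces every occurrence of OLD_TEXT with NEW_TEXT, and the second deletes every occurrence of UNWANTED_TEXT (and any line left empty as a result):
OLD_TEXT<tab>NEW_TEXT
UNWANTED_TEXT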
===Save Penultimate Gcode===
Default is off.
When selected, export will save the gcode file with the suffix '_penultimate.gcode' just before it is exported. This is useful because the code after it is exported could be in a form which the viewers can not display well.
==Examples==
The following examples export the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and export.py.
> python export.py
This brings up the export dialog.
> python export.py Screw Holder Bottom.stl
The export tool is parsing the file:
Screw Holder Bottom.stl
..
The export tool has created the file:
.. Screw Holder Bottom_export.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_analyze
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import cStringIO
import os
import sys
import time
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Gary Hodgson <http://garyhodgson.com/reprap/2011/06/hacking-skeinforge-export-module/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedTextFromText(gcodeText, repository=None):
'Export a gcode linear move text.'
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'export'):
return gcodeText
if repository == None:
repository = settings.getReadRepository(ExportRepository())
if not repository.activateExport.value:
return gcodeText
return ExportSkein().getCraftedGcode(repository, gcodeText)
def getDescriptionCarve(lines):
'Get the description for carve.'
descriptionCarve = ''
layerThicknessString = getSettingString(lines, 'carve', 'Layer Height')
if layerThicknessString != None:
descriptionCarve += layerThicknessString.replace('.', '') + 'h'
edgeWidthString = getSettingString(lines, 'carve', 'Edge Width over Height')
if edgeWidthString != None:
descriptionCarve += 'x%sw' % str(float(edgeWidthString) * float(layerThicknessString)).replace('.', '')
return descriptionCarve
def getDescriptionFill(lines):
'Get the description for fill.'
activateFillString = getSettingString(lines, 'fill', 'Activate Fill')
if activateFillString == None or activateFillString == 'False':
return ''
infillSolidityString = getSettingString(lines, 'fill', 'Infill Solidity')
return '_' + infillSolidityString.replace('.', '') + 'fill'
def getDescriptionMultiply(lines):
'Get the description for multiply.'
activateMultiplyString = getSettingString(lines, 'multiply', 'Activate Multiply')
if activateMultiplyString == None or activateMultiplyString == 'False':
return ''
columnsString = getSettingString(lines, 'multiply', 'Number of Columns')
rowsString = getSettingString(lines, 'multiply', 'Number of Rows')
if columnsString == '1' and rowsString == '1':
return ''
return '_%scx%sr' % (columnsString, rowsString)
def getDescriptionSpeed(lines):
'Get the description for speed.'
activateSpeedString = getSettingString(lines, 'speed', 'Activate Speed')
if activateSpeedString == None or activateSpeedString == 'False':
return ''
feedRateString = getSettingString(lines, 'speed', 'Feed Rate')
flowRateString = getSettingString(lines, 'speed', 'Flow Rate')
if feedRateString == flowRateString:
return '_%sEL' % feedRateString.replace('.0', '')
return '_%sE%sL' % (feedRateString.replace('.0', ''), flowRateString.replace('.0', ''))
def getDescriptiveExtension(gcodeText):
'Get the descriptive extension.'
lines = archive.getTextLines(gcodeText)
return '.' + getDescriptionCarve(lines) + getDescriptionFill(lines) + getDescriptionMultiply(lines) + getDescriptionSpeed(lines)
def getDistanceGcode(exportText):
'Get gcode lines with distance variable added, this is for if ever there is distance code.'
lines = archive.getTextLines(exportText)
oldLocation = None
for line in lines:
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = None
if len(splitLine) > 0:
firstWord = splitLine[0]
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(oldLocation, splitLine)
if oldLocation != None:
distance = location.distance(oldLocation)
oldLocation = location
return exportText
def getFirstValue(gcodeText, word):
'Get the value from the first line which starts with the given word.'
for line in archive.getTextLines(gcodeText):
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if gcodec.getFirstWord(splitLine) == word:
return splitLine[1]
return ''
def getNewRepository():
'Get new repository.'
return ExportRepository()
def getReplaceableExportGcode(nameOfReplaceFile, replaceableExportGcode):
'Get text with strings replaced according to replace.csv file.'
replaceLines = settings.getAlterationLines(nameOfReplaceFile)
if len(replaceLines) < 1:
return replaceableExportGcode
for replaceLine in replaceLines:
splitLine = replaceLine.replace('\\n', '\t').split('\t')
if len(splitLine) > 0:
replaceableExportGcode = replaceableExportGcode.replace(splitLine[0], '\n'.join(splitLine[1 :]))
output = cStringIO.StringIO()
gcodec.addLinesToCString(output, archive.getTextLines(replaceableExportGcode))
return output.getvalue()
def getSelectedPluginModule( plugins ):
'Get the selected plugin module.'
for plugin in plugins:
if plugin.value:
return archive.getModuleWithDirectoryPath( plugin.directoryPath, plugin.name )
return None
def getSettingString(lines, procedureName, settingNameStart):
'Get the setting value from the lines, return None if there is no setting starting with that name.'
settingNameStart = settingNameStart.replace(' ', '_')
for line in lines:
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = None
if len(splitLine) > 0:
firstWord = splitLine[0]
if firstWord == '(<setting>':
if len(splitLine) > 4:
if splitLine[1] == procedureName and splitLine[2].startswith(settingNameStart):
return splitLine[3]
elif firstWord == '(</settings>)':
return None
return None
def sendOutputTo(outputTo, text):
'Send output to a file or a standard output.'
if outputTo.endswith('stderr'):
sys.stderr.write(text)
sys.stderr.write('\n')
sys.stderr.flush()
return
if outputTo.endswith('stdout'):
sys.stdout.write(text)
sys.stdout.write('\n')
sys.stdout.flush()
return
archive.writeFileText(outputTo, text)
def writeOutput(fileName, shouldAnalyze=True):
'Export a gcode linear move file.'
if fileName == '':
return None
repository = ExportRepository()
settings.getReadRepository(repository)
startTime = time.time()
print('File ' + archive.getSummarizedFileName(fileName) + ' is being chain exported.')
fileNameSuffix = fileName[: fileName.rfind('.')]
if repository.addExportSuffix.value:
fileNameSuffix += '_export'
gcodeText = gcodec.getGcodeFileText(fileName, '')
procedures = skeinforge_craft.getProcedures('export', gcodeText)
gcodeText = skeinforge_craft.getChainTextFromProcedures(fileName, procedures[: -1], gcodeText)
if gcodeText == '':
return None
if repository.addProfileExtension.value:
fileNameSuffix += '.' + getFirstValue(gcodeText, '(<profileName>')
if repository.addDescriptiveExtension.value:
fileNameSuffix += getDescriptiveExtension(gcodeText)
if repository.addTimestampExtension.value:
fileNameSuffix += '.' + getFirstValue(gcodeText, '(<timeStampPreface>')
fileNameSuffix += '.' + repository.fileExtension.value
fileNamePenultimate = fileName[: fileName.rfind('.')] + '_penultimate.gcode'
filePenultimateWritten = False
if repository.savePenultimateGcode.value:
archive.writeFileText(fileNamePenultimate, gcodeText)
filePenultimateWritten = True
print('The penultimate file is saved as ' + archive.getSummarizedFileName(fileNamePenultimate))
exportGcode = getCraftedTextFromText(gcodeText, repository)
window = None
if shouldAnalyze and repository.analyzeGcode.value:
window = skeinforge_analyze.writeOutput(fileName, fileNamePenultimate, fileNameSuffix, filePenultimateWritten, gcodeText)
replaceableExportGcode = None
selectedPluginModule = getSelectedPluginModule(repository.exportPlugins)
if selectedPluginModule == None:
replaceableExportGcode = exportGcode
else:
if selectedPluginModule.globalIsReplaceable:
replaceableExportGcode = selectedPluginModule.getOutput(exportGcode)
else:
selectedPluginModule.writeOutput(fileNameSuffix, exportGcode)
if replaceableExportGcode != None:
replaceableExportGcode = getReplaceableExportGcode(repository.nameOfReplaceFile.value, replaceableExportGcode)
archive.writeFileText( fileNameSuffix, replaceableExportGcode )
print('The exported file is saved as ' + archive.getSummarizedFileName(fileNameSuffix))
if repository.alsoSendOutputTo.value != '':
if replaceableExportGcode == None:
replaceableExportGcode = selectedPluginModule.getOutput(exportGcode)
sendOutputTo(repository.alsoSendOutputTo.value, replaceableExportGcode)
print('It took %s to export the file.' % euclidean.getDurationString(time.time() - startTime))
return window
class ExportRepository:
'A class to handle the export settings.'
def __init__(self):
'Set the default settings, execute title & settings fileName.'
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.export.html', self)
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Export', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Export')
self.activateExport = settings.BooleanSetting().getFromValue('Activate Export', self, True)
self.addDescriptiveExtension = settings.BooleanSetting().getFromValue('Add Descriptive Extension', self, False)
self.addExportSuffix = settings.BooleanSetting().getFromValue('Add Export Suffix', self, True)
self.addProfileExtension = settings.BooleanSetting().getFromValue('Add Profile Extension', self, False)
self.addTimestampExtension = settings.BooleanSetting().getFromValue('Add Timestamp Extension', self, False)
self.alsoSendOutputTo = settings.StringSetting().getFromValue('Also Send Output To:', self, '')
self.analyzeGcode = settings.BooleanSetting().getFromValue('Analyze Gcode', self, True)
self.commentChoice = settings.MenuButtonDisplay().getFromName('Comment Choice:', self)
self.doNotDeleteComments = settings.MenuRadio().getFromMenuButtonDisplay(self.commentChoice, 'Do Not Delete Comments', self, False)
self.deleteCraftingComments = settings.MenuRadio().getFromMenuButtonDisplay(self.commentChoice, 'Delete Crafting Comments', self, False)
self.deleteAllComments = settings.MenuRadio().getFromMenuButtonDisplay(self.commentChoice, 'Delete All Comments', self, True)
exportPluginsFolderPath = archive.getAbsoluteFrozenFolderPath(archive.getCraftPluginsDirectoryPath('export.py'), 'export_plugins')
exportStaticDirectoryPath = os.path.join(exportPluginsFolderPath, 'static_plugins')
exportPluginFileNames = archive.getPluginFileNamesFromDirectoryPath(exportPluginsFolderPath)
exportStaticPluginFileNames = archive.getPluginFileNamesFromDirectoryPath(exportStaticDirectoryPath)
self.exportLabel = settings.LabelDisplay().getFromName('Export Operations: ', self)
self.exportPlugins = []
exportLatentStringVar = settings.LatentStringVar()
self.doNotChangeOutput = settings.RadioCapitalized().getFromRadio(exportLatentStringVar, 'Do Not Change Output', self, True)
self.doNotChangeOutput.directoryPath = None
allExportPluginFileNames = exportPluginFileNames + exportStaticPluginFileNames
for exportPluginFileName in allExportPluginFileNames:
exportPlugin = None
if exportPluginFileName in exportPluginFileNames:
path = os.path.join(exportPluginsFolderPath, exportPluginFileName)
exportPlugin = settings.RadioCapitalizedButton().getFromPath(exportLatentStringVar, exportPluginFileName, path, self, False)
exportPlugin.directoryPath = exportPluginsFolderPath
else:
exportPlugin = settings.RadioCapitalized().getFromRadio(exportLatentStringVar, exportPluginFileName, self, False)
exportPlugin.directoryPath = exportStaticDirectoryPath
self.exportPlugins.append(exportPlugin)
self.fileExtension = settings.StringSetting().getFromValue('File Extension:', self, 'gcode')
self.nameOfReplaceFile = settings.StringSetting().getFromValue('Name of Replace File:', self, 'replace.csv')
self.savePenultimateGcode = settings.BooleanSetting().getFromValue('Save Penultimate Gcode', self, False)
self.executeTitle = 'Export'
def execute(self):
'Export button has been clicked.'
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class ExportSkein:
'A class to export a skein of extrusions.'
def __init__(self):
self.crafting = False
self.decimalPlacesExported = 2
self.output = cStringIO.StringIO()
def addLine(self, line):
'Add a line of text and a newline to the output.'
if line != '':
self.output.write(line + '\n')
def getCraftedGcode( self, repository, gcodeText ):
'Parse gcode text and store the export gcode.'
self.repository = repository
lines = archive.getTextLines(gcodeText)
for line in lines:
self.parseLine(line)
return self.output.getvalue()
def getLineWithTruncatedNumber(self, character, line, splitLine):
'Get a line with the number after the character truncated.'
numberString = gcodec.getStringFromCharacterSplitLine(character, splitLine)
if numberString == None:
return line
roundedNumberString = euclidean.getRoundedToPlacesString(self.decimalPlacesExported, float(numberString))
return gcodec.getLineWithValueString(character, line, splitLine, roundedNumberString)
def parseLine(self, line):
'Parse a gcode line.'
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == '(</crafting>)':
self.crafting = False
elif firstWord == '(<decimalPlacesCarried>':
self.decimalPlacesExported = int(splitLine[1]) - 1
if self.repository.deleteAllComments.value or (self.repository.deleteCraftingComments.value and self.crafting):
if firstWord[0] == '(':
return
else:
line = line.split(';')[0].split('(')[0].strip()
if firstWord == '(<crafting>)':
self.crafting = True
if firstWord == '(</extruderInitialization>)':
self.addLine(gcodec.getTagBracketedProcedure('export'))
if firstWord != 'G1' and firstWord != 'G2' and firstWord != 'G3' :
self.addLine(line)
return
line = self.getLineWithTruncatedNumber('X', line, splitLine)
line = self.getLineWithTruncatedNumber('Y', line, splitLine)
line = self.getLineWithTruncatedNumber('Z', line, splitLine)
line = self.getLineWithTruncatedNumber('I', line, splitLine)
line = self.getLineWithTruncatedNumber('J', line, splitLine)
line = self.getLineWithTruncatedNumber('R', line, splitLine)
self.addLine(line)
def main():
'Display the export dialog.'
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == '__main__':
main()
|
nophead/Skeinforge50plus
|
skeinforge_application/skeinforge_plugins/craft_plugins/export.py
|
Python
|
agpl-3.0
| 20,837
| 0.017853
|
import logging
import os
import shutil
import subprocess
DEVNULL = open(os.devnull, 'wb')
class ShellError(Exception):
def __init__(self, command, err_no, message=None):
self.command = command
self.errno = err_no
self.message = message
def __str__(self):
string = "Command '%s' failed with exit code %d" % (self.command, self.errno)
if self.message is not None:
string += ': ' + repr(self.message)
return string
def __repr__(self):
return self.__str__()
def shell_exec(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, background=False, env=None):
str_cmd = cmd if isinstance(cmd, str) else ' '.join(cmd)
logging.getLogger('shell_exec').debug(str_cmd)
message = None
if background:
if stdout == subprocess.PIPE:
stdout = DEVNULL
if stderr == subprocess.PIPE:
stderr = DEVNULL
elif stdin is not None and isinstance(stdin, str):
message = stdin
stdin = subprocess.PIPE
process = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr, shell=isinstance(cmd, str), env=env)
stdout_dump = None
stderr_dump = None
return_code = 0
if message is not None or stdout == subprocess.PIPE or stderr == subprocess.PIPE:
stdout_dump, stderr_dump = process.communicate(message)
return_code = process.returncode
elif not background:
return_code = process.wait()
if background:
return process
else:
if stdout_dump is not None:
stdout_dump = stdout_dump.decode('utf-8')
if stderr_dump is not None:
stderr_dump = stderr_dump.decode('utf-8')
if return_code != 0:
raise ShellError(str_cmd, return_code, stderr_dump)
else:
return stdout_dump, stderr_dump
def mem_size(megabytes=True):
mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
return mem_bytes / (1024. ** 2) if megabytes else mem_bytes
def lc(filename):
with open(filename) as stream:
count = 0
for _ in stream:
count += 1
return count
def cat(files, output, buffer_size=10 * 1024 * 1024):
with open(output, 'wb') as blob:
for f in files:
with open(f, 'rb') as source:
shutil.copyfileobj(source, blob, buffer_size)
|
ModernMT/MMT
|
cli/utils/osutils.py
|
Python
|
apache-2.0
| 2,402
| 0.001665
|
import numpy
from chainer.backends import cuda
from chainer import optimizer
_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.lr = 0.01
_default_hyperparam.alpha = 0.99
_default_hyperparam.eps = 1e-8
_default_hyperparam.eps_inside_sqrt = False
class RMSpropRule(optimizer.UpdateRule):
"""Update rule for RMSprop.
See :class:`~chainer.optimizers.RMSprop` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
alpha (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eps_inside_sqrt (bool): When ``True``, gradient will be divided by
:math:`\\sqrt{ms + eps}` where ``ms`` is the mean square. When
``False`` (default), gradient will be divided by
:math:`\\sqrt{ms} + eps` instead.
This option may be convenient for users porting code from other
frameworks;
see `#4754 <https://github.com/chainer/chainer/issues/4754>`__ for
details.
"""
def __init__(self, parent_hyperparam=None, lr=None, alpha=None, eps=None,
eps_inside_sqrt=None):
super(RMSpropRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if alpha is not None:
self.hyperparam.alpha = alpha
if eps is not None:
self.hyperparam.eps = eps
if eps_inside_sqrt is not None:
self.hyperparam.eps_inside_sqrt = eps_inside_sqrt
def init_state(self, param):
xp = cuda.get_array_module(param.data)
with cuda.get_device_from_array(param.data):
self.state['ms'] = xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
eps = grad.dtype.type(hp.eps)
if hp.eps != 0 and eps == 0:
raise ValueError(
'eps of RMSprop optimizer is too small for {} ({})'.format(
grad.dtype.name, hp.eps))
ms = self.state['ms']
ms *= hp.alpha
ms += (1 - hp.alpha) * grad * grad
if hp.eps_inside_sqrt:
denom = numpy.sqrt(ms + eps)
else:
denom = numpy.sqrt(ms) + eps
param.data -= hp.lr * grad / denom
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
eps = grad.dtype.type(hp.eps)
if eps == 0:
raise ValueError(
'eps of RMSprop optimizer is too small for {} ({})'.format(
grad.dtype.name, hp.eps))
if hp.eps_inside_sqrt:
denom = 'sqrt(ms + eps)'
else:
denom = 'sqrt(ms) + eps'
kernel = cuda.elementwise(
'T grad, T lr, T alpha, T eps',
'T param, T ms',
'''ms = alpha * ms + (1 - alpha) * grad * grad;
param -= lr * grad / ({});'''.format(denom),
'rmsprop')
kernel(grad, self.hyperparam.lr, self.hyperparam.alpha,
eps, param.data, self.state['ms'])
class RMSprop(optimizer.GradientMethod):
"""RMSprop optimizer.
See: T. Tieleman and G. Hinton (2012). Lecture 6.5 - rmsprop, COURSERA:
Neural Networks for Machine Learning.
Args:
lr (float): Learning rate.
alpha (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eps_inside_sqrt (bool): When ``True``, gradient will be divided by
:math:`\\sqrt{ms + eps}` where ``ms`` is the mean square. When
``False`` (default), gradient will be divided by
:math:`\\sqrt{ms} + eps` instead.
This option may be convenient for users porting code from other
frameworks;
see `#4754 <https://github.com/chainer/chainer/issues/4754>`__ for
details.
"""
def __init__(self, lr=_default_hyperparam.lr,
alpha=_default_hyperparam.alpha, eps=_default_hyperparam.eps,
eps_inside_sqrt=_default_hyperparam.eps_inside_sqrt):
super(RMSprop, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.alpha = alpha
self.hyperparam.eps = eps
self.hyperparam.eps_inside_sqrt = eps_inside_sqrt
lr = optimizer.HyperparameterProxy('lr')
alpha = optimizer.HyperparameterProxy('alpha')
eps = optimizer.HyperparameterProxy('eps')
eps_inside_sqrt = optimizer.HyperparameterProxy('eps_inside_sqrt')
def create_update_rule(self):
return RMSpropRule(self.hyperparam)
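# Illustrative usage sketch (the `model` link and the training loop below are assumptions,
# not part of this file):
#     optimizer = RMSprop(lr=0.01, alpha=0.99, eps=1e-8)
#     optimizer.setup(model)    # attach the optimizer to a chainer.Link
#     model.cleargrads()
#     loss.backward()           # accumulate gradients for some computed loss
#     optimizer.update()        # apply one RMSprop step using the accumulated gradients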
|
rezoo/chainer
|
chainer/optimizers/rmsprop.py
|
Python
|
mit
| 4,921
| 0
|
"""
Kodi urlresolver plugin
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class TudouResolver(UrlResolver):
name = 'Tudou'
domains = ['tudou.com']
pattern = '(?://|\.)(tudou\.com)/programs/view/([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
swf = re.findall('(http.+?\.swf)', html)[0]
sid = re.findall('areaCode\s*:\s*"(\d+)', html)[0]
oid = re.findall('"k"\s*:\s*(\d+)', html)[0]
f_url = 'http://v2.tudou.com/f?id=%s&sid=%s&hd=3&sj=1' % (oid, sid)
headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': swf}
html = self.net.http_GET(f_url, headers=headers).content
url = re.findall('>(http.+?)<', html)[0]
url = url.replace('&', '&')
video = self.net.http_HEAD(url, headers=headers).get_headers()
video = [i for i in video if 'video' in i]
if not video:
raise ResolverError('File not found')
url += '|%s' % urllib.urlencode(headers)
return url
raise ResolverError('Unable to locate link')
def get_url(self, host, media_id):
return 'http://www.tudou.com/programs/view/%s/' % media_id
|
TheWardoctor/Wardoctors-repo
|
script.module.urlresolver/lib/urlresolver/plugins/tudou.py
|
Python
|
apache-2.0
| 2,082
| 0.004803
|
import warnings
from .file import File, open, read, create, write, CfitsioError
try:
from healpix import read_map, read_mask
except:
warnings.warn('Cannot import read_map and read_mask if healpy is not installed')
pass
|
zonca/pycfitsio
|
pycfitsio/__init__.py
|
Python
|
gpl-3.0
| 234
| 0.012821
|
### Copyright (C) 2010 Peter Williams <peter_ono@users.sourceforge.net>
###
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; version 2 of the License only.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Select/display which patch guards are in force."""
import sys
from . import cli_args
from . import db_utils
from . import msg
PARSER = cli_args.SUB_CMD_PARSER.add_parser(
"select",
description=_("Display/select which patch guards are in force."),
epilog=_("""When invoked with no arguments the currently selected guards are listed."""),
)
GROUP = PARSER.add_mutually_exclusive_group()
GROUP.add_argument(
"-n", "--none",
help=_("Disable all guards."),
dest="opt_none",
action="store_true",
)
GROUP.add_argument(
"-s", "--set",
help=_("the list of guards to be enabled/selected."),
dest="guards",
metavar="guard",
action="append",
)
def run_select(args):
"""Execute the "select" sub command using the supplied args"""
PM = db_utils.get_pm_db()
db_utils.set_report_context(verbose=True)
if args.opt_none:
return PM.do_select_guards(None)
elif args.guards:
return PM.do_select_guards(args.guards)
else:
selected_guards = PM.get_selected_guards()
for guard in sorted(selected_guards):
sys.stdout.write(guard + "\n")
return 0
PARSER.set_defaults(run_cmd=run_select)
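# Illustrative invocations (the `darn` entry-point name is an assumption made for the example;
# it is whichever executable wires up cli_args.SUB_CMD_PARSER):
#     darn select                 # list the currently selected guards
#     darn select -s stable       # enable/select the guard named "stable"
#     darn select --none          # disable all guards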
|
pwil3058/darning
|
darning/cli/subcmd_select.py
|
Python
|
gpl-2.0
| 1,953
| 0.00768
|
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ctypes
from ctypes import util as ctypes_util
_CDLL = None
def get_cdll():
global _CDLL
if not _CDLL:
# NOTE(ralonsoh): from https://docs.python.org/3.6/library/
# ctypes.html#ctypes.PyDLL: "Instances of this class behave like CDLL
# instances, except that the Python GIL is not released during the
# function call, and after the function execution the Python error
# flag is checked."
# Check https://bugs.launchpad.net/neutron/+bug/1870352
_CDLL = ctypes.PyDLL(ctypes_util.find_library('c'), use_errno=True)
return _CDLL
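# Illustrative use of the cached handle (an assumption for the example, not part of this
# module): call a libc symbol through ctypes; use_errno=True lets callers inspect errno via
# ctypes.get_errno() after a failed call.
#     libc = get_cdll()
#     pid = libc.getpid()
#     err = ctypes.get_errno()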
|
mahak/neutron
|
neutron/privileged/agent/linux/__init__.py
|
Python
|
apache-2.0
| 1,208
| 0
|
try:
#comment
x = 1<caret>
y = 2
except:
pass
|
asedunov/intellij-community
|
python/testData/refactoring/unwrap/tryUnwrap_before.py
|
Python
|
apache-2.0
| 61
| 0.081967
|
# -*- coding: utf-8 -*-
# Author: Mikhail Polyanskiy
# Last modified: 2017-04-02
# Original data: Djurišić and Li 1999, https://doi.org/10.1063/1.369370
import numpy as np
import matplotlib.pyplot as plt
# LD model parameters - Normal polarization (ordinary)
ωp = 27
εinf = 1.070
f0 = 0.014
Γ0 = 6.365
ω0 = 0
α0 = 0
f1 = 0.073
Γ1 = 4.102
ω1 = 0.275
α1 = 0.505
f2 = 0.056
Γ2 = 7.328
ω2 = 3.508
α2 = 7.079
f3 = 0.069
Γ3 = 1.414
ω3 = 4.451
α3 = 0.362
f4 = 0.005
Γ4 = 0.46 # 0.046 in the original paper!
ω4 = 13.591
α4 = 7.426
f5 = 0.262
Γ5 = 1.862
ω5 = 14.226
α5 = 3.82e-4
f6 = 0.460
Γ6 = 11.922
ω6 = 15.550
α6 = 1.387
f7 = 0.200
Γ7 = 39.091
ω7 = 32.011
α7 = 28.963
def LD(ω):
ε = εinf;
Γ = Γ0*np.exp(-α0*((ω-ω0)/Γ0)**2)
ε -= f0*ωp**2 / ((ω**2-ω0**2)+1j*ω*Γ)
Γ = Γ1*np.exp(-α1*((ω-ω1)/Γ1)**2)
ε -= f1*ωp**2 / ((ω**2-ω1**2)+1j*ω*Γ)
Γ = Γ2*np.exp(-α2*((ω-ω2)/Γ2)**2)
ε -= f2*ωp**2 / ((ω**2-ω2**2)+1j*ω*Γ)
Γ = Γ3*np.exp(-α3*((ω-ω3)/Γ3)**2)
ε -= f3*ωp**2 / ((ω**2-ω3**2)+1j*ω*Γ)
Γ = Γ4*np.exp(-α4*((ω-ω4)/Γ4)**2)
ε -= f4*ωp**2 / ((ω**2-ω4**2)+1j*ω*Γ)
Γ = Γ5*np.exp(-α5*((ω-ω5)/Γ5)**2)
ε -= f5*ωp**2 / ((ω**2-ω5**2)+1j*ω*Γ)
Γ = Γ6*np.exp(-α6*((ω-ω6)/Γ6)**2)
ε -= f6*ωp**2 / ((ω**2-ω6**2)+1j*ω*Γ)
Γ = Γ7*np.exp(-α7*((ω-ω7)/Γ7)**2)
ε -= f7*ωp**2 / ((ω**2-ω7**2)+1j*ω*Γ)
return ε
ev_min=0.12
ev_max=40
npoints=1000
eV = np.linspace(ev_min, ev_max, npoints)
μm = 4.13566733e-1*2.99792458/eV
ε = LD(eV)
n = (ε**.5).real
k = (ε**.5).imag
#============================ DATA OUTPUT =================================
file = open('out.txt', 'w')
for i in range(npoints-1, -1, -1):
file.write('\n {:.4e} {:.4e} {:.4e}'.format(μm[i],n[i],k[i]))
file.close()
#=============================== PLOT =====================================
plt.rc('font', family='Arial', size='14')
#plot ε vs eV
plt.figure(1)
plt.plot(eV, ε.real, label="ε1")
plt.plot(eV, ε.imag, label="ε2")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs eV
plt.figure(2)
plt.plot(eV, n, label="n")
plt.plot(eV, k, label="k")
plt.xlabel('Photon energy (eV)')
plt.ylabel('n, k')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs μm
plt.figure(3)
plt.plot(μm, n, label="n")
plt.plot(μm, k, label="k")
plt.xlabel('Wavelength (μm)')
plt.ylabel('n, k')
plt.xscale('log')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
|
polyanskiy/refractiveindex.info-scripts
|
scripts/Djurisic 1999 - Graphite-o.py
|
Python
|
gpl-3.0
| 2,735
| 0.025077
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-01 15:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pec', '0006_auto_20170601_0719'),
]
operations = [
migrations.AddField(
model_name='cours',
name='type',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='cours',
name='objectifs_evaluateurs',
field=models.ManyToManyField(blank=True, to='pec.ObjectifEvaluateur'),
),
]
|
alazo/ase
|
pec/migrations/0007_auto_20170601_1557.py
|
Python
|
agpl-3.0
| 653
| 0.001531
|
# -*- coding:utf-8 -*-
from django import forms
try:
from django.utils.encoding import smart_unicode as smart_text
except ImportError:
from django.utils.encoding import smart_text
from cached_modelforms.tests.utils import SettingsTestCase
from cached_modelforms.tests.models import SimpleModel
from cached_modelforms import (
CachedModelChoiceField, CachedModelMultipleChoiceField)
class TestFields(SettingsTestCase):
def setUp(self):
self.settings_manager.set(INSTALLED_APPS=('cached_modelforms.tests',))
self.obj1 = SimpleModel.objects.create(name='name1')
self.obj2 = SimpleModel.objects.create(name='name2')
self.obj3 = SimpleModel.objects.create(name='name3')
self.cached_list = [self.obj1, self.obj2, self.obj3]
class FormSingle(forms.Form):
obj = CachedModelChoiceField(
objects=lambda:self.cached_list,
required=False
)
class FormMultiple(forms.Form):
obj = CachedModelMultipleChoiceField(
objects=lambda:self.cached_list,
required=False
)
self.FormSingle = FormSingle
self.FormMultiple = FormMultiple
def test_modelchoicefield_objects_arg(self):
'''
        Test how the field accepts different types of the ``objects`` argument.
'''
as_list = CachedModelChoiceField(objects=lambda:self.cached_list)
as_iterable = CachedModelChoiceField(
objects=lambda:iter(self.cached_list)
)
list_of_tuples = [(x.pk, x) for x in self.cached_list]
as_list_of_tuples = CachedModelChoiceField(
objects=lambda:list_of_tuples
)
as_dict = CachedModelChoiceField(objects=lambda:dict(list_of_tuples))
choices_without_empty_label = as_list.choices[:]
if as_list.empty_label is not None:
choices_without_empty_label.pop(0)
# make sure all of the ``choices`` attrs are the same
self.assertTrue(
as_list.choices ==
as_iterable.choices ==
as_list_of_tuples.choices ==
as_dict.choices
)
# same for ``objects``
self.assertTrue(
as_list.objects ==
as_iterable.objects ==
as_list_of_tuples.objects ==
as_dict.objects
)
# ``objects`` should be a dict as ``{smart_text(pk1): obj1, ...}``
self.assertEqual(
set(as_list.objects.keys()),
set(smart_text(x.pk) for x in self.cached_list)
)
self.assertEqual(set(as_list.objects.values()), set(self.cached_list))
# ``choices`` should be a list as ``[(smart_text(pk1), smart_text(obj1)), ...]``
self.assertEqual(
choices_without_empty_label,
[(smart_text(x.pk), smart_text(x)) for x in self.cached_list]
)
def test_modelmultiplechoicefield_objects_arg(self):
'''
        Test how the field accepts different types of the ``objects`` argument.
'''
as_list = CachedModelMultipleChoiceField(
objects=lambda:self.cached_list
)
as_iterable = CachedModelMultipleChoiceField(
objects=lambda:iter(self.cached_list)
)
list_of_tuples = [(x.pk, x) for x in self.cached_list]
as_list_of_tuples = CachedModelMultipleChoiceField(
objects=lambda:list_of_tuples
)
as_dict = CachedModelMultipleChoiceField(objects=dict(list_of_tuples))
# make sure all of the ``choices`` attrs are the same
self.assertTrue(
as_list.choices ==
as_iterable.choices ==
as_list_of_tuples.choices ==
as_dict.choices)
# same for ``objects``
self.assertTrue(
as_list.objects ==
as_iterable.objects ==
as_list_of_tuples.objects ==
as_dict.objects)
# ``objects`` should be a dict as ``{smart_text(pk1): obj1, ...}``
self.assertEqual(
set(as_list.objects.keys()),
set(smart_text(x.pk) for x in self.cached_list)
)
self.assertEqual(set(as_list.objects.values()), set(self.cached_list))
# ``choices`` should be a list as ``[(smart_text(pk1), smart_text(obj1)), ...]``
self.assertEqual(
as_list.choices,
[(smart_text(x.pk), smart_text(x)) for x in self.cached_list]
)
def test_modelchoicefield_behavior(self):
'''
        Test how the field handles data in a form.
'''
# some value
form = self.FormSingle({'obj': smart_text(self.obj1.pk)})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['obj'], self.obj1)
# no value
form = self.FormSingle({})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['obj'], None)
# invalid value
form = self.FormSingle({'obj': '-1'})
self.assertFalse(form.is_valid())
self.assertTrue(form._errors['obj'])
def test_modelmultiplechoicefield_behavior(self):
'''
        Test how the field handles data in a form.
'''
# some value
form = self.FormMultiple({'obj': [smart_text(self.obj1.pk), smart_text(self.obj2.pk)]})
self.assertTrue(form.is_valid())
self.assertEqual(set(form.cleaned_data['obj']), set([self.obj1, self.obj2]))
# no value
form = self.FormMultiple({})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['obj'], [])
# invalid value
form = self.FormMultiple({'obj': [smart_text(self.obj1.pk), '-1']})
self.assertFalse(form.is_valid())
self.assertTrue(form._errors['obj'])
# invalid list
form = self.FormMultiple({'obj': '-1'})
self.assertFalse(form.is_valid())
self.assertTrue(form._errors['obj'])
def test_modelchoicefield_objects_assignment(self):
field = CachedModelChoiceField(objects=self.cached_list)
field2 = CachedModelChoiceField(objects=self.cached_list[:2])
field.objects = self.cached_list[:2]
self.assertEqual(field.objects, field2.objects)
self.assertEqual(field.choices, field2.choices)
def test_modelmultiplechoicefield_objects_assignment(self):
field = CachedModelMultipleChoiceField(objects=self.cached_list)
field2 = CachedModelMultipleChoiceField(objects=self.cached_list[:2])
field.objects = self.cached_list[:2]
self.assertEqual(field.objects, field2.objects)
self.assertEqual(field.choices, field2.choices)
|
drtyrsa/django-cached-modelforms
|
cached_modelforms/tests/test_fields.py
|
Python
|
bsd-2-clause
| 6,699
| 0.001941
|
# coding: utf-8
import copy
from google.appengine.ext import ndb
import flask
from apps import auth
from apps.auth import helpers
from core import task
from core import util
import config
import forms
import models
bp = flask.Blueprint(
'user',
__name__,
url_prefix='/user',
template_folder='templates',
)
###############################################################################
# User List
###############################################################################
@bp.route('/', endpoint='list')
@auth.admin_required
def user_list():
user_dbs, user_cursor, prev_cursor = models.User.get_dbs(
email=util.param('email')
)
permissions = list(forms.UserUpdateForm._permission_choices)
permissions += util.param('permissions', list) or []
return flask.render_template(
'user/admin/list.html',
html_class='user-list',
title='User List',
user_dbs=user_dbs,
next_url=util.generate_next_url(user_cursor),
prev_url=util.generate_next_url(prev_cursor),
permissions=sorted(set(permissions)),
api_url=flask.url_for('api.users')
)
@bp.route('/<int:user_id>/update/', methods=['GET', 'POST'], endpoint='update')
@auth.admin_required
def user_update(user_id):
user_db = models.User.get_by_id(user_id)
if not user_db:
flask.abort(404)
form = forms.UserUpdateForm(obj=user_db)
for permission in user_db.permissions:
form.permissions.choices.append((permission, permission))
form.permissions.choices = sorted(set(form.permissions.choices))
if form.validate_on_submit():
if not util.is_valid_username(form.username.data):
form.username.errors.append('This username is invalid.')
elif not models.User.is_username_available(form.username.data, user_db.key):
form.username.errors.append('This username is already taken.')
else:
form.populate_obj(user_db)
if auth.current_user_id() == user_db.key.id():
user_db.admin = True
user_db.active = True
user_db.put()
return flask.redirect(flask.url_for(
'user.list', order='-modified', active=user_db.active,
))
return flask.render_template(
'user/admin/update.html',
title=user_db.name,
html_class='user-update',
form=form,
user_db=user_db,
api_url=flask.url_for('api.user', key=user_db.key.urlsafe())
)
@bp.route('/verify_email/<token>/')
@auth.login_required
def verify_email(token):
user_db = auth.current_user_db()
if user_db.token != token:
flask.flash('That link is either invalid or expired.', category='danger')
return flask.redirect(flask.url_for('user.profile_update'))
user_db.verified = True
user_db.token = util.uuid()
user_db.put()
flask.flash('Hooray! Your email is now verified.', category='success')
return flask.redirect(flask.url_for('user.profile_update'))
@bp.route('/merge/', methods=['GET', 'POST'])
@auth.admin_required
def merge():
user_keys = util.param('user_keys', list)
if not user_keys:
flask.abort(400)
user_db_keys = [ndb.Key(urlsafe=k) for k in user_keys]
user_dbs = ndb.get_multi(user_db_keys)
if len(user_dbs) < 2:
flask.abort(400)
user_dbs.sort(key=lambda user_db: user_db.created)
merged_user_db = user_dbs[0]
auth_ids = []
permissions = []
is_admin = False
is_active = False
for user_db in user_dbs:
auth_ids.extend(user_db.auth_ids)
permissions.extend(user_db.permissions)
is_admin = is_admin or user_db.admin
is_active = is_active or user_db.active
if user_db.key.urlsafe() == util.param('user_key'):
merged_user_db = user_db
auth_ids = sorted(list(set(auth_ids)))
permissions = sorted(list(set(permissions)))
merged_user_db.permissions = permissions
merged_user_db.admin = is_admin
merged_user_db.active = is_active
merged_user_db.verified = False
form_obj = copy.deepcopy(merged_user_db)
form_obj.user_key = merged_user_db.key.urlsafe()
form_obj.user_keys = ','.join(user_keys)
form = forms.UserMergeForm(obj=form_obj)
if form.validate_on_submit():
form.populate_obj(merged_user_db)
merged_user_db.auth_ids = auth_ids
merged_user_db.put()
deprecated_keys = [k for k in user_db_keys if k != merged_user_db.key]
merge_user_dbs(merged_user_db, deprecated_keys)
return flask.redirect(
flask.url_for('user.update', user_id=merged_user_db.key.id()),
)
return flask.render_template(
'user/admin/merge.html',
title='Merge Users',
html_class='user-merge',
user_dbs=user_dbs,
merged_user_db=merged_user_db,
form=form,
auth_ids=auth_ids,
api_url=flask.url_for('api.users', user_keys=','.join(user_keys))
)
@ndb.transactional(xg=True)
def merge_user_dbs(user_db, deprecated_keys):
# TODO: Merge possible user data before handling deprecated users
deprecated_dbs = ndb.get_multi(deprecated_keys)
for deprecated_db in deprecated_dbs:
deprecated_db.auth_ids = []
deprecated_db.active = False
deprecated_db.verified = False
if not deprecated_db.username.startswith('_'):
deprecated_db.username = '_%s' % deprecated_db.username
ndb.put_multi(deprecated_dbs)
@bp.route('/profile/')
@auth.login_required
def profile():
user_db = auth.current_user_db()
return flask.render_template(
'user/profile/index.html',
title=user_db.name,
html_class='profile-view',
user_db=user_db,
has_json=True,
api_url=flask.url_for('api.user', key=user_db.key.urlsafe()),
)
@bp.route('/profile/update/', methods=['GET', 'POST'])
@auth.login_required
def profile_update():
user_db = auth.current_user_db()
form = forms.ProfileUpdateForm(obj=user_db)
if form.validate_on_submit():
email = form.email.data
if email and not user_db.is_email_available(email, user_db.key):
form.email.errors.append('This email is already taken.')
if not form.errors:
send_verification = not user_db.token or user_db.email != email
form.populate_obj(user_db)
if send_verification:
user_db.verified = False
task.verify_email_notification(user_db)
user_db.put()
return flask.redirect(flask.url_for('pages.welcome'))
return flask.render_template(
'user/profile/update.html',
title=user_db.name,
html_class='profile',
form=form,
user_db=user_db,
)
@bp.route('/profile/password/', methods=['GET', 'POST'])
@auth.login_required
def profile_password():
if not config.CONFIG_DB.has_email_authentication:
flask.abort(418)
user_db = auth.current_user_db()
form = forms.ProfilePasswordForm(obj=user_db)
if form.validate_on_submit():
errors = False
old_password = form.old_password.data
new_password = form.new_password.data
if new_password or old_password:
if user_db.password_hash:
if helpers.password_hash(user_db, old_password) != user_db.password_hash:
form.old_password.errors.append('Invalid current password')
errors = True
if not errors and old_password and not new_password:
form.new_password.errors.append('This field is required.')
errors = True
if not (form.errors or errors):
user_db.password_hash = helpers.password_hash(user_db, new_password)
flask.flash('Your password has been changed.', category='success')
if not (form.errors or errors):
user_db.put()
return flask.redirect(flask.url_for('user.profile'))
return flask.render_template(
'user/profile/password.html',
title=user_db.name,
html_class='profile-password',
form=form,
user_db=user_db,
)
|
gmist/gae-de-init
|
main/apps/user/views.py
|
Python
|
mit
| 7,664
| 0.010569
|
from django.conf import settings
from geopy import distance, geocoders
import pygeoip
def get_geodata_by_ip(addr):
gi = pygeoip.GeoIP(settings.GEO_CITY_FILE, pygeoip.MEMORY_CACHE)
geodata = gi.record_by_addr(addr)
return geodata
def get_geodata_by_region(*args):
gn = geocoders.GeoNames()
return gn.geocode(' '.join(args), exactly_one=False)[0]
def get_distance(location1, location2):
"""
Calculate distance between two locations, given the (lat, long) of each.
Required Arguments:
location1
A tuple of (lat, long).
location2
A tuple of (lat, long).
"""
return distance.distance(location1, location2).miles
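# Illustrative usage (coordinates and result are approximate, not from the
# original module):
#   get_distance((40.7128, -74.0060), (34.0522, -118.2437))
#   # roughly 2,450 miles between New York City and Los Angeles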
|
iuscommunity/dmirr
|
src/dmirr.hub/dmirr/hub/lib/geo.py
|
Python
|
gpl-2.0
| 735
| 0.013605
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A handler that displays servers and their instances."""
from google.appengine.tools.devappserver2.admin import admin_request_handler
class ModulesHandler(admin_request_handler.AdminRequestHandler):
def get(self):
values = {'modules': self.dispatcher.modules}
self.response.write(self.render('modules.html', values))
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/admin/modules_handler.py
|
Python
|
bsd-3-clause
| 934
| 0.001071
|
def isPrime(num):
if num <= 1:
return False
i = 2
while i < num / 2 + 1:
if num % i == 0:
return False
i += 1
return True
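# e.g. isPrime(13) -> True, isPrime(15) -> False, isPrime(1) -> False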
big = 600851475143
test = 1
while test < big:
test += 1
if big % test == 0:
print(test, ' divides evenly')
        div = big // test  # integer division keeps div an int for isPrime()
print('candidate ', div)
if isPrime(div):
print('found ', div)
break
|
rck109d/projectEuler
|
src/euler/p3.py
|
Python
|
lgpl-3.0
| 433
| 0
|
#!/usr/bin/python3
from scrapers.scrape import scrape_page
# if you want to use this scraper without the RESTful api webservice then
# change this import: from scrape import scrape_page
import re
try:
import pandas as pd
pandasImported = True
except ImportError:
pandasImported = False
BASE_URL = "http://finviz.com/quote.ashx?t="
VALUE_NAMES_XPATH = '//*[@class="snapshot-td2-cp"]/text()'
VALUES_XPATH = '//*[@class="snapshot-td2"]/b/text() | //*[@class="snapshot-td2"]/b/*/text()'
def get_statistics_table(page):
"""
    This function will return the financial statistics table from a stock's finviz page, if it exists, as a
    Python dictionary.
    :param page: HTML tree structure based on the html markup of the scraped web page.
    :return: a dictionary of all the financial statistics listed on a stock's finviz page, otherwise an
    empty dictionary
"""
value_names = page.xpath(VALUE_NAMES_XPATH)
values = page.xpath(VALUES_XPATH)
values = [value if value != "-" else None for value in values]
table = dict(zip(value_names, values))
return table
def get_statistic(ticker_symbol, stat_name, page=None):
"""
This function will get the associated financial statistic from the corresponding finviz page given the
statistic's name and the ticker symbol
:param ticker_symbol: The ticker symbol of the interested stock (e.g., "AAPL", "GOOG", "MSFT")
:param stat_name: The name of the interested financial statistic (e.g., "P/E", "Price", "Volume").
An exhaustive list of available financial statistics can be found on a stock's finviz page
    :param page: HTML tree structure based on the html markup of the scraped web page. If one is not passed in,
    the function will scrape the page
:return: the value of the interested financial statistic if it exists, otherwise None
"""
if page is None:
page = scrape_page(BASE_URL + ticker_symbol)
table = get_statistics_table(page)
if stat_name in table.keys() and table[stat_name]:
return table[stat_name]
else:
return None
def get_all_statistics(ticker_symbol, page=None):
"""
    This function will get all the associated financial statistics from the corresponding finviz page
    given the ticker symbol
    :param ticker_symbol: The ticker symbol of the interested stock (e.g., "AAPL", "GOOG", "MSFT")
    :param page: HTML tree structure based on the html markup of the scraped page. If one is not passed in,
    the function will scrape the page
:return: a dictionary of all the financial statistics listed on a stock's finviz page, otherwise None
"""
if page is None:
page = scrape_page(BASE_URL + ticker_symbol)
table = get_statistics_table(page)
if table:
return table
else:
return None
def get_all_statistics_series(ticker_symbol):
"""
    Return a pandas Series of statistics for the ticker symbol, converting values to numeric types where possible.
"""
if not pandasImported:
raise Exception("Pandas not installed.")
d = get_all_statistics(ticker_symbol)
new_dict = {}
for k,v in d.items():
if v == None:
continue
if ('%' in v) and (v.index('%') == (len(v)-1)):
# percent
new_dict[k + '(%)'] = float(v[:-1])
elif (k == '52W Range'):
m = re.match('([0-9\.\-]+) - ([0-9\.\-]+)',v)
new_dict['52W Low'] = float(m.group(1))
new_dict['52W High'] = float(m.group(2))
else:
try:
# remove any commas
v = re.sub(',','',v)
                v = re.sub('B','E9',v) # exponentiate billions
v = re.sub('M','E6',v)
v = re.sub('K','E3',v)
new_dict[k] = float(v)
except ValueError:
new_dict[k] = v
return pd.Series(new_dict)
def get_all_statistics_df(symbol_list):
"""Return a dataframe for a list of symbols.
"""
series = []
for s in symbol_list:
series.append(get_all_statistics_series(s))
return pd.DataFrame(series,index=symbol_list)
if __name__ == "__main__":
# Test Cases
print(get_statistic("AAPL", "P/E"))
print(get_statistic("AAPL", "Inst Own"))
print(get_statistic("AAPL", "Change"))
print(get_statistic("AAPL", "This should return None"))
print(get_all_statistics("AAPL"))
|
ajpotato214/Finance-Data-Scraper-API
|
finance_data_scraper/scrapers/finviz.py
|
Python
|
mit
| 4,390
| 0.0082
|
from w3lib.html import remove_tags
from requests import session, codes
from bs4 import BeautifulSoup
# Net/gross calculator for a student under 26 years of age
class Student:
_hours = 0
_wage = 0
_tax_rate = 18
_cost = 20
def __init__(self, hours, wage, cost):
self._hours = hours
self._wage = wage
self._cost = cost
def _get_real_tax_rate(self):
tax_from = (100 - self._cost) / 100
return tax_from * self._tax_rate / 100
def get_net(self):
return self._wage * self._hours
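    # Gross is derived from net by inverting the tax formula: tax applies only to
    # the non-deductible part of gross, i.e.
    #   net = gross - gross*(1 - cost/100)*tax_rate/100
    # so gross = net / (1 - effective_rate), rounded to the nearest integer.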
def get_gross(self):
value = self.get_net() / (1 - self._get_real_tax_rate())
return int(value + 0.5)
def get_tax_base(self):
return self.get_gross() - self.get_cost()
def get_cost(self):
return self.get_gross() - self.get_gross() * (100 - self._cost) / 100
def get_tax(self):
return self.get_gross() - self.get_net()
def get_cost_percentage(self):
return self._cost
# Net/gross calculator using a web client, with an optional offline fallback
class WebCalculator:
_data = None
_calculator = None
_cost = 0
def __init__(self, hours, wage, cost):
from tools import Config
self._cost = cost
self._data = Config.get_calculator_bot().parse(hours * wage, 1 if cost == 50 else 0)
# Check if bot returned some data
if self._data == None:
self._calculator = Config.get_fallback_calculator()(hours, wage, cost)
def get_net(self):
if self._data == None:
return self._calculator.get_net()
return self._data['net']
def get_gross(self):
if self._data == None:
return self._calculator.get_gross()
return self._data['gross']
def get_tax_base(self):
if self._data == None:
return self._calculator.get_tax_base()
return self._data['tax_base']
def get_cost(self):
if self._data == None:
return self._calculator.get_cost()
return self._data['cost']
def get_tax(self):
if self._data == None:
return self._calculator.get_tax()
return self._data['tax']
def get_cost_percentage(self):
return self._cost
# Bot finding invoice values on wfirma.pl calculator page
class WfirmaPlBot:
_url = 'https://poradnik.wfirma.pl/staff_contract_headers/evaluate/errand'
# Send needed data
@staticmethod
def parse(net, copyright):
from tools import Config
# Prepare data for request
form_data = Config.get('wfirma.pl')
header_data = {
'quota_type': form_data['quota_type'],
'quota': net,
'company_incidental': form_data['company_incidental'],
}
form_data['copyright'] = copyright
with session() as c:
# convert data to format viable for url-encoding
data = {}
for k, v in form_data.items():
data['data[StaffContractErrand][%s]' % k] = v
for k, v in header_data.items():
data['data[StaffContractHeader][%s]' % k] = v
# Send the request to the server
try:
request = c.post(WfirmaPlBot._url, data=data, timeout=3)
except:
print('Przekroczono maksymalny czas oczekiwania na odpowiedź serwera')
return None
# There was some error (most likely server-side), so use offline fallback
if request.status_code != codes.ok:
print('Wystąpił błąd podczas pobierania danych do rachunku')
return None
return WfirmaPlBot._parse_results(request.text)
# Parse data returned on request
@staticmethod
def _parse_results(request_body):
# extract wanted data
soup = BeautifulSoup(request_body.replace('\n', ''), 'xml')
interesting_columns = soup.findAll('td')[1:15:2]
# convert to floats
interesting_columns = list(map(lambda x: float(x.get_text().replace(' ', '').replace(',', '.')), interesting_columns))
column_names = [
'net', 'gross', 'all_cost', 'insurance_base', 'cost', 'tax_base', 'tax',
]
result = {}
for i in range(0, 7):
result[column_names[i]] = interesting_columns[i]
return result
# @todo cannot set the deductible-cost ("koszty uzyskania") percentage on this calculator
class KalkulatoryNfBot:
_url = 'http://kalkulatory.nf.pl/kalkulator/wynagrodzenie/zlecenie'
# Send needed data
@staticmethod
def parse(net, copyright):
        return None  # disabled (see @todo above): returning None makes the caller fall back to the offline calculator
from tools import Config
form_data = Config.get('kalkulatory.nf.pl')
form_data = {**form_data, **{
'stawka': 'net',
'kwota': net,
'_method': 'POST',
}}
with session() as c:
# Fix data format
data = {}
for k, v in form_data.items():
data['data[Calculator][%s]' % k] = v
# Try to make a request
try:
request = c.post(KalkulatoryNfBot._url, data=data, timeout=3)
except:
print('Przekroczono maksymalny czas oczekiwania na odpowiedź serwera')
return None
# There was some error (most likely server-side), so use offline fallback
if request.status_code != codes.ok:
print('Wystąpił błąd podczas pobierania danych do rachunku')
return None
return KalkulatoryNfBot._parse_results(request.text)
# Parse data returned on request
@staticmethod
def _parse_results(request_body):
# extract wanted data
soup = BeautifulSoup(request_body)
table = soup.select('div.calc-body.clr')[0].find_next_sibling().findAll('td')[4:]
        del table[3:7] # remove unneeded
table = list(map(lambda x: float(x.get_text().replace(' zł', '').replace(' ', '').replace(',', '.')), table))
column_names = [
'cost', 'tax_base', 'tax', 'gross', 'net'
]
result = {}
for i in range(0, 5):
result[column_names[i]] = table[i]
return result
|
tomekby/miscellaneous
|
jira-invoices/calculator.py
|
Python
|
mit
| 6,443
| 0.003889
|
import logging; logger = logging.getLogger("morse." + __name__)
import socket
import select
import json
import morse.core.middleware
from functools import partial
from morse.core import services
class MorseSocketServ:
def __init__(self, port, component_name):
# List of socket clients
self._client_sockets = []
self._message_size = 1024
self._component_name = component_name
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._server.bind((str(socket.INADDR_ANY), port))
self._server.listen(1)
logger.info("Socket Mw Server now listening on port " + str(port) + \
" for component " + str(component_name) + ".")
def __del__(self):
""" Terminate the ports used to accept requests """
if self._client_sockets:
logger.info("Closing client sockets...")
for s in self._client_sockets:
s.close()
if self._server:
logger.info("Shutting down connections to server...")
self._server.shutdown(socket.SHUT_RDWR)
logger.info("Closing socket server...")
self._server.close()
del self._server
def main_export(self, encode, component_instance):
sockets = self._client_sockets + [self._server]
        try:
            inputready, outputready, exceptready = select.select(sockets, sockets, [], 0)
        except (select.error, socket.error):
            # select() failed; skip this cycle instead of using undefined lists
            return
if self._server in inputready:
sock, addr = self._server.accept()
self._client_sockets.append(sock)
if outputready != []:
message = encode(component_instance)
for o in outputready:
try:
o.send(message)
except socket.error:
self.close_socket(o)
def main_read(self, decode, component_instance):
sockets = self._client_sockets + [self._server]
        try:
            inputready, outputready, exceptready = select.select(sockets, [], [], 0)
        except (select.error, socket.error):
            # select() failed; skip this cycle instead of using undefined lists
            return
for i in inputready:
if i == self._server:
sock, addr = self._server.accept()
if self._client_sockets != []:
logger.warning("More than one clients for an actuator!!")
self._client_sockets.append(sock)
else:
try:
msg = i.recv(self._message_size)
logger.debug("received msg %s" % msg)
if msg == b'':
self.close_socket(i)
else:
component_instance.local_data = decode(msg)
except socket.error as detail:
self.close_socket(i)
def close_socket(self, sock):
self._client_sockets.remove(sock)
try:
sock.close()
except socket.error as error_info:
logger.warning("Socket error catched while closing: " + str(error_info))
class MorseSocketClass(morse.core.middleware.MorseMiddlewareClass):
""" External communication using sockets. """
def __init__(self):
""" Initialize the socket connections """
# Call the constructor of the parent class
        super(MorseSocketClass, self).__init__()
# port -> MorseSocketServ
self._server_dict = {}
# component name (string) -> Port (int)
self._component_nameservice = {}
self._base_port = 60000
# Register two special services in the socket service manager:
# TODO To use a new special component instead of 'simulation',
# uncomment the line :-)
# bge.logic.morse_services.register_request_manager_mapping("streams", "SocketRequestManager")
services.do_service_registration(self.list_streams, 'simulation')
services.do_service_registration(self.get_stream_port, 'simulation')
services.do_service_registration(self.get_all_stream_ports, 'simulation')
def list_streams(self):
""" List all publish streams.
"""
return list(self._component_nameservice.keys())
def get_stream_port(self, name):
""" Get stream port for stream name.
"""
port = -1
try:
port = self._component_nameservice[name]
except KeyError:
pass
return port
def get_all_stream_ports(self):
""" Get stream ports for all streams.
"""
return self._component_nameservice
def register_component(self, component_name, component_instance, mw_data):
""" Open the port used to communicate by the specified component.
"""
# Create a socket server for this component
serv = MorseSocketServ(self._base_port, component_name)
self._server_dict[self._base_port] = serv
self._component_nameservice[component_name] = self._base_port
self._base_port = self._base_port + 1
# Extract the information for this middleware
# This will be tailored for each middleware according to its needs
function_name = mw_data[1]
fun = self._check_function_exists(function_name)
if fun != None:
# Choose what to do, depending on the function being used
# Data read functions
if function_name == "read_message":
component_instance.input_functions.append(partial(MorseSocketServ.main_read, serv, fun))
# Data write functions
elif function_name == "post_message":
component_instance.output_functions.append(partial(MorseSocketServ.main_export, serv, fun))
# If the function is external and has already been loaded before
else:
# Pass by mw_data the generated server
mw_data.append(serv)
self._add_method(mw_data, component_instance)
else:
# Pass by mw_data the generated server
mw_data.append(serv)
self._add_method(mw_data, component_instance)
def post_message(self, component_instance):
return (json.dumps(component_instance.local_data) + '\n').encode()
def read_message(self, msg):
return json.loads(msg.decode('utf-8'))
def print_open_sockets(self):
""" Display a list of all currently opened sockets."""
logger.info("Socket Mid: Currently opened sockets:")
        for name, port in self._component_nameservice.items():
            logger.info(" - Port name '{0}' = '{1}'".format(name, port))
|
Arkapravo/morse-0.6
|
src/morse/middleware/socket_mw.py
|
Python
|
bsd-3-clause
| 6,797
| 0.00206
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('interpreter', '0010_auto_20141215_0027'),
]
operations = [
migrations.RemoveField(
model_name='band',
name='members',
),
migrations.AddField(
model_name='artist',
name='band',
field=models.ManyToManyField(related_name='members', null=True, to='interpreter.Band', blank=True),
preserve_default=True,
),
]
|
nanomolina/MusicWeb
|
src/Music/apps/interpreter/migrations/0011_auto_20141215_0030.py
|
Python
|
mit
| 600
| 0.001667
|
#
# SPDX-FileCopyrightText: 2016 Dmytro Kolomoiets <amerlyq@gmail.com> and contributors.
#
# SPDX-License-Identifier: GPL-3.0-only
#
from miur.cursor import state, update, message as msg
class Dispatcher:
"""Apply actions to any unrelated global states"""
def _err_wrong_cmd(self):
# Move err processing to 'update.py' (make more symmetrical)
# _log.error("Wrong cmd: {}".format(cmd))
raise NotImplementedError
def focus_node_next(self):
if state.cursor is not None and state.entries is not None:
state.cursor = min(state.cursor + 1, len(state.entries) - 1)
def focus_node_prev(self):
if state.cursor is not None and state.entries is not None:
state.cursor = max(state.cursor - 1, 0)
def focus_node_beg(self):
if state.entries is not None:
state.cursor = 0
def focus_node_end(self):
if state.entries is not None:
state.cursor = len(state.entries) - 1
def shift_node_parent(self):
# DEV: combine these multiple queue in single request to *core*
# state.path =
# TEMP: apply directly to global state
# TEMP: send msg and wait until fully processed (send-recv-apply)
update.handle(msg.NodeGetParentMsg())
update.handle(msg.ListNodeMsg())
state.cursor = 0 if state.entries else None
def shift_node_current(self):
if state.cursor is None or state.entries is None:
return
# WARN: must send both (p, e) for *core*
# => to check if (p, e) is still available in fs
update.handle(msg.NodeGetChildMsg())
update.handle(msg.ListNodeMsg())
state.cursor = 0 if state.entries else None
|
miur/miur
|
OLD/miur/cursor/dispatch.py
|
Python
|
gpl-3.0
| 1,733
| 0.000577
|
#!/usr/bin/env python
#encoding:utf8
#
# file: filter6_tests.py
# author: sl0
# date: 2013-03-06
#
import unittest
from adm6.filter6 import IP6_Filter, Ip6_Filter_Rule
from sys import stdout
from os.path import expanduser as homedir
from ipaddr import IPv6Network
from os import getenv as get_env
home_dir_replacement = get_env("HOME")
rule = {}
class Ip6_Filter_Rule_tests(unittest.TestCase):
"""
some tests for class Ip6_Filter_Rule
"""
def test_01_create_Filter_Rule(self):
"""
fr-01 create Filter_Rule object
"""
my_err = False
try:
f = Ip6_Filter_Rule(rule)
except:
my_err = True
self.assertFalse(my_err)
self.assertFalse(f['i_am_d'])
self.assertFalse(f['i_am_s'])
self.assertFalse(f['travers'])
self.assertFalse(f['insec'])
self.assertFalse(f['noif'])
self.assertFalse(f['nonew'])
self.assertFalse(f['nostate'])
self.assertEqual(f['sport'], u'1024:')
self.assertEqual(['Rule-Nr', 'Pair-Nr', 'RuleText'], f.CommentList)
self.assertEqual(['Output', 'debuglevel'], f.NeverDisplay)
displaylist = ['Rule-Nr', 'Pair-Nr', 'System-Name', 'System-Forward',
'OS', 'Asymmetric', 'RuleText', 'Source', 'Destin', 'Protocol',
'sport', 'dport', 'Action', 'nonew', 'noif', 'nostate', 'insec',
'i_am_s', 'i_am_d', 'travers', 'source-if', 'source-rn',
'src-linklocal', 'src-multicast', 'destin-if', 'destin-rn',
'dst-linklocal', 'dst-multicast', ]
self.assertEqual(displaylist, f.DisplayList)
#f['debuglevel'] = True
#print f
def test_02_produce_for_invalid_os_name(self):
"""
fr-02 produce for invalid os name
"""
my_err = False
try:
fr = Ip6_Filter_Rule(rule)
except:
my_err = True
fr['OS'] = 'Invalid os name'
        self.assertRaises(ValueError, fr.produce, stdout)
def test_03_produce_for_linux_as_traversed(self):
"""
fr-03 produce for linux as traversed host
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
print "M:", fr.msg
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_04_produce_for_openbsd(self):
"""
fr-04 produce for OpenBSD
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
#expect = """# n o t y e t i m p l e m e n t e d !"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_05_produce_for_bsd(self):
"""
fr-05 produce for BSD
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'BSD'
except:
my_err = True
fr.produce(ofile)
expect = "# IPF is n o t y e t i m p l e m e n t e d !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_06_produce_for_opensolaris(self):
"""
fr-06 produce for OpenSolaris
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenSolaris'
except:
my_err = True
fr.produce(ofile)
expect = "# IPF is n o t y e t i m p l e m e n t e d !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_07_produce_for_wxp(self):
"""
fr-07 produce for WXP
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Win-XP-SP3'
except:
my_err = True
fr.produce(ofile)
expect = "# System should not forward until redesigned"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_08_repr_with_debuglevel(self):
"""
fr-08 repr with debuglevel
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
fr['debuglevel'] = True
value = str(fr)
print "V:", value
expect = """# Rule-Nr : 1 #
# Pair-Nr : 1 #
# System-Forward : True #
# OS : Debian #
# Source : 2001:db8:1::1 #
# Destin : 2001:db8:2::1 #
# Protocol : tcp #
# sport : 1024: #
# dport : 22 #
# Action : accept #
# nonew : False #
# noif : False #
# nostate : False #
# insec : False #
# i_am_s : False #
# i_am_d : False #
# travers : True #
# source-if : eth0 #
# src-linklocal : False #
# destin-if : eth0 #
# dst-linklocal : False #
"""
self.assertEquals(expect, value)
def test_09_repr_without_debuglevel(self):
"""
fr-09 repr without debuglevel
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
fr['debuglevel'] = False
fr['Abrakadabra'] = True
value = str(fr)
print "V:", value
expect = """# Rule-Nr : 1 #
# Pair-Nr : 1 #
# Abrakadabra : True #
"""
self.assertEquals(expect, value)
def test_10_produce_for_linux_as_source(self):
"""
fr-10 produce for linux as source host
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['travers'] = False
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A output__new -o eth1 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A input___new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_11_produce_for_linux_as_source_icmpv6(self):
"""
fr-11 produce for linux as source host icmpv6
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "icmpv6"
fr['dport'] = "echo-request"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['travers'] = False
fr['noif'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A output__new -s 2001:db8:1::1 -d 2001:db8:2::1 -p icmpv6 --icmpv6-type echo-request -j ACCEPT -m comment --comment "1,1"\necho -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_12_produce_for_linux_as_source_nonew(self):
"""
fr-12 produce for linux as source host nonew
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "21"
fr['nonew'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['travers'] = False
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A output__new -o eth1 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 21 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A input___new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 21 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
print fr.msg
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_13_produce_for_linux_as_dest(self):
"""
fr-13 produce for linux as dest host
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = True
fr['travers'] = False
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A input___new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A output__new -o eth0 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_14_produce_for_linux_as_traversed(self):
"""
fr-14 produce for linux as traversed host
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_15_produce_for_linux_as_traversed_reject(self):
"""
fr-15 produce for linux reject rule
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "reject"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j REJECT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j REJECT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_16_produce_for_linux_as_traversed_drop(self):
"""
fr-16 produce for linux drop rule
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "drop"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j DROP -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j DROP -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_17_produce_for_linux_as_traversed_insec(self):
"""
fr-17 produce for linux accept rule insec
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['insec'] = True
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 0: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 0: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_18_produce_for_linux_ip6(self):
"""
fr-18 produce for linux ip6 accept rule
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_19_produce_for_linux_ip6_forced(self):
"""
fr-19 produce for linux ip6 forced accept rule
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['forced'] = True
fr['i_am_s'] = True
fr['i_am_d'] = True
fr['travers'] = True
fr['noif'] = True
fr['nostate'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A output__new -s 2001:db8:1::1 -d 2001:db8:2::1 -j ACCEPT -m comment --comment "1,1"
echo -n ".";/sbin/ip6tables -A input___new -s 2001:db8:1::1 -d 2001:db8:2::1 -j ACCEPT -m comment --comment "1,1"
echo -n ".";/sbin/ip6tables -A forward_new -s 2001:db8:1::1 -d 2001:db8:2::1 -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_20_produce_for_linux_forward_forbidden(self):
"""
fr-20 produce for linux ip6 forward forbidden
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = False
fr['forced'] = False
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['noif'] = True
fr['nostate'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """# System-Forward: False ==> no rule generated"""
self.maxDiff = None
#print "M:", fr.msg
self.assertEquals(expect, fr.msg)
def test_21_produce_for_linux_forward_linklocal(self):
"""
fr-21 produce for linux forward linklocal
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "fe80::e:db8:1:1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['forced'] = False
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = True
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = "# link-local ==> no forward"
self.maxDiff = None
#print "M:", fr.msg
self.assertEquals(expect, fr.msg)
def test_22_produce_for_openbsd_icmpv6(self):
"""
fr-22 produce for OpenBSD icmpv6
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "icmpv6"
fr['dport'] = "echo-request"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_23_produce_for_openbsd_tcp_nonew(self):
"""
fr-23 produce for OpenBSD tcp nonew
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "reject"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "4711"
fr['nonew'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_24_produce_for_openbsd_tcp_drop(self):
"""
fr-24 produce for OpenBSD tcp drop
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "4711"
fr['insec'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_25_produce_for_openbsd_ip6(self):
"""
fr-25 produce for OpenBSD ip6
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_26_produce_for_openbsd_commented(self):
"""
fr-26 produce for OpenBSD commented
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce_OpenBSD(ofile, True)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_27_produce_for_openbsd_commented(self):
"""
fr-27 produce for OpenBSD forward forbidden
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "0:"
fr['System-Forward'] = False
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce_OpenBSD(ofile, False)
expect = "# System does not forward by configuration"
#expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_28_produce_for_openbsd_noif(self):
"""
fr-28 produce for OpenBSD forward noif
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "0:"
fr['noif'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce_OpenBSD(ofile, False)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_29_produce_for_openbsd_dst_linklocal(self):
"""
fr-29 produce for OpenBSD forward dst-link-local
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "0:"
fr['noif'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = True
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce_OpenBSD(ofile, False)
expect = "# dst-link-local ==> no filter rule generated"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_30_produce_for_wxp_tcp(self):
"""
fr-30 produce for wxp tcp
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "0:"
fr['noif'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, False)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_31_produce_for_wxp_icmpv6(self):
"""
fr-31 produce for wxp icmpv6
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "icmpv6"
fr['dport'] = "echo-request"
fr['noif'] = False
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, False)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_32_produce_for_wxp_nonew(self):
"""
fr-32 produce for wxp nonew
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "25"
fr['nonew'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, False)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_33_produce_for_wxp_reject_insec(self):
"""
fr-33 produce for wxp reject insec
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "reject"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "25"
fr['insec'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, False)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_34_produce_for_wxp_ip6_commented(self):
"""
fr-34 produce for wxp ip6 commented
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['insec'] = False
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, True)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
    def test_34a_produce_for_wxp_ip6_no_forward(self):
        """
        fr-34a produce for wxp ip6 commented, forwarding disabled
        """
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['insec'] = False
fr['System-Forward'] = False
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, True)
expect = "# System should not forward by configuration"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_35_produce_for_wxp_dst_linklocal(self):
"""
fr-35 produce for wxp dst-linklocal
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['insec'] = False
fr['System-Forward'] = False
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = True
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, True)
expect = "# dst-linklocal ==> no rule generated"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
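# The produce-tests above set the same seventeen Ip6_Filter_Rule fields over
# and over, changing only a few values per case. A small factory like the
# sketch below could shrink that boilerplate; it is only a suggestion, is not
# called by any test in this module, and assumes the module-level `rule` text
# and the Ip6_Filter_Rule class used throughout these tests.
def make_test_rule(overrides=None):
    """Build an Ip6_Filter_Rule pre-loaded with the common test values.

    Pass a dict to override individual fields, e.g.
    make_test_rule({'Protocol': "icmpv6", 'dport': "echo-request"}).
    """
    fr = Ip6_Filter_Rule(rule)
    defaults = {
        'debuglevel': False, 'Rule-Nr': 11, 'Pair-Nr': 1,
        'Action': "deny", 'Source': "2001:db8:1::1",
        'Destin': "2001:db8:2::1", 'Protocol': "tcp", 'dport': "0:",
        'System-Forward': True, 'i_am_s': True, 'i_am_d': False,
        'travers': False, 'source-if': "sis0", 'destin-if': "sis0",
        'src-linklocal': False, 'dst-linklocal': False, 'OS': 'OpenBSD',
    }
    defaults.update(overrides or {})
    for key, value in defaults.items():
        fr[key] = value
    return fr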
class Ip6_Filter_tests(unittest.TestCase):
    '''some tests for class IP6_Filter'''
def test_01_IP6_Filter_create_Debian(self):
"""
ft-01 IP6 Filter create an object for Debian
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "Debian GNU/Linux wheezy"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'Debian')
def test_02_IP6_Filter_create_OpenBSD(self):
"""
ft-02 IP6 Filter create an object for OpenBSD
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "OpenBSD 4.5"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'OpenBSD')
def test_03_IP6_Filter_create_OpenSolaris(self):
"""
ft-03 IP6 Filter create an object for OpenSolaris
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "OpenSolaris unknown version"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'OpenSolaris')
def test_04_IP6_Filter_create_win_xp_sp3(self):
"""
ft-04 IP6 Filter create an object for WXP SP3
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "Win-XP-SP3"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'Win-XP-SP3')
def test_05_IP6_Filter_create_unknown_os(self):
"""
ft-05 IP6 Filter create an object for unknown os
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "Unknown OS"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'Unknown operating system for host: ns')
def test_06_IP6_Filter_append_first_rule(self):
"""
ft-06 IP6 Filter append first rule
"""
debug = False
name = "ns"
path = "desc/ns/"
os = "Debian GNU/Linux"
fwd = False
asym = False
rule_one = ['s', 'd', 'ip6', 'all', 'accept', "#", 'test-comment']
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
fi.append(rule_one)
expect = [rule_one, ]
self.assertEqual(expect, fi.rules)
def test_07_IP6_Filter_mangle_start_exist(self):
"""
        ft-07 IP6 Filter mangle-start existing file
"""
debug = False
name = "www"
#path = "HOME_DIR/adm6/desc/www"
mach_dir = "~/adm6/desc/www"
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = False
rule_one = ['s', 'd', 'ip6', 'all', 'accept', "#", 'test-comment']
ofile = open("/dev/null", 'w')
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
fi.msg = ""
self.assertIsInstance(fi, IP6_Filter)
file_to_read = "mangle-startup"
fi.mangle_file(ofile, file_to_read)
expect = "# start reading mangle-file: %s/" % (path)
expect += file_to_read
expect += "# mangle-startup file for testing \n"
value = fi.msg
self.assertEqual(expect, value)
def test_08_IP6_Filter_mangle_end_exist(self):
"""
        ft-08 IP6 Filter mangle-end existing file
"""
debug = False
name = "ns"
path = "HOME_DIR/adm6/desc/ns"
mach_dir = "~/adm6/desc/adm6"
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = False
rule_one = ['s', 'd', 'ip6', 'all', 'accept', "#", 'test-comment']
ofile = open("/dev/null", 'w')
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
file_to_read = "mangle-endup"
fi.msg = ""
fi.mangle_file(ofile, file_to_read)
expect = "# failed reading mangle-file: %s/" % (path)
#expect = "# start reading mangle-file: %s/" % (path)
expect += file_to_read
expect += ", but OK"
value = fi.msg
self.assertEqual(expect, value)
def test_09_IP6_Filter_mangle_end_non_exist(self):
"""
        ft-09 IP6 Filter mangle-end non-existing file
"""
debug = False
name = "adm6"
#path = "HOME_DIR/adm6/desc/adm6"
mach_dir = "~/adm6/desc/adm6"
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = False
rule_one = ['s', 'd', 'ip6', 'all', 'accept', "#", 'test-comment']
ofile = open("/dev/null", 'w')
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
file_to_read = "mangle-endup"
fi.msg = ""
fi.mangle_file(ofile, file_to_read)
temp = "# failed reading mangle-file: %s/" % (path)
temp += file_to_read
temp = "# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK"
expect = temp.replace("HOME_DIR", home_dir_replacement)
value = fi.msg
self.assertEqual(expect, value)
def test_10_IP6_Filter_final_this_rule(self):
"""
ft-10 IP6 Filter final this rule
"""
debug = True
name = "ns"
path = "HOME_DIR/adm6/desc/ns"
mach_dir = "~/adm6/desc/ns"
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = False
rule_one = ['s', 'd', 'ip6', 'all', 'accept', "#", 'test-comment']
ofn = "/dev/null"
ofile = open(ofn, 'w')
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
rule = []
rule.append("RuleText") # RuleText
rule.append(True) # System-Fwd
rule.append(2) # Rule-Nr.
rule.append(3) # Pair-Nr.
rule.append(True)
rule.append(False)
rule.append(IPv6Network('fe80::1')) # source
rule.append(IPv6Network('ff80::4711')) # destin
rule.append('eth0') # source-if
rule.append(3) # source-rn
rule.append('eth0') # destin-if
rule.append(3) # destin-rn
rule.append('udp') # protocol
rule.append('4711:4713') # dport
rule.append('accept') # action
rule.append('NOIF NOSTATE') # append options at last
fi.rules.append(rule)
fi.final_this_rule(rule, ofile)
value = fi.msg
expect = """# ---------------------------------------------------------------------------- #
# Rule-Nr : 2 #
# Pair-Nr : 3 #
# System-Name : ns #
# System-Forward : True #
# OS : Debian #
# Asymmetric : False #
# RuleText : RuleText #
# Source : fe80::1/128 #
# Destin : ff80::4711/128 #
# Protocol : udp #
# sport : 1024: #
# dport : 4711:4713 #
# Action : accept #
# nonew : False #
# noif : True #
# nostate : True #
# insec : False #
# i_am_s : True #
# i_am_d : False #
# travers : False #
# source-if : eth0 #
# source-rn : 3 #
# src-linklocal : True #
# src-multicast : False #
# destin-if : eth0 #
# destin-rn : 3 #
# dst-linklocal : False #
# dst-multicast : True #
/sbin/ip6tables -A output__new -s fe80::1/128 -d ff80::4711/128 -p udp --sport 1024: --dport 4711:4713 -j ACCEPT -m comment --comment "2,3"
/sbin/ip6tables -A input___new -d fe80::1/128 -s ff80::4711/128 -p udp --dport 1024: --sport 4711:4713 -j ACCEPT -m comment --comment "2,3"
echo -n ".";"""
value = fi.msg
self.assertEqual(expect, value)
def test_11_IP6_Filter_final_this_rule_forced_linklocal(self):
"""
ft-11 IP6 Filter final this rule forced linklocal
"""
debug = True
name = "ns"
path = "HOME_DIR/adm6/desc/ns"
mach_dir = "~/adm6/desc/ns"
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = False
rule_one = ['s', 'd', 'ip6', 'all', 'accept', "#", 'test-comment']
ofn = "/dev/null"
ofile = open(ofn, 'w')
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
rule = []
rule.append("RuleText") # RuleText
rule.append(True) # System-Fwd
rule.append(2) # Rule-Nr.
rule.append(3) # Pair-Nr.
rule.append(True) # i_am_s
rule.append(False) # i_am_d
rule.append(IPv6Network('fe80::1')) # source
rule.append(IPv6Network('ff80::4711')) # destin
rule.append('eth0') # source-if
rule.append(3) # source-rn
rule.append('eth0') # destin-if
rule.append(3) # destin-rn
rule.append('udp') # protocol
rule.append('4711:4713') # dport
rule.append('accept') # action
rule.append('NOIF NOSTATE FORCED') # options at last
fi.rules.append(rule)
fi.final_this_rule(rule, ofile)
value = fi.msg
expect = """# ---------------------------------------------------------------------------- #
# Rule-Nr : 2 #
# Pair-Nr : 3 #
# System-Name : ns #
# System-Forward : True #
# OS : Debian #
# Asymmetric : False #
# RuleText : RuleText #
# Source : fe80::1/128 #
# Destin : ff80::4711/128 #
# Protocol : udp #
# sport : 1024: #
# dport : 4711:4713 #
# Action : accept #
# nonew : False #
# noif : True #
# nostate : True #
# insec : False #
# i_am_s : True #
# i_am_d : True #
# travers : True #
# source-if : eth0 #
# source-rn : 3 #
# src-linklocal : True #
# src-multicast : False #
# destin-if : eth0 #
# destin-rn : 3 #
# dst-linklocal : False #
# dst-multicast : True #
# link-local ==> no forward"""
value = fi.msg
self.assertEqual(expect, value)
def test_12_IP6_Filter_mach_output_as_src(self):
"""
ft-12 IP6 Filter mach_output as src
"""
debug = True
name = "adm6"
mach_dir = "~/adm6/desc/%s" % (name)
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = False
ofilename = "/dev/null"
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
rule = []
rule.append("should be RuleText") # RuleText
rule.append(True) # System-Fwd
rule.append(1) # Rule-Nr.
rule.append(1) # Pair-Nr.
rule.append(True) # i_am_s
rule.append(False) # i_am_d
rule.append(IPv6Network('2001:db8:1::1')) # source
rule.append(IPv6Network('2001:db8:2::11')) # destin
rule.append('eth0') # source-if
rule.append(1) # source-rn
rule.append('eth0') # destin-if
rule.append(1) # destin-rn
rule.append('udp') # protocol
rule.append('4711') # dport
rule.append('accept') # action
rule.append('NOIF NOSTATE FORCED') # options at last
fi.rules.append(rule)
fi.mach_output(ofilename)
value = fi.msg
temp = """#!/bin/bash
#
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## a d m 6 - A Device Manager for IPv6 packetfiltering ##"
echo "## ##"
echo "## version: 0.2 ##"
echo "## ##"
echo "## device-name: adm6 ##"
echo "## device-type: Debian GNU/Linux ##"
echo "## ##"
echo "## date: 2013-03-13 23:23 ##"
echo "## author: Johannes Hubertz, hubertz-it-consulting GmbH ##"
echo "## ##"
echo "## license: GNU general public license version 3 ##"
echo "## or any later version ##"
echo "## ##"
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## some magic abbreviations follow ##"
echo "## ##"
#
#POLICY_A='ACCEPT'
POLICY_D='DROP'
#
I6='/sbin/ip6tables '
IP6I='/sbin/ip6tables -A input___new '
IP6O='/sbin/ip6tables -A output__new '
IP6F='/sbin/ip6tables -A forward_new '
#
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
for chain in $CHAINS
do
/sbin/ip6tables -N ${chain}_act >/dev/null 2>/dev/null
/sbin/ip6tables -N ${chain}_new
done
# but ignore all the boring fault-messages
$I6 -P INPUT $POLICY_D
$I6 -P OUTPUT $POLICY_D
$I6 -P FORWARD $POLICY_D
#
# some things need to pass,
# even if you don't like them
# do local and multicast on every interface
LOCAL="fe80::/10"
MCAST="ff02::/10"
#
$IP6I -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
$IP6O -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
#
$IP6I -p ipv6-icmp -s ${MCAST} -j ACCEPT
$IP6I -p ipv6-icmp -d ${MCAST} -j ACCEPT
$IP6O -p ipv6-icmp -s ${MCAST} -j ACCEPT
#
# all prepared now, individual mangling and rules following
#
# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-startup, but OK
# ---------------------------------------------------------------------------- #
# Rule-Nr : 1 #
# Pair-Nr : 1 #
# System-Name : adm6 #
# System-Forward : True #
# OS : Debian #
# Asymmetric : False #
# RuleText : should be RuleText #
# Source : 2001:db8:1::1/128 #
# Destin : 2001:db8:2::11/128 #
# Protocol : udp #
# sport : 1024: #
# dport : 4711 #
# Action : accept #
# nonew : False #
# noif : True #
# nostate : True #
# insec : False #
# i_am_s : True #
# i_am_d : True #
# travers : True #
# source-if : eth0 #
# source-rn : 1 #
# src-linklocal : False #
# src-multicast : False #
# destin-if : eth0 #
# destin-rn : 1 #
# dst-linklocal : False #
# dst-multicast : False #
/sbin/ip6tables -A output__new -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A input___new -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";/sbin/ip6tables -A input___new -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A output__new -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";/sbin/ip6tables -A forward_new -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK#
#$IP6I -p tcp --dport 22 -j ACCEPT
#$IP6O -p tcp --sport 22 -j ACCEPT
#
# allow ping and pong always (al gusto)
#$IP6O -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6I -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
##
#$IP6I -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6O -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
#
#ICMPv6types="${ICMPv6types} destination-unreachable"
ICMPv6types="${ICMPv6types} echo-request"
ICMPv6types="${ICMPv6types} echo-reply"
ICMPv6types="${ICMPv6types} neighbour-solicitation"
ICMPv6types="${ICMPv6types} neighbour-advertisement"
ICMPv6types="${ICMPv6types} router-solicitation"
ICMPv6types="${ICMPv6types} router-advertisement"
for icmptype in $ICMPv6types
do
$IP6I -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
$IP6O -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
done
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j LOG --log-prefix "unreach: " -m limit --limit 30/second --limit-burst 60
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j ACCEPT
#
CHAINS=""
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
#set -x
for chain in $CHAINS
do
/sbin/ip6tables -E "${chain}_act" "${chain}_old"
/sbin/ip6tables -E "${chain}_new" "${chain}_act"
done
#
$I6 -F INPUT
$I6 -A INPUT -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT -m rt --rt-type 0 -j DROP
$I6 -A INPUT -m rt --rt-type 2 -j LOG --log-prefix "rt-2: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT -m rt --rt-type 2 -j DROP
$I6 -A INPUT -i lo -j ACCEPT
$I6 -A INPUT --jump input___act
#
$I6 -F OUTPUT
$I6 -A OUTPUT -o lo -j ACCEPT
$I6 -A OUTPUT --jump output__act
#
$I6 -F FORWARD
$I6 -A FORWARD -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A FORWARD -m rt --rt-type 0 -j DROP
$I6 -A FORWARD --jump forward_act
#
for chain in $CHAINS
do
/sbin/ip6tables -F "${chain}_old"
/sbin/ip6tables -X "${chain}_old"
done
$I6 -F logdrop >/dev/null 2>/dev/null
$I6 -X logdrop >/dev/null 2>/dev/null
$I6 -N logdrop
$I6 -A INPUT --jump logdrop
$I6 -A OUTPUT --jump logdrop
$I6 -A FORWARD --jump logdrop
$I6 -A logdrop -j LOG --log-prefix "drp: " -m limit --limit 3/second --limit-burst 6
$I6 -A logdrop -j DROP
#
/sbin/ip6tables-save -c >/root/last-filter
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## End of generated filter-rules ##"
echo "## ##"
echo "**********************************************************************"
echo "**********************************************************************"
# EOF
"""
expect = temp.replace("HOME_DIR", home_dir_replacement)
self.assertEquals(expect, value)
def test_13_IP6_Filter_mach_output_as_travers(self):
"""
ft-13 IP6 Filter mach_output as travers
"""
debug = True
name = "adm6"
mach_dir = "~/adm6/desc/%s" % (name)
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = False
ofilename = "/dev/null"
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
rule = []
rule.append("should be RuleText") # RuleText
rule.append(True) # System-Fwd
rule.append(1) # Rule-Nr.
rule.append(1) # Pair-Nr.
rule.append(False) # i_am_s
rule.append(False) # i_am_d
rule.append(IPv6Network('2001:db8:1::1')) # source
rule.append(IPv6Network('2001:db8:2::11')) # destin
rule.append('eth0') # source-if
rule.append(1) # source-rn
rule.append('eth1') # destin-if
rule.append(3) # destin-rn
rule.append('udp') # protocol
rule.append('4711') # dport
rule.append('accept') # action
rule.append('NOSTATE') # options at last
fi.rules.append(rule)
fi.mach_output(ofilename)
value = fi.msg
temp = """#!/bin/bash
#
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## a d m 6 - A Device Manager for IPv6 packetfiltering ##"
echo "## ##"
echo "## version: 0.2 ##"
echo "## ##"
echo "## device-name: adm6 ##"
echo "## device-type: Debian GNU/Linux ##"
echo "## ##"
echo "## date: 2013-03-13 23:23 ##"
echo "## author: Johannes Hubertz, hubertz-it-consulting GmbH ##"
echo "## ##"
echo "## license: GNU general public license version 3 ##"
echo "## or any later version ##"
echo "## ##"
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## some magic abbreviations follow ##"
echo "## ##"
#
#POLICY_A='ACCEPT'
POLICY_D='DROP'
#
I6='/sbin/ip6tables '
IP6I='/sbin/ip6tables -A input___new '
IP6O='/sbin/ip6tables -A output__new '
IP6F='/sbin/ip6tables -A forward_new '
#
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
for chain in $CHAINS
do
/sbin/ip6tables -N ${chain}_act >/dev/null 2>/dev/null
/sbin/ip6tables -N ${chain}_new
done
# but ignore all the boring fault-messages
$I6 -P INPUT $POLICY_D
$I6 -P OUTPUT $POLICY_D
$I6 -P FORWARD $POLICY_D
#
# some things need to pass,
# even if you don't like them
# do local and multicast on every interface
LOCAL="fe80::/10"
MCAST="ff02::/10"
#
$IP6I -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
$IP6O -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
#
$IP6I -p ipv6-icmp -s ${MCAST} -j ACCEPT
$IP6I -p ipv6-icmp -d ${MCAST} -j ACCEPT
$IP6O -p ipv6-icmp -s ${MCAST} -j ACCEPT
#
# all prepared now, individual mangling and rules following
#
# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-startup, but OK
# ---------------------------------------------------------------------------- #
# Rule-Nr : 1 #
# Pair-Nr : 1 #
# System-Name : adm6 #
# System-Forward : True #
# OS : Debian #
# Asymmetric : False #
# RuleText : should be RuleText #
# Source : 2001:db8:1::1/128 #
# Destin : 2001:db8:2::11/128 #
# Protocol : udp #
# sport : 1024: #
# dport : 4711 #
# Action : accept #
# nonew : False #
# noif : False #
# nostate : True #
# insec : False #
# i_am_s : False #
# i_am_d : False #
# travers : True #
# source-if : eth0 #
# source-rn : 1 #
# src-linklocal : False #
# src-multicast : False #
# destin-if : eth1 #
# destin-rn : 3 #
# dst-linklocal : False #
# dst-multicast : False #
/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK#
#$IP6I -p tcp --dport 22 -j ACCEPT
#$IP6O -p tcp --sport 22 -j ACCEPT
#
# allow ping and pong always (al gusto)
#$IP6O -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6I -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
##
#$IP6I -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6O -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
#
#ICMPv6types="${ICMPv6types} destination-unreachable"
ICMPv6types="${ICMPv6types} echo-request"
ICMPv6types="${ICMPv6types} echo-reply"
ICMPv6types="${ICMPv6types} neighbour-solicitation"
ICMPv6types="${ICMPv6types} neighbour-advertisement"
ICMPv6types="${ICMPv6types} router-solicitation"
ICMPv6types="${ICMPv6types} router-advertisement"
for icmptype in $ICMPv6types
do
$IP6I -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
$IP6O -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
done
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j LOG --log-prefix "unreach: " -m limit --limit 30/second --limit-burst 60
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j ACCEPT
#
CHAINS=""
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
#set -x
for chain in $CHAINS
do
/sbin/ip6tables -E "${chain}_act" "${chain}_old"
/sbin/ip6tables -E "${chain}_new" "${chain}_act"
done
#
$I6 -F INPUT
$I6 -A INPUT -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT -m rt --rt-type 0 -j DROP
$I6 -A INPUT -m rt --rt-type 2 -j LOG --log-prefix "rt-2: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT -m rt --rt-type 2 -j DROP
$I6 -A INPUT -i lo -j ACCEPT
$I6 -A INPUT --jump input___act
#
$I6 -F OUTPUT
$I6 -A OUTPUT -o lo -j ACCEPT
$I6 -A OUTPUT --jump output__act
#
$I6 -F FORWARD
$I6 -A FORWARD -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A FORWARD -m rt --rt-type 0 -j DROP
$I6 -A FORWARD --jump forward_act
#
for chain in $CHAINS
do
/sbin/ip6tables -F "${chain}_old"
/sbin/ip6tables -X "${chain}_old"
done
$I6 -F logdrop >/dev/null 2>/dev/null
$I6 -X logdrop >/dev/null 2>/dev/null
$I6 -N logdrop
$I6 -A INPUT --jump logdrop
$I6 -A OUTPUT --jump logdrop
$I6 -A FORWARD --jump logdrop
$I6 -A logdrop -j LOG --log-prefix "drp: " -m limit --limit 3/second --limit-burst 6
$I6 -A logdrop -j DROP
#
/sbin/ip6tables-save -c >/root/last-filter
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## End of generated filter-rules ##"
echo "## ##"
echo "**********************************************************************"
echo "**********************************************************************"
# EOF
"""
#print "M:", value
expect = temp.replace("HOME_DIR", home_dir_replacement)
self.assertEquals(expect, value)
def test_14_IP6_Filter_mach_output_as_stateful_travers(self):
"""
ft-14 IP6 Filter mach_output as stateful travers
"""
debug = True
name = "adm6"
mach_dir = "~/adm6/desc/%s" % (name)
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = False
ofilename = "/dev/null"
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
rule = []
rule.append("should be RuleText") # RuleText
rule.append(True) # System-Fwd
rule.append(1) # Rule-Nr.
rule.append(1) # Pair-Nr.
rule.append(False) # i_am_s
rule.append(False) # i_am_d
rule.append(IPv6Network('2001:db8:1::1')) # source
rule.append(IPv6Network('2001:db8:2::11')) # destin
rule.append('eth0') # source-if
rule.append(1) # source-rn
rule.append('eth1') # destin-if
rule.append(3) # destin-rn
rule.append('udp') # protocol
rule.append('4711') # dport
rule.append('accept') # action
rule.append('') # options at last
fi.rules.append(rule)
fi.mach_output(ofilename)
value = fi.msg
temp = """#!/bin/bash
#
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## a d m 6 - A Device Manager for IPv6 packetfiltering ##"
echo "## ##"
echo "## version: 0.2 ##"
echo "## ##"
echo "## device-name: adm6 ##"
echo "## device-type: Debian GNU/Linux ##"
echo "## ##"
echo "## date: 2013-03-13 23:23 ##"
echo "## author: Johannes Hubertz, hubertz-it-consulting GmbH ##"
echo "## ##"
echo "## license: GNU general public license version 3 ##"
echo "## or any later version ##"
echo "## ##"
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## some magic abbreviations follow ##"
echo "## ##"
#
#POLICY_A='ACCEPT'
POLICY_D='DROP'
#
I6='/sbin/ip6tables '
IP6I='/sbin/ip6tables -A input___new '
IP6O='/sbin/ip6tables -A output__new '
IP6F='/sbin/ip6tables -A forward_new '
#
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
for chain in $CHAINS
do
/sbin/ip6tables -N ${chain}_act >/dev/null 2>/dev/null
/sbin/ip6tables -N ${chain}_new
done
# but ignore all the boring fault-messages
$I6 -P INPUT $POLICY_D
$I6 -P OUTPUT $POLICY_D
$I6 -P FORWARD $POLICY_D
#
# some things need to pass,
# even if you don't like them
# do local and multicast on every interface
LOCAL="fe80::/10"
MCAST="ff02::/10"
#
$IP6I -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
$IP6O -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
#
$IP6I -p ipv6-icmp -s ${MCAST} -j ACCEPT
$IP6I -p ipv6-icmp -d ${MCAST} -j ACCEPT
$IP6O -p ipv6-icmp -s ${MCAST} -j ACCEPT
#
# all prepared now, individual mangling and rules following
#
# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-startup, but OK
# ---------------------------------------------------------------------------- #
# Rule-Nr : 1 #
# Pair-Nr : 1 #
# System-Name : adm6 #
# System-Forward : True #
# OS : Debian #
# Asymmetric : False #
# RuleText : should be RuleText #
# Source : 2001:db8:1::1/128 #
# Destin : 2001:db8:2::11/128 #
# Protocol : udp #
# sport : 1024: #
# dport : 4711 #
# Action : accept #
# nonew : False #
# noif : False #
# nostate : False #
# insec : False #
# i_am_s : False #
# i_am_d : False #
# travers : True #
# source-if : eth0 #
# source-rn : 1 #
# src-linklocal : False #
# src-multicast : False #
# destin-if : eth1 #
# destin-rn : 3 #
# dst-linklocal : False #
# dst-multicast : False #
/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK#
#$IP6I -p tcp --dport 22 -j ACCEPT
#$IP6O -p tcp --sport 22 -j ACCEPT
#
# allow ping and pong always (al gusto)
#$IP6O -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6I -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
##
#$IP6I -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6O -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
#
#ICMPv6types="${ICMPv6types} destination-unreachable"
ICMPv6types="${ICMPv6types} echo-request"
ICMPv6types="${ICMPv6types} echo-reply"
ICMPv6types="${ICMPv6types} neighbour-solicitation"
ICMPv6types="${ICMPv6types} neighbour-advertisement"
ICMPv6types="${ICMPv6types} router-solicitation"
ICMPv6types="${ICMPv6types} router-advertisement"
for icmptype in $ICMPv6types
do
$IP6I -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
$IP6O -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
done
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j LOG --log-prefix "unreach: " -m limit --limit 30/second --limit-burst 60
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j ACCEPT
#
CHAINS=""
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
#set -x
for chain in $CHAINS
do
/sbin/ip6tables -E "${chain}_act" "${chain}_old"
/sbin/ip6tables -E "${chain}_new" "${chain}_act"
done
#
$I6 -F INPUT
$I6 -A INPUT -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT -m rt --rt-type 0 -j DROP
$I6 -A INPUT -m rt --rt-type 2 -j LOG --log-prefix "rt-2: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT -m rt --rt-type 2 -j DROP
$I6 -A INPUT -i lo -j ACCEPT
$I6 -A INPUT --jump input___act
#
$I6 -F OUTPUT
$I6 -A OUTPUT -o lo -j ACCEPT
$I6 -A OUTPUT --jump output__act
#
$I6 -F FORWARD
$I6 -A FORWARD -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A FORWARD -m rt --rt-type 0 -j DROP
$I6 -A FORWARD --jump forward_act
#
for chain in $CHAINS
do
/sbin/ip6tables -F "${chain}_old"
/sbin/ip6tables -X "${chain}_old"
done
$I6 -F logdrop >/dev/null 2>/dev/null
$I6 -X logdrop >/dev/null 2>/dev/null
$I6 -N logdrop
$I6 -A INPUT --jump logdrop
$I6 -A OUTPUT --jump logdrop
$I6 -A FORWARD --jump logdrop
$I6 -A logdrop -j LOG --log-prefix "drp: " -m limit --limit 3/second --limit-burst 6
$I6 -A logdrop -j DROP
#
/sbin/ip6tables-save -c >/root/last-filter
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## End of generated filter-rules ##"
echo "## ##"
echo "**********************************************************************"
echo "**********************************************************************"
# EOF
"""
expect = temp.replace("HOME_DIR", home_dir_replacement)
value_len = len(value)
expect_len = len(expect)
self.assertEquals(expect_len, value_len)
self.assertEquals(expect, value)
def test_15_IP6_Filter_mach_output_as_real_file(self):
"""
ft-15 IP6 Filter mach_output as real file
"""
debug = True
name = "adm6"
mach_dir = "~/adm6/desc/%s" % (name)
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = True
ofilename = None
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
rule = []
rule.append("should be RuleText") # RuleText
rule.append(True) # System-Fwd
rule.append(1) # Rule-Nr.
rule.append(1) # Pair-Nr.
rule.append(False) # i_am_s
rule.append(True) # i_am_d
rule.append(IPv6Network('2001:db8:1::1')) # source
rule.append(IPv6Network('2001:db8:2::11')) # destin
rule.append('eth0') # source-if
rule.append(1) # source-rn
rule.append('eth1') # destin-if
rule.append(3) # destin-rn
rule.append('udp') # protocol
rule.append('4711') # dport
rule.append('accept') # action
rule.append('NONEW NOIF INSEC') # options at last
fi.rules.append(rule)
fi.mach_output(ofilename)
value = fi.msg
temp = """#!/bin/bash
#
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## a d m 6 - A Device Manager for IPv6 packetfiltering ##"
echo "## ##"
echo "## version: 0.2 ##"
echo "## ##"
echo "## device-name: adm6 ##"
echo "## device-type: Debian GNU/Linux ##"
echo "## ##"
echo "## date: 2013-03-18 23:38 ##"
echo "## author: Johannes Hubertz, hubertz-it-consulting GmbH ##"
echo "## ##"
echo "## license: GNU general public license version 3 ##"
echo "## or any later version ##"
echo "## ##"
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## some magic abbreviations follow ##"
echo "## ##"
#
#POLICY_A='ACCEPT'
POLICY_D='DROP'
#
I6='/sbin/ip6tables '
IP6I='/sbin/ip6tables -A input___new '
IP6O='/sbin/ip6tables -A output__new '
IP6F='/sbin/ip6tables -A forward_new '
#
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
for chain in $CHAINS
do
/sbin/ip6tables -N ${chain}_act >/dev/null 2>/dev/null
/sbin/ip6tables -N ${chain}_new
done
# but ignore all the boring fault-messages
$I6 -P INPUT $POLICY_D
$I6 -P OUTPUT $POLICY_D
$I6 -P FORWARD $POLICY_D
#
# some things need to pass,
# even if you don't like them
# do local and multicast on every interface
LOCAL="fe80::/10"
MCAST="ff02::/10"
#
$IP6I -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
$IP6O -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
#
$IP6I -p ipv6-icmp -s ${MCAST} -j ACCEPT
$IP6I -p ipv6-icmp -d ${MCAST} -j ACCEPT
$IP6O -p ipv6-icmp -s ${MCAST} -j ACCEPT
#
# all prepared now, individual mangling and rules following
#
# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-startup, but OK
# ---------------------------------------------------------------------------- #
# Rule-Nr : 1 #
# Pair-Nr : 1 #
# System-Name : adm6 #
# System-Forward : True #
# OS : Debian #
# Asymmetric : True #
# RuleText : should be RuleText #
# Source : 2001:db8:1::1/128 #
# Destin : 2001:db8:2::11/128 #
# Protocol : udp #
# sport : 1024: #
# dport : 4711 #
# Action : accept #
# nonew : True #
# noif : True #
# nostate : True #
# insec : True #
# i_am_s : False #
# i_am_d : True #
# travers : False #
# source-if : eth0 #
# source-rn : 1 #
# src-linklocal : False #
# src-multicast : False #
# destin-if : eth1 #
# destin-rn : 3 #
# dst-linklocal : False #
# dst-multicast : False #
/sbin/ip6tables -A input___new -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 0: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A output__new -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 0: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK#
#$IP6I -p tcp --dport 22 -j ACCEPT
#$IP6O -p tcp --sport 22 -j ACCEPT
#
# allow ping and pong always (al gusto)
#$IP6O -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6I -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
##
#$IP6I -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6O -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
#
#ICMPv6types="${ICMPv6types} destination-unreachable"
ICMPv6types="${ICMPv6types} echo-request"
ICMPv6types="${ICMPv6types} echo-reply"
ICMPv6types="${ICMPv6types} neighbour-solicitation"
ICMPv6types="${ICMPv6types} neighbour-advertisement"
ICMPv6types="${ICMPv6types} router-solicitation"
ICMPv6types="${ICMPv6types} router-advertisement"
for icmptype in $ICMPv6types
do
$IP6I -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
$IP6O -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
done
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j LOG --log-prefix "unreach: " -m limit --limit 30/second --limit-burst 60
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j ACCEPT
#
CHAINS=""
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
#set -x
for chain in $CHAINS
do
/sbin/ip6tables -E "${chain}_act" "${chain}_old"
/sbin/ip6tables -E "${chain}_new" "${chain}_act"
done
#
$I6 -F INPUT
$I6 -A INPUT -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT -m rt --rt-type 0 -j DROP
$I6 -A INPUT -m rt --rt-type 2 -j LOG --log-prefix "rt-2: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT -m rt --rt-type 2 -j DROP
$I6 -A INPUT -i lo -j ACCEPT
$I6 -A INPUT --jump input___act
#
$I6 -F OUTPUT
$I6 -A OUTPUT -o lo -j ACCEPT
$I6 -A OUTPUT --jump output__act
#
$I6 -F FORWARD
$I6 -A FORWARD -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A FORWARD -m rt --rt-type 0 -j DROP
$I6 -A FORWARD --jump forward_act
#
for chain in $CHAINS
do
/sbin/ip6tables -F "${chain}_old"
/sbin/ip6tables -X "${chain}_old"
done
$I6 -F logdrop >/dev/null 2>/dev/null
$I6 -X logdrop >/dev/null 2>/dev/null
$I6 -N logdrop
$I6 -A INPUT --jump logdrop
$I6 -A OUTPUT --jump logdrop
$I6 -A FORWARD --jump logdrop
$I6 -A logdrop -j LOG --log-prefix "drp: " -m limit --limit 3/second --limit-burst 6
$I6 -A logdrop -j DROP
#
/sbin/ip6tables-save -c >/root/last-filter
echo "**********************************************************************"
echo "**********************************************************************"
echo "## ##"
echo "## End of generated filter-rules ##"
echo "## ##"
echo "**********************************************************************"
echo "**********************************************************************"
# EOF
"""
expect = temp.replace("HOME_DIR", home_dir_replacement)
value_len = len(value)
expect_len = len(expect)
self.assertEquals(expect_len, value_len)
if __name__ == "__main__":
unittest.main()
| sl0/adm6 | tests/test_03_filter6.py | Python | gpl-3.0 | 102,052 | 0.003439 |
"""
Classes and functions for interacting with system management daemons.
arkOS Core
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
import ldap
import ldap.modlist
import xmlrpc.client
from .utilities import errors
from dbus import SystemBus, Interface
class ConnectionsManager:
"""Manages arkOS connections to system-level processes via their APIs."""
def __init__(self, config, secrets):
self.config = config
self.secrets = secrets
def connect(self):
"""Initialize the connections."""
self.connect_services()
self.connect_ldap()
def connect_services(self):
self.DBus = SystemBus()
self.SystemD = self.SystemDConnect(
"/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager")
self.Supervisor = supervisor_connect()
def connect_ldap(self):
self.LDAP = ldap_connect(
config=self.config, passwd=self.secrets.get("ldap")
)
def SystemDConnect(self, path, interface):
systemd = self.DBus.get_object("org.freedesktop.systemd1", path)
return Interface(systemd, dbus_interface=interface)
def ldap_connect(
uri="", rootdn="", dn="cn=admin", config=None, passwd="",
conn_type=""):
"""
Initialize a connection to arkOS LDAP.
:param str uri: LDAP host URI
:param str rootdn: Root DN
:param str dn: User DN
:param Config config: arkOS config to use for default values
    :param str passwd: Password to use to validate credentials
    :param str conn_type: "dynamic" for an auto-reconnecting client,
        anything else for a plain connection
    :returns: LDAP connection object
"""
if not all([uri, rootdn, dn]) and not config:
raise errors.InvalidConfigError("No LDAP values passed")
uri = uri or config.get("general", "ldap_uri")
rootdn = rootdn or config.get("general", "ldap_rootdn")
conn_type = conn_type or config.get("general", "ldap_conntype")
if conn_type == "dynamic":
c = ldap.ldapobject.ReconnectLDAPObject(
uri, retry_max=3, retry_delay=5.0)
else:
c = ldap.initialize(uri)
try:
c.simple_bind_s("{0},{1}".format(dn, rootdn), passwd)
except ldap.INVALID_CREDENTIALS:
raise errors.ConnectionError("LDAP", "Invalid username/password")
except Exception as e:
raise errors.ConnectionError("LDAP") from e
if dn != "cn=admin":
data = c.search_s("cn=admins,ou=groups,{0}".format(rootdn),
ldap.SCOPE_SUBTREE, "(objectClass=*)",
["member"])[0][1]["member"]
if "{0},{1}".format(dn, rootdn) not in data:
raise errors.ConnectionError("LDAP", "Not an administrator")
return c
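# Usage sketch for ldap_connect() (illustrative only: "cfg" and "secrets"
# stand in for the arkOS Config and secrets objects handed to this module,
# and the search base is an example, not something defined here):
#
#     conn = ldap_connect(config=cfg, passwd=secrets.get("ldap"))
#     admins = conn.search_s(
#         "cn=admins,ou=groups," + cfg.get("general", "ldap_rootdn"),
#         ldap.SCOPE_SUBTREE, "(objectClass=*)", ["member"])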
def supervisor_connect():
"""
Initialize a connection to Supervisor via XML-RPC API.
:returns: XML-RPC connection object
"""
try:
s = xmlrpc.client.Server("http://localhost:9001/RPC2")
return s.supervisor
except Exception as e:
raise errors.ConnectionError("Supervisor") from e
| pomarec/core | arkos/connections.py | Python | gpl-3.0 | 3,011 | 0 |
"""Commands for argparse for basket command"""
import textwrap
from PyBake import Path
from PyBake.commands import command
@command("basket")
class BasketModuleManager:
"""Module Manager for Basket"""
longDescription = textwrap.dedent(
"""
Retrieves pastries from the shop.
""")
def createArguments(self, basketParser):
basketParser.add_argument("shoppingList",
nargs="?",
default=Path("shoppingList.py"),
type=Path,
help="The shopping list script that describes which pastries are required. "
"Default: 'shoppingList.py'")
basketParser.add_argument("--force-download",
dest="force",
action="append_const",
const="download",
help="Download all required pastries, whether they exist locally already or not.")
basketParser.add_argument("--force-install",
dest="force",
action="append_const",
const="install",
help="Perform an install, regardless whether the pastry is already installed or not.")
basketParser.add_argument("--force",
dest="force",
action="append_const",
const="all",
help="Implies --force-download and --force-install.")
basketParser.set_defaults(func=execute_basket)
def execute_basket(args):
"""Execute the `basket` command."""
from PyBake import log
force = args.force or []
del args.force
args.forceDownload = any(arg in ("all", "download") for arg in force)
args.forceInstall = any(arg in ("all", "install") for arg in force)
log.debug(args)
from PyBake import basket
return basket.run(**vars(args))
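# Invocation sketch (the "pybake" entry-point name is an assumption; only the
# "basket" sub-command and its flags come from the parser defined above):
#
#     pybake basket shoppingList.py --force-download
#
# which reaches execute_basket() with forceDownload=True and
# forceInstall=False before everything is handed to basket.run().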
| lab132/PyBake | PyBake/commands/basketCommand.py | Python | mit | 1,990 | 0.009045 |
#! /usr/bin/python3
#
# This source code is part of icgc, an ICGC processing pipeline.
#
# Icgc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Icgc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
# Contact: ivana.mihalek@gmail.com
#
# some pathways do not have the associated genes listed, probably by mistake
# examples:
# R-HSA-1483171 | Synthesis of BMP
# R-HSA-2408499 | Formation of selenosugars for excretion
from icgc_utils.common_queries import quotify
from icgc_utils.reactome import *
from config import Config
############
def print_genes(cursor, gene_ids, depth):
if len(gene_ids)<1:
print("\t"*depth, "no genes listed")
return
#print("\t"*depth, "print genes here")
gene_id_string = ",".join([quotify(z) for z in gene_ids])
qry = "select ensembl_gene_id, approved_name from hgnc where ensembl_gene_id in (%s)" % gene_id_string
gene_names = dict(hard_landing_search(cursor, qry))
qry = "select ensembl_gene_id, approved_symbol from hgnc where ensembl_gene_id in (%s)" % gene_id_string
gene_symbols = dict(hard_landing_search(cursor, qry))
for gene in gene_ids:
print("\t"*depth, gene_symbols.get(gene,""), gene_names.get(gene,""))
return
##############
def characterize_subtree(cursor, graph, pthwy_id, gene_groups, depth, verbose=True):
# this is the whole subtree
# children = [node for node in nx.dfs_preorder_nodes(graph, pthwy_id)]
# A successor of n is a node m such that there exists a directed edge from n to m.
children = [node for node in graph.successors(pthwy_id)]
if len(children)==0: return False
node_id_string = ",".join([quotify(z) for z in children])
qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
children_names = hard_landing_search(cursor, qry_template % node_id_string)
for child_id, child_name in children_names:
# number_of_genes = genes related to nodes without descendants
genes = genes_in_subgraph(cursor, graph, child_id)
if verbose: print("\t"*depth, child_id, child_name, len(genes))
if len(genes)<100:
if verbose: print_genes(cursor, genes, depth+1)
gene_groups[child_name] = genes
continue
if not characterize_subtree(cursor, graph, child_id, gene_groups, depth+1, verbose=verbose): # no further subdivisions
if verbose: print_genes(cursor, genes, depth+1)
gene_groups[child_name] = genes
continue
return True
#########################################
import numpy as np
from matplotlib import pyplot as plt
def hist_plot(gene_groups):
data = [len(gene_list) for gene_list in list(gene_groups.values())]
# fixed bin size
bins = np.arange(0, 505, 5) # fixed bin size
plt.xlim(0,500)
plt.hist(data, bins=bins, alpha=0.5)
# plt.title('')
plt.xlabel('number of genes in group (bin size = 5)')
plt.ylabel('number of groups')
#
plt.show()
####################################################
def main():
verbose = False
db = connect_to_mysql(Config.mysql_conf_file)
cursor = db.cursor()
switch_to_db(cursor, 'icgc')
# are there children with multiple parents? Yes. So I need some kind of
	# directed graph, rather than a tree.
qry = "select child, count(distinct parent) as ct from reactome_hierarchy "
qry += "group by child having ct>1"
ret = search_db(cursor, qry)
print("number of children with multiple parents:", len(ret))
# feed the parent/child pairs as edges into graph
graph = build_reactome_graph(cursor, verbose=True)
# candidate roots
	zero_in_degree_nodes = get_roots(graph)
	node_id_string = ",".join([quotify(z) for z in zero_in_degree_nodes])
qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
root_names = hard_landing_search(cursor, qry_template% node_id_string)
gene_groups = {}
for pthwy_id, name in root_names:
if "disease" in name.lower(): continue
if verbose: print(pthwy_id, name)
characterize_subtree(cursor, graph, pthwy_id, gene_groups, 1, verbose=verbose)
print("\n===========================")
max_group=0
for group, genes in gene_groups.items():
groupsize = len(genes)
if max_group< groupsize: max_group=groupsize
print (group, len(genes))
print("\n===========================")
print("number of groups", len(gene_groups))
print("largest group", max_group)
print("\n===========================")
for pthwy_name, genes in gene_groups.items():
if len(genes)<=150: continue
print("\n",pthwy_name, len(genes))
#print_genes(cursor, genes, 1)
#hist_plot(gene_groups)
cursor.close()
db.close()
#########################################
if __name__ == '__main__':
main()
| ivanamihalek/tcga | icgc/60_nextgen_production/65_reactome_tree.py | Python | gpl-3.0 | 5,057 | 0.024916 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import imp
import logging
import modulefinder
import optparse
import os
import sys
import zipfile
from telemetry import benchmark
from telemetry.core import command_line
from telemetry.core import discover
from telemetry.core import util
from telemetry.page import cloud_storage
from telemetry.util import bootstrap
from telemetry.util import path_set
DEPS_FILE = 'bootstrap_deps'
def _InDirectory(subdirectory, directory):
subdirectory = os.path.realpath(subdirectory)
directory = os.path.realpath(directory)
common_prefix = os.path.commonprefix([subdirectory, directory])
return common_prefix == directory
def FindBootstrapDependencies(base_dir):
deps_file = os.path.join(base_dir, DEPS_FILE)
if not os.path.exists(deps_file):
return []
deps_paths = bootstrap.ListAllDepsPaths(deps_file)
return set(
os.path.realpath(os.path.join(util.GetChromiumSrcDir(), os.pardir, path))
for path in deps_paths)
def FindPythonDependencies(module_path):
logging.info('Finding Python dependencies of %s' % module_path)
# Load the module to inherit its sys.path modifications.
imp.load_source(
os.path.splitext(os.path.basename(module_path))[0], module_path)
# Analyze the module for its imports.
finder = modulefinder.ModuleFinder()
finder.run_script(module_path)
# Filter for only imports in Chromium.
for module in finder.modules.itervalues():
# If it's an __init__.py, module.__path__ gives the package's folder.
module_path = module.__path__[0] if module.__path__ else module.__file__
if not module_path:
continue
module_path = os.path.realpath(module_path)
if not _InDirectory(module_path, util.GetChromiumSrcDir()):
continue
yield module_path
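# Usage sketch (the argument is illustrative; FindDependencies below drives
# this generator the same way for Telemetry's own __init__.py and for every
# benchmark script it is given):
#
#   deps = set(FindPythonDependencies(
#       os.path.join(util.GetTelemetryDir(), 'telemetry', '__init__.py')))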
def FindPageSetDependencies(base_dir):
logging.info('Finding page sets in %s' % base_dir)
# Add base_dir to path so our imports relative to base_dir will work.
sys.path.append(base_dir)
tests = discover.DiscoverClasses(base_dir, base_dir, benchmark.Benchmark,
index_by_class_name=True)
for test_class in tests.itervalues():
test_obj = test_class()
# Ensure the test's default options are set if needed.
parser = optparse.OptionParser()
test_obj.AddCommandLineArgs(parser)
options = optparse.Values()
for k, v in parser.get_default_values().__dict__.iteritems():
options.ensure_value(k, v)
# Page set paths are relative to their runner script, not relative to us.
util.GetBaseDir = lambda: base_dir
# TODO: Loading the page set will automatically download its Cloud Storage
# deps. This is really expensive, and we don't want to do this by default.
page_set = test_obj.CreatePageSet(options)
# Add all of its serving_dirs as dependencies.
for serving_dir in page_set.serving_dirs:
yield serving_dir
for page in page_set:
if page.is_file:
yield page.serving_dir
def FindExcludedFiles(files, options):
def MatchesConditions(path, conditions):
for condition in conditions:
if condition(path):
return True
return False
# Define some filters for files.
def IsHidden(path):
for pathname_component in path.split(os.sep):
if pathname_component.startswith('.'):
return True
return False
def IsPyc(path):
return os.path.splitext(path)[1] == '.pyc'
def IsInCloudStorage(path):
return os.path.exists(path + '.sha1')
def MatchesExcludeOptions(path):
for pattern in options.exclude:
if (fnmatch.fnmatch(path, pattern) or
fnmatch.fnmatch(os.path.basename(path), pattern)):
return True
return False
# Collect filters we're going to use to exclude files.
exclude_conditions = [
IsHidden,
IsPyc,
IsInCloudStorage,
MatchesExcludeOptions,
]
# Check all the files against the filters.
for path in files:
if MatchesConditions(path, exclude_conditions):
yield path
def FindDependencies(paths, options):
# Verify arguments.
for path in paths:
if not os.path.exists(path):
raise ValueError('Path does not exist: %s' % path)
dependencies = path_set.PathSet()
# Including __init__.py will include Telemetry and its dependencies.
# If the user doesn't pass any arguments, we just have Telemetry.
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(util.GetTelemetryDir(), 'telemetry', '__init__.py')))
dependencies |= FindBootstrapDependencies(util.GetTelemetryDir())
# Add dependencies.
for path in paths:
base_dir = os.path.dirname(os.path.realpath(path))
dependencies.add(base_dir)
dependencies |= FindBootstrapDependencies(base_dir)
dependencies |= FindPythonDependencies(path)
if options.include_page_set_data:
dependencies |= FindPageSetDependencies(base_dir)
# Remove excluded files.
dependencies -= FindExcludedFiles(set(dependencies), options)
return dependencies
def ZipDependencies(paths, dependencies, options):
base_dir = os.path.dirname(os.path.realpath(util.GetChromiumSrcDir()))
with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
# Add dependencies to archive.
for path in dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(path, base_dir))
zip_file.write(path, path_in_archive)
# Add symlinks to executable paths, for ease of use.
for path in paths:
link_info = zipfile.ZipInfo(
os.path.join('telemetry', os.path.basename(path)))
link_info.create_system = 3 # Unix attributes.
      # 010 is the regular-file type, 0777 is the permission bits rwxrwxrwx.
link_info.external_attr = 0100777 << 16 # Octal.
relative_path = os.path.relpath(path, base_dir)
link_script = (
'#!/usr/bin/env python\n\n'
'import os\n'
'import sys\n\n\n'
'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
% relative_path)
zip_file.writestr(link_info, link_script)
# Add gsutil to the archive, if it's available. The gsutil in
# depot_tools is modified to allow authentication using prodaccess.
# TODO: If there's a gsutil in telemetry/third_party/, bootstrap_deps
# will include it. Then there will be two copies of gsutil at the same
# location in the archive. This can be confusing for users.
gsutil_path = os.path.realpath(cloud_storage.FindGsutil())
if cloud_storage.SupportsProdaccess(gsutil_path):
gsutil_base_dir = os.path.join(os.path.dirname(gsutil_path), os.pardir)
gsutil_dependencies = path_set.PathSet()
gsutil_dependencies.add(os.path.dirname(gsutil_path))
# Also add modules from depot_tools that are needed by gsutil.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'boto'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'fancy_urllib'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'retry_decorator'))
gsutil_dependencies -= FindExcludedFiles(
set(gsutil_dependencies), options)
# Also add upload.py to the archive from depot_tools, if it is available.
# This allows us to post patches without requiring a full depot_tools
# install. There's no real point in including upload.py if we do not
# also have gsutil, which is why this is inside the gsutil block.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'upload.py'))
for path in gsutil_dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(util.GetTelemetryDir(), base_dir),
'third_party', os.path.relpath(path, gsutil_base_dir))
zip_file.write(path, path_in_archive)
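# Illustrative sketch (not part of the original tool): a hypothetical helper
# showing how the launcher entries above are marked executable. Unix file type
# and permission bits live in the high 16 bits of ZipInfo.external_attr;
# stat.S_IFREG | 0777 is the same 0100777 value used in ZipDependencies.
def _MakeExecutableZipEntry(name_in_archive):
  import stat
  info = zipfile.ZipInfo(name_in_archive)
  info.create_system = 3  # Record Unix attributes.
  info.external_attr = (stat.S_IFREG | 0777) << 16  # Regular file, mode 0777.
  return info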
class FindDependenciesCommand(command_line.OptparseCommand):
"""Prints all dependencies"""
@classmethod
def AddCommandLineArgs(cls, parser):
parser.add_option(
'-v', '--verbose', action='count', dest='verbosity',
help='Increase verbosity level (repeat as needed).')
parser.add_option(
'-p', '--include-page-set-data', action='store_true', default=False,
help='Scan tests for page set data and include them.')
parser.add_option(
'-e', '--exclude', action='append', default=[],
help='Exclude paths matching EXCLUDE. Can be used multiple times.')
parser.add_option(
'-z', '--zip',
help='Store files in a zip archive at ZIP.')
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
if args.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif args.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
def Run(self, args):
paths = args.positional_args
dependencies = FindDependencies(paths, args)
if args.zip:
ZipDependencies(paths, dependencies, args)
print 'Zip archive written to %s.' % args.zip
else:
print '\n'.join(sorted(dependencies))
return 0
|
chromium2014/src
|
tools/telemetry/telemetry/util/find_dependencies.py
|
Python
|
bsd-3-clause
| 9,256
| 0.010372
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Antonio Hernández <ahernandez@emergya.com>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <devmaster@guadalinex.org>"
__license__ = "GPL-2"
import firstboot.serverconf
from ChefConf import ChefConf
from GCCConf import GCCConf
from AuthConf import AuthConf
from DateSyncConf import DateSyncConf
from UsersConf import UsersConf
class Singleton:
"""
A non-thread-safe helper class to ease implementing singletons.
This should be used as a decorator -- not a metaclass -- to the
class that should be a singleton.
The decorated class can define one `__init__` function that
takes only the `self` argument. Other than that, there are
no restrictions that apply to the decorated class.
To get the singleton instance, use the `Instance` method. Trying
to use `__call__` will result in a `TypeError` being raised.
Limitations: The decorated class cannot be inherited from.
"""
def __init__(self, decorated):
self._decorated = decorated
def Instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError('Singletons must be accessed through `Instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
@Singleton
class ServerConf():
# Version of the configuration JSON file
def __init__(self):
self._data = {}
self.VERSION = '0.2.0'
self._data['gem_repo'] = 'http://rubygems.org'
self._data['version'] = self.VERSION
self._data['organization'] = ''
self._chef_conf = ChefConf()
self._gcc_conf = GCCConf()
self._auth_conf = AuthConf()
self._ntp_conf = DateSyncConf()
self._users_conf = UsersConf()
def load_data(self, conf):
msg = 'ServerConf: Key "%s" not found in the configuration file.'
try:
v = conf['version']
if v != self.VERSION:
print 'WARNING: ServerConf and AUTOCONFIG_JSON version mismatch!'
except KeyError as e:
print msg % ('version',)
try:
self.set_organization(conf['organization'])
except KeyError as e:
print msg % ('organization',)
try:
self.set_notes(conf['notes'])
except KeyError as e:
print msg % ('notes',)
try:
self.set_gem_repo(conf['gem_repo'])
except KeyError as e:
print msg % ('gem_repo',)
try:
self._chef_conf.load_data(conf['chef'])
except KeyError as e:
print msg % ('chef',)
try:
self._gcc_conf.load_data(conf['gcc'])
except KeyError as e:
print msg % ('gcc',)
try:
self._auth_conf.load_data(conf['auth'])
except KeyError as e:
print msg % ('auth',)
try:
self._ntp_conf.load_data(conf['uri_ntp'])
except KeyError as e:
            print msg % ('uri_ntp',)
def validate(self):
valid = len(self._data['version']) > 0 \
and self._chef_conf.validate() \
and self._auth_conf.validate() \
and self._ntp_conf.validate() \
and self._gcc_conf.validate()
return valid
def set_gem_repo(self, repo):
self._data['gem_repo'] = repo
return self
def get_gem_repo(self):
return self._data['gem_repo'].encode('utf-8')
def get_version(self):
return self._data['version'].encode('utf-8')
def set_version(self, version):
self._data['version'] = version
return self
def get_organization(self):
return self._data['organization'].encode('utf-8')
def set_organization(self, organization):
self._data['organization'] = organization
return self
def get_notes(self):
return self._data['notes'].encode('utf-8')
def set_notes(self, notes):
self._data['notes'] = notes
return self
def get_auth_conf(self):
return self._auth_conf
def get_chef_conf(self):
return self._chef_conf
def get_ntp_conf(self):
return self._ntp_conf
def get_gcc_conf(self):
return self._gcc_conf
def get_users_conf(self):
return self._users_conf
def set_auth_conf(self, auth_conf):
self._auth_conf = auth_conf
return self
def set_chef_conf(self, chef_conf):
self._chef_conf = chef_conf
return self
def set_ntp_conf(self, ntp_conf):
self._ntp_conf = ntp_conf
return self
def set_gcc_conf(self, gcc_conf):
self._gcc_conf = gcc_conf
        return self
def set_users_conf(self, user_conf):
self._users_conf = user_conf
return self
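# Illustrative usage sketch (not part of the original module): how the
# Singleton-decorated ServerConf above is meant to be used. Assumes the
# imported *Conf classes construct without arguments, as __init__ expects.
if __name__ == '__main__':
    conf = ServerConf.Instance()
    conf.set_organization('Example Org').set_notes('demo notes')
    # Instance() always returns the same object; calling ServerConf() directly
    # raises TypeError by design.
    assert ServerConf.Instance() is conf
    print conf.get_organization(), conf.get_notes()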
|
rcmorano/gecosws-config-assistant
|
firstboot/serverconf/ServerConf.py
|
Python
|
gpl-2.0
| 5,970
| 0.00067
|
# -*- coding: utf-8 -*-
#############################
# Light IMDb Ratings Update #
# by axlt2002 #
#############################
# changes by dziobak #
#############################
import xbmc, xbmcgui
import sys
if sys.version_info >= (2, 7): import json as jSon
else: import simplejson as jSon
from common import *
from imdb_scraper import parse_IMDb_page
from tvdb_scraper import get_IMDb_ID_from_theTVDB
from tmdb_scraper import get_IMDb_ID_from_TMDb
from thread import start_new_thread, allocate_lock
max_threads = int(NumberOfThreads) - 1 #0 - 1 thread, 1 - 2 threads ...
num_threads = 0
def thread_parse_IMDb_page(dType, dbID, IMDb, Title, Rating, Votes, TVDB, TMDB, season, episode, lock, flock):
#movie: MovieID, IMDb, Title, Rating, Votes, Top250
#tvshow: TVShowID, IMDb, Title, Rating, Votes, TVDB, TMDB
#episode: EpisodeID, IMDb, Title, Rating, Votes, TVDB, TMDB, season, episode
global num_threads
if IMDb == None or IMDb == "" or "tt" not in IMDb: IMDb = None
Top250 = None
if dType == "movie":
Top250 = TVDB
if Top250 == None: Top250 = 0
TVDB = None
defaultLog( addonLanguage(32507) % ( Title, IMDb, TVDB, TMDB ) )
if IMDb == None:
if dType == "tvshow" or dType == "episode":
(IMDb, statusInfo) = get_IMDb_ID_from_theTVDB(dType, TVDB)
if IMDb == None:
(IMDb, add_statusInfo) = get_IMDb_ID_from_TMDb(dType, TMDB, season, episode)
statusInfo = statusInfo + "\n" + add_statusInfo
elif dType == "movie":
statusInfo = "Missing IMDb ID"
if IMDb == None:
defaultLog( addonLanguage(32503) % ( Title ) )
flock.acquire()
try:
statusLog( Title + ":\n" + statusInfo )
finally:
flock.release()
lock.acquire()
num_threads -= 1
lock.release()
return
(updatedRating, updatedVotes, updatedTop250, statusInfo) = parse_IMDb_page(IMDb)
if updatedRating == None:
defaultLog( addonLanguage(32503) % ( Title ) )
flock.acquire()
try:
statusLog( Title + ":\n" + statusInfo )
finally:
flock.release()
else:
Rating = str( float( ( "%.1f" % Rating ) ) )
Votes = '{:,}'.format( int ( Votes ) )
defaultLog( addonLanguage(32499) % ( Rating, Votes, Top250 ) )
if (dType != "movie"):
updatedTop250 = None
if Rating != updatedRating or ( Votes != updatedVotes and \
((dType == "movie" and IncludeMoviesVotes == "true" ) or ((dType == "tvshow" or dType == "episode") and IncludeTVShowsVotes == "true")) or \
( dType == "movie" and (Top250 != updatedTop250) and IncludeMoviesTop250 == "true" )):
if (dType == "movie"):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.SetMovieDetails","params":{"movieid":' + str( dbID ) + ',"rating":' + str( updatedRating ) + ',"votes":"' + str( updatedVotes ) + '","top250":' + str( updatedTop250 ) + '},"id":1}'
elif (dType == "tvshow"):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.SetTVShowDetails","params":{"tvshowid":' + str( dbID ) + ',"rating":' + str( updatedRating ) + ',"votes":"' + str( updatedVotes ) + '","uniqueid": {"imdb": "' + IMDb + '"}},"id":1}'
elif (dType == "episode"):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.SetEpisodeDetails","params":{"episodeid":' + str( dbID ) + ',"rating":' + str( updatedRating ) + ',"votes":"' + str( updatedVotes ) + '","uniqueid": {"imdb": "' + IMDb + '"}},"id":1}'
debugLog( "JSON Query: " + jSonQuery )
jSonResponse = xbmc.executeJSONRPC( jSonQuery )
jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' )
debugLog( "JSON Response: " + jSonResponse )
defaultLog( addonLanguage(32500) % ( Title, str( updatedRating ), str( updatedVotes ), str( updatedTop250 ) ) )
else:
defaultLog( addonLanguage(32502) % ( Title ) )
lock.acquire()
num_threads -= 1
lock.release()
return
class Movies:
def __init__( self ):
defaultLog( addonLanguage(32255) )
statusLog( "\n" + "--> " + addonLanguage(32255).rsplit(' ', 1)[0] )
if ShowNotifications == "true":
doNotify( addonLanguage(32255), 5000 )
xbmc.sleep(5000)
self.AllMovies = []
self.getDBMovies()
self.lock = allocate_lock()
self.flock = allocate_lock()
self.doUpdate()
defaultLog( addonLanguage(32258) )
if ShowNotifications == "true":
doNotify( addonLanguage(32258), 5000 )
xbmc.sleep(5000)
def getDBMovies( self ):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.GetMovies","params":{"properties":["imdbnumber","rating","votes","top250","playcount"]},"id":1}'
debugLog( "JSON Query: " + jSonQuery )
jSonResponse = xbmc.executeJSONRPC( jSonQuery )
jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' )
debugLog( "JSON Response: " + jSonResponse )
jSonResponse = jSon.loads( jSonResponse )
try:
if jSonResponse['result'].has_key( 'movies' ):
for item in jSonResponse['result']['movies']:
MovieID = item.get('movieid'); IMDb = item.get('imdbnumber'); Title = item.get('label');
Rating = item.get('rating'); Votes = item.get('votes'); Top250 = item.get('top250'); Watched = item.get('playcount');
self.AllMovies.append( ( MovieID, IMDb, Title, Rating, Votes, Top250, Watched ) )
except: pass
def doUpdate( self ):
global num_threads
AllMovies = len( self.AllMovies ); Counter = 0;
if ShowProgress == "true":
Progress = xbmcgui.DialogProgressBG()
Progress.create( addonLanguage(32261) )
for Movie in self.AllMovies:
while num_threads > max_threads:
xbmc.sleep(500)
if ShowProgress == "true":
Counter = Counter + 1
Progress.update( (Counter*100)/AllMovies, addonLanguage(32261), Movie[2] )
if int(Movie[6]) > 0 and ExcludeWatched == "true":
defaultLog( addonLanguage(32504) % ( Movie[2] ) )
continue
start_new_thread(thread_parse_IMDb_page,("movie",Movie[0],Movie[1],Movie[2],Movie[3],Movie[4],Movie[5],"","","",self.lock,self.flock))
self.lock.acquire()
num_threads += 1
self.lock.release()
while num_threads > 0:
xbmc.sleep(500)
if ShowProgress == "true":
Progress.close()
class TVShows:
def __init__( self ):
defaultLog( addonLanguage(32256) )
statusLog( "\n" + "--> " + addonLanguage(32256).rsplit(' ', 1)[0] )
if ShowNotifications == "true":
doNotify( addonLanguage(32256), 5000 )
xbmc.sleep(5000)
self.AllTVShows = []
self.getDBTVShows()
self.lock = allocate_lock()
self.flock = allocate_lock()
self.doUpdateTVShows()
defaultLog( addonLanguage(32259) )
if ShowNotifications == "true":
doNotify( addonLanguage(32259), 5000 )
xbmc.sleep(5000)
def getDBTVShows( self ):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.GetTVShows","params":{"properties":["uniqueid","rating","votes","playcount"]},"id":1}'
debugLog( "JSON Query: " + jSonQuery )
jSonResponse = xbmc.executeJSONRPC( jSonQuery )
jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' )
debugLog( "JSON Response: " + jSonResponse )
jSonResponse = jSon.loads( jSonResponse )
try:
if jSonResponse['result'].has_key( 'tvshows' ):
for item in jSonResponse['result']['tvshows']:
TVShowID = item.get('tvshowid'); unique_id = item.get('uniqueid'); imdb_id = unique_id.get('imdb'); Title = item.get('label');
Rating = item.get('rating'); Votes = item.get('votes'); tvdb_id = unique_id.get('tvdb'); Watched = item.get('playcount'); tmdb_id = unique_id.get('tmdb');
self.AllTVShows.append( ( TVShowID, imdb_id, Title, Rating, Votes, tvdb_id, Watched, tmdb_id ) )
except: pass
def doUpdateEpisodes( self, tvshowid, tvshowtitle, tvshowtmdb_id, PCounter ):
global num_threads
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.GetEpisodes","params":{"tvshowid":' + str( tvshowid ) + ', "properties":["uniqueid","rating","votes","playcount","episode","season"]},"id":1}'
debugLog( "JSON Query: " + jSonQuery )
jSonResponse = xbmc.executeJSONRPC( jSonQuery )
jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' )
debugLog( "JSON Response: " + jSonResponse )
jSonResponse = jSon.loads( jSonResponse )
try:
if jSonResponse['result'].has_key( 'episodes' ):
for item in jSonResponse['result']['episodes']:
while num_threads > max_threads:
xbmc.sleep(500)
EpisodeID = item.get('episodeid'); unique_id = item.get('uniqueid'); IMDb = unique_id.get('imdb')
Title = tvshowtitle + " " + str( item.get('season') ) + "x" + str( "%02d" % item.get('episode') );
Rating = item.get('rating'); Votes = item.get('votes'); Watched = item.get('playcount');
TVDB = unique_id.get('tvdb')
if ShowProgress == "true":
self.Progress.update( PCounter, addonLanguage(32262), Title )
if int(Watched) > 0 and ExcludeWatched == "true":
defaultLog( addonLanguage(32504) % ( Title ) )
continue
start_new_thread(thread_parse_IMDb_page,("episode",EpisodeID,IMDb,Title,Rating,Votes,TVDB,tvshowtmdb_id,item.get('season'),item.get('episode'),self.lock,self.flock))
self.lock.acquire()
num_threads += 1
self.lock.release()
except: pass
def doUpdateTVShows( self ):
global num_threads
AllTVShows = len( self.AllTVShows ); Counter = 0;
PCounter = 0
if ShowProgress == "true":
self.Progress = xbmcgui.DialogProgressBG()
self.Progress.create( addonLanguage(32262) )
for TVShow in self.AllTVShows:
while num_threads > max_threads:
xbmc.sleep(500)
if ShowProgress == "true":
Counter = Counter + 1
PCounter = (Counter*100)/AllTVShows
self.Progress.update( PCounter, addonLanguage(32262), TVShow[2] )
if int(TVShow[6]) > 0 and ExcludeWatched == "true":
defaultLog( addonLanguage(32504) % ( TVShow[2] ) )
continue
start_new_thread(thread_parse_IMDb_page,("tvshow",TVShow[0],TVShow[1],TVShow[2],TVShow[3],TVShow[4],TVShow[5],TVShow[7],"","",self.lock,self.flock))
self.lock.acquire()
num_threads += 1
self.lock.release()
if IncludeEpisodes == "true":
self.doUpdateEpisodes( TVShow[0], TVShow[2], TVShow[7], PCounter )
while num_threads > 0:
xbmc.sleep(500)
if ShowProgress == "true":
self.Progress.close()
def perform_update():
if addonSettings.getSetting( "PerformingUpdate" ) == "true":
xbmcgui.Dialog().ok( "%s" % ( addonName ), addonLanguage(32251) )
return
addonSettings.setSetting( "PerformingUpdate", "true" )
if onMovies == "true":
Movies()
if ShowNotifications == "true": xbmc.sleep(5000)
if onTVShows == "true": TVShows()
addonSettings.setSetting( "PerformingUpdate", "false" )
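# Illustrative sketch (not part of the add-on): the same VideoLibrary.SetMovieDetails
# payload built in thread_parse_IMDb_page, but assembled with jSon.dumps instead of
# string concatenation, so quoting and escaping are handled automatically.
def build_set_movie_details_query(movie_id, rating, votes, top250):
    return jSon.dumps({
        "jsonrpc": "2.0",
        "method": "VideoLibrary.SetMovieDetails",
        "params": {"movieid": movie_id, "rating": rating,
                   "votes": str(votes), "top250": top250},
        "id": 1,
    })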
|
axlt2002/script.light.imdb.ratings.update
|
resources/core/update_main.py
|
Python
|
gpl-3.0
| 10,373
| 0.050998
|
"""
Settings for Bok Choy tests that are used when running LMS.
Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running the tests
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and also serving static
from the same directory.
"""
import os
from path import Path as path
from tempfile import mkdtemp
from openedx.core.release import RELEASE_LINE
CONFIG_ROOT = path(__file__).abspath().dirname()
TEST_ROOT = CONFIG_ROOT.dirname().dirname() / "test_root"
########################## Prod-like settings ###################################
# These should be as close as possible to the settings we use in production.
# As in prod, we read in environment and auth variables from JSON files.
# Unlike in prod, we use the JSON files stored in this repo.
# This is a convenience for ensuring (a) that we can consistently find the files
# and (b) that the files are the same in Jenkins as in local dev.
os.environ['SERVICE_VARIANT'] = 'bok_choy'
os.environ['CONFIG_ROOT'] = CONFIG_ROOT
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
######################### Testing overrides ####################################
# Redirect to the test_root folder within the repo
GITHUB_REPO_ROOT = (TEST_ROOT / "data").abspath()
LOG_DIR = (TEST_ROOT / "log").abspath()
# Configure modulestore to use the test folder within the repo
update_module_store_settings(
MODULESTORE,
module_store_options={
'fs_root': (TEST_ROOT / "data").abspath(),
},
xml_store_options={
'data_dir': (TEST_ROOT / "data").abspath(),
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
############################ STATIC FILES #############################
# Enable debug so that static assets are served by Django
DEBUG = True
# Serve static files at /static directly from the staticfiles directory under test root
# Note: optimized files for testing are generated with settings from test_static_optimized
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATICFILES_DIRS = [
(TEST_ROOT / "staticfiles" / "lms").abspath(),
]
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = TEST_ROOT / "uploads"
# Webpack loader must use webpack output setting
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = TEST_ROOT / "staticfiles" / "lms" / "webpack-stats.json"
# Don't use compression during tests
PIPELINE_JS_COMPRESSOR = None
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
BLOCK_STRUCTURES_SETTINGS = dict(
# We have CELERY_ALWAYS_EAGER set to True, so there's no asynchronous
# code running and the celery routing is unimportant.
# It does not make sense to retry.
TASK_MAX_RETRIES=0,
    # course publish task delay is irrelevant because the task is run synchronously
COURSE_PUBLISH_TASK_DELAY=0,
    # retry delay is irrelevant because we never retry
TASK_DEFAULT_RETRY_DELAY=0,
)
###################### Grade Downloads ######################
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': os.path.join(mkdtemp(), 'edx-s3', 'grades'),
}
# Configure the LMS to use our stub XQueue implementation
XQUEUE_INTERFACE['url'] = 'http://localhost:8040'
# Configure the LMS to use our stub EdxNotes implementation
EDXNOTES_PUBLIC_API = 'http://localhost:8042/api/v1'
EDXNOTES_INTERNAL_API = 'http://localhost:8042/api/v1'
EDXNOTES_CONNECT_TIMEOUT = 10 # time in seconds
EDXNOTES_READ_TIMEOUT = 10 # time in seconds
NOTES_DISABLED_TABS = []
# Silence noisy logs
import logging
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('edxmako.shortcuts', logging.ERROR),
('dd.dogapi', logging.ERROR),
('edx.discussion', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Enable milestones app
FEATURES['MILESTONES_APP'] = True
# Enable oauth authentication, which we test.
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
# Enable pre-requisite course
FEATURES['ENABLE_PREREQUISITE_COURSES'] = True
# Enable Course Discovery
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
# Enable student notes
FEATURES['ENABLE_EDXNOTES'] = True
# Enable teams feature
FEATURES['ENABLE_TEAMS'] = True
# Enable custom content licensing
FEATURES['LICENSING'] = True
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
# Open up endpoint for faking Software Secure responses
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
FEATURES['ENABLE_ENROLLMENT_TRACK_USER_PARTITION'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE_PORT = 9080
YOUTUBE['TEST_TIMEOUT'] = 5000
YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
############################# SECURITY SETTINGS ################################
# Default to advanced security in common.py, so tests can reset here to use
# a simpler security model
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
FEATURES['ENABLE_MOBILE_REST_API'] = True  # Enable the mobile REST API
FEATURES['ENABLE_VIDEO_BUMPER'] = True # Show video bumper in LMS
FEATURES['SHOW_BUMPER_PERIODICITY'] = 1
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
# Enable courseware search for tests
FEATURES['ENABLE_COURSEWARE_SEARCH'] = True
# Enable dashboard search for tests
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
# Enable the discussion home panel, which includes a subscription on/off setting for discussion digest emails.
FEATURES['ENABLE_DISCUSSION_HOME_PANEL'] = True
# Enable support for OpenBadges accomplishments
FEATURES['ENABLE_OPENBADGES'] = True
# Use MockSearchEngine as the search engine for test scenario
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Path at which to store the mock index
MOCK_SEARCH_BACKING_FILE = (
TEST_ROOT / "index_file.dat"
).abspath()
# Verify student settings
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
# this secret key should be the same as cms/envs/bok_choy.py's
SECRET_KEY = "very_secret_bok_choy_key"
# Set dummy values for profile image settings.
PROFILE_IMAGE_BACKEND = {
'class': 'storages.backends.overwrite.OverwriteStorage',
'options': {
'location': os.path.join(MEDIA_ROOT, 'profile-images/'),
'base_url': os.path.join(MEDIA_URL, 'profile-images/'),
},
}
# Make sure we test with the extended history table
FEATURES['ENABLE_CSMH_EXTENDED'] = True
INSTALLED_APPS += ('coursewarehistoryextended',)
BADGING_BACKEND = 'lms.djangoapps.badges.backends.tests.dummy_backend.DummyBackend'
# Configure the LMS to use our stub eCommerce implementation
ECOMMERCE_API_URL = 'http://localhost:8043/api/v2/'
LMS_ROOT_URL = "http://localhost:8000"
if RELEASE_LINE == "master":
# On master, acceptance tests use edX books, not the default Open edX books.
HELP_TOKENS_BOOKS = {
'learner': 'http://edx.readthedocs.io/projects/edx-guide-for-students',
'course_author': 'http://edx.readthedocs.io/projects/edx-partner-course-staff',
}
# TODO: TNL-6546: Remove this waffle and flag code.
from django.db.utils import ProgrammingError
from waffle.models import Flag
try:
flag, created = Flag.objects.get_or_create(name='unified_course_view')
WAFFLE_OVERRIDE = True
except ProgrammingError:
# during initial reset_db, the table for the flag doesn't yet exist.
pass
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
|
fintech-circle/edx-platform
|
lms/envs/bok_choy.py
|
Python
|
agpl-3.0
| 8,553
| 0.002923
|
import os
ADDRESS = '127.0.0.1'
PORT = 12345
BACKUP_DIR = 'Backup'
BASE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
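# Illustrative sketch (not part of the original settings): BASE_PATH above keeps
# the literal '..' component; os.path.normpath collapses it to the project root,
# and the backup directory can be resolved relative to it.
if __name__ == '__main__':
    print(os.path.normpath(BASE_PATH))
    print(os.path.normpath(os.path.join(BASE_PATH, BACKUP_DIR)))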
|
gonczor/ServerPy
|
Setup/settings.py
|
Python
|
gpl-2.0
| 142
| 0
|
import os
import requests
if __name__ == "__main__":
session = requests.Session()
data = {"email": "admin@knex.com", "password": "admin"}
session.post("http://localhost:5000/api/users/login", data=data)
    for filename in os.listdir("."):
        if filename.endswith(".json") or filename.endswith(".json5"):
            # Read the project description and post it with the matching
            # content type; json5 files are flagged as application/json5.
            with open(filename, "r") as f:
                text = f.read()
            content_type = ('application/json5' if filename.endswith(".json5")
                            else 'application/json')
            res = session.post("http://localhost:5000/api/projects",
                               data=text.encode('utf-8'),
                               headers={'Content-Type': content_type})
            print(filename + " " + str(res))
session.get("http://localhost:5000/api/users/logout")
|
Drakulix/knex
|
evalData/testdata_insertion.py
|
Python
|
mit
| 897
| 0.00223
|
# -*- coding: utf-8 -*-
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
    unicode data. Incoming form data parsed by the request object will be
    decoded into a unicode object if possible and if it makes sense.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from datetime import datetime, timedelta
from werkzeug.http import HTTP_STATUS_CODES, \
parse_accept_header, parse_cache_control_header, parse_etags, \
parse_date, generate_etag, is_resource_modified, unquote_etag, \
quote_etag, parse_set_header, parse_authorization_header, \
parse_www_authenticate_header, remove_entity_headers, \
parse_options_header, dump_options_header, http_date, \
parse_if_range_header, parse_cookie, dump_cookie, \
parse_range_header, parse_content_range_header, dump_header
from werkzeug.urls import url_decode, iri_to_uri, url_join
from werkzeug.formparser import FormDataParser, default_stream_factory
from werkzeug.utils import cached_property, environ_property, \
header_property, get_content_type
from werkzeug.wsgi import get_current_url, get_host, \
ClosingIterator, get_input_stream, get_content_length
from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
ResponseCacheControl, RequestCacheControl, CallbackDict, \
ContentRange, iter_multi_items
from werkzeug._internal import _get_environ
from werkzeug._compat import to_bytes, string_types, text_type, \
integer_types, wsgi_decoding_dance, wsgi_get_bytes, \
to_unicode, to_native, BytesIO
def _run_wsgi_app(*args):
"""This function replaces itself to ensure that the test module is not
imported unless required. DO NOT USE!
"""
global _run_wsgi_app
from werkzeug.test import run_wsgi_app as _run_wsgi_app
return _run_wsgi_app(*args)
def _warn_if_string(iterable):
"""Helper for the response objects to check if the iterable returned
to the WSGI server is not a string.
"""
if isinstance(iterable, string_types):
from warnings import warn
warn(Warning('response iterable was set to a string. This appears '
'to work but means that the server will send the '
                     'data to the client char by char. This is almost '
'never intended behavior, use response.data to assign '
'strings to the response object.'), stacklevel=2)
def _assert_not_shallow(request):
if request.shallow:
raise RuntimeError('A shallow request tried to consume '
'form data. If you really want to do '
'that, set `shallow` to False.')
def _iter_encoded(iterable, charset):
for item in iterable:
if isinstance(item, text_type):
yield item.encode(charset)
else:
yield item
class BaseRequest(object):
"""Very basic request object. This does not implement advanced stuff like
entity tag parsing or cache controls. The request object is created with
the WSGI environment as first argument and will add itself to the WSGI
environment as ``'werkzeug.request'`` unless it's created with
`populate_request` set to False.
There are a couple of mixins available that add additional functionality
to the request object, there is also a class called `Request` which
subclasses `BaseRequest` and all the important mixins.
It's a good idea to create a custom subclass of the :class:`BaseRequest`
and add missing functionality either via mixins or direct implementation.
    Here is an example of such a subclass::
from werkzeug.wrappers import BaseRequest, ETagRequestMixin
class Request(BaseRequest, ETagRequestMixin):
pass
Request objects are **read only**. As of 0.5 modifications are not
allowed in any place. Unlike the lower level parsing functions the
request object will use immutable objects everywhere possible.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Per default the request object will be added to the WSGI
environment as `werkzeug.request` to support the debugging system.
If you don't want that, set `populate_request` to `False`.
If `shallow` is `True` the environment is initialized as shallow
object around the environ. Every operation that would modify the
environ in any way (such as consuming form data) raises an exception
unless the `shallow` attribute is explicitly set to `False`. This
is useful for middlewares where you don't want to consume the form
data by accident. A shallow request is not populated to the WSGI
environment.
.. versionchanged:: 0.5
read-only mode was enforced by using immutables classes for all
data.
"""
#: the charset for the request, defaults to utf-8
charset = 'utf-8'
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = 'replace'
#: the maximum content length. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length = None
#: the maximum form field size. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size = None
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
    #: multiple values per key. Alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class = ImmutableList
#: the type to be used for dict values from the incoming WSGI environment.
#: By default an
#: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
#: (for example for :attr:`cookies`).
#:
#: .. versionadded:: 0.6
dict_storage_class = ImmutableTypeConversionDict
    #: The form data parser that should be used. Can be replaced to customize
    #: the form data parsing.
form_data_parser_class = FormDataParser
#: Optionally a list of hosts that is trusted by this request. By default
#: all hosts are trusted which means that whatever the client sends the
#: host is will be accepted. This is the recommended setup as a webserver
#: should manually be set up to not route invalid hosts to the application.
#:
#: .. versionadded:: 0.9
trusted_hosts = None
    #: Indicates whether the data descriptor should be allowed to read and
#: buffer up the input stream. By default it's enabled.
#:
#: .. versionadded:: 0.9
disable_data_descriptor = False
def __init__(self, environ, populate_request=True, shallow=False):
self.environ = environ
if populate_request and not shallow:
self.environ['werkzeug.request'] = self
self.shallow = shallow
def __repr__(self):
# make sure the __repr__ even works if the request was created
# from an invalid WSGI environment. If we display the request
# in a debug session we don't want the repr to blow up.
args = []
try:
args.append("'%s'" % self.url)
args.append('[%s]' % self.method)
except Exception:
args.append('(invalid WSGI environ)')
return '<%s %s>' % (
self.__class__.__name__,
' '.join(args)
)
@property
def url_charset(self):
"""The charset that is assumed for URLs. Defaults to the value
of :attr:`charset`.
.. versionadded:: 0.6
"""
return self.charset
@classmethod
def from_values(cls, *args, **kwargs):
"""Create a new request object based on the values provided. If
environ is given missing values are filled from there. This method is
useful for small scripts when you need to simulate a request from an URL.
        Do not use this method for unit testing; there is a full-featured client
object (:class:`Client`) that allows to create multipart requests,
support for cookies etc.
This accepts the same options as the
:class:`~werkzeug.test.EnvironBuilder`.
.. versionchanged:: 0.5
This method now accepts the same arguments as
:class:`~werkzeug.test.EnvironBuilder`. Because of this the
`environ` parameter is now called `environ_overrides`.
:return: request object
"""
from werkzeug.test import EnvironBuilder
charset = kwargs.pop('charset', cls.charset)
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_request(cls)
finally:
builder.close()
@classmethod
def application(cls, f):
"""Decorate a function as responder that accepts the request as first
argument. This works like the :func:`responder` decorator but the
function is passed the request object as first argument and the
request object will be closed automatically::
@Request.application
def my_wsgi_app(request):
return Response('Hello World!')
:param f: the WSGI callable to decorate
:return: a new WSGI callable
"""
#: return a callable that wraps the -2nd argument with the request
#: and calls the function with all the arguments up to that one and
#: the request. The return value is then called with the latest
#: two arguments. This makes it possible to use this decorator for
#: both methods and standalone WSGI functions.
def application(*args):
request = cls(args[-2])
with request:
return f(*args[:-2] + (request,))(*args[-2:])
return update_wrapper(application, f)
def _get_file_stream(self, total_content_length, content_type, filename=None,
content_length=None):
"""Called to get a stream for the file upload.
This must provide a file-like class with `read()`, `readline()`
and `seek()` methods that is both writeable and readable.
The default implementation returns a temporary file if the total
content length is higher than 500KB. Because many browsers do not
provide a content length for the files only the total content
length matters.
:param total_content_length: the total content length of all the
data in the request combined. This value
is guaranteed to be there.
:param content_type: the mimetype of the uploaded file.
:param filename: the filename of the uploaded file. May be `None`.
:param content_length: the length of this file. This value is usually
not provided because webbrowsers do not provide
this value.
"""
return default_stream_factory(total_content_length, content_type,
filename, content_length)
@property
def want_form_data_parsed(self):
"""Returns True if the request method carries content. As of
Werkzeug 0.9 this will be the case if a content type is transmitted.
.. versionadded:: 0.8
"""
return bool(self.environ.get('CONTENT_TYPE'))
    def make_form_data_parser(self):
        """Creates the form data parser. Instantiates the
:attr:`form_data_parser_class` with some parameters.
.. versionadded:: 0.8
"""
return self.form_data_parser_class(self._get_file_stream,
self.charset,
self.encoding_errors,
self.max_form_memory_size,
self.max_content_length,
self.parameter_storage_class)
def _load_form_data(self):
"""Method used internally to retrieve submitted data. After calling
this sets `form` and `files` on the request object to multi dicts
filled with the incoming form data. As a matter of fact the input
stream will be empty afterwards. You can also call this method to
force the parsing of the form data.
.. versionadded:: 0.8
"""
# abort early if we have already consumed the stream
if 'form' in self.__dict__:
return
_assert_not_shallow(self)
if self.want_form_data_parsed:
content_type = self.environ.get('CONTENT_TYPE', '')
content_length = get_content_length(self.environ)
mimetype, options = parse_options_header(content_type)
parser = self.make_form_data_parser()
data = parser.parse(self._get_stream_for_parsing(),
mimetype, content_length, options)
else:
data = (self.stream, self.parameter_storage_class(),
self.parameter_storage_class())
# inject the values into the instance dict so that we bypass
# our cached_property non-data descriptor.
d = self.__dict__
d['stream'], d['form'], d['files'] = data
def _get_stream_for_parsing(self):
"""This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
"""
cached_data = getattr(self, '_cached_data', None)
if cached_data is not None:
return BytesIO(cached_data)
return self.stream
def close(self):
"""Closes associated resources of this request object. This
closes all file handles explicitly. You can also use the request
        object in a with statement which will automatically close it.
.. versionadded:: 0.9
"""
files = self.__dict__.get('files')
for key, value in iter_multi_items(files or ()):
value.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
@cached_property
def stream(self):
"""The stream to read incoming data from. Unlike :attr:`input_stream`
this stream is properly guarded that you can't accidentally read past
the length of the input. Werkzeug will internally always refer to
this stream to read data which makes it possible to wrap this
object with a stream that does filtering.
.. versionchanged:: 0.9
This stream is now always available but might be consumed by the
form parser later on. Previously the stream was only set if no
parsing happened.
"""
_assert_not_shallow(self)
return get_input_stream(self.environ)
input_stream = environ_property('wsgi.input', 'The WSGI input stream.\n'
'In general it\'s a bad idea to use this one because you can easily '
'read past the boundary. Use the :attr:`stream` instead.')
@cached_property
def args(self):
"""The parsed URL parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')),
self.url_charset, errors=self.encoding_errors,
cls=self.parameter_storage_class)
@cached_property
def data(self):
if self.disable_data_descriptor:
raise AttributeError('data descriptor is disabled')
# XXX: this should eventually be deprecated.
# We trigger form data parsing first which means that the descriptor
# will not cache the data that would otherwise be .form or .files
# data. This restores the behavior that was there in Werkzeug
# before 0.9. New code should use :meth:`get_data` explicitly as
# this will make behavior explicit.
return self.get_data(parse_form_data=True)
def get_data(self, cache=True, as_text=False, parse_form_data=False):
"""This reads the buffered incoming data from the client into one
bytestring. By default this is cached but that behavior can be
changed by setting `cache` to `False`.
Usually it's a bad idea to call this method without checking the
content length first as a client could send dozens of megabytes or more
to cause memory problems on the server.
Note that if the form data was already parsed this method will not
return anything as form data parsing does not cache the data like
this method does. To implicitly invoke form data parsing function
set `parse_form_data` to `True`. When this is done the return value
of this method will be an empty string if the form parser handles
the data. This generally is not necessary as if the whole data is
        cached (which is the default) the form parser will use the cached
data to parse the form data. Please be generally aware of checking
the content length first in any case before calling this method
to avoid exhausting server memory.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
rv = getattr(self, '_cached_data', None)
if rv is None:
if parse_form_data:
self._load_form_data()
rv = self.stream.read()
if cache:
self._cached_data = rv
if as_text:
rv = rv.decode(self.charset, self.encoding_errors)
return rv
@cached_property
def form(self):
"""The form parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
self._load_form_data()
return self.form
@cached_property
def values(self):
"""Combined multi dict for :attr:`args` and :attr:`form`."""
args = []
for d in self.args, self.form:
if not isinstance(d, MultiDict):
d = MultiDict(d)
args.append(d)
return CombinedMultiDict(args)
@cached_property
def files(self):
""":class:`~werkzeug.datastructures.MultiDict` object containing
all uploaded files. Each key in :attr:`files` is the name from the
``<input type="file" name="">``. Each value in :attr:`files` is a
Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
Note that :attr:`files` will only contain data if the request method was
POST, PUT or PATCH and the ``<form>`` that posted to the request had
``enctype="multipart/form-data"``. It will be empty otherwise.
See the :class:`~werkzeug.datastructures.MultiDict` /
:class:`~werkzeug.datastructures.FileStorage` documentation for
more details about the used data structure.
"""
self._load_form_data()
return self.files
@cached_property
def cookies(self):
"""Read only access to the retrieved cookie values as dictionary."""
return parse_cookie(self.environ, self.charset,
self.encoding_errors,
cls=self.dict_storage_class)
@cached_property
def headers(self):
"""The headers from the WSGI environ as immutable
:class:`~werkzeug.datastructures.EnvironHeaders`.
"""
return EnvironHeaders(self.environ)
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will always include a leading slash,
even if the URL root is accessed.
"""
raw_path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
self.charset, self.encoding_errors)
return '/' + raw_path.lstrip('/')
@cached_property
def full_path(self):
"""Requested path as unicode, including the query string."""
return self.path + u'?' + to_unicode(self.query_string, self.url_charset)
@cached_property
def script_root(self):
"""The root path of the script without the trailing slash."""
raw_path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
self.charset, self.encoding_errors)
return raw_path.rstrip('/')
@cached_property
def url(self):
"""The reconstructed current URL"""
return get_current_url(self.environ,
trusted_hosts=self.trusted_hosts)
@cached_property
def base_url(self):
"""Like :attr:`url` but without the querystring"""
return get_current_url(self.environ, strip_querystring=True,
trusted_hosts=self.trusted_hosts)
@cached_property
def url_root(self):
"""The full URL root (with hostname), this is the application root."""
return get_current_url(self.environ, True,
trusted_hosts=self.trusted_hosts)
@cached_property
def host_url(self):
"""Just the host with scheme."""
return get_current_url(self.environ, host_only=True,
trusted_hosts=self.trusted_hosts)
@cached_property
def host(self):
"""Just the host including the port if available."""
return get_host(self.environ, trusted_hosts=self.trusted_hosts)
query_string = environ_property('QUERY_STRING', '', read_only=True,
load_func=wsgi_get_bytes, doc=
'''The URL parameters as raw bytestring.''')
method = environ_property('REQUEST_METHOD', 'GET', read_only=True, doc=
'''The transmission method. (For example ``'GET'`` or ``'POST'``).''')
@cached_property
def access_route(self):
"""If a forwarded header exists this is a list of all ip addresses
from the client ip to the last proxy server.
"""
if 'HTTP_X_FORWARDED_FOR' in self.environ:
addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
return self.list_storage_class([x.strip() for x in addr])
elif 'REMOTE_ADDR' in self.environ:
return self.list_storage_class([self.environ['REMOTE_ADDR']])
return self.list_storage_class()
@property
def remote_addr(self):
"""The remote address of the client."""
return self.environ.get('REMOTE_ADDR')
remote_user = environ_property('REMOTE_USER', doc='''
If the server supports user authentication, and the script is
protected, this attribute contains the username the user has
authenticated as.''')
scheme = environ_property('wsgi.url_scheme', doc='''
URL scheme (http or https).
.. versionadded:: 0.7''')
is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '')
.lower() == 'xmlhttprequest', doc='''
True if the request was triggered via a JavaScript XMLHttpRequest.
This only works with libraries that support the `X-Requested-With`
header and set it to "XMLHttpRequest". Libraries that do that are
prototype, jQuery and Mochikit and probably some more.''')
is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https',
doc='`True` if the request is secure.')
is_multithread = environ_property('wsgi.multithread', doc='''
boolean that is `True` if the application is served by
a multithreaded WSGI server.''')
is_multiprocess = environ_property('wsgi.multiprocess', doc='''
boolean that is `True` if the application is served by
a WSGI server that spawns multiple processes.''')
is_run_once = environ_property('wsgi.run_once', doc='''
boolean that is `True` if the application will be executed only
once in a process lifetime. This is the case for CGI for example,
        but it's not guaranteed that the execution only happens one time.''')
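# Illustrative sketch (not part of Werkzeug itself): a minimal use of BaseRequest
# built via from_values, exercising the attributes documented above. The URL and
# header values are made up for the example.
def _demo_base_request():  # pragma: no cover
    req = BaseRequest.from_values(
        '/search?q=werkzeug&lang=en',
        headers={'X-Requested-With': 'XMLHttpRequest'})
    assert req.path == u'/search'
    assert req.args['q'] == u'werkzeug'
    assert req.method == 'GET'
    assert req.is_xhr
    return req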
class BaseResponse(object):
"""Base response class. The most important fact about a response object
is that it's a regular WSGI application. It's initialized with a couple
of response parameters (headers, body, status code etc.) and will start a
valid WSGI response when called with the environ and start response
callable.
    Because it is itself a WSGI application, processing usually ends before the
actual response is sent to the server. This helps debugging systems
because they can catch all the exceptions before responses are started.
    Here is a small example WSGI application that takes advantage of the
response objects::
from werkzeug.wrappers import BaseResponse as Response
def index():
return Response('Index page')
def application(environ, start_response):
path = environ.get('PATH_INFO') or '/'
if path == '/':
response = index()
else:
response = Response('Not Found', status=404)
return response(environ, start_response)
    Like :class:`BaseRequest`, this object lacks a lot of functionality, which is
    implemented in mixins instead. This gives you better control over the actual
    API of your response objects, so you can create subclasses and add custom
functionality. A full featured response object is available as
:class:`Response` which implements a couple of useful mixins.
To enforce a new type of already existing responses you can use the
:meth:`force_type` method. This is useful if you're working with different
    subclasses of response objects and you want to post-process them with a
    known interface.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Response can be any kind of iterable or string. If it's a string it's
considered being an iterable with one item which is the string passed.
Headers can be a list of tuples or a
:class:`~werkzeug.datastructures.Headers` object.
Special note for `mimetype` and `content_type`: For most mime types
`mimetype` and `content_type` work the same, the difference affects
only 'text' mimetypes. If the mimetype passed with `mimetype` is a
mimetype starting with `text/`, the charset parameter of the response
object is appended to it. In contrast the `content_type` parameter is
always added as header unmodified.
.. versionchanged:: 0.5
the `direct_passthrough` parameter was added.
:param response: a string or response iterable.
:param status: a string with a status or an integer with the status code.
:param headers: a list of headers or a
:class:`~werkzeug.datastructures.Headers` object.
:param mimetype: the mimetype for the request. See notice above.
:param content_type: the content type for the request. See notice above.
:param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
called before iteration which makes it
                               possible to pass special iterators through
unchanged (see :func:`wrap_file` for more
details.)
"""
#: the charset of the response.
charset = 'utf-8'
#: the default status if none is provided.
default_status = 200
#: the default mimetype if none is provided.
default_mimetype = 'text/plain'
#: if set to `False` accessing properties on the response object will
#: not try to consume the response iterator and convert it into a list.
#:
#: .. versionadded:: 0.6.2
#:
#: That attribute was previously called `implicit_seqence_conversion`.
#: (Notice the typo). If you did use this feature, you have to adapt
#: your code to the name change.
implicit_sequence_conversion = True
#: Should this response object correct the location header to be RFC
#: conformant? This is true by default.
#:
#: .. versionadded:: 0.8
autocorrect_location_header = True
#: Should this response object automatically set the content-length
#: header if possible? This is true by default.
#:
#: .. versionadded:: 0.8
automatically_set_content_length = True
def __init__(self, response=None, status=None, headers=None,
mimetype=None, content_type=None, direct_passthrough=False):
if isinstance(headers, Headers):
self.headers = headers
elif not headers:
self.headers = Headers()
else:
self.headers = Headers(headers)
if content_type is None:
if mimetype is None and 'content-type' not in self.headers:
mimetype = self.default_mimetype
if mimetype is not None:
mimetype = get_content_type(mimetype, self.charset)
content_type = mimetype
if content_type is not None:
self.headers['Content-Type'] = content_type
if status is None:
status = self.default_status
if isinstance(status, integer_types):
self.status_code = status
else:
self.status = status
self.direct_passthrough = direct_passthrough
self._on_close = []
# we set the response after the headers so that if a class changes
# the charset attribute, the data is set in the correct charset.
if response is None:
self.response = []
elif isinstance(response, (text_type, bytes, bytearray)):
self.set_data(response)
else:
self.response = response
def call_on_close(self, func):
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
def __repr__(self):
if self.is_sequence:
body_info = '%d bytes' % sum(map(len, self.iter_encoded()))
else:
body_info = self.is_streamed and 'streamed' or 'likely-streamed'
return '<%s %s [%s]>' % (
self.__class__.__name__,
body_info,
self.status
)
@classmethod
def force_type(cls, response, environ=None):
"""Enforce that the WSGI response is a response object of the current
type. Werkzeug will use the :class:`BaseResponse` internally in many
situations like the exceptions. If you call :meth:`get_response` on an
exception you will get back a regular :class:`BaseResponse` object, even
if you are using a custom subclass.
This method can enforce a given response type, and it will also
convert arbitrary WSGI callables into response objects if an environ
is provided::
# convert a Werkzeug response object into an instance of the
# MyResponseClass subclass.
response = MyResponseClass.force_type(response)
# convert any WSGI application into a response object
response = MyResponseClass.force_type(response, environ)
This is especially useful if you want to post-process responses in
the main dispatcher and use functionality provided by your subclass.
Keep in mind that this will modify response objects in place if
possible!
:param response: a response object or wsgi application.
:param environ: a WSGI environment object.
:return: a response object.
"""
if not isinstance(response, BaseResponse):
if environ is None:
raise TypeError('cannot convert WSGI application into '
'response objects without an environ')
response = BaseResponse(*_run_wsgi_app(response, environ))
response.__class__ = cls
return response
@classmethod
def from_app(cls, app, environ, buffered=False):
"""Create a new response object from an application output. This
works best if you pass it an application that returns a generator all
the time. Sometimes applications may use the `write()` callable
returned by the `start_response` function. This tries to resolve such
edge cases automatically. But if you don't get the expected output
you should set `buffered` to `True` which enforces buffering.
:param app: the WSGI application to execute.
:param environ: the WSGI environment to execute against.
:param buffered: set to `True` to enforce buffering.
:return: a response object.
"""
return cls(*_run_wsgi_app(app, environ, buffered))
def _get_status_code(self):
return self._status_code
def _set_status_code(self, code):
self._status_code = code
try:
self._status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper())
except KeyError:
self._status = '%d UNKNOWN' % code
status_code = property(_get_status_code, _set_status_code,
doc='The HTTP Status code as number')
del _get_status_code, _set_status_code
def _get_status(self):
return self._status
def _set_status(self, value):
self._status = to_native(value)
try:
self._status_code = int(self._status.split(None, 1)[0])
except ValueError:
self._status_code = 0
self._status = '0 %s' % self._status
status = property(_get_status, _set_status, doc='The HTTP Status code')
del _get_status, _set_status
def get_data(self, as_text=False):
"""The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b''.join(self.iter_encoded())
if as_text:
rv = rv.decode(self.charset)
return rv
def set_data(self, value):
"""Sets a new string as response. The value set must either by a
unicode or bytestring. If a unicode string is set it's encoded
automatically to the charset of the response (utf-8 by default).
.. versionadded:: 0.9
"""
        # if a unicode string is set, it's encoded directly so that we
# can set the content length
if isinstance(value, text_type):
value = value.encode(self.charset)
else:
value = bytes(value)
self.response = [value]
if self.automatically_set_content_length:
self.headers['Content-Length'] = str(len(value))
data = property(get_data, set_data, doc='''
A descriptor that calls :meth:`get_data` and :meth:`set_data`. This
should not be used and will eventually get deprecated.
''')
def calculate_content_length(self):
"""Returns the content length if available or `None` otherwise."""
try:
self._ensure_sequence()
except RuntimeError:
return None
return sum(len(x) for x in self.response)
def _ensure_sequence(self, mutable=False):
"""This method can be called by methods that need a sequence. If
`mutable` is true, it will also ensure that the response sequence
is a standard Python list.
.. versionadded:: 0.6
"""
if self.is_sequence:
# if we need a mutable object, we ensure it's a list.
if mutable and not isinstance(self.response, list):
self.response = list(self.response)
return
if self.direct_passthrough:
raise RuntimeError('Attempted implicit sequence conversion '
'but the response object is in direct '
'passthrough mode.')
if not self.implicit_sequence_conversion:
raise RuntimeError('The response object required the iterable '
'to be a sequence, but the implicit '
'conversion was disabled. Call '
'make_sequence() yourself.')
self.make_sequence()
def make_sequence(self):
"""Converts the response iterator in a list. By default this happens
automatically if required. If `implicit_sequence_conversion` is
disabled, this method is not automatically called and some properties
might raise exceptions. This also encodes all the items.
.. versionadded:: 0.6
"""
if not self.is_sequence:
# if we consume an iterable we have to ensure that the close
# method of the iterable is called if available when we tear
# down the response
close = getattr(self.response, 'close', None)
self.response = list(self.iter_encoded())
if close is not None:
self.call_on_close(close)
def iter_encoded(self):
"""Iter the response encoded with the encoding of the response.
If the response object is invoked as WSGI application the return
value of this method is used as application iterator unless
:attr:`direct_passthrough` was activated.
"""
charset = self.charset
if __debug__:
_warn_if_string(self.response)
# Encode in a separate function so that self.response is fetched
# early. This allows us to wrap the response with the return
# value from get_app_iter or iter_encoded.
return _iter_encoded(self.response, self.charset)
def set_cookie(self, key, value='', max_age=None, expires=None,
path='/', domain=None, secure=None, httponly=False):
"""Sets a cookie. The parameters are the same as in the cookie `Morsel`
object in the Python standard library but it accepts unicode data, too.
:param key: the key (name) of the cookie to be set.
:param value: the value of the cookie.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session.
:param expires: should be a `datetime` object or UNIX timestamp.
:param domain: if you want to set a cross-domain cookie. For example,
``domain=".example.com"`` will set a cookie that is
readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param path: limits the cookie to a given path, per default it will
span the whole domain.
"""
self.headers.add('Set-Cookie', dump_cookie(key, value, max_age,
expires, path, domain, secure, httponly,
self.charset))
def delete_cookie(self, key, path='/', domain=None):
"""Delete a cookie. Fails silently if key doesn't exist.
:param key: the key (name) of the cookie to be deleted.
:param path: if the cookie that should be deleted was limited to a
path, the path has to be defined here.
:param domain: if the cookie that should be deleted was limited to a
domain, that domain has to be defined here.
"""
self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
@property
def is_streamed(self):
"""If the response is streamed (the response is not an iterable with
        length information) this property is `True`. In this case streamed
means that there is no information about the number of iterations.
This is usually `True` if a generator is passed to the response object.
This is useful for checking before applying some sort of post
filtering that should not take place for streamed responses.
"""
try:
len(self.response)
except (TypeError, AttributeError):
return True
return False
@property
def is_sequence(self):
"""If the iterator is buffered, this property will be `True`. A
response object will consider an iterator to be buffered if the
response attribute is a list or tuple.
.. versionadded:: 0.6
"""
return isinstance(self.response, (tuple, list))
def close(self):
"""Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
"""
if hasattr(self.response, 'close'):
self.response.close()
for func in self._on_close:
func()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def freeze(self):
"""Call this method if you want to make your response object ready for
being pickled. This buffers the generator if there is one. It will
also set the `Content-Length` header to the length of the body.
.. versionchanged:: 0.6
The `Content-Length` header is now set.
"""
# we explicitly set the length to a list of the *encoded* response
# iterator. Even if the implicit sequence conversion is disabled.
self.response = list(self.iter_encoded())
self.headers['Content-Length'] = str(sum(map(len, self.response)))
def get_wsgi_headers(self, environ):
"""This is automatically called right before the response is started
and returns headers modified for the given environment. It returns a
copy of the headers from the response with some modifications applied
if necessary.
For example the location header (if present) is joined with the root
URL of the environment. Also the content length is automatically set
to zero here for certain status codes.
.. versionchanged:: 0.6
Previously that function was called `fix_headers` and modified
the response object in place. Also since 0.6, IRIs in location
and content-location headers are handled properly.
Also starting with 0.6, Werkzeug will attempt to set the content
length if it is able to figure it out on its own. This is the
case if all the strings in the response iterable are already
encoded and the iterable is buffered.
:param environ: the WSGI environment of the request.
:return: returns a new :class:`~werkzeug.datastructures.Headers`
object.
"""
headers = Headers(self.headers)
location = None
content_location = None
content_length = None
status = self.status_code
        # Iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is called for each response, this gives us a
        # tiny speedup.
for key, value in headers:
ikey = key.lower()
if ikey == u'location':
location = value
elif ikey == u'content-location':
content_location = value
elif ikey == u'content-length':
content_length = value
# make sure the location header is an absolute URL
if location is not None:
old_location = location
if isinstance(location, text_type):
location = iri_to_uri(location)
if self.autocorrect_location_header:
current_url = get_current_url(environ, root_only=True)
if isinstance(current_url, text_type):
current_url = iri_to_uri(current_url)
location = url_join(current_url, location)
if location != old_location:
headers['Location'] = location
# make sure the content location is a URL
if content_location is not None and \
isinstance(content_location, text_type):
headers['Content-Location'] = iri_to_uri(content_location)
# remove entity headers and set content length to zero if needed.
# Also update content_length accordingly so that the automatic
# content length detection does not trigger in the following
# code.
if 100 <= status < 200 or status == 204:
headers['Content-Length'] = content_length = u'0'
elif status == 304:
remove_entity_headers(headers)
# if we can determine the content length automatically, we
# should try to do that. But only if this does not involve
# flattening the iterator or encoding of unicode strings in
# the response. We however should not do that if we have a 304
# response.
if self.automatically_set_content_length and \
self.is_sequence and content_length is None and status != 304:
try:
content_length = sum(len(to_bytes(x, 'ascii')) for x in self.response)
except UnicodeError:
# aha, something non-bytestringy in there, too bad, we
# can't safely figure out the length of the response.
pass
else:
headers['Content-Length'] = str(content_length)
return headers
def get_app_iter(self, environ):
"""Returns the application iterator for the given environ. Depending
on the request method and the current status code the return value
might be an empty response rather than the one from the response.
If the request method is `HEAD` or the status code is in a range
where the HTTP specification requires an empty response, an empty
iterable is returned.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: a response iterable.
"""
status = self.status_code
if environ['REQUEST_METHOD'] == 'HEAD' or \
100 <= status < 200 or status in (204, 304):
iterable = ()
elif self.direct_passthrough:
if __debug__:
_warn_if_string(self.response)
return self.response
else:
iterable = self.iter_encoded()
return ClosingIterator(iterable, self.close)
def get_wsgi_response(self, environ):
"""Returns the final WSGI response as tuple. The first item in
the tuple is the application iterator, the second the status and
the third the list of headers. The response returned is created
specially for the given environment. For example if the request
method in the WSGI environment is ``'HEAD'`` the response will
be empty and only the headers and status code will be present.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: an ``(app_iter, status, headers)`` tuple.
"""
headers = self.get_wsgi_headers(environ)
app_iter = self.get_app_iter(environ)
return app_iter, self.status, headers.to_wsgi_list()
def __call__(self, environ, start_response):
"""Process this response as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
:return: an application iterator
"""
app_iter, status, headers = self.get_wsgi_response(environ)
start_response(status, headers)
return app_iter
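# Illustrative sketch (not part of Werkzeug itself): a minimal WSGI callable
# built around BaseResponse, showing how __call__ ties get_wsgi_response()
# to start_response. The greeting text is purely hypothetical.
def _example_wsgi_app(environ, start_response):
    # BaseResponse encodes the text body with ``charset`` and, because
    # automatically_set_content_length is enabled, sets Content-Length too.
    response = BaseResponse('Hello, WSGI!', status=200, mimetype='text/plain')
    # Calling the response object returns the application iterator.
    return response(environ, start_response)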
class AcceptMixin(object):
"""A mixin for classes with an :attr:`~BaseResponse.environ` attribute
to get all the HTTP accept headers as
:class:`~werkzeug.datastructures.Accept` objects (or subclasses
thereof).
"""
@cached_property
def accept_mimetypes(self):
"""List of mimetypes this client supports as
:class:`~werkzeug.datastructures.MIMEAccept` object.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT'), MIMEAccept)
@cached_property
def accept_charsets(self):
"""List of charsets this client supports as
:class:`~werkzeug.datastructures.CharsetAccept` object.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_CHARSET'),
CharsetAccept)
@cached_property
def accept_encodings(self):
"""List of encodings this client accepts. Encodings in a HTTP term
are compression encodings such as gzip. For charsets have a look at
:attr:`accept_charset`.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_ENCODING'))
@cached_property
def accept_languages(self):
"""List of languages this client accepts as
:class:`~werkzeug.datastructures.LanguageAccept` object.
.. versionchanged 0.5
In previous versions this was a regular
:class:`~werkzeug.datastructures.Accept` object.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_LANGUAGE'),
LanguageAccept)
class ETagRequestMixin(object):
"""Add entity tag and cache descriptors to a request object or object with
a WSGI environment available as :attr:`~BaseRequest.environ`. This not
only provides access to etags but also to the cache control header.
"""
@cached_property
def cache_control(self):
"""A :class:`~werkzeug.datastructures.RequestCacheControl` object
for the incoming cache control headers.
"""
cache_control = self.environ.get('HTTP_CACHE_CONTROL')
return parse_cache_control_header(cache_control, None,
RequestCacheControl)
@cached_property
def if_match(self):
"""An object containing all the etags in the `If-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.environ.get('HTTP_IF_MATCH'))
@cached_property
def if_none_match(self):
"""An object containing all the etags in the `If-None-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.environ.get('HTTP_IF_NONE_MATCH'))
@cached_property
def if_modified_since(self):
"""The parsed `If-Modified-Since` header as datetime object."""
return parse_date(self.environ.get('HTTP_IF_MODIFIED_SINCE'))
@cached_property
def if_unmodified_since(self):
"""The parsed `If-Unmodified-Since` header as datetime object."""
return parse_date(self.environ.get('HTTP_IF_UNMODIFIED_SINCE'))
@cached_property
def if_range(self):
"""The parsed `If-Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.IfRange`
"""
return parse_if_range_header(self.environ.get('HTTP_IF_RANGE'))
@cached_property
def range(self):
"""The parsed `Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.Range`
"""
return parse_range_header(self.environ.get('HTTP_RANGE'))
class UserAgentMixin(object):
"""Adds a `user_agent` attribute to the request object which contains the
parsed user agent of the browser that triggered the request as a
:class:`~werkzeug.useragents.UserAgent` object.
"""
@cached_property
def user_agent(self):
"""The current user agent."""
from werkzeug.useragents import UserAgent
return UserAgent(self.environ)
class AuthorizationMixin(object):
"""Adds an :attr:`authorization` property that represents the parsed
value of the `Authorization` header as
:class:`~werkzeug.datastructures.Authorization` object.
"""
@cached_property
def authorization(self):
"""The `Authorization` object in parsed form."""
header = self.environ.get('HTTP_AUTHORIZATION')
return parse_authorization_header(header)
class StreamOnlyMixin(object):
"""If mixed in before the request object this will change the bahavior
of it to disable handling of form parsing. This disables the
:attr:`files`, :attr:`form` attributes and will just provide a
:attr:`stream` attribute that however is always available.
.. versionadded:: 0.9
"""
disable_data_descriptor = True
want_form_data_parsed = False
class ETagResponseMixin(object):
"""Adds extra functionality to a response object for etag and cache
handling. This mixin requires an object with at least a `headers`
object that implements a dict like interface similar to
:class:`~werkzeug.datastructures.Headers`.
If you want the :meth:`freeze` method to automatically add an etag, you
have to mixin this method before the response base class. The default
response class does not do that.
"""
@property
def cache_control(self):
"""The Cache-Control general-header field is used to specify
directives that MUST be obeyed by all caching mechanisms along the
request/response chain.
"""
def on_update(cache_control):
if not cache_control and 'cache-control' in self.headers:
del self.headers['cache-control']
elif cache_control:
self.headers['Cache-Control'] = cache_control.to_header()
return parse_cache_control_header(self.headers.get('cache-control'),
on_update,
ResponseCacheControl)
def make_conditional(self, request_or_environ):
"""Make the response conditional to the request. This method works
best if an etag was defined for the response already. The `add_etag`
method can be used to do that. If called without etag just the date
header is set.
This does nothing if the request method in the request or environ is
anything but GET or HEAD.
It does not remove the body of the response because that's something
the :meth:`__call__` function does for us automatically.
Returns self so that you can do ``return resp.make_conditional(req)``
but modifies the object in-place.
:param request_or_environ: a request object or WSGI environment to be
used to make the response conditional
against.
"""
environ = _get_environ(request_or_environ)
if environ['REQUEST_METHOD'] in ('GET', 'HEAD'):
# if the date is not in the headers, add it now. We however
# will not override an already existing header. Unfortunately
            # this header will be overridden by many WSGI servers including
# wsgiref.
if 'date' not in self.headers:
self.headers['Date'] = http_date()
if 'content-length' not in self.headers:
length = self.calculate_content_length()
if length is not None:
self.headers['Content-Length'] = length
if not is_resource_modified(environ, self.headers.get('etag'), None,
self.headers.get('last-modified')):
self.status_code = 304
return self
def add_etag(self, overwrite=False, weak=False):
"""Add an etag for the current response if there is none yet."""
if overwrite or 'etag' not in self.headers:
self.set_etag(generate_etag(self.get_data()), weak)
def set_etag(self, etag, weak=False):
"""Set the etag, and override the old one if there was one."""
self.headers['ETag'] = quote_etag(etag, weak)
def get_etag(self):
"""Return a tuple in the form ``(etag, is_weak)``. If there is no
ETag the return value is ``(None, None)``.
"""
return unquote_etag(self.headers.get('ETag'))
def freeze(self, no_etag=False):
"""Call this method if you want to make your response object ready for
        pickling. This buffers the generator if there is one. This also
sets the etag unless `no_etag` is set to `True`.
"""
if not no_etag:
self.add_etag()
super(ETagResponseMixin, self).freeze()
accept_ranges = header_property('Accept-Ranges', doc='''
The `Accept-Ranges` header. Even though the name would indicate
that multiple values are supported, it must be one string token only.
The values ``'bytes'`` and ``'none'`` are common.
.. versionadded:: 0.7''')
def _get_content_range(self):
def on_update(rng):
if not rng:
del self.headers['content-range']
else:
self.headers['Content-Range'] = rng.to_header()
rv = parse_content_range_header(self.headers.get('content-range'),
on_update)
# always provide a content range object to make the descriptor
# more user friendly. It provides an unset() method that can be
# used to remove the header quickly.
if rv is None:
rv = ContentRange(None, None, None, on_update=on_update)
return rv
def _set_content_range(self, value):
if not value:
del self.headers['content-range']
elif isinstance(value, string_types):
self.headers['Content-Range'] = value
else:
self.headers['Content-Range'] = value.to_header()
content_range = property(_get_content_range, _set_content_range, doc='''
The `Content-Range` header as
:class:`~werkzeug.datastructures.ContentRange` object. Even if the
        header is not set it will provide such an object for easier
manipulation.
.. versionadded:: 0.7''')
del _get_content_range, _set_content_range
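# Illustrative sketch (not part of Werkzeug itself): with ETagResponseMixin mixed
# into a response class (such as the Response class defined further below),
# add_etag() plus make_conditional() lets repeated GETs be answered with
# 304 Not Modified. The ``request`` argument stands for any request object or
# WSGI environ; ``payload`` is a hypothetical response body.
def _example_conditional_response(request, payload):
    response = Response(payload, mimetype='text/plain')
    response.add_etag()                        # ETag derived from the body
    return response.make_conditional(request)  # may switch the status to 304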
class ResponseStream(object):
"""A file descriptor like object used by the :class:`ResponseStreamMixin` to
represent the body of the stream. It directly pushes into the response
iterable of the response object.
"""
mode = 'wb+'
def __init__(self, response):
self.response = response
self.closed = False
def write(self, value):
if self.closed:
raise ValueError('I/O operation on closed file')
self.response._ensure_sequence(mutable=True)
self.response.response.append(value)
def writelines(self, seq):
for item in seq:
self.write(item)
def close(self):
self.closed = True
def flush(self):
if self.closed:
raise ValueError('I/O operation on closed file')
def isatty(self):
if self.closed:
raise ValueError('I/O operation on closed file')
return False
@property
def encoding(self):
return self.response.charset
class ResponseStreamMixin(object):
"""Mixin for :class:`BaseRequest` subclasses. Classes that inherit from
this mixin will automatically get a :attr:`stream` property that provides
a write-only interface to the response iterable.
"""
@cached_property
def stream(self):
"""The response iterable as write-only stream."""
return ResponseStream(self)
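# Illustrative sketch (not part of Werkzeug itself): once ResponseStreamMixin is
# mixed into a response class (as in the Response class further below), the
# ``stream`` attribute is a write-only file-like view onto the response body.
def _example_stream_usage(response):
    # Each write() appends to the underlying response sequence.
    response.stream.write(b'chunk one, ')
    response.stream.write(b'chunk two')
    return response.get_data()  # b'chunk one, chunk two' for an initially empty body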
class CommonRequestDescriptorsMixin(object):
"""A mixin for :class:`BaseRequest` subclasses. Request objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
.. versionadded:: 0.5
"""
content_type = environ_property('CONTENT_TYPE', doc='''
The Content-Type entity-header field indicates the media type of
the entity-body sent to the recipient or, in the case of the HEAD
method, the media type that would have been sent had the request
been a GET.''')
@cached_property
def content_length(self):
"""The Content-Length entity-header field indicates the size of the
entity-body in bytes or, in the case of the HEAD method, the size of
the entity-body that would have been sent had the request been a
GET.
"""
return get_content_length(self.environ)
content_encoding = environ_property('HTTP_CONTENT_ENCODING', doc='''
The Content-Encoding entity-header field is used as a modifier to the
media-type. When present, its value indicates what additional content
codings have been applied to the entity-body, and thus what decoding
mechanisms must be applied in order to obtain the media-type
referenced by the Content-Type header field.
.. versionadded:: 0.9''')
content_md5 = environ_property('HTTP_CONTENT_MD5', doc='''
The Content-MD5 entity-header field, as defined in RFC 1864, is an
MD5 digest of the entity-body for the purpose of providing an
end-to-end message integrity check (MIC) of the entity-body. (Note:
a MIC is good for detecting accidental modification of the
entity-body in transit, but is not proof against malicious attacks.)
.. versionadded:: 0.9''')
referrer = environ_property('HTTP_REFERER', doc='''
The Referer[sic] request-header field allows the client to specify,
for the server's benefit, the address (URI) of the resource from which
the Request-URI was obtained (the "referrer", although the header
field is misspelled).''')
date = environ_property('HTTP_DATE', None, parse_date, doc='''
The Date general-header field represents the date and time at which
the message was originated, having the same semantics as orig-date
in RFC 822.''')
max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc='''
The Max-Forwards request-header field provides a mechanism with the
TRACE and OPTIONS methods to limit the number of proxies or gateways
that can forward the request to the next inbound server.''')
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.environ.get('CONTENT_TYPE', ''))
@property
def mimetype(self):
"""Like :attr:`content_type` but without parameters (eg, without
charset, type etc.). For example if the content
type is ``text/html; charset=utf-8`` the mimetype would be
``'text/html'``.
"""
self._parse_content_type()
return self._parsed_content_type[0]
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
"""
self._parse_content_type()
return self._parsed_content_type[1]
@cached_property
def pragma(self):
"""The Pragma general-header field is used to include
implementation-specific directives that might apply to any recipient
along the request/response chain. All pragma directives specify
optional behavior from the viewpoint of the protocol; however, some
systems MAY require that behavior be consistent with the directives.
"""
return parse_set_header(self.environ.get('HTTP_PRAGMA', ''))
class CommonResponseDescriptorsMixin(object):
"""A mixin for :class:`BaseResponse` subclasses. Response objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
"""
def _get_mimetype(self):
ct = self.headers.get('content-type')
if ct:
return ct.split(';')[0].strip()
def _set_mimetype(self, value):
self.headers['Content-Type'] = get_content_type(value, self.charset)
def _get_mimetype_params(self):
def on_update(d):
self.headers['Content-Type'] = \
dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get('content-type', ''))[1]
return CallbackDict(d, on_update)
mimetype = property(_get_mimetype, _set_mimetype, doc='''
The mimetype (content type without charset etc.)''')
mimetype_params = property(_get_mimetype_params, doc='''
The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
''')
location = header_property('Location', doc='''
The Location response-header field is used to redirect the recipient
to a location other than the Request-URI for completion of the request
or identification of a new resource.''')
age = header_property('Age', None, parse_date, http_date, doc='''
The Age response-header field conveys the sender's estimate of the
amount of time since the response (or its revalidation) was
generated at the origin server.
Age values are non-negative decimal integers, representing time in
seconds.''')
content_type = header_property('Content-Type', doc='''
The Content-Type entity-header field indicates the media type of the
entity-body sent to the recipient or, in the case of the HEAD method,
the media type that would have been sent had the request been a GET.
''')
content_length = header_property('Content-Length', None, int, str, doc='''
The Content-Length entity-header field indicates the size of the
entity-body, in decimal number of OCTETs, sent to the recipient or,
in the case of the HEAD method, the size of the entity-body that would
have been sent had the request been a GET.''')
content_location = header_property('Content-Location', doc='''
The Content-Location entity-header field MAY be used to supply the
resource location for the entity enclosed in the message when that
entity is accessible from a location separate from the requested
resource's URI.''')
content_encoding = header_property('Content-Encoding', doc='''
The Content-Encoding entity-header field is used as a modifier to the
media-type. When present, its value indicates what additional content
codings have been applied to the entity-body, and thus what decoding
mechanisms must be applied in order to obtain the media-type
referenced by the Content-Type header field.''')
content_md5 = header_property('Content-MD5', doc='''
The Content-MD5 entity-header field, as defined in RFC 1864, is an
MD5 digest of the entity-body for the purpose of providing an
end-to-end message integrity check (MIC) of the entity-body. (Note:
a MIC is good for detecting accidental modification of the
entity-body in transit, but is not proof against malicious attacks.)
''')
date = header_property('Date', None, parse_date, http_date, doc='''
The Date general-header field represents the date and time at which
the message was originated, having the same semantics as orig-date
in RFC 822.''')
expires = header_property('Expires', None, parse_date, http_date, doc='''
The Expires entity-header field gives the date/time after which the
response is considered stale. A stale cache entry may not normally be
returned by a cache.''')
last_modified = header_property('Last-Modified', None, parse_date,
http_date, doc='''
The Last-Modified entity-header field indicates the date and time at
which the origin server believes the variant was last modified.''')
def _get_retry_after(self):
value = self.headers.get('retry-after')
if value is None:
return
elif value.isdigit():
return datetime.utcnow() + timedelta(seconds=int(value))
return parse_date(value)
def _set_retry_after(self, value):
if value is None:
if 'retry-after' in self.headers:
del self.headers['retry-after']
return
elif isinstance(value, datetime):
value = http_date(value)
else:
value = str(value)
self.headers['Retry-After'] = value
retry_after = property(_get_retry_after, _set_retry_after, doc='''
The Retry-After response-header field can be used with a 503 (Service
Unavailable) response to indicate how long the service is expected
to be unavailable to the requesting client.
Time in seconds until expiration or date.''')
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self.headers:
del self.headers[name]
elif header_set:
self.headers[name] = header_set.to_header()
return parse_set_header(self.headers.get(name), on_update)
def fset(self, value):
if not value:
del self.headers[name]
elif isinstance(value, string_types):
self.headers[name] = value
else:
self.headers[name] = dump_header(value)
return property(fget, fset, doc=doc)
vary = _set_property('Vary', doc='''
The Vary field value indicates the set of request-header fields that
fully determines, while the response is fresh, whether a cache is
permitted to use the response to reply to a subsequent request
without revalidation.''')
content_language = _set_property('Content-Language', doc='''
The Content-Language entity-header field describes the natural
language(s) of the intended audience for the enclosed entity. Note
that this might not be equivalent to all the languages used within
the entity-body.''')
allow = _set_property('Allow', doc='''
The Allow entity-header field lists the set of methods supported
by the resource identified by the Request-URI. The purpose of this
field is strictly to inform the recipient of valid methods
associated with the resource. An Allow header field MUST be
present in a 405 (Method Not Allowed) response.''')
del _set_property, _get_mimetype, _set_mimetype, _get_retry_after, \
_set_retry_after
class WWWAuthenticateMixin(object):
"""Adds a :attr:`www_authenticate` property to a response object."""
@property
def www_authenticate(self):
"""The `WWW-Authenticate` header in a parsed form."""
def on_update(www_auth):
if not www_auth and 'www-authenticate' in self.headers:
del self.headers['www-authenticate']
elif www_auth:
self.headers['WWW-Authenticate'] = www_auth.to_header()
header = self.headers.get('www-authenticate')
return parse_www_authenticate_header(header, on_update)
class Request(BaseRequest, AcceptMixin, ETagRequestMixin,
UserAgentMixin, AuthorizationMixin,
CommonRequestDescriptorsMixin):
"""Full featured request object implementing the following mixins:
- :class:`AcceptMixin` for accept header parsing
- :class:`ETagRequestMixin` for etag and cache control handling
- :class:`UserAgentMixin` for user agent introspection
- :class:`AuthorizationMixin` for http auth handling
- :class:`CommonRequestDescriptorsMixin` for common headers
"""
class PlainRequest(StreamOnlyMixin, Request):
"""A request object without special form parsing capabilities.
.. versionadded:: 0.9
"""
class Response(BaseResponse, ETagResponseMixin, ResponseStreamMixin,
CommonResponseDescriptorsMixin,
WWWAuthenticateMixin):
"""Full featured response object implementing the following mixins:
- :class:`ETagResponseMixin` for etag and cache control handling
- :class:`ResponseStreamMixin` to add support for the `stream` property
- :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
- :class:`WWWAuthenticateMixin` for HTTP authentication support
"""
| danimajo/pineapple_pdf | werkzeug/wrappers.py | Python | mit | 76,131 | 0.000276 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 University of Dundee & Open Microscopy Environment
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from builtins import str
from builtins import range
from builtins import object
import os
import uuid
import shutil
import logging
import tempfile
from scc.git import get_github, get_token_or_user
from subprocess import Popen
sandbox_url = "https://github.com/ome/snoopys-sandbox.git"
class SandboxTest(object):
def setup_method(self, method):
# Basic logging configuration so if a test fails we can see
# the statements at WARN or ERROR at least.
logging.basicConfig()
self.method = method.__name__
self.cwd = os.getcwd()
self.token = get_token_or_user(local=False)
self.gh = get_github(self.token, dont_ask=True)
self.user = self.gh.get_login()
self.path = tempfile.mkdtemp("", "sandbox-", ".")
self.path = os.path.abspath(self.path)
try:
with open(os.devnull, 'w') as dev_null:
p = Popen(["git", "clone", "-q", sandbox_url, self.path],
stdout=dev_null, stderr=dev_null)
assert p.wait() == 0
self.sandbox = self.gh.git_repo(self.path)
self.origin_remote = "origin"
except Exception:
try:
shutil.rmtree(self.path)
finally:
# Return to cwd regardless.
os.chdir(self.cwd)
raise
# If we succeed, then we change to this dir.
os.chdir(self.path)
def shortDescription(self):
return None
def init_submodules(self):
"""
Fetch submodules after cloning the repository
"""
try:
with open(os.devnull, 'w') as dev_null:
p = Popen(["git", "submodule", "update", "--init"],
stdout=dev_null, stderr=dev_null)
assert p.wait() == 0
except Exception:
os.chdir(self.path)
raise
def uuid(self):
"""
Return a string representing a uuid.uuid4
"""
return str(uuid.uuid4())
def fake_branch(self, head="master", commits=None):
"""
Return a local branch with a list of commits, defaults to a single
commit adding a unique file
"""
name = self.uuid()
if commits is None:
commits = [(name, "hi")]
self.sandbox.new_branch(name, head=head)
for n in range(len(commits)):
fname, txt = commits[n]
fname = os.path.join(self.path, fname)
with open(fname, 'w') as f:
f.write(txt)
self.sandbox.add(fname)
self.sandbox.commit("%d: Writing %s" % (n, name))
self.sandbox.get_status()
return name
def add_remote(self):
"""
Add the remote of the authenticated Github user
"""
if self.user not in self.sandbox.list_remotes():
remote_url = "https://%s:x-oauth-basic@github.com/%s/%s.git" \
% (self.token, self.user, self.sandbox.origin.name)
self.sandbox.add_remote(self.user, remote_url)
def rename_origin_remote(self, new_name):
"""
Rename the remote used for the upstream repository
"""
self.sandbox.call("git", "remote", "rename", self.origin_remote,
new_name)
self.origin_remote = new_name
def push_branch(self, branch):
"""
Push a local branch to GitHub
"""
self.add_remote()
self.sandbox.push_branch(branch, remote=self.user)
def open_pr(self, branch, base, description=None):
"""
Push a local branch and open a PR against the selected base
"""
self.push_branch(branch)
if description is None:
description = ("This is a call to Sandbox.open_pr by %s" %
self.method)
new_pr = self.sandbox.origin.open_pr(
title="test %s" % branch,
description=description,
base=base,
head="%s:%s" % (self.user, branch))
return new_pr
def teardown_method(self, method):
try:
self.sandbox.cleanup()
finally:
try:
shutil.rmtree(self.path)
finally:
# Return to cwd regardless.
os.chdir(self.cwd)
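# Illustrative sketch only: the usual call sequence for the helpers above inside
# a test body. Running it needs network access, a GitHub token discoverable by
# get_token_or_user() and push rights on the sandbox repository, so it is shown
# as a plain helper and never invoked automatically.
def _example_sandbox_workflow(sandbox_test):
    """Create a throw-away branch and open a PR against master (hypothetical)."""
    branch = sandbox_test.fake_branch(head="master")  # one commit adding a unique file
    pr = sandbox_test.open_pr(branch, base="master")  # pushes the branch, then opens the PR
    return pr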
| sbesson/snoopycrimecop | test/integration/Sandbox.py | Python | gpl-2.0 | 5,211 | 0 |
from django.conf import settings
from images.models import S3Connection
from shutil import copyfileobj
import tinys3
import os
import urllib
class LocalStorage(object):
def __init__(self, filename):
self.filename = filename
def get_file_data(self):
"""
Returns the raw data for the specified file
"""
image_path = os.path.join(settings.MEDIA_ROOT, self.filename)
        # Use a context manager so the file handle is closed promptly.
        with open(image_path, 'r') as image_file:
            data = image_file.read()
        return data
def get_remote_path(self):
"""
Builds a relative remote path by combining the MEDIA_URL setting and the filename
"""
return '%s%s' % (settings.MEDIA_URL, self.filename)
def store(self, file_instance, content_type=None):
"""
Copy over the `file_instance` to the local storage
"""
image_path = os.path.join(settings.MEDIA_ROOT, self.filename)
with open(image_path, 'w') as fw:
copyfileobj(file_instance, fw)
@staticmethod
def create_argument_slug(arguments_dict):
"""
Converts an arguments dictionary into a string that can be stored in a filename
"""
# TODO: is there a possible bug if an invalid key/value is presented?
args_list = ['%s-%s' % (key, value) for key, value in arguments_dict.items()]
return '--'.join(args_list)
class S3Storage(LocalStorage):
def __init__(self, *args, **kwargs):
"""
Overrides the LocalStorage and initializes a shared S3 connection
"""
super(S3Storage, self).__init__(*args, **kwargs)
self.conn = tinys3.Connection(self.S3_ACCESS_KEY, self.S3_SECRET_KEY, default_bucket=self.S3_BUCKET, tls=True)
def get_remote_path(self):
"""
Returns an absolute remote path for the filename from the S3 bucket
"""
return 'https://%s.%s/%s' % (self.conn.default_bucket, self.conn.endpoint, self.filename)
def get_file_data(self):
"""
Returns the raw data for the specific file, downloading it from S3
"""
path = self.get_remote_path()
data = urllib.urlopen(path).read()
return data
def store(self, file_instance, content_type=None):
"""
Copy over the `file_instance` from memory to S3
"""
self.conn.upload(self.filename, file_instance, content_type=content_type)
@property
def S3_BUCKET(self):
"""
Returns the S3_BUCKET. Checks local environment variables first, database-stored settings second
"""
return os.environ.get('S3_BUCKET', self.database_settings.bucket)
@property
def S3_ACCESS_KEY(self):
"""
Returns the S3_ACCESS_KEY. Checks local environment variables first, database-stored settings second
"""
return os.environ.get('S3_ACCESS_KEY', self.database_settings.access_key)
@property
def S3_SECRET_KEY(self):
"""
Returns the S3_SECRET_KEY. Checks local environment variables first, database-stored settings second
"""
return os.environ.get('S3_SECRET_KEY', self.database_settings.secret_key)
@property
def database_settings(self):
"""
        Pulls an S3Connection instance, which contains S3 connection settings, from the database. The result is cached locally.
"""
if not getattr(self, '__database_settings', None):
self.__database_settings = S3Connection.objects.get()
return self.__database_settings
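# Illustrative sketch (not part of this module's public API): picking a storage
# backend at runtime. The USE_S3 setting name is hypothetical; the constructors
# match the classes defined above.
def get_storage_backend(filename):
    """Return an S3Storage when S3 is enabled in settings, else a LocalStorage."""
    if getattr(settings, 'USE_S3', False):
        return S3Storage(filename)
    return LocalStorage(filename)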
| sokanu/frame | images/storage.py | Python | mit | 3,547 | 0.004229 |
import os
import uuid
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
def avatar_upload(instance, filename):
ext = filename.split(".")[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return os.path.join("avatars", filename)
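# Illustrative sketch: avatar_upload() keeps the file extension but replaces the
# name with a random UUID, yielding paths like "avatars/<uuid>.png". The instance
# argument is unused, so None works for demonstration purposes.
def _example_avatar_upload_path():
    path = avatar_upload(None, "portrait.png")
    assert path.startswith("avatars") and path.endswith(".png")
    return path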
class Profile(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=75, blank=True)
avatar = models.ImageField(upload_to=avatar_upload, blank=True)
bio = models.TextField(blank=True)
affiliation = models.CharField(max_length=100, blank=True)
location = models.CharField(max_length=100, blank=True)
website = models.CharField(max_length=250, blank=True)
twitter_username = models.CharField("Twitter Username", max_length=100, blank=True)
created_at = models.DateTimeField(default=timezone.now)
modified_at = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
self.modified_at = timezone.now()
return super(Profile, self).save(*args, **kwargs)
@property
def display_name(self):
if self.name:
return self.name
else:
return self.user.username
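# Illustrative sketch assuming a configured Django environment: display_name
# falls back to the username when the optional name field is blank. Nothing is
# saved to the database here.
def _example_display_name():
    user = User(username="jdoe")
    profile = Profile(user=user, name="")
    assert profile.display_name == "jdoe"
    profile.name = "Jane Doe"
    assert profile.display_name == "Jane Doe"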
| new-player/share_projects | share_projects/profiles/models.py | Python | mit | 1,201 | 0.000833 |
#!/usr/bin/python
from datetime import datetime
from collections import namedtuple
import sys, os
import gzip
import random, math
from optparse import OptionParser
options = None
## Used for Orthology
best_query_taxon_score = {}
## Used for Paralogy
BestInterTaxonScore = {}
BetterHit = {}
# class SimilarSequenceLine:
# def __init__(self, line):
# column = line.strip().split('\t')
# self.query_id = column[0]
# (self.query_taxon, self.query_seq) = column[0].split('|')
# self.subject_id = column[1]
# (self.subject_taxon,self.subject_seq) = column[1].split('|')
# self.evalue_mant = float(column[2])
# self.evalue_exp = int(column[3])
# #self.percent_ident = column[4]
# self.percent_match = float(column[4])
class SimilarSequenceLine(namedtuple('SimilarSequenceLine', 'query_id,query_taxon,query_seq,subject_id,subject_taxon,subject_seq,evalue_mant,evalue_exp,percent_match')):
__slots__ = ()
@classmethod
def _fromLine(cls, line, new=tuple.__new__, len=len):
		'Make a new SimilarSequenceLine object from a tab-separated line'
column = line.strip().split('\t')
(query_taxon, query_seq) = column[0].split('|')
(subject_taxon, subject_seq) = column[1].split('|')
iterable = (column[0], query_taxon, query_seq, column[1], subject_taxon, subject_seq, float(column[2]), int(column[3]), float(column[4]))
result = new(cls, iterable)
if len(result) != 9:
raise TypeError('Expected 9 arguments, got %d' % len(result))
return result
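# Illustrative sketch: parsing one tab-separated similar-sequence record. The
# values are made up; the column layout (query id, subject id, e-value mantissa,
# e-value exponent, percent match) mirrors _fromLine() above.
def _example_parse_ss_line():
	line = "tax1|seqA\ttax2|seqB\t1.5\t-30\t87.5\n"
	s = SimilarSequenceLine._fromLine(line)
	assert s.query_taxon == "tax1" and s.subject_seq == "seqB"
	assert s.evalue_exp == -30 and s.percent_match == 87.5
	return s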
def readTaxonList(filename):
taxon_list = []
taxon_list_file = open(filename)
for line in taxon_list_file:
line = line.strip()
if line:
taxon_list += [line]
taxon_list_file.close()
return taxon_list
def memory_usage_resource():
import resource
rusage_denom = 1024.
if sys.platform == 'darwin':
# ... it seems that in OSX the output is different units ...
rusage_denom = rusage_denom * rusage_denom
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
return round(mem, 0)
def log(s):
global options
print >> sys.stderr, s
if options.logfile:
l = open(options.logfile, 'a')
l.write(s+'\n')
l.close()
def writeStoOutputFiles(s, out_bh_file):
global best_query_taxon_score, BestInterTaxonScore, options
try:
(cutoff_exp, cutoff_mant) = best_query_taxon_score[(s.query_id, s.subject_taxon)]
if (
s.query_taxon != s.subject_taxon and
s.evalue_exp < options.evalueExponentCutoff and
s.percent_match > options.percentMatchCutoff and
(s.evalue_mant < 0.01 or s.evalue_exp==cutoff_exp and s.evalue_mant==cutoff_mant)
):
out_bh_file.write('{0}\t{1}\t{2}\t{3}\n'.format(s.query_seq, s.subject_id, s.evalue_exp, s.evalue_mant))
except KeyError:
pass
if options.outInParalogTempFolder:
try:
(cutoff_exp, cutoff_mant) = BestInterTaxonScore[s.query_id]
if (s.query_taxon == s.subject_taxon and
s.query_id != s.subject_id and
s.evalue_exp <= options.evalueExponentCutoff and
s.percent_match >= options.percentMatchCutoff and
(s.evalue_mant < 0.01 or s.evalue_exp<cutoff_exp or (s.evalue_exp == cutoff_exp and s.evalue_mant<=cutoff_mant))
):
# try:
# BetterHit[(s.query_seq, s.subject_seq)] += [(s.evalue_exp, s.evalue_mant)]
# except KeyError:
BetterHit[(s.query_seq, s.subject_seq)] = (s.evalue_exp, s.evalue_mant)
except KeyError:
# Include the ones with
if (
s.query_taxon == s.subject_taxon and
(options.keepOrthoMCLBug or s.query_id != s.subject_id) and #### THIS IS an OrthoMCL bug
s.evalue_exp <= options.evalueExponentCutoff and
s.percent_match >= options.percentMatchCutoff
):
# try:
# BetterHit[(s.query_seq, s.subject_seq)] += [(s.evalue_exp, s.evalue_mant)]
# except KeyError:
BetterHit[(s.query_seq, s.subject_seq)] = (s.evalue_exp, s.evalue_mant)
if __name__ == '__main__':
usage = "This is STEP 5.1 of PorthoMCL.\n\nusage: %prog options\n"
parser = OptionParser(usage)
parser.add_option("-t", "--taxonlist", dest="taxonlistfile", help="A single column file containing the list of taxon to work with")
parser.add_option("-x", "--index", dest="index", help="An integer number identifying which taxon to work on [1-size_of_taxon_list]", type='int')
parser.add_option('-s', '--inSimSeq', dest='inSimSeq', help='Input folder that contains split similar sequences files (ss files)')
parser.add_option('-b', '--outBestHitFolder', dest='outBestHitFolder', help='folder that will stores Best Hit files (If not set, current folder)')
parser.add_option('-q', '--outInParalogTempFolder', dest='outInParalogTempFolder', help='folder to generate best InParalogTemp evalue scores (pt files) (required only for Paralogs)')
parser.add_option("-l", "--logfile", dest="logfile", help="log file (optional, if not supplied STDERR will be used)")
	parser.add_option('', '--evalueExponentCutoff', dest='evalueExponentCutoff', help='evalue Exponent Cutoff (a negative value, default=-5)', default=-5, type='int')
parser.add_option('', '--percentMatchCutoff', dest='percentMatchCutoff', help='percent Match Cutoff (integer value, default=50)', default=50, type='int')
parser.add_option('', '--cacheInputFile', dest='cacheInputFile', help='Cache input file or read it again. (Only use if I/O is very slow)', default=False, action="store_true")
parser.add_option('', '--keepOrthoMCLBug', dest='keepOrthoMCLBug', help='Keep the OrthoMCL bug in creating Temporary Paralogs files (pt files) where self hits are included', default=False, action="store_true")
#
(options, args) = parser.parse_args()
if len(args) != 0 or not options.taxonlistfile or not options.inSimSeq or not options.index:
parser.error("incorrect arguments.\n\t\tUse -h to get more information or refer to the MANUAL.md")
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(1, 'reading taxon list', options.index, '', memory_usage_resource(), datetime.now()))
taxon_list = readTaxonList(options.taxonlistfile)
if options.index <= 0 or options.index > len(taxon_list):
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format('ERROR', 'Error in index', options.index, '', memory_usage_resource(), datetime.now()))
exit()
taxon1s = taxon_list[options.index - 1]
if options.cacheInputFile:
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format('OPTION', 'Caching Input files', options.index, taxon1s, memory_usage_resource(), datetime.now()))
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(2, 'Reading similar sequences (ss file)', options.index, taxon1s, memory_usage_resource(), datetime.now()))
if options.outBestHitFolder and not os.path.exists(options.outBestHitFolder):
os.makedirs(options.outBestHitFolder)
if options.outInParalogTempFolder and not os.path.exists(options.outInParalogTempFolder):
os.makedirs(options.outInParalogTempFolder)
input_file_cache = []
with open(os.path.join(options.inSimSeq, taxon1s+'.ss.tsv')) as input_file:
for line in input_file:
ss = SimilarSequenceLine._fromLine(line)
if options.cacheInputFile:
input_file_cache += [ss]
if ss.query_taxon != ss.subject_taxon:
try:
best_query_taxon_score[(ss.query_id, ss.subject_taxon)] += [(ss.evalue_mant, ss.evalue_exp)]
except:
best_query_taxon_score[(ss.query_id, ss.subject_taxon)] = [(ss.evalue_mant, ss.evalue_exp)]
for (query_id,subject_taxon) in best_query_taxon_score:
evalues = best_query_taxon_score[(query_id, subject_taxon)]
min_exp = sys.maxint #min(evalues, key = lambda t: t[1])
min_mants = []
for (evalue_mant, evalue_exp) in evalues:
if evalue_exp < min_exp:
min_exp = evalue_exp
min_mants += [evalue_mant]
if evalue_mant == 0 and evalue_exp == 0:
min_mants += [evalue_mant]
best_query_taxon_score[(query_id,subject_taxon)] = (min_exp, min(min_mants))
if options.outInParalogTempFolder:
# log('{2} | Best Hit | {0} | {1} | * | {3} MB | {4}'.format(3 , 'Creating bestQueryTaxonScore (q-t file)', options.index, memory_usage_resource(), datetime.now() ))
# with open(os.path.join(options.outQueryTaxonScoreFolder, taxon1s+'.q-t.tsv'), 'w') as out_file:
# for (query_id,subject_taxon) in sorted(best_query_taxon_score):
# (ev_exp, ev_mant) = best_query_taxon_score[(query_id,subject_taxon)]
# out_file.write('{0}\t{1}\t{2}\t{3}\n'.format(query_id, subject_taxon, ev_exp, ev_mant))
		log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(3 , 'Creating BestInterTaxonScore Matrix', options.index,taxon1s, memory_usage_resource(), datetime.now() ))
for (query_id,subject_taxon) in best_query_taxon_score:
(ev_exp, ev_mant) = best_query_taxon_score[(query_id,subject_taxon)]
try:
(min_exp, mants) = BestInterTaxonScore[query_id]
if ev_exp < min_exp:
BestInterTaxonScore[query_id] = (ev_exp, [ev_mant])
elif ev_exp == min_exp:
BestInterTaxonScore[query_id] = (ev_exp, mants+[ev_mant])
except:
BestInterTaxonScore[query_id] = (ev_exp, [ev_mant])
for query_id in BestInterTaxonScore:
(ev_exp, ev_mants) = BestInterTaxonScore[query_id]
BestInterTaxonScore[query_id] = (ev_exp, min(ev_mants))
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(4 , 'Creating BestHit file needed for Orthology (bh file)', options.index, taxon1s, memory_usage_resource(), datetime.now() ))
BestHit = {}
if not options.outBestHitFolder:
options.outBestHitFolder = '.'
out_bh_file = open(os.path.join(options.outBestHitFolder, taxon1s+'.bh.tsv') ,'w')
if not options.cacheInputFile:
with open(os.path.join(options.inSimSeq, taxon1s+'.ss.tsv')) as input_file:
for line in input_file:
s = SimilarSequenceLine._fromLine(line)
writeStoOutputFiles(s, out_bh_file)
else:
for s in input_file_cache:
writeStoOutputFiles(s, out_bh_file)
out_bh_file.close()
if options.outInParalogTempFolder:
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(5 , 'Creating InParalogTemp file needed for InParalogs (pt file)', options.index, taxon1s, memory_usage_resource(), datetime.now() ))
out_pt_file = open(os.path.join(options.outInParalogTempFolder, taxon1s+'.pt.tsv') ,'w')
for (seq1, seq2) in BetterHit:
if seq1 < seq2:
(bh1_evalue_exp, bh1_evalue_mant) = BetterHit[(seq1, seq2)]
try:
(bh2_evalue_exp, bh2_evalue_mant) = BetterHit[(seq2, seq1)]
except:
continue
if bh1_evalue_mant < 0.01 or bh2_evalue_mant < 0.01:
unnormalized_score = (bh1_evalue_exp + bh2_evalue_exp) / -2
else:
unnormalized_score = (math.log10(bh1_evalue_mant * bh2_evalue_mant) + bh1_evalue_exp + bh2_evalue_exp) / -2
out_pt_file.write('{0}\t{1}\t{2}\n'.format(seq1, seq2, unnormalized_score))
out_pt_file.close()
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(6 , 'Done', options.index, taxon1s, memory_usage_resource(), datetime.now() ))
| greatfireball/PorthoMCL | porthomclPairsBestHit.py | Python | gpl-3.0 | 10,894 | 0.026161 |
from __future__ import print_function
import numpy as np
from six import next
from six.moves import xrange
def plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1):
""" Plot a single Polygon geometry """
from descartes.patch import PolygonPatch
a = np.asarray(poly.exterior)
# without Descartes, we could make a Patch of exterior
ax.add_patch(PolygonPatch(poly, facecolor=facecolor, alpha=alpha))
ax.plot(a[:, 0], a[:, 1], color=edgecolor, linewidth=linewidth)
for p in poly.interiors:
x, y = zip(*p.coords)
ax.plot(x, y, color=edgecolor, linewidth=linewidth)
def plot_multipolygon(ax, geom, facecolor='red', alpha=0.5, linewidth=1):
""" Can safely call with either Polygon or Multipolygon geometry
"""
if geom.type == 'Polygon':
plot_polygon(ax, geom, facecolor=facecolor, alpha=alpha, linewidth=linewidth)
elif geom.type == 'MultiPolygon':
for poly in geom.geoms:
plot_polygon(ax, poly, facecolor=facecolor, alpha=alpha, linewidth=linewidth)
def plot_linestring(ax, geom, color='black', linewidth=1):
""" Plot a single LineString geometry """
a = np.array(geom)
ax.plot(a[:,0], a[:,1], color=color, linewidth=linewidth)
def plot_multilinestring(ax, geom, color='red', linewidth=1):
""" Can safely call with either LineString or MultiLineString geometry
"""
if geom.type == 'LineString':
plot_linestring(ax, geom, color=color, linewidth=linewidth)
elif geom.type == 'MultiLineString':
for line in geom.geoms:
plot_linestring(ax, line, color=color, linewidth=linewidth)
def plot_point(ax, pt, marker='o', markersize=2):
""" Plot a single Point geometry """
ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, linewidth=0)
def gencolor(N, colormap='Set1'):
"""
Color generator intended to work with one of the ColorBrewer
qualitative color scales.
Suggested values of colormap are the following:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
(although any matplotlib colormap will work).
"""
from matplotlib import cm
# don't use more than 9 discrete colors
n_colors = min(N, 9)
cmap = cm.get_cmap(colormap, n_colors)
colors = cmap(range(n_colors))
for i in xrange(N):
yield colors[i % n_colors]
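# Illustrative sketch: gencolor() yields one RGBA tuple per geometry and cycles
# after nine discrete colors. Requires matplotlib; the count below is arbitrary.
def _example_gencolor_usage(n_geometries=12):
    colors = list(gencolor(n_geometries, colormap='Set1'))
    # With 12 geometries the 10th color repeats the 1st, since at most 9
    # discrete colors are drawn from the qualitative colormap.
    return colors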
def plot_series(s, colormap='Set1', alpha=0.5, linewidth=1.0, axes=None):
""" Plot a GeoSeries
Generate a plot of a GeoSeries geometry with matplotlib.
Parameters
----------
Series
The GeoSeries to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
colormap : str (default 'Set1')
The name of a colormap recognized by matplotlib. Any
colormap will work, but categorical colormaps are
generally recommended. Examples of useful discrete
colormaps include:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
alpha : float (default 0.5)
Alpha value for polygon fill regions. Has no effect for
lines or points.
linewidth : float (default 1.0)
Line width for geometries.
axes : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
Returns
-------
matplotlib axes instance
"""
import matplotlib.pyplot as plt
    if axes is None:
fig = plt.gcf()
fig.add_subplot(111, aspect='equal')
ax = plt.gca()
else:
ax = axes
color = gencolor(len(s), colormap=colormap)
for geom in s:
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
plot_multipolygon(ax, geom, facecolor=next(color), alpha=alpha, linewidth=linewidth)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=next(color), linewidth=linewidth)
elif geom.type == 'Point':
plot_point(ax, geom)
plt.draw()
return ax
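# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# plot_series only needs an iterable with a length, so a plain list of shapely
# geometries works as well as a GeoSeries. The helper name and the geometries below
# are invented; Polygon input would additionally require descartes.
def _example_plot_series():
    from shapely.geometry import LineString, Point
    geoms = [LineString([(0, 0), (1, 1), (2, 0)]), Point(1.5, 1.5)]
    # Lines and points avoid the descartes dependency that polygons need above.
    return plot_series(geoms, colormap='Set2', linewidth=2.0)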
def plot_dataframe(s, column=None, colormap=None, alpha=0.5, linewidth=1.0,
categorical=False, legend=False, axes=None, scheme=None,
vmin=None, vmax=None,
k=5):
""" Plot a GeoDataFrame
Generate a plot of a GeoDataFrame with matplotlib. If a
column is specified, the plot coloring will be based on values
in that column. Otherwise, a categorical plot of the
geometries in the `geometry` column will be generated.
Parameters
----------
    s : GeoDataFrame
The GeoDataFrame to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
column : str (default None)
The name of the column to be plotted.
categorical : bool (default False)
If False, colormap will reflect numerical values of the
column being plotted. For non-numerical columns (or if
column=None), this will be set to True.
colormap : str (default 'Set1')
The name of a colormap recognized by matplotlib.
alpha : float (default 0.5)
Alpha value for polygon fill regions. Has no effect for
lines or points.
linewidth : float (default 1.0)
Line width for geometries.
legend : bool (default False)
Plot a legend (Experimental; currently for categorical
plots only)
axes : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
scheme : pysal.esda.mapclassify.Map_Classifier
Choropleth classification schemes
vmin : float
Minimum value for color map
vmax : float
Maximum value for color map
k : int (default 5)
Number of classes (ignored if scheme is None)
Returns
-------
matplotlib axes instance
"""
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.colors import Normalize
from matplotlib import cm
if column is None:
return plot_series(s.geometry, colormap=colormap, alpha=alpha,
linewidth=linewidth, axes=axes)
else:
if s[column].dtype is np.dtype('O'):
categorical = True
if categorical:
if colormap is None:
colormap = 'Set1'
categories = list(set(s[column].values))
categories.sort()
valuemap = dict([(k, v) for (v, k) in enumerate(categories)])
values = [valuemap[k] for k in s[column]]
else:
values = s[column]
if scheme is not None:
values = __pysal_choro(values, scheme, k=k)
cmap = norm_cmap(values, colormap, Normalize, cm, mn=vmin, mx=vmax)
        if axes is None:
fig = plt.gcf()
fig.add_subplot(111, aspect='equal')
ax = plt.gca()
else:
ax = axes
for geom, value in zip(s.geometry, values):
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
plot_multipolygon(ax, geom, facecolor=cmap.to_rgba(value),
alpha=alpha, linewidth=linewidth)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=cmap.to_rgba(value), linewidth=linewidth)
# TODO: color point geometries
elif geom.type == 'Point':
plot_point(ax, geom)
if legend:
if categorical:
patches = []
for value, cat in enumerate(categories):
patches.append(Line2D([0], [0], linestyle="none",
marker="o", alpha=alpha,
markersize=10, markerfacecolor=cmap.to_rgba(value)))
ax.legend(patches, categories, numpoints=1, loc='best')
else:
# TODO: show a colorbar
raise NotImplementedError
plt.draw()
return ax
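# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# A hedged example of categorical colouring with plot_dataframe. It assumes geopandas
# and shapely are installed so that `df.geometry` and column access behave as the
# function expects; the column name, values and coordinates are invented.
def _example_plot_dataframe():
    import geopandas as gpd
    from shapely.geometry import Point
    df = gpd.GeoDataFrame({
        'kind': ['a', 'b', 'a'],
        'geometry': [Point(0, 0), Point(1, 1), Point(2, 0)],
    })
    # An object-dtype column triggers the categorical branch and a discrete colormap.
    return plot_dataframe(df, column='kind', colormap='Set1', legend=True)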
def __pysal_choro(values, scheme, k=5):
""" Wrapper for choropleth schemes from PySAL for use with plot_dataframe
Parameters
----------
values
Series to be plotted
scheme
        pysal.esda.mapclassify classification scheme ['Equal_interval'|'Quantiles'|'Fisher_Jenks']
k
number of classes (2 <= k <=9)
Returns
-------
values
Series with values replaced with class identifier if PySAL is available, otherwise the original values are used
"""
try:
from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks
schemes = {}
schemes['equal_interval'] = Equal_Interval
schemes['quantiles'] = Quantiles
schemes['fisher_jenks'] = Fisher_Jenks
s0 = scheme
scheme = scheme.lower()
if scheme not in schemes:
scheme = 'quantiles'
print('Unrecognized scheme: ', s0)
print('Using Quantiles instead')
if k<2 or k>9:
print('Invalid k: ', k)
print('2<=k<=9, setting k=5 (default)')
k = 5
binning = schemes[scheme](values, k)
values = binning.yb
except ImportError:
print('PySAL not installed, setting map to default')
return values
def norm_cmap(values, cmap, normalize, cm, mn=None, mx=None):
""" Normalize and set colormap
Parameters
----------
values
Series or array to be normalized
cmap
matplotlib Colormap
normalize
matplotlib.colors.Normalize
cm
matplotlib.cm
mn
Minimum value of cmap
mx
Maximum value of cmap
Returns
-------
n_cmap
mapping of normalized values to colormap (cmap)
"""
if mn is None: mn = min(values)
if mx is None: mx = max(values)
norm = normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
return n_cmap
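# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# norm_cmap just wires a Normalize instance to a ScalarMappable; only matplotlib is
# needed. The values and colormap name below are arbitrary.
def _example_norm_cmap():
    from matplotlib.colors import Normalize
    from matplotlib import cm
    mapper = norm_cmap([2.0, 5.0, 9.0], 'Blues', Normalize, cm)
    # Returns one RGBA tuple per value, spanning the colormap from vmin to vmax.
    return [mapper.to_rgba(v) for v in (2.0, 5.0, 9.0)]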
|
fonnesbeck/geopandas
|
geopandas/plotting.py
|
Python
|
bsd-3-clause
| 10,488
| 0.003337
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import os
import ddt
import mock
from oslo_config import cfg
from manila import exception
from manila.share import configuration as config
from manila.share.drivers import ganesha
from manila import test
from manila.tests import fake_share
CONF = cfg.CONF
fake_basepath = '/fakepath'
fake_export_name = 'fakename--fakeaccid'
fake_output_template = {
'EXPORT': {
'Export_Id': 101,
'Path': '/fakepath/fakename',
'Pseudo': '/fakepath/fakename--fakeaccid',
'Tag': 'fakeaccid',
'CLIENT': {
'Clients': '10.0.0.1'
},
'FSAL': 'fakefsal'
}
}
@ddt.ddt
class GaneshaNASHelperTestCase(test.TestCase):
"""Tests GaneshaNASHElper."""
def setUp(self):
super(GaneshaNASHelperTestCase, self).setUp()
CONF.set_default('ganesha_config_path', '/fakedir0/fakeconfig')
CONF.set_default('ganesha_db_path', '/fakedir1/fake.db')
CONF.set_default('ganesha_export_dir', '/fakedir0/export.d')
CONF.set_default('ganesha_export_template_dir',
'/fakedir2/faketempl.d')
CONF.set_default('ganesha_service_name', 'ganesha.fakeservice')
self._execute = mock.Mock(return_value=('', ''))
self.fake_conf = config.Configuration(None)
self.fake_conf_dir_path = '/fakedir0/exports.d'
self._helper = ganesha.GaneshaNASHelper(
self._execute, self.fake_conf, tag='faketag')
self._helper.ganesha = mock.Mock()
self._helper.export_template = {'key': 'value'}
self.share = fake_share.fake_share()
self.access = fake_share.fake_access()
def test_load_conf_dir(self):
fake_template1 = {'key': 'value1'}
fake_template2 = {'key': 'value2'}
fake_ls_dir = ['fakefile0.conf', 'fakefile1.json', 'fakefile2.txt']
mock_ganesha_utils_patch = mock.Mock()
def fake_patch_run(tmpl1, tmpl2):
mock_ganesha_utils_patch(
copy.deepcopy(tmpl1), copy.deepcopy(tmpl2))
tmpl1.update(tmpl2)
self.mock_object(ganesha.os, 'listdir',
mock.Mock(return_value=fake_ls_dir))
self.mock_object(ganesha.LOG, 'info')
self.mock_object(ganesha.ganesha_manager, 'parseconf',
mock.Mock(side_effect=[fake_template1,
fake_template2]))
self.mock_object(ganesha.ganesha_utils, 'patch',
mock.Mock(side_effect=fake_patch_run))
with mock.patch('six.moves.builtins.open',
mock.mock_open()) as mockopen:
mockopen().read.side_effect = ['fakeconf0', 'fakeconf1']
ret = self._helper._load_conf_dir(self.fake_conf_dir_path)
ganesha.os.listdir.assert_called_once_with(
self.fake_conf_dir_path)
ganesha.LOG.info.assert_called_once_with(
mock.ANY, self.fake_conf_dir_path)
mockopen.assert_has_calls([
mock.call('/fakedir0/exports.d/fakefile0.conf'),
mock.call('/fakedir0/exports.d/fakefile1.json')],
any_order=True)
ganesha.ganesha_manager.parseconf.assert_has_calls([
mock.call('fakeconf0'), mock.call('fakeconf1')])
mock_ganesha_utils_patch.assert_has_calls([
mock.call({}, fake_template1),
mock.call(fake_template1, fake_template2)])
self.assertEqual(fake_template2, ret)
def test_load_conf_dir_no_conf_dir_must_exist_false(self):
self.mock_object(
ganesha.os, 'listdir',
mock.Mock(side_effect=OSError(errno.ENOENT,
os.strerror(errno.ENOENT))))
self.mock_object(ganesha.LOG, 'info')
self.mock_object(ganesha.ganesha_manager, 'parseconf')
self.mock_object(ganesha.ganesha_utils, 'patch')
with mock.patch('six.moves.builtins.open',
mock.mock_open(read_data='fakeconf')) as mockopen:
ret = self._helper._load_conf_dir(self.fake_conf_dir_path,
must_exist=False)
ganesha.os.listdir.assert_called_once_with(
self.fake_conf_dir_path)
ganesha.LOG.info.assert_called_once_with(
mock.ANY, self.fake_conf_dir_path)
self.assertFalse(mockopen.called)
self.assertFalse(ganesha.ganesha_manager.parseconf.called)
self.assertFalse(ganesha.ganesha_utils.patch.called)
self.assertEqual({}, ret)
def test_load_conf_dir_error_no_conf_dir_must_exist_true(self):
self.mock_object(
ganesha.os, 'listdir',
mock.Mock(side_effect=OSError(errno.ENOENT,
os.strerror(errno.ENOENT))))
self.assertRaises(OSError, self._helper._load_conf_dir,
self.fake_conf_dir_path)
ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path)
def test_load_conf_dir_error_conf_dir_present_must_exist_false(self):
self.mock_object(
ganesha.os, 'listdir',
mock.Mock(side_effect=OSError(errno.EACCES,
os.strerror(errno.EACCES))))
self.assertRaises(OSError, self._helper._load_conf_dir,
self.fake_conf_dir_path, must_exist=False)
ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path)
def test_load_conf_dir_error(self):
self.mock_object(
ganesha.os, 'listdir',
mock.Mock(side_effect=RuntimeError('fake error')))
self.assertRaises(RuntimeError, self._helper._load_conf_dir,
self.fake_conf_dir_path)
ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path)
def test_init_helper(self):
mock_template = mock.Mock()
mock_ganesha_manager = mock.Mock()
self.mock_object(ganesha.ganesha_manager, 'GaneshaManager',
mock.Mock(return_value=mock_ganesha_manager))
self.mock_object(self._helper, '_load_conf_dir',
mock.Mock(return_value=mock_template))
self.mock_object(self._helper, '_default_config_hook')
ret = self._helper.init_helper()
ganesha.ganesha_manager.GaneshaManager.assert_called_once_with(
self._execute, 'faketag',
ganesha_config_path='/fakedir0/fakeconfig',
ganesha_export_dir='/fakedir0/export.d',
ganesha_db_path='/fakedir1/fake.db',
ganesha_service_name='ganesha.fakeservice')
self._helper._load_conf_dir.assert_called_once_with(
'/fakedir2/faketempl.d', must_exist=False)
self.assertFalse(self._helper._default_config_hook.called)
self.assertEqual(mock_ganesha_manager, self._helper.ganesha)
self.assertEqual(mock_template, self._helper.export_template)
self.assertIsNone(ret)
def test_init_helper_conf_dir_empty(self):
mock_template = mock.Mock()
mock_ganesha_manager = mock.Mock()
self.mock_object(ganesha.ganesha_manager, 'GaneshaManager',
mock.Mock(return_value=mock_ganesha_manager))
self.mock_object(self._helper, '_load_conf_dir',
mock.Mock(return_value={}))
self.mock_object(self._helper, '_default_config_hook',
mock.Mock(return_value=mock_template))
ret = self._helper.init_helper()
ganesha.ganesha_manager.GaneshaManager.assert_called_once_with(
self._execute, 'faketag',
ganesha_config_path='/fakedir0/fakeconfig',
ganesha_export_dir='/fakedir0/export.d',
ganesha_db_path='/fakedir1/fake.db',
ganesha_service_name='ganesha.fakeservice')
self._helper._load_conf_dir.assert_called_once_with(
'/fakedir2/faketempl.d', must_exist=False)
self._helper._default_config_hook.assert_called_once_with()
self.assertEqual(mock_ganesha_manager, self._helper.ganesha)
self.assertEqual(mock_template, self._helper.export_template)
self.assertIsNone(ret)
def test_default_config_hook(self):
fake_template = {'key': 'value'}
self.mock_object(ganesha.ganesha_utils, 'path_from',
mock.Mock(return_value='/fakedir3/fakeconfdir'))
self.mock_object(self._helper, '_load_conf_dir',
mock.Mock(return_value=fake_template))
ret = self._helper._default_config_hook()
ganesha.ganesha_utils.path_from.assert_called_once_with(
ganesha.__file__, 'conf')
self._helper._load_conf_dir.assert_called_once_with(
'/fakedir3/fakeconfdir')
self.assertEqual(fake_template, ret)
def test_fsal_hook(self):
ret = self._helper._fsal_hook('/fakepath', self.share, self.access)
self.assertEqual({}, ret)
def test_allow_access(self):
mock_ganesha_utils_patch = mock.Mock()
def fake_patch_run(tmpl1, tmpl2, tmpl3):
mock_ganesha_utils_patch(copy.deepcopy(tmpl1), tmpl2, tmpl3)
tmpl1.update(tmpl3)
self.mock_object(self._helper.ganesha, 'get_export_id',
mock.Mock(return_value=101))
self.mock_object(self._helper, '_fsal_hook',
mock.Mock(return_value='fakefsal'))
self.mock_object(ganesha.ganesha_utils, 'patch',
mock.Mock(side_effect=fake_patch_run))
ret = self._helper._allow_access(fake_basepath, self.share,
self.access)
self._helper.ganesha.get_export_id.assert_called_once_with()
self._helper._fsal_hook.assert_called_once_with(
fake_basepath, self.share, self.access)
mock_ganesha_utils_patch.assert_called_once_with(
{}, self._helper.export_template, fake_output_template)
self._helper._fsal_hook.assert_called_once_with(
fake_basepath, self.share, self.access)
self._helper.ganesha.add_export.assert_called_once_with(
fake_export_name, fake_output_template)
self.assertIsNone(ret)
def test_allow_access_error_invalid_share(self):
access = fake_share.fake_access(access_type='notip')
self.assertRaises(exception.InvalidShareAccess,
self._helper._allow_access, '/fakepath',
self.share, access)
def test_deny_access(self):
ret = self._helper._deny_access('/fakepath', self.share, self.access)
self._helper.ganesha.remove_export.assert_called_once_with(
'fakename--fakeaccid')
self.assertIsNone(ret)
@ddt.data({}, {'recovery': False})
def test_update_access_for_allow(self, kwargs):
self.mock_object(self._helper, '_allow_access')
self.mock_object(self._helper, '_deny_access')
self._helper.update_access(
'/some/path', 'aShare', add_rules=["example.com"], delete_rules=[],
**kwargs)
self._helper._allow_access.assert_called_once_with(
'/some/path', 'aShare', 'example.com')
self.assertFalse(self._helper._deny_access.called)
self.assertFalse(self._helper.ganesha.reset_exports.called)
self.assertFalse(self._helper.ganesha.restart_service.called)
def test_update_access_for_deny(self):
self.mock_object(self._helper, '_allow_access')
self.mock_object(self._helper, '_deny_access')
self._helper.update_access(
'/some/path', 'aShare', [], delete_rules=["example.com"])
self._helper._deny_access.assert_called_once_with(
'/some/path', 'aShare', 'example.com')
self.assertFalse(self._helper._allow_access.called)
self.assertFalse(self._helper.ganesha.reset_exports.called)
self.assertFalse(self._helper.ganesha.restart_service.called)
def test_update_access_recovery(self):
self.mock_object(self._helper, '_allow_access')
self.mock_object(self._helper, '_deny_access')
self._helper.update_access(
'/some/path', 'aShare', add_rules=["example.com"], delete_rules=[],
recovery=True)
self._helper._allow_access.assert_called_once_with(
'/some/path', 'aShare', 'example.com')
self.assertFalse(self._helper._deny_access.called)
self.assertTrue(self._helper.ganesha.reset_exports.called)
self.assertTrue(self._helper.ganesha.restart_service.called)
|
NetApp/manila
|
manila/tests/share/drivers/test_ganesha.py
|
Python
|
apache-2.0
| 13,337
| 0
|
# setup.py: based off setup.py for toil-vg, modified to install this pipeline
# instead.
import sys
import os
# Get the local version.py and not any other version module
execfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "version.py"))
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
kwargs = dict(
name='hgvm-builder',
version=version,
description="Human Genome Variation Map construction kit",
author='Adam Novak',
author_email='anovak@soe.ucsc.edu',
url="https://github.com/BD2KGenomics/hgvm-builder",
install_requires=[package + ver for package, ver in required_versions.iteritems()],
dependency_links = dependency_links,
tests_require=['pytest==2.8.3'],
package_dir={'': 'src'},
packages=find_packages('src'),
entry_points={
'console_scripts': [
'build-hgvm = hgvmbuilder.build:entrypoint',
'copy-hgvm = hgvmbuilder.parallelcopy:entrypoint',
'import-sam-hgvm = hgvmbuilder.importsam:entrypoint'
]})
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
# Sanitize command line arguments to avoid confusing Toil code
# attempting to parse them
sys.argv[1:] = []
errno = pytest.main(self.pytest_args)
sys.exit(errno)
kwargs['cmdclass'] = {'test': PyTest}
setup(**kwargs)
# When we run setup, tell the user they need a good Toil with cloud support
print("""
Thank you for installing the hgvm-builder pipeline!
If you want to run this Toil-based pipeline on a cluster in a cloud, please
install Toil with the appropriate extras. For example, to install AWS/EC2
support, run
pip install toil[aws,mesos]{}
on every EC2 instance. For Microsoft Azure, deploy your cluster using the Toil
template at
https://github.com/BD2KGenomics/toil/tree/master/contrib/azure
For more information, please refer to Toil's documentation at
http://toil.readthedocs.io/en/latest/installation.html
To start building HGVMs, run
build-hgvm --help 2>&1 | less
""".format(required_versions['toil']))
|
adamnovak/hgvm-builder
|
setup.py
|
Python
|
apache-2.0
| 2,450
| 0.003673
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMultiEditToolButton.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '16/03/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA switch sip api
from qgis.gui import QgsMultiEditToolButton
from qgis.testing import start_app, unittest
start_app()
class TestQgsMultiEditToolButton(unittest.TestCase):
def test_state_logic(self):
"""
Test that the logic involving button states is correct
"""
w = QgsMultiEditToolButton()
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
# set is changed should update state to changed
w.setIsChanged(True)
self.assertEqual(w.state(), QgsMultiEditToolButton.Changed)
w.setIsChanged(False)
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
# resetting changes should fall back to default state
w.setIsChanged(True)
w.resetChanges()
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
# setting changes committed should result in default state
w.setIsChanged(True)
w.changesCommitted()
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
# Test with mixed values
w.setIsMixed(True)
self.assertEqual(w.state(), QgsMultiEditToolButton.MixedValues)
# changed state takes priority over mixed state
w.setIsChanged(True)
self.assertEqual(w.state(), QgsMultiEditToolButton.Changed)
w.setIsChanged(False)
# should reset to mixed state
self.assertEqual(w.state(), QgsMultiEditToolButton.MixedValues)
# resetting changes should fall back to mixed state
w.setIsChanged(True)
w.resetChanges()
self.assertEqual(w.state(), QgsMultiEditToolButton.MixedValues)
# setting changes committed should result in default state
w.setIsChanged(True)
w.changesCommitted()
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
if __name__ == '__main__':
unittest.main()
|
pblottiere/QGIS
|
tests/src/python/test_qgsmultiedittoolbutton.py
|
Python
|
gpl-2.0
| 2,332
| 0
|
import os
import json
import logging
import ConfigParser
from framework.db import models
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import MappingDBInterface
from framework.lib.exceptions import InvalidMappingReference
class MappingDB(BaseComponent, MappingDBInterface):
COMPONENT_NAME = "mapping_db"
def __init__(self):
"""
The mapping_types attributes contain the unique mappings in memory
"""
self.register_in_service_locator()
self.config = self.get_component("config")
self.db = self.get_component("db")
self.mapping_types = []
self.error_handler = self.get_component("error_handler")
def init(self):
self.LoadMappingDBFromFile(self.config.get_profile_path("MAPPING_PROFILE"))
def LoadMappingDBFromFile(self, file_path):
"""
This needs to be a list instead of a dictionary to preserve order in
python < 2.7
"""
file_path = self.config.select_user_or_default_config_path(file_path)
logging.info("Loading Mapping from: %s..", file_path)
config_parser = ConfigParser.RawConfigParser()
# Otherwise all the keys are converted to lowercase xD
config_parser.optionxform = str
if not os.path.isfile(file_path): # check if the mapping file exists
self.error_handler.FrameworkAbort("Mapping file not found at: %s" % file_path)
config_parser.read(file_path)
for owtf_code in config_parser.sections():
mappings = {}
category = None
for mapping_type, data in config_parser.items(owtf_code):
if mapping_type != 'category':
if mapping_type not in self.mapping_types:
self.mapping_types.append(mapping_type)
mapped_code, mapped_name = data.split('_____')
mappings[mapping_type] = [mapped_code, mapped_name]
else:
category = data
self.db.session.merge(models.Mapping(owtf_code=owtf_code, mappings=json.dumps(mappings), category=category))
self.db.session.commit()
def DeriveMappingDict(self, obj):
if obj:
pdict = dict(obj.__dict__)
pdict.pop("_sa_instance_state", None)
# If output is present, json decode it
if pdict.get("mappings", None):
pdict["mappings"] = json.loads(pdict["mappings"])
return pdict
def DeriveMappingDicts(self, obj_list):
dict_list = []
for obj in obj_list:
dict_list.append(self.DeriveMappingDict(obj))
return dict_list
def GetMappingTypes(self):
"""
In memory data saved when loading db
"""
return self.mapping_types
def GetMappings(self, mapping_type):
if mapping_type in self.mapping_types:
mapping_objs = self.db.session.query(models.Mapping).all()
mappings = {}
for mapping_dict in self.DeriveMappingDicts(mapping_objs):
if mapping_dict["mappings"].get(mapping_type, None):
mappings[mapping_dict["owtf_code"]] = mapping_dict["mappings"][mapping_type]
return mappings
else:
raise InvalidMappingReference("InvalidMappingReference %s requested" % mapping_type)
def GetCategory(self, plugin_code):
category = self.db.session.query(models.Mapping.category).get(plugin_code)
# Getting the corresponding category back from db
return category
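# --- Illustrative sketch (editorial addition, not part of the original module) ---
# A minimal, self-contained example of the mapping-profile format that
# LoadMappingDBFromFile expects, inferred from the parsing loop above: each section
# name is an OWTF code, a 'category' key sets the category, and every other key is a
# mapping type whose value packs the mapped code and name separated by '_____'.
# All identifiers in the sample are invented.
def _example_parse_mapping_profile():
    import ConfigParser
    import StringIO
    sample = (
        "[OWTF-IG-001]\n"
        "category = information_gathering\n"
        "OWASP_V3 = OWASP-IG-001_____Spiders, Robots and Crawlers\n"
    )
    parser = ConfigParser.RawConfigParser()
    parser.optionxform = str  # keep key case, as the loader above does
    parser.readfp(StringIO.StringIO(sample))
    result = {}
    for owtf_code in parser.sections():
        mappings, category = {}, None
        for mapping_type, data in parser.items(owtf_code):
            if mapping_type == 'category':
                category = data
            else:
                mapped_code, mapped_name = data.split('_____')
                mappings[mapping_type] = [mapped_code, mapped_name]
        result[owtf_code] = (category, mappings)
    return result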
|
DarKnight24/owtf
|
framework/db/mapping_manager.py
|
Python
|
bsd-3-clause
| 3,644
| 0.001647
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError, Py4JNetworkError
import warnings
import ast
import traceback
import warnings
import signal
import base64
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# for backward compatibility
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(object):
""" A context impl that uses Py4j to communicate to JVM
"""
def __init__(self, z):
self.z = z
self.paramOption = gateway.jvm.org.apache.zeppelin.display.ui.OptionInput.ParamOption
self.javaList = gateway.jvm.java.util.ArrayList
self.max_result = 1000
self._displayhook = lambda *args: None
self._setup_matplotlib()
def getInterpreterContext(self):
return self.z.getCurrentInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.getGui().input(name, defaultValue)
def select(self, name, options, defaultValue=""):
javaOptions = gateway.new_array(self.paramOption, len(options))
i = 0
for tuple in options:
javaOptions[i] = self.paramOption(tuple[0], tuple[1])
i += 1
return self.z.getGui().select(name, defaultValue, javaOptions)
def checkbox(self, name, options, defaultChecked=[]):
javaOptions = gateway.new_array(self.paramOption, len(options))
i = 0
for tuple in options:
javaOptions[i] = self.paramOption(tuple[0], tuple[1])
i += 1
javaDefaultCheck = self.javaList()
for check in defaultChecked:
javaDefaultCheck.append(check)
return self.z.getGui().checkbox(name, javaDefaultCheck, javaOptions)
def show(self, p, **kwargs):
if hasattr(p, '__name__') and p.__name__ == "matplotlib.pyplot":
self.show_matplotlib(p, **kwargs)
elif type(p).__name__ == "DataFrame": # does not play well with sub-classes
# `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
# and so a dependency on pandas
self.show_dataframe(p, **kwargs)
elif hasattr(p, '__call__'):
p() #error reporting
def show_dataframe(self, df, show_index=False, **kwargs):
"""Pretty prints DF using Table Display System
"""
limit = len(df) > self.max_result
header_buf = StringIO("")
if show_index:
idx_name = str(df.index.name) if df.index.name is not None else ""
header_buf.write(idx_name + "\t")
header_buf.write(str(df.columns[0]))
for col in df.columns[1:]:
header_buf.write("\t")
header_buf.write(str(col))
header_buf.write("\n")
body_buf = StringIO("")
rows = df.head(self.max_result).values if limit else df.values
index = df.index.values
for idx, row in zip(index, rows):
if show_index:
body_buf.write("%html <strong>{}</strong>".format(idx))
body_buf.write("\t")
body_buf.write(str(row[0]))
for cell in row[1:]:
body_buf.write("\t")
body_buf.write(str(cell))
body_buf.write("\n")
body_buf.seek(0); header_buf.seek(0)
#TODO(bzz): fix it, so it shows red notice, as in Spark
print("%table " + header_buf.read() + body_buf.read()) # +
# ("\n<font color=red>Results are limited by {}.</font>" \
# .format(self.max_result) if limit else "")
#)
body_buf.close(); header_buf.close()
def show_matplotlib(self, p, fmt="png", width="auto", height="auto",
**kwargs):
"""Matplotlib show function
"""
if fmt == "png":
img = BytesIO()
p.savefig(img, format=fmt)
img_str = b"data:image/png;base64,"
img_str += base64.b64encode(img.getvalue().strip())
img_tag = "<img src={img} style='width={width};height:{height}'>"
            # Decoding is necessary for Python 3 compatibility
img_str = img_str.decode("ascii")
img_str = img_tag.format(img=img_str, width=width, height=height)
elif fmt == "svg":
img = StringIO()
p.savefig(img, format=fmt)
img_str = img.getvalue()
else:
raise ValueError("fmt must be 'png' or 'svg'")
html = "%html <div style='width:{width};height:{height}'>{img}<div>"
print(html.format(width=width, height=height, img=img_str))
img.close()
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72,
fontsize=10, interactive=True, format='png')
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def handler_stop_signals(sig, frame):
sys.exit("Got signal : " + str(sig))
signal.signal(signal.SIGINT, handler_stop_signals)
host = "127.0.0.1"
if len(sys.argv) >= 3:
host = sys.argv[2]
_zcUserQueryNameSpace = {}
client = GatewayClient(address=host, port=int(sys.argv[1]))
#gateway = JavaGateway(client, auto_convert = True)
gateway = JavaGateway(client)
intp = gateway.entry_point
intp.onPythonScriptInitialized(os.getpid())
java_import(gateway.jvm, "org.apache.zeppelin.display.Input")
z = __zeppelin__ = PyZeppelinContext(intp)
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
_zcUserQueryNameSpace["z"] = z
output = Logger()
sys.stdout = output
#sys.stderr = output
while True :
req = intp.getStatements()
if req == None:
break
try:
stmts = req.statements().split("\n")
final_code = []
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
for s in stmts:
if s == None:
continue
# skip comment
s_stripped = s.strip()
if len(s_stripped) == 0 or s_stripped.startswith("#"):
continue
final_code.append(s)
if final_code:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
code = compile('\n'.join(final_code), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
except:
raise Exception(traceback.format_exc())
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except Py4JNetworkError:
# lost connection from gateway server. exit
sys.exit(1)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
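# --- Illustrative sketch (editorial addition, not part of the original script) ---
# Standalone illustration of the compile strategy used in the loop above: everything
# except the last statement is compiled in 'exec' mode, and the last statement in
# 'single' mode so its value is echoed, mimicking an interactive shell. The real loop
# splits at the AST level (ast.Module / ast.Interactive) so multi-line statements are
# handled; this sketch simply splits on newlines for brevity. The source is made up.
def _example_exec_then_single():
    source = "x = 6\ny = 7\nx * y"
    lines = source.split("\n")
    namespace = {}
    exec(compile("\n".join(lines[:-1]), "<stdin>", "exec"), namespace)
    # 'single' mode prints the value of the bare expression, here 42
    exec(compile(lines[-1], "<stdin>", "single"), namespace)
    return namespace["x"], namespace["y"]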
|
wary/zeppelin
|
python/src/main/resources/python/zeppelin_python.py
|
Python
|
apache-2.0
| 9,381
| 0.012685
|
from c3nav.editor.models.changedobject import ChangedObject # noqa
from c3nav.editor.models.changeset import ChangeSet # noqa
from c3nav.editor.models.changesetupdate import ChangeSetUpdate # noqa
|
c3nav/c3nav
|
src/c3nav/editor/models/__init__.py
|
Python
|
apache-2.0
| 200
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('remote_id', models.IntegerField(null=True, blank=True)),
('remote_uri', models.CharField(max_length=256, null=True, blank=True)),
('profile_uri', models.CharField(max_length=256, null=True, blank=True)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
hzlf/openbroadcast.ch
|
app/remoteauth/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 3,171
| 0.00473
|
import socket
from heapq import heappush, heappop, heapify
from collections import defaultdict
##defbig
def encode(symb2freq):
"""Huffman encode the given dict mapping symbols to weights"""
heap = [[wt, [sym, ""]] for sym, wt in symb2freq.items()]
heapify(heap)
while len(heap) > 1:
lo = heappop(heap)
hi = heappop(heap)
for pair in lo[1:]:
pair[1] = '1' + pair[1]
for pair in hi[1:]:
pair[1] = '0' + pair[1]
heappush(heap, [lo[0] + hi[0]] + lo[1:] + hi[1:])
return sorted(heappop(heap)[1:], key=lambda p: (len(p[-1]), p))
##defend
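# --- Illustrative sketch (editorial addition, not part of the original script) ---
# A tiny decoder for the [symbol, code] table returned by encode(), showing that the
# generated codes are prefix-free and round-trip the input. The sample text mirrors
# the one transmitted below; the helper name is invented.
def _example_decode():
    from collections import Counter
    text = 'mississippi river'
    table = encode(Counter(text))
    code_for = dict((sym, code) for sym, code in table)
    sym_for = dict((code, sym) for sym, code in table)
    bits = ''.join(code_for[ch] for ch in text)
    decoded, buf = [], ''
    for bit in bits:
        buf += bit
        if buf in sym_for:
            decoded.append(sym_for[buf])
            buf = ''
    return ''.join(decoded) == text  # True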
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 1743
s.connect((host, port))
s.send("#BEGIN")
s.send("!")
f = open('a.txt', 'r')
#for line in f.readlines():
txt = 'mississippi river'
symb2freq = defaultdict(int)
for ch in txt:
symb2freq[ch] += 1
huff = encode(symb2freq)
for p in huff:
s.send("{0},{1},{2}".format(p[0], symb2freq[p[0]], p[1]))
s.send("#END")
s.close()
##############3/////////////
|
CSE-SOE-CUSAT/NOSLab
|
CSA/unsorted/username/client.py
|
Python
|
mit
| 1,125
| 0.013333
|
import asyncio
import json
import logging
import os
from typing import List, Optional
import aiohttp
import aiohttp_session
import uvloop
from aiohttp import web
from prometheus_async.aio.web import server_stats # type: ignore
from gear import (
Database,
Transaction,
check_csrf_token,
create_session,
maybe_parse_bearer_header,
monitor_endpoints_middleware,
rest_authenticated_developers_only,
rest_authenticated_users_only,
setup_aiohttp_session,
transaction,
web_authenticated_developers_only,
web_authenticated_users_only,
web_maybe_authenticated_user,
)
from gear.cloud_config import get_global_config
from hailtop import httpx
from hailtop.config import get_deploy_config
from hailtop.hail_logging import AccessLogger
from hailtop.tls import internal_server_ssl_context
from hailtop.utils import secret_alnum_string
from web_common import render_template, set_message, setup_aiohttp_jinja2, setup_common_static_routes
from .exceptions import (
AuthUserError,
DuplicateLoginID,
DuplicateUsername,
EmptyLoginID,
InvalidType,
InvalidUsername,
MultipleExistingUsers,
MultipleUserTypes,
PreviouslyDeletedUser,
UnknownUser,
)
from .flow import get_flow_client
log = logging.getLogger('auth')
uvloop.install()
CLOUD = get_global_config()['cloud']
ORGANIZATION_DOMAIN = os.environ['HAIL_ORGANIZATION_DOMAIN']
deploy_config = get_deploy_config()
routes = web.RouteTableDef()
async def user_from_login_id(db, login_id):
users = [x async for x in db.select_and_fetchall("SELECT * FROM users WHERE login_id = %s;", login_id)]
if len(users) == 1:
return users[0]
assert len(users) == 0, users
return None
async def users_with_username_or_login_id(tx: Transaction, username: str, login_id: Optional[str]) -> List[dict]:
where_conditions = ['username = %s']
where_args = [username]
if login_id is not None:
where_conditions.append('login_id = %s')
where_args.append(login_id)
existing_users = [
x
async for x in tx.execute_and_fetchall(
f"SELECT * FROM users WHERE {' OR '.join(where_conditions)} LOCK IN SHARE MODE;", where_args
)
]
return existing_users
async def check_valid_new_user(tx: Transaction, username, login_id, is_developer, is_service_account) -> Optional[dict]:
if not isinstance(username, str):
raise InvalidType('username', username, 'str')
if login_id is not None and not isinstance(login_id, str):
raise InvalidType('login_id', login_id, 'str')
if not isinstance(is_developer, bool):
raise InvalidType('is_developer', is_developer, 'bool')
if not isinstance(is_service_account, bool):
raise InvalidType('is_service_account', is_service_account, 'bool')
if is_developer and is_service_account:
raise MultipleUserTypes(username)
if not is_service_account and not login_id:
raise EmptyLoginID(username)
    if not username or not all(c.isalnum() for c in username):
raise InvalidUsername(username)
existing_users = await users_with_username_or_login_id(tx, username, login_id)
if len(existing_users) > 1:
raise MultipleExistingUsers(username, login_id)
if len(existing_users) == 1:
existing_user = existing_users[0]
expected_username = existing_user['username']
expected_login_id = existing_user['login_id']
if username != expected_username:
raise DuplicateLoginID(expected_username, login_id)
if login_id != expected_login_id:
raise DuplicateUsername(username, expected_login_id)
if existing_user['state'] in ('deleting', 'deleted'):
raise PreviouslyDeletedUser(username)
return existing_user
return None
async def insert_new_user(
db: Database, username: str, login_id: Optional[str], is_developer: bool, is_service_account: bool
) -> bool:
@transaction(db)
async def _insert(tx):
existing_user = await check_valid_new_user(tx, username, login_id, is_developer, is_service_account)
if existing_user is not None:
return False
await tx.execute_insertone(
'''
INSERT INTO users (state, username, login_id, is_developer, is_service_account)
VALUES (%s, %s, %s, %s, %s);
''',
('creating', username, login_id, is_developer, is_service_account),
)
    created_user = await _insert()  # pylint: disable=no-value-for-parameter
    # _insert returns False when the user already existed and None after inserting.
    return created_user is None
def cleanup_session(session):
def _delete(key):
if key in session:
del session[key]
_delete('pending')
_delete('login_id')
_delete('next')
_delete('caller')
_delete('session_id')
_delete('flow')
@routes.get('/healthcheck')
async def get_healthcheck(request): # pylint: disable=W0613
return web.Response()
@routes.get('')
@routes.get('/')
async def get_index(request): # pylint: disable=unused-argument
return aiohttp.web.HTTPFound(deploy_config.external_url('auth', '/login'))
@routes.get('/creating')
@web_maybe_authenticated_user
async def creating_account(request, userdata):
db = request.app['db']
session = await aiohttp_session.get_session(request)
if 'pending' in session:
login_id = session['login_id']
user = await user_from_login_id(db, login_id)
nb_url = deploy_config.external_url('notebook', '')
next_page = session.pop('next', nb_url)
cleanup_session(session)
if user is None:
set_message(session, f'Account does not exist for login id {login_id}.', 'error')
return aiohttp.web.HTTPFound(nb_url)
page_context = {'username': user['username'], 'state': user['state'], 'login_id': user['login_id']}
if user['state'] == 'deleting' or user['state'] == 'deleted':
return await render_template('auth', request, userdata, 'account-error.html', page_context)
if user['state'] == 'active':
session_id = await create_session(db, user['id'])
session['session_id'] = session_id
set_message(session, f'Account has been created for {user["username"]}.', 'info')
return aiohttp.web.HTTPFound(next_page)
assert user['state'] == 'creating'
session['pending'] = True
session['login_id'] = login_id
session['next'] = next_page
return await render_template('auth', request, userdata, 'account-creating.html', page_context)
return aiohttp.web.HTTPUnauthorized()
@routes.get('/creating/wait')
async def creating_account_wait(request):
session = await aiohttp_session.get_session(request)
if 'pending' not in session:
raise web.HTTPUnauthorized()
return await _wait_websocket(request, session['login_id'])
async def _wait_websocket(request, login_id):
app = request.app
db = app['db']
user = await user_from_login_id(db, login_id)
if not user:
return web.HTTPNotFound()
ws = web.WebSocketResponse()
await ws.prepare(request)
try:
count = 0
while count < 10:
try:
user = await user_from_login_id(db, login_id)
assert user
if user['state'] != 'creating':
log.info(f"user {user['username']} is no longer creating")
break
except asyncio.CancelledError:
raise
except Exception: # pylint: disable=broad-except
log.exception(f"/creating/wait: error while updating status for user {user['username']}")
await asyncio.sleep(1)
count += 1
if count >= 10:
log.info(f"user {user['username']} is still in state creating")
ready = user['state'] == 'active'
await ws.send_str(str(int(ready)))
return ws
finally:
await ws.close()
@routes.get('/signup')
async def signup(request):
next_page = request.query.get('next', deploy_config.external_url('notebook', ''))
flow_data = request.app['flow_client'].initiate_flow(deploy_config.external_url('auth', '/oauth2callback'))
session = await aiohttp_session.new_session(request)
cleanup_session(session)
session['next'] = next_page
session['caller'] = 'signup'
session['flow'] = flow_data
return aiohttp.web.HTTPFound(flow_data['authorization_url'])
@routes.get('/login')
async def login(request):
next_page = request.query.get('next', deploy_config.external_url('notebook', ''))
flow_data = request.app['flow_client'].initiate_flow(deploy_config.external_url('auth', '/oauth2callback'))
session = await aiohttp_session.new_session(request)
cleanup_session(session)
session['next'] = next_page
session['caller'] = 'login'
session['flow'] = flow_data
return aiohttp.web.HTTPFound(flow_data['authorization_url'])
@routes.get('/oauth2callback')
async def callback(request):
session = await aiohttp_session.get_session(request)
if 'flow' not in session:
raise web.HTTPUnauthorized()
nb_url = deploy_config.external_url('notebook', '')
creating_url = deploy_config.external_url('auth', '/creating')
caller = session['caller']
next_page = session.pop('next', nb_url)
flow_dict = session['flow']
flow_dict['callback_uri'] = deploy_config.external_url('auth', '/oauth2callback')
cleanup_session(session)
try:
flow_result = request.app['flow_client'].receive_callback(request, flow_dict)
login_id = flow_result.login_id
except asyncio.CancelledError:
raise
except Exception as e:
log.exception('oauth2 callback: could not fetch and verify token')
raise web.HTTPUnauthorized() from e
db = request.app['db']
user = await user_from_login_id(db, login_id)
if user is None:
if caller == 'login':
set_message(session, f'Account does not exist for login id {login_id}', 'error')
return aiohttp.web.HTTPFound(nb_url)
assert caller == 'signup'
username, domain = flow_result.email.split('@')
username = ''.join(c for c in username if c.isalnum())
if domain != ORGANIZATION_DOMAIN:
raise web.HTTPUnauthorized()
try:
await insert_new_user(db, username, login_id, is_developer=False, is_service_account=False)
except AuthUserError as e:
set_message(session, e.message, 'error')
return web.HTTPFound(deploy_config.external_url('notebook', ''))
session['pending'] = True
session['login_id'] = login_id
return web.HTTPFound(creating_url)
if user['state'] in ('deleting', 'deleted'):
page_context = {'username': user['username'], 'state': user['state'], 'login_id': user['login_id']}
return await render_template('auth', request, user, 'account-error.html', page_context)
if user['state'] == 'creating':
if caller == 'signup':
set_message(session, f'Account is already creating for login id {login_id}', 'error')
if caller == 'login':
set_message(session, f'Account for login id {login_id} is still being created.', 'error')
session['pending'] = True
session['login_id'] = user['login_id']
return web.HTTPFound(creating_url)
assert user['state'] == 'active'
if caller == 'signup':
set_message(session, f'Account has already been created for {user["username"]}.', 'info')
session_id = await create_session(db, user['id'])
session['session_id'] = session_id
return aiohttp.web.HTTPFound(next_page)
@routes.post('/api/v1alpha/users/{user}/create')
@rest_authenticated_developers_only
async def create_user(request: web.Request, userdata): # pylint: disable=unused-argument
db: Database = request.app['db']
username = request.match_info['user']
body = await request.json()
login_id = body['login_id']
is_developer = body['is_developer']
is_service_account = body['is_service_account']
try:
await insert_new_user(db, username, login_id, is_developer, is_service_account)
except AuthUserError as e:
raise e.http_response()
return web.json_response()
@routes.get('/user')
@web_authenticated_users_only()
async def user_page(request, userdata):
return await render_template('auth', request, userdata, 'user.html', {'cloud': CLOUD})
async def create_copy_paste_token(db, session_id, max_age_secs=300):
copy_paste_token = secret_alnum_string()
await db.just_execute(
"INSERT INTO copy_paste_tokens (id, session_id, max_age_secs) VALUES(%s, %s, %s);",
(copy_paste_token, session_id, max_age_secs),
)
return copy_paste_token
@routes.post('/copy-paste-token')
@check_csrf_token
@web_authenticated_users_only()
async def get_copy_paste_token(request, userdata):
session = await aiohttp_session.get_session(request)
session_id = session['session_id']
db = request.app['db']
copy_paste_token = await create_copy_paste_token(db, session_id)
page_context = {'copy_paste_token': copy_paste_token}
return await render_template('auth', request, userdata, 'copy-paste-token.html', page_context)
@routes.post('/api/v1alpha/copy-paste-token')
@rest_authenticated_users_only
async def get_copy_paste_token_api(request, userdata):
session_id = userdata['session_id']
db = request.app['db']
copy_paste_token = await create_copy_paste_token(db, session_id)
return web.Response(body=copy_paste_token)
@routes.post('/logout')
@check_csrf_token
@web_maybe_authenticated_user
async def logout(request, userdata):
if not userdata:
return web.HTTPFound(deploy_config.external_url('notebook', ''))
db = request.app['db']
session_id = userdata['session_id']
await db.just_execute('DELETE FROM sessions WHERE session_id = %s;', session_id)
session = await aiohttp_session.get_session(request)
cleanup_session(session)
return web.HTTPFound(deploy_config.external_url('notebook', ''))
@routes.get('/api/v1alpha/login')
async def rest_login(request):
callback_port = request.query['callback_port']
callback_uri = f'http://127.0.0.1:{callback_port}/oauth2callback'
flow_data = request.app['flow_client'].initiate_flow(callback_uri)
flow_data['callback_uri'] = callback_uri
# keeping authorization_url and state for backwards compatibility
return web.json_response(
{'flow': flow_data, 'authorization_url': flow_data['authorization_url'], 'state': flow_data['state']}
)
@routes.get('/roles')
@web_authenticated_developers_only()
async def get_roles(request, userdata):
db = request.app['db']
roles = [x async for x in db.select_and_fetchall('SELECT * FROM roles;')]
page_context = {'roles': roles}
return await render_template('auth', request, userdata, 'roles.html', page_context)
@routes.post('/roles')
@check_csrf_token
@web_authenticated_developers_only()
async def post_create_role(request, userdata): # pylint: disable=unused-argument
session = await aiohttp_session.get_session(request)
db = request.app['db']
post = await request.post()
name = post['name']
role_id = await db.execute_insertone(
'''
INSERT INTO `roles` (`name`)
VALUES (%s);
''',
(name),
)
set_message(session, f'Created role {role_id} {name}.', 'info')
return web.HTTPFound(deploy_config.external_url('auth', '/roles'))
@routes.get('/users')
@web_authenticated_developers_only()
async def get_users(request, userdata):
db = request.app['db']
users = [x async for x in db.select_and_fetchall('SELECT * FROM users;')]
page_context = {'users': users}
return await render_template('auth', request, userdata, 'users.html', page_context)
@routes.post('/users')
@check_csrf_token
@web_authenticated_developers_only()
async def post_create_user(request, userdata): # pylint: disable=unused-argument
session = await aiohttp_session.get_session(request)
db = request.app['db']
post = await request.post()
username = post['username']
login_id = post.get('login_id', '')
is_developer = post.get('is_developer') == '1'
is_service_account = post.get('is_service_account') == '1'
try:
if login_id == '':
login_id = None
created_user = await insert_new_user(db, username, login_id, is_developer, is_service_account)
except AuthUserError as e:
set_message(session, e.message, 'error')
return web.HTTPFound(deploy_config.external_url('auth', '/users'))
if created_user:
set_message(session, f'Created user {username} {login_id}.', 'info')
else:
set_message(session, f'User {username} {login_id} already exists.', 'info')
return web.HTTPFound(deploy_config.external_url('auth', '/users'))
@routes.get('/api/v1alpha/users')
@rest_authenticated_developers_only
async def rest_get_users(request, userdata): # pylint: disable=unused-argument
db: Database = request.app['db']
users = await db.select_and_fetchall(
'''
SELECT id, username, login_id, state, is_developer, is_service_account FROM users;
'''
)
return web.json_response([user async for user in users])
@routes.get('/api/v1alpha/users/{user}')
@rest_authenticated_developers_only
async def rest_get_user(request, userdata): # pylint: disable=unused-argument
db: Database = request.app['db']
username = request.match_info['user']
user = await db.select_and_fetchone(
'''
SELECT id, username, login_id, state, is_developer, is_service_account FROM users
WHERE username = %s;
''',
(username,),
)
if user is None:
raise web.HTTPNotFound()
return web.json_response(user)
async def _delete_user(db: Database, username: str, id: Optional[str]):
where_conditions = ['state != "deleted"', 'username = %s']
where_args = [username]
if id is not None:
where_conditions.append('id = %s')
where_args.append(id)
n_rows = await db.execute_update(
f'''
UPDATE users
SET state = 'deleting'
WHERE {' AND '.join(where_conditions)};
''',
where_args,
)
if n_rows == 0:
raise UnknownUser(username)
@routes.post('/users/delete')
@check_csrf_token
@web_authenticated_developers_only()
async def delete_user(request, userdata): # pylint: disable=unused-argument
session = await aiohttp_session.get_session(request)
db = request.app['db']
post = await request.post()
id = post['id']
username = post['username']
try:
await _delete_user(db, username, id)
set_message(session, f'Deleted user {id} {username}.', 'info')
except UnknownUser:
set_message(session, f'Delete failed, no such user {id} {username}.', 'error')
return web.HTTPFound(deploy_config.external_url('auth', '/users'))
@routes.delete('/api/v1alpha/users/{user}')
@rest_authenticated_developers_only
async def rest_delete_user(request: web.Request, userdata): # pylint: disable=unused-argument
db = request.app['db']
username = request.match_info['user']
try:
await _delete_user(db, username, None)
except UnknownUser as e:
return e.http_response()
return web.json_response()
@routes.get('/api/v1alpha/oauth2callback')
async def rest_callback(request):
flow_json = request.query.get('flow')
if flow_json is None:
# backwards compatibility with older versions of hailctl
callback_port = request.query['callback_port']
flow_dict = {
'state': request.query['state'],
'callback_uri': f'http://127.0.0.1:{callback_port}/oauth2callback',
}
else:
flow_dict = json.loads(request.query['flow'])
try:
flow_result = request.app['flow_client'].receive_callback(request, flow_dict)
except asyncio.CancelledError:
raise
except Exception as e:
log.exception('fetching and decoding token')
raise web.HTTPUnauthorized() from e
db = request.app['db']
users = [
x
async for x in db.select_and_fetchall(
"SELECT * FROM users WHERE login_id = %s AND state = 'active';", flow_result.login_id
)
]
if len(users) != 1:
raise web.HTTPUnauthorized()
user = users[0]
session_id = await create_session(db, user['id'], max_age_secs=None)
return web.json_response({'token': session_id, 'username': user['username']})
@routes.post('/api/v1alpha/copy-paste-login')
async def rest_copy_paste_login(request):
copy_paste_token = request.query['copy_paste_token']
db = request.app['db']
@transaction(db)
async def maybe_pop_token(tx):
session = await tx.execute_and_fetchone(
"""
SELECT sessions.session_id AS session_id, users.username AS username FROM copy_paste_tokens
INNER JOIN sessions ON sessions.session_id = copy_paste_tokens.session_id
INNER JOIN users ON users.id = sessions.user_id
WHERE copy_paste_tokens.id = %s
AND NOW() < TIMESTAMPADD(SECOND, copy_paste_tokens.max_age_secs, copy_paste_tokens.created)
AND users.state = 'active';""",
copy_paste_token,
)
if session is None:
raise web.HTTPUnauthorized()
await tx.just_execute("DELETE FROM copy_paste_tokens WHERE id = %s;", copy_paste_token)
return session
session = await maybe_pop_token() # pylint: disable=no-value-for-parameter
return web.json_response({'token': session['session_id'], 'username': session['username']})
@routes.post('/api/v1alpha/logout')
@rest_authenticated_users_only
async def rest_logout(request, userdata):
session_id = userdata['session_id']
db = request.app['db']
await db.just_execute('DELETE FROM sessions WHERE session_id = %s;', session_id)
return web.Response(status=200)
async def get_userinfo(request, session_id):
# b64 encoding of 32-byte session ID is 44 bytes
if len(session_id) != 44:
log.info('Session id != 44 bytes')
raise web.HTTPUnauthorized()
db = request.app['db']
users = [
x
async for x in db.select_and_fetchall(
'''
SELECT users.*, sessions.session_id FROM users
INNER JOIN sessions ON users.id = sessions.user_id
WHERE users.state = 'active' AND (sessions.session_id = %s) AND (ISNULL(sessions.max_age_secs) OR (NOW() < TIMESTAMPADD(SECOND, sessions.max_age_secs, sessions.created)));
''',
session_id,
)
]
if len(users) != 1:
log.info(f'Unknown session id: {session_id}')
raise web.HTTPUnauthorized()
return users[0]
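# --- Illustrative check (editorial addition, not part of the original service) ---
# The 44-byte expectation above is plain base64 arithmetic: 32 random bytes encode to
# ceil(32 / 3) * 4 = 44 characters. Whatever gear's create_session actually stores,
# this sketch only verifies that arithmetic; the helper name is invented.
def _example_session_id_length() -> bool:
    import base64
    import secrets
    return len(base64.b64encode(secrets.token_bytes(32)).decode()) == 44  # True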
@routes.get('/api/v1alpha/userinfo')
async def userinfo(request):
if 'Authorization' not in request.headers:
log.info('Authorization not in request.headers')
raise web.HTTPUnauthorized()
auth_header = request.headers['Authorization']
session_id = maybe_parse_bearer_header(auth_header)
if not session_id:
log.info('Bearer not in Authorization header')
raise web.HTTPUnauthorized()
return web.json_response(await get_userinfo(request, session_id))
async def get_session_id(request):
if 'X-Hail-Internal-Authorization' in request.headers:
return maybe_parse_bearer_header(request.headers['X-Hail-Internal-Authorization'])
if 'Authorization' in request.headers:
return maybe_parse_bearer_header(request.headers['Authorization'])
session = await aiohttp_session.get_session(request)
return session.get('session_id')
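# --- Illustrative sketch (editorial addition, not part of the original service) ---
# maybe_parse_bearer_header is provided by the gear package and its real
# implementation may differ; a minimal version consistent with how it is called in
# get_session_id above would simply strip a 'Bearer ' prefix. The helper name is invented.
def _example_parse_bearer_header(header_value):
    prefix = 'Bearer '
    if header_value and header_value.startswith(prefix):
        return header_value[len(prefix):]
    return None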
@routes.get('/api/v1alpha/verify_dev_credentials')
async def verify_dev_credentials(request):
session_id = await get_session_id(request)
if not session_id:
raise web.HTTPUnauthorized()
userdata = await get_userinfo(request, session_id)
is_developer = userdata is not None and userdata['is_developer'] == 1
if not is_developer:
raise web.HTTPUnauthorized()
return web.Response(status=200)
@routes.get('/api/v1alpha/verify_dev_or_sa_credentials')
async def verify_dev_or_sa_credentials(request):
session_id = await get_session_id(request)
if not session_id:
raise web.HTTPUnauthorized()
userdata = await get_userinfo(request, session_id)
is_developer_or_sa = userdata is not None and (userdata['is_developer'] == 1 or userdata['is_service_account'] == 1)
if not is_developer_or_sa:
raise web.HTTPUnauthorized()
return web.Response(status=200)
async def on_startup(app):
db = Database()
await db.async_init(maxsize=50)
app['db'] = db
app['client_session'] = httpx.client_session()
app['flow_client'] = get_flow_client('/auth-oauth2-client-secret/client_secret.json')
async def on_cleanup(app):
try:
await app['db'].async_close()
finally:
await app['client_session'].close()
def run():
app = web.Application(middlewares=[monitor_endpoints_middleware])
setup_aiohttp_jinja2(app, 'auth')
setup_aiohttp_session(app)
setup_common_static_routes(routes)
app.add_routes(routes)
app.router.add_get("/metrics", server_stats)
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
web.run_app(
deploy_config.prefix_application(app, 'auth'),
host='0.0.0.0',
port=5000,
access_log_class=AccessLogger,
ssl_context=internal_server_ssl_context(),
)
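# --- Editor's illustrative aside (not part of the original auth.py) ---
# Minimal client-side sketch of calling the userinfo endpoint defined above
# with a bearer token. The base URL is a placeholder; real deployments
# resolve it through deploy_config.
async def _fetch_userinfo_sketch(base_url, session_id):
    import aiohttp
    headers = {'Authorization': f'Bearer {session_id}'}
    async with aiohttp.ClientSession() as client:
        async with client.get(f'{base_url}/api/v1alpha/userinfo', headers=headers) as resp:
            resp.raise_for_status()
            return await resp.json()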
| hail-is/hail | auth/auth/auth.py | Python | mit | 25,475 | 0.002198 |
from sympy import (diff, trigsimp, expand, sin, cos, solve, Symbol, sympify,
eye, symbols, Dummy, ImmutableMatrix as Matrix, MatrixBase)
from sympy.core.compatibility import string_types, range
from sympy.physics.vector.vector import Vector, _check_vector
__all__ = ['CoordinateSym', 'ReferenceFrame']
class CoordinateSym(Symbol):
"""
A coordinate symbol/base scalar associated with a ReferenceFrame.
Ideally, users should not instantiate this class. Instances of
this class must only be accessed through the corresponding frame
as 'frame[index]'.
CoordinateSyms having the same frame and index parameters are equal
(even though they may be instantiated separately).
Parameters
==========
name : string
The display name of the CoordinateSym
frame : ReferenceFrame
The reference frame this base scalar belongs to
index : 0, 1 or 2
The index of the dimension denoted by this coordinate variable
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, CoordinateSym
>>> A = ReferenceFrame('A')
>>> A[1]
A_y
>>> type(A[0])
<class 'sympy.physics.vector.frame.CoordinateSym'>
>>> a_y = CoordinateSym('a_y', A, 1)
>>> a_y == A[1]
True
"""
def __new__(cls, name, frame, index):
# We can't use the cached Symbol.__new__ because this class depends on
# frame and index, which are not passed to Symbol.__xnew__.
assumptions = {}
super(CoordinateSym, cls)._sanitize(assumptions, cls)
obj = super(CoordinateSym, cls).__xnew__(cls, name, **assumptions)
_check_frame(frame)
if index not in range(0, 3):
raise ValueError("Invalid index specified")
obj._id = (frame, index)
return obj
@property
def frame(self):
return self._id[0]
def __eq__(self, other):
#Check if the other object is a CoordinateSym of the same frame
#and same index
if isinstance(other, CoordinateSym):
if other._id == self._id:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return tuple((self._id[0].__hash__(), self._id[1])).__hash__()
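# --- Editor's illustrative aside (not part of the sympy source) ---
# CoordinateSym equality depends only on (frame, index), as the docstring
# above states; the display name plays no part. A quick self-check:
def _coordinatesym_equality_sketch():
    A = ReferenceFrame('A')
    assert CoordinateSym('whatever', A, 1) == A[1]
    assert hash(CoordinateSym('whatever', A, 1)) == hash(A[1])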
class ReferenceFrame(object):
"""A reference frame in classical mechanics.
ReferenceFrame is a class used to represent a reference frame in classical
mechanics. It has a standard basis of three unit vectors in the frame's
x, y, and z directions.
It also can have a rotation relative to a parent frame; this rotation is
defined by a direction cosine matrix relating this frame's basis vectors to
the parent frame's basis vectors. It can also have an angular velocity
vector, defined in another frame.
"""
def __init__(self, name, indices=None, latexs=None, variables=None):
"""ReferenceFrame initialization method.
A ReferenceFrame has a set of orthonormal basis vectors, along with
orientations relative to other ReferenceFrames and angular velocities
relative to other ReferenceFrames.
Parameters
==========
indices : list (of strings)
If custom indices are desired for console, pretty, and LaTeX
printing, supply three as a list. The basis vectors can then be
accessed with the get_item method.
latexs : list (of strings)
If custom names are desired for LaTeX printing of each basis
vector, supply the names here in a list.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, vlatex
>>> N = ReferenceFrame('N')
>>> N.x
N.x
>>> O = ReferenceFrame('O', indices=('1', '2', '3'))
>>> O.x
O['1']
>>> O['1']
O['1']
>>> P = ReferenceFrame('P', latexs=('A1', 'A2', 'A3'))
>>> vlatex(P.x)
'A1'
"""
if not isinstance(name, string_types):
raise TypeError('Need to supply a valid name')
# The if statements below are for custom printing of basis-vectors for
# each frame.
# First case, when custom indices are supplied
if indices is not None:
if not isinstance(indices, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(indices) != 3:
raise ValueError('Supply 3 indices')
for i in indices:
if not isinstance(i, string_types):
raise TypeError('Indices must be strings')
self.str_vecs = [(name + '[\'' + indices[0] + '\']'),
(name + '[\'' + indices[1] + '\']'),
(name + '[\'' + indices[2] + '\']')]
self.pretty_vecs = [(name.lower() + u"_" + indices[0]),
(name.lower() + u"_" + indices[1]),
(name.lower() + u"_" + indices[2])]
self.latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[0])), (r"\mathbf{\hat{%s}_{%s}}" %
(name.lower(), indices[1])),
(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[2]))]
self.indices = indices
# Second case, when no custom indices are supplied
else:
self.str_vecs = [(name + '.x'), (name + '.y'), (name + '.z')]
self.pretty_vecs = [name.lower() + u"_x",
name.lower() + u"_y",
name.lower() + u"_z"]
self.latex_vecs = [(r"\mathbf{\hat{%s}_x}" % name.lower()),
(r"\mathbf{\hat{%s}_y}" % name.lower()),
(r"\mathbf{\hat{%s}_z}" % name.lower())]
self.indices = ['x', 'y', 'z']
# Different step, for custom latex basis vectors
if latexs is not None:
if not isinstance(latexs, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(latexs) != 3:
raise ValueError('Supply 3 indices')
for i in latexs:
if not isinstance(i, string_types):
raise TypeError('Latex entries must be strings')
self.latex_vecs = latexs
self.name = name
self._var_dict = {}
#The _dcm_dict dictionary will only store the dcms of parent-child
#relationships. The _dcm_cache dictionary will work as the dcm
#cache.
self._dcm_dict = {}
self._dcm_cache = {}
self._ang_vel_dict = {}
self._ang_acc_dict = {}
self._dlist = [self._dcm_dict, self._ang_vel_dict, self._ang_acc_dict]
self._cur = 0
self._x = Vector([(Matrix([1, 0, 0]), self)])
self._y = Vector([(Matrix([0, 1, 0]), self)])
self._z = Vector([(Matrix([0, 0, 1]), self)])
#Associate coordinate symbols wrt this frame
if variables is not None:
if not isinstance(variables, (tuple, list)):
raise TypeError('Supply the variable names as a list/tuple')
if len(variables) != 3:
raise ValueError('Supply 3 variable names')
for i in variables:
if not isinstance(i, string_types):
raise TypeError('Variable names must be strings')
else:
variables = [name + '_x', name + '_y', name + '_z']
self.varlist = (CoordinateSym(variables[0], self, 0), \
CoordinateSym(variables[1], self, 1), \
CoordinateSym(variables[2], self, 2))
def __getitem__(self, ind):
"""
Returns basis vector for the provided index, if the index is a string.
If the index is a number, returns the coordinate variable corresponding
to that index.
"""
if not isinstance(ind, str):
if ind < 3:
return self.varlist[ind]
else:
raise ValueError("Invalid index provided")
if self.indices[0] == ind:
return self.x
if self.indices[1] == ind:
return self.y
if self.indices[2] == ind:
return self.z
else:
raise ValueError('Not a defined index')
def __iter__(self):
return iter([self.x, self.y, self.z])
def __str__(self):
"""Returns the name of the frame. """
return self.name
__repr__ = __str__
def _dict_list(self, other, num):
"""Creates a list from self to other using _dcm_dict. """
outlist = [[self]]
oldlist = [[]]
while outlist != oldlist:
oldlist = outlist[:]
for i, v in enumerate(outlist):
templist = v[-1]._dlist[num].keys()
for i2, v2 in enumerate(templist):
if not v.__contains__(v2):
littletemplist = v + [v2]
if not outlist.__contains__(littletemplist):
outlist.append(littletemplist)
for i, v in enumerate(oldlist):
if v[-1] != other:
outlist.remove(v)
outlist.sort(key=len)
if len(outlist) != 0:
return outlist[0]
raise ValueError('No Connecting Path found between ' + self.name +
' and ' + other.name)
def _w_diff_dcm(self, otherframe):
"""Angular velocity from time differentiating the DCM. """
from sympy.physics.vector.functions import dynamicsymbols
dcm2diff = self.dcm(otherframe)
diffed = dcm2diff.diff(dynamicsymbols._t)
angvelmat = diffed * dcm2diff.T
w1 = trigsimp(expand(angvelmat[7]), recursive=True)
w2 = trigsimp(expand(angvelmat[2]), recursive=True)
w3 = trigsimp(expand(angvelmat[3]), recursive=True)
return -Vector([(Matrix([w1, w2, w3]), self)])
def variable_map(self, otherframe):
"""
Returns a dictionary which expresses the coordinate variables
of this frame in terms of the variables of otherframe.
If Vector.simp is True, returns a simplified version of the mapped
values. Else, returns them without simplification.
Simplification of the expressions may take time.
Parameters
==========
otherframe : ReferenceFrame
The other frame to map the variables to
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> A = ReferenceFrame('A')
>>> q = dynamicsymbols('q')
>>> B = A.orientnew('B', 'Axis', [q, A.z])
>>> A.variable_map(B)
{A_x: B_x*cos(q(t)) - B_y*sin(q(t)), A_y: B_x*sin(q(t)) + B_y*cos(q(t)), A_z: B_z}
"""
_check_frame(otherframe)
if (otherframe, Vector.simp) in self._var_dict:
return self._var_dict[(otherframe, Vector.simp)]
else:
vars_matrix = self.dcm(otherframe) * Matrix(otherframe.varlist)
mapping = {}
for i, x in enumerate(self):
if Vector.simp:
mapping[self.varlist[i]] = trigsimp(vars_matrix[i], method='fu')
else:
mapping[self.varlist[i]] = vars_matrix[i]
self._var_dict[(otherframe, Vector.simp)] = mapping
return mapping
def ang_acc_in(self, otherframe):
"""Returns the angular acceleration Vector of the ReferenceFrame.
Effectively returns the Vector:
^N alpha ^B
which represents the angular acceleration of B in N, where B is self, and
N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular acceleration is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
_check_frame(otherframe)
if otherframe in self._ang_acc_dict:
return self._ang_acc_dict[otherframe]
else:
return self.ang_vel_in(otherframe).dt(otherframe)
def ang_vel_in(self, otherframe):
"""Returns the angular velocity Vector of the ReferenceFrame.
Effectively returns the Vector:
^N omega ^B
which represents the angular velocity of B in N, where B is self, and
N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular velocity is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
"""
_check_frame(otherframe)
flist = self._dict_list(otherframe, 1)
outvec = Vector(0)
for i in range(len(flist) - 1):
outvec += flist[i]._ang_vel_dict[flist[i + 1]]
return outvec
def dcm(self, otherframe):
"""The direction cosine matrix between frames.
This gives the DCM between this frame and the otherframe.
The format is N.xyz = N.dcm(B) * B.xyz
A SymPy Matrix is returned.
Parameters
==========
otherframe : ReferenceFrame
The otherframe which the DCM is generated to.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> N.dcm(A)
Matrix([
[1, 0, 0],
[0, cos(q1), -sin(q1)],
[0, sin(q1), cos(q1)]])
"""
_check_frame(otherframe)
#Check if the dcm wrt that frame has already been calculated
if otherframe in self._dcm_cache:
return self._dcm_cache[otherframe]
flist = self._dict_list(otherframe, 0)
outdcm = eye(3)
for i in range(len(flist) - 1):
outdcm = outdcm * flist[i]._dcm_dict[flist[i + 1]]
#After calculation, store the dcm in dcm cache for faster
#future retrieval
self._dcm_cache[otherframe] = outdcm
otherframe._dcm_cache[self] = outdcm.T
return outdcm
def orient(self, parent, rot_type, amounts, rot_order=''):
"""Defines the orientation of this frame relative to a parent frame.
Parameters
==========
parent : ReferenceFrame
The frame that this ReferenceFrame will have its orientation matrix
defined in relation to.
rot_type : str
The type of orientation matrix that is being created. Supported
types are 'Body', 'Space', 'Quaternion', 'Axis', and 'DCM'. See examples
for correct usage.
amounts : list OR value
The quantities that the orientation matrix will be defined by.
In case of rot_type='DCM', value must be a sympy.matrices.MatrixBase object
(or subclasses of it).
rot_order : str
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> from sympy import symbols, eye, ImmutableMatrix
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
Now we have a choice of how to implement the orientation. First is
Body. Body orientation takes this reference frame through three
successive simple rotations. Acceptable rotation orders are of length
3, expressed in XYZ or 123, and cannot have a rotation about an
axis twice in a row.
>>> B.orient(N, 'Body', [q1, q2, q3], '123')
>>> B.orient(N, 'Body', [q1, q2, 0], 'ZXZ')
>>> B.orient(N, 'Body', [0, 0, 0], 'XYX')
Next is Space. Space is like Body, but the rotations are applied in the
opposite order.
>>> B.orient(N, 'Space', [q1, q2, q3], '312')
Next is Quaternion. This orients the new ReferenceFrame with
Quaternions, defined as a finite rotation about lambda, a unit vector,
by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
>>> B.orient(N, 'Quaternion', [q0, q1, q2, q3])
Next is Axis. This is a rotation about an arbitrary, non-time-varying
axis by some angle. The axis is supplied as a Vector. This is how
simple rotations are defined.
>>> B.orient(N, 'Axis', [q1, N.x + 2 * N.y])
Last is DCM (Direction Cosine Matrix). This is a rotation matrix given manually.
>>> B.orient(N, 'DCM', eye(3))
>>> B.orient(N, 'DCM', ImmutableMatrix([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]))
"""
from sympy.physics.vector.functions import dynamicsymbols
_check_frame(parent)
# Allow passing a rotation matrix manually.
if rot_type == 'DCM':
# When rot_type == 'DCM', then amounts must be a Matrix type object
# (e.g. sympy.matrices.dense.MutableDenseMatrix).
if not isinstance(amounts, MatrixBase):
raise TypeError("Amounts must be a sympy Matrix type object.")
else:
amounts = list(amounts)
for i, v in enumerate(amounts):
if not isinstance(v, Vector):
amounts[i] = sympify(v)
def _rot(axis, angle):
"""DCM for simple axis 1,2,or 3 rotations. """
if axis == 1:
return Matrix([[1, 0, 0],
[0, cos(angle), -sin(angle)],
[0, sin(angle), cos(angle)]])
elif axis == 2:
return Matrix([[cos(angle), 0, sin(angle)],
[0, 1, 0],
[-sin(angle), 0, cos(angle)]])
elif axis == 3:
return Matrix([[cos(angle), -sin(angle), 0],
[sin(angle), cos(angle), 0],
[0, 0, 1]])
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
rot_order = str(
rot_order).upper() # Now we need to make sure XYZ = 123
rot_type = rot_type.upper()
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
if rot_order not in approved_orders:
raise TypeError('The supplied order is not an approved type')
parent_orient = []
if rot_type == 'AXIS':
if not rot_order == '':
raise TypeError('Axis orientation takes no rotation order')
if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 2)):
raise TypeError('Amounts are a list or tuple of length 2')
theta = amounts[0]
axis = amounts[1]
axis = _check_vector(axis)
if not axis.dt(parent) == 0:
raise ValueError('Axis cannot be time-varying')
axis = axis.express(parent).normalize()
axis = axis.args[0][0]
parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +
Matrix([[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]) * sin(theta) + axis * axis.T)
elif rot_type == 'QUATERNION':
if not rot_order == '':
raise TypeError(
'Quaternion orientation takes no rotation order')
if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 4)):
raise TypeError('Amounts are a list or tuple of length 4')
q0, q1, q2, q3 = amounts
parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 **
2, 2 * (q1 * q2 - q0 * q3), 2 * (q0 * q2 + q1 * q3)],
[2 * (q1 * q2 + q0 * q3), q0 ** 2 - q1 ** 2 + q2 ** 2 - q3 ** 2,
2 * (q2 * q3 - q0 * q1)], [2 * (q1 * q3 - q0 * q2), 2 * (q0 *
q1 + q2 * q3), q0 ** 2 - q1 ** 2 - q2 ** 2 + q3 ** 2]]))
elif rot_type == 'BODY':
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Body orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient = (_rot(a1, amounts[0]) * _rot(a2, amounts[1])
* _rot(a3, amounts[2]))
elif rot_type == 'SPACE':
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Space orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient = (_rot(a3, amounts[2]) * _rot(a2, amounts[1])
* _rot(a1, amounts[0]))
elif rot_type == 'DCM':
parent_orient = amounts
else:
raise NotImplementedError('That is not an implemented rotation')
#Reset the _dcm_cache of this frame, and remove it from the _dcm_caches
#of the frames it is linked to. Also remove it from the _dcm_dict of
#its parent
frames = self._dcm_cache.keys()
dcm_dict_del = []
dcm_cache_del = []
for frame in frames:
if frame in self._dcm_dict:
dcm_dict_del += [frame]
dcm_cache_del += [frame]
for frame in dcm_dict_del:
del frame._dcm_dict[self]
for frame in dcm_cache_del:
del frame._dcm_cache[self]
#Add the dcm relationship to _dcm_dict
self._dcm_dict = self._dlist[0] = {}
self._dcm_dict.update({parent: parent_orient.T})
parent._dcm_dict.update({self: parent_orient})
#Also update the dcm cache after resetting it
self._dcm_cache = {}
self._dcm_cache.update({parent: parent_orient.T})
parent._dcm_cache.update({self: parent_orient})
if rot_type == 'QUATERNION':
t = dynamicsymbols._t
q0, q1, q2, q3 = amounts
q0d = diff(q0, t)
q1d = diff(q1, t)
q2d = diff(q2, t)
q3d = diff(q3, t)
w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1)
w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2)
w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3)
wvec = Vector([(Matrix([w1, w2, w3]), self)])
elif rot_type == 'AXIS':
thetad = (amounts[0]).diff(dynamicsymbols._t)
wvec = thetad * amounts[1].express(parent).normalize()
elif rot_type == 'DCM':
wvec = self._w_diff_dcm(parent)
else:
try:
from sympy.polys.polyerrors import CoercionFailed
from sympy.physics.vector.functions import kinematic_equations
q1, q2, q3 = amounts
u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy)
templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],
rot_type, rot_order)
templist = [expand(i) for i in templist]
td = solve(templist, [u1, u2, u3])
u1 = expand(td[u1])
u2 = expand(td[u2])
u3 = expand(td[u3])
wvec = u1 * self.x + u2 * self.y + u3 * self.z
except (CoercionFailed, AssertionError):
wvec = self._w_diff_dcm(parent)
self._ang_vel_dict.update({parent: wvec})
parent._ang_vel_dict.update({self: -wvec})
self._var_dict = {}
def orientnew(self, newname, rot_type, amounts, rot_order='',
variables=None, indices=None, latexs=None):
"""Creates a new ReferenceFrame oriented with respect to this Frame.
See ReferenceFrame.orient() for acceptable rotation types, amounts,
and orders. Parent is going to be self.
Parameters
==========
newname : str
The name for the new ReferenceFrame
rot_type : str
The type of orientation matrix that is being created.
amounts : list OR value
The quantities that the orientation matrix will be defined by.
rot_order : str
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = ReferenceFrame('N')
Now we have a choice of how to implement the orientation. First is
Body. Body orientation takes this reference frame through three
successive simple rotations. Acceptable rotation orders are of length
3, expressed in XYZ or 123, and cannot have a rotation about an
axis twice in a row.
>>> A = N.orientnew('A', 'Body', [q1, q2, q3], '123')
>>> A = N.orientnew('A', 'Body', [q1, q2, 0], 'ZXZ')
>>> A = N.orientnew('A', 'Body', [0, 0, 0], 'XYX')
Next is Space. Space is like Body, but the rotations are applied in the
opposite order.
>>> A = N.orientnew('A', 'Space', [q1, q2, q3], '312')
Next is Quaternion. This orients the new ReferenceFrame with
Quaternions, defined as a finite rotation about lambda, a unit vector,
by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
>>> A = N.orientnew('A', 'Quaternion', [q0, q1, q2, q3])
Last is Axis. This is a rotation about an arbitrary, non-time-varying
axis by some angle. The axis is supplied as a Vector. This is how
simple rotations are defined.
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
"""
newframe = self.__class__(newname, variables=variables, indices=indices, latexs=latexs)
newframe.orient(self, rot_type, amounts, rot_order)
return newframe
def set_ang_acc(self, otherframe, value):
"""Define the angular acceleration Vector in a ReferenceFrame.
Defines the angular acceleration of this ReferenceFrame, in another.
Angular acceleration can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular acceleration in
value : Vector
The Vector representing angular acceleration
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_acc_dict.update({otherframe: value})
otherframe._ang_acc_dict.update({self: -value})
def set_ang_vel(self, otherframe, value):
"""Define the angular velocity vector in a ReferenceFrame.
Defines the angular velocity of this ReferenceFrame, in another.
Angular velocity can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular velocity in
value : Vector
The Vector representing angular velocity
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_vel_dict.update({otherframe: value})
otherframe._ang_vel_dict.update({self: -value})
@property
def x(self):
"""The basis Vector for the ReferenceFrame, in the x direction. """
return self._x
@property
def y(self):
"""The basis Vector for the ReferenceFrame, in the y direction. """
return self._y
@property
def z(self):
"""The basis Vector for the ReferenceFrame, in the z direction. """
return self._z
def partial_velocity(self, frame, *gen_speeds):
"""Returns the partial angular velocities of this frame in the given
frame with respect to one or more provided generalized speeds.
Parameters
==========
frame : ReferenceFrame
The frame with which the angular velocity is defined in.
gen_speeds : functions of time
The generalized speeds.
Returns
=======
partial_velocities : tuple of Vector
The partial angular velocity vectors corresponding to the provided
generalized speeds.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> u1, u2 = dynamicsymbols('u1, u2')
>>> A.set_ang_vel(N, u1 * A.x + u2 * N.y)
>>> A.partial_velocity(N, u1)
A.x
>>> A.partial_velocity(N, u1, u2)
(A.x, N.y)
"""
partials = [self.ang_vel_in(frame).diff(speed, frame, var_in_dcm=False)
for speed in gen_speeds]
if len(partials) == 1:
return partials[0]
else:
return tuple(partials)
def _check_frame(other):
from .vector import VectorTypeError
if not isinstance(other, ReferenceFrame):
raise VectorTypeError(other, ReferenceFrame('A'))
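# --- Editor's illustrative aside (not part of the sympy source) ---
# End-to-end sketch of orient()/dcm()/ang_vel_in() for a rotation about N.z:
# the 'Axis' and 'Quaternion' forms documented in orient() should give the
# same DCM, and the angular velocity should be thetadot * N.z.
def _reference_frame_usage_sketch():
    from sympy import simplify
    from sympy.physics.vector import dynamicsymbols
    theta = dynamicsymbols('theta')
    N = ReferenceFrame('N')
    A = N.orientnew('A', 'Axis', [theta, N.z])
    B = N.orientnew('B', 'Quaternion',
                    [cos(theta / 2), 0, 0, sin(theta / 2)])
    # Every entry of the difference of the two DCMs should simplify to zero.
    d = (N.dcm(A) - N.dcm(B)).applyfunc(simplify)
    assert all(entry == 0 for entry in d)
    assert A.ang_vel_in(N) == theta.diff() * N.z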
| postvakje/sympy | sympy/physics/vector/frame.py | Python | bsd-3-clause | 31,125 | 0.001157 |
"""PEP 656 support.
This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""
import contextlib
import functools
import operator
import os
import re
import struct
import subprocess
import sys
from typing import IO, Iterator, NamedTuple, Optional, Tuple
def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
"""Detect musl libc location by parsing the Python executable.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
f.seek(0)
try:
ident = _read_unpacked(f, "16B")
except struct.error:
return None
if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
return None
f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
try:
# e_fmt: struct format for the remaining ELF header fields.
# p_fmt: struct format for one program header entry.
# p_idx: indexes within p_fmt for p_type, p_offset, and p_filesz.
e_fmt, p_fmt, p_idx = {
1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
}[ident[4]]
except KeyError:
return None
else:
p_get = operator.itemgetter(*p_idx)
# Find the interpreter section and return its content.
try:
_, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
except struct.error:
return None
for i in range(e_phnum + 1):
f.seek(e_phoff + e_phentsize * i)
try:
p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
except struct.error:
return None
if p_type != 3: # Not PT_INTERP.
continue
f.seek(p_offset)
interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
if "musl" not in interpreter:
return None
return interpreter
return None
class _MuslVersion(NamedTuple):
major: int
minor: int
def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
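# --- Editor's illustrative aside (not part of the packaging source) ---
# _parse_musl_version operates on the loader's stderr banner, for example:
def _parse_musl_version_sketch() -> None:
    sample = "musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader"
    assert _parse_musl_version(sample) == _MuslVersion(major=1, minor=2)
    # Output that does not look like a musl banner yields None.
    assert _parse_musl_version("ld.so (GNU libc)") is None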
@functools.lru_cache()
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
"""Detect currently-running musl runtime version.
This is done by checking the specified executable's dynamic linking
information, and invoking the loader to parse its output for a version
string. If the loader is musl, the output would be something like::
musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader
"""
with contextlib.ExitStack() as stack:
try:
f = stack.enter_context(open(executable, "rb"))
except IOError:
return None
ld = _parse_ld_musl_from_elf(f)
if not ld:
return None
proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
return _parse_musl_version(proc.stderr)
def platform_tags(arch: str) -> Iterator[str]:
"""Generate musllinux tags compatible to the current platform.
:param arch: Should be the part of platform tag after the ``linux_``
prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
prerequisite for the current platform to be musllinux-compatible.
:returns: An iterator of compatible musllinux tags.
"""
sys_musl = _get_musl_version(sys.executable)
if sys_musl is None: # Python not dynamically linked against musl.
return
for minor in range(sys_musl.minor, -1, -1):
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
if __name__ == "__main__": # pragma: no cover
import sysconfig
plat = sysconfig.get_platform()
assert plat.startswith("linux-"), "not linux"
print("plat:", plat)
print("musl:", _get_musl_version(sys.executable))
print("tags:", end=" ")
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
print(t, end="\n ")
| paolodedios/pybuilder | src/main/python/pybuilder/_vendor/pkg_resources/_vendor/packaging/_musllinux.py | Python | apache-2.0 | 4,378 | 0.000457 |
# -*- coding: utf-8 -*-
"""
Test the QgsSettings class
Run with: ctest -V -R PyQgsSettings
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import tempfile
from qgis.core import QgsSettings, QgsTolerance, QgsMapLayerProxyModel
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QSettings, QVariant
from pathlib import Path
__author__ = 'Alessandro Pasotti'
__date__ = '02/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
start_app()
class TestQgsSettings(unittest.TestCase):
cnt = 0
def setUp(self):
self.cnt += 1
h, path = tempfile.mkstemp('.ini')
Path(path).touch()
assert QgsSettings.setGlobalSettingsPath(path)
self.settings = QgsSettings('testqgissettings', 'testqgissettings%s' % self.cnt)
self.globalsettings = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat)
self.globalsettings.sync()
assert os.path.exists(self.globalsettings.fileName())
def tearDown(self):
settings_file = self.settings.fileName()
settings_default_file = self.settings.globalSettingsPath()
del self.settings
try:
os.unlink(settings_file)
except:
pass
try:
os.unlink(settings_default_file)
except:
pass
def addToDefaults(self, key, value):
self.globalsettings.setValue(key, value)
self.globalsettings.sync()
def addArrayToDefaults(self, prefix, key, values):
defaults = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat) # NOQA
self.globalsettings.beginWriteArray(prefix)
i = 0
for v in values:
self.globalsettings.setArrayIndex(i)
self.globalsettings.setValue(key, v)
i += 1
self.globalsettings.endArray()
self.globalsettings.sync()
def addGroupToDefaults(self, prefix, kvp):
defaults = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat) # NOQA
self.globalsettings.beginGroup(prefix)
for k, v in kvp.items():
self.globalsettings.setValue(k, v)
self.globalsettings.endGroup()
self.globalsettings.sync()
def test_basic_functionality(self):
self.assertEqual(self.settings.value('testqgissettings/doesnotexists', 'notexist'), 'notexist')
self.settings.setValue('testqgissettings/name', 'qgisrocks')
self.settings.sync()
self.assertEqual(self.settings.value('testqgissettings/name'), 'qgisrocks')
def test_defaults(self):
self.assertIsNone(self.settings.value('testqgissettings/name'))
self.addToDefaults('testqgissettings/name', 'qgisrocks')
self.assertEqual(self.settings.value('testqgissettings/name'), 'qgisrocks')
def test_allkeys(self):
self.assertEqual(self.settings.allKeys(), [])
self.addToDefaults('testqgissettings/name', 'qgisrocks')
self.addToDefaults('testqgissettings/name2', 'qgisrocks2')
self.settings.setValue('nepoti/eman', 'osaple')
self.assertEqual(3, len(self.settings.allKeys()))
self.assertIn('testqgissettings/name', self.settings.allKeys())
self.assertIn('nepoti/eman', self.settings.allKeys())
self.assertEqual('qgisrocks', self.settings.value('testqgissettings/name'))
self.assertEqual('qgisrocks2', self.settings.value('testqgissettings/name2'))
self.assertEqual('qgisrocks', self.globalsettings.value('testqgissettings/name'))
self.assertEqual('osaple', self.settings.value('nepoti/eman'))
self.assertEqual(3, len(self.settings.allKeys()))
self.assertEqual(2, len(self.globalsettings.allKeys()))
def test_precedence_simple(self):
self.assertEqual(self.settings.allKeys(), [])
self.addToDefaults('testqgissettings/names/name1', 'qgisrocks1')
self.settings.setValue('testqgissettings/names/name1', 'qgisrocks-1')
self.assertEqual(self.settings.value('testqgissettings/names/name1'), 'qgisrocks-1')
def test_precedence_group(self):
"""Test if user can override a group value"""
self.assertEqual(self.settings.allKeys(), [])
self.addGroupToDefaults('connections-xyz', {
'OSM': 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png',
'OSM-b': 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png',
})
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Override edit
self.settings.beginGroup('connections-xyz')
self.settings.setValue('OSM', 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Check it again!
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Remove the override: the global value is restored
self.settings.beginGroup('connections-xyz')
self.settings.remove('OSM')
self.settings.endGroup()
# Check it again!
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Override with a blank string: an empty value does shadow the global one
self.settings.beginGroup('connections-xyz')
self.settings.setValue('OSM', '')
self.settings.endGroup()
# Check it again!
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), '')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Store a None: this removes the override and restores the global setting
self.settings.beginGroup('connections-xyz')
self.settings.setValue('OSM', None)
self.settings.endGroup()
# Check it again!
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
def test_utf8(self):
self.assertEqual(self.settings.allKeys(), [])
self.addToDefaults('testqgissettings/names/namèé↓1', 'qgisrocks↓1')
self.assertEqual(self.settings.value('testqgissettings/names/namèé↓1'), 'qgisrocks↓1')
self.settings.setValue('testqgissettings/names/namèé↓2', 'qgisrocks↓2')
self.assertEqual(self.settings.value('testqgissettings/names/namèé↓2'), 'qgisrocks↓2')
self.settings.setValue('testqgissettings/names/namèé↓1', 'qgisrocks↓-1')
self.assertEqual(self.settings.value('testqgissettings/names/namèé↓1'), 'qgisrocks↓-1')
def test_groups(self):
self.assertEqual(self.settings.allKeys(), [])
self.addToDefaults('testqgissettings/names/name1', 'qgisrocks1')
self.addToDefaults('testqgissettings/names/name2', 'qgisrocks2')
self.addToDefaults('testqgissettings/names/name3', 'qgisrocks3')
self.addToDefaults('testqgissettings/name', 'qgisrocks')
self.settings.beginGroup('testqgissettings')
self.assertEqual(self.settings.group(), 'testqgissettings')
self.assertEqual(['names'], self.settings.childGroups())
self.settings.setValue('surnames/name1', 'qgisrocks-1')
self.assertEqual(['surnames', 'names'], self.settings.childGroups())
self.settings.setValue('names/name1', 'qgisrocks-1')
self.assertEqual('qgisrocks-1', self.settings.value('names/name1'))
self.settings.endGroup()
self.assertEqual(self.settings.group(), '')
self.settings.beginGroup('testqgissettings/names')
self.assertEqual(self.settings.group(), 'testqgissettings/names')
self.settings.setValue('name4', 'qgisrocks-4')
keys = sorted(self.settings.childKeys())
self.assertEqual(keys, ['name1', 'name2', 'name3', 'name4'])
self.settings.endGroup()
self.assertEqual(self.settings.group(), '')
self.assertEqual('qgisrocks-1', self.settings.value('testqgissettings/names/name1'))
self.assertEqual('qgisrocks-4', self.settings.value('testqgissettings/names/name4'))
def test_global_groups(self):
self.assertEqual(self.settings.allKeys(), [])
self.assertEqual(self.globalsettings.allKeys(), [])
self.addToDefaults('testqgissettings/foo/first', 'qgis')
self.addToDefaults('testqgissettings/foo/last', 'rocks')
self.settings.beginGroup('testqgissettings')
self.assertEqual(self.settings.group(), 'testqgissettings')
self.assertEqual(['foo'], self.settings.childGroups())
self.assertEqual(['foo'], self.settings.globalChildGroups())
self.settings.endGroup()
self.assertEqual(self.settings.group(), '')
self.settings.setValue('testqgissettings/bar/first', 'qgis')
self.settings.setValue('testqgissettings/bar/last', 'rocks')
self.settings.beginGroup('testqgissettings')
self.assertEqual(sorted(['bar', 'foo']), sorted(self.settings.childGroups()))
self.assertEqual(['foo'], self.settings.globalChildGroups())
self.settings.endGroup()
self.globalsettings.remove('testqgissettings/foo')
self.settings.beginGroup('testqgissettings')
self.assertEqual(['bar'], self.settings.childGroups())
self.assertEqual([], self.settings.globalChildGroups())
self.settings.endGroup()
def test_group_section(self):
# Test group by using Section
self.settings.beginGroup('firstgroup', section=QgsSettings.Core)
self.assertEqual(self.settings.group(), 'core/firstgroup')
self.assertEqual([], self.settings.childGroups())
self.settings.setValue('key', 'value')
self.settings.setValue('key2/subkey1', 'subvalue1')
self.settings.setValue('key2/subkey2', 'subvalue2')
self.settings.setValue('key3', 'value3')
self.assertEqual(['key', 'key2/subkey1', 'key2/subkey2', 'key3'], self.settings.allKeys())
self.assertEqual(['key', 'key3'], self.settings.childKeys())
self.assertEqual(['key2'], self.settings.childGroups())
self.settings.endGroup()
self.assertEqual(self.settings.group(), '')
# Set value by writing the group manually
self.settings.setValue('firstgroup/key4', 'value4', section=QgsSettings.Core)
# Checking the value that have been set
self.assertEqual(self.settings.value('firstgroup/key', section=QgsSettings.Core), 'value')
self.assertEqual(self.settings.value('firstgroup/key2/subkey1', section=QgsSettings.Core), 'subvalue1')
self.assertEqual(self.settings.value('firstgroup/key2/subkey2', section=QgsSettings.Core), 'subvalue2')
self.assertEqual(self.settings.value('firstgroup/key3', section=QgsSettings.Core), 'value3')
self.assertEqual(self.settings.value('firstgroup/key4', section=QgsSettings.Core), 'value4')
# Clean up firstgroup
self.settings.remove('firstgroup', section=QgsSettings.Core)
def test_array(self):
self.assertEqual(self.settings.allKeys(), [])
self.addArrayToDefaults('testqgissettings', 'key', ['qgisrocks1', 'qgisrocks2', 'qgisrocks3'])
self.assertEqual(self.settings.allKeys(), ['testqgissettings/1/key', 'testqgissettings/2/key', 'testqgissettings/3/key', 'testqgissettings/size'])
self.assertEqual(self.globalsettings.allKeys(), ['testqgissettings/1/key', 'testqgissettings/2/key', 'testqgissettings/3/key', 'testqgissettings/size'])
self.assertEqual(3, self.globalsettings.beginReadArray('testqgissettings'))
self.globalsettings.endArray()
self.assertEqual(3, self.settings.beginReadArray('testqgissettings'))
values = []
for i in range(3):
self.settings.setArrayIndex(i)
values.append(self.settings.value("key"))
self.assertEqual(values, ['qgisrocks1', 'qgisrocks2', 'qgisrocks3'])
def test_array_overrides(self):
"""Test if an array completely shadows the global one"""
self.assertEqual(self.settings.allKeys(), [])
self.addArrayToDefaults('testqgissettings', 'key', ['qgisrocks1', 'qgisrocks2', 'qgisrocks3'])
self.assertEqual(self.settings.allKeys(), ['testqgissettings/1/key', 'testqgissettings/2/key', 'testqgissettings/3/key', 'testqgissettings/size'])
self.assertEqual(self.globalsettings.allKeys(), ['testqgissettings/1/key', 'testqgissettings/2/key', 'testqgissettings/3/key', 'testqgissettings/size'])
self.assertEqual(3, self.globalsettings.beginReadArray('testqgissettings'))
self.globalsettings.endArray()
self.assertEqual(3, self.settings.beginReadArray('testqgissettings'))
# Now override!
self.settings.beginWriteArray('testqgissettings')
self.settings.setArrayIndex(0)
self.settings.setValue('key', 'myqgisrocksmore1')
self.settings.setArrayIndex(1)
self.settings.setValue('key', 'myqgisrocksmore2')
self.settings.endArray()
# Check it!
self.assertEqual(2, self.settings.beginReadArray('testqgissettings'))
values = []
for i in range(2):
self.settings.setArrayIndex(i)
values.append(self.settings.value("key"))
self.assertEqual(values, ['myqgisrocksmore1', 'myqgisrocksmore2'])
def test_section_getters_setters(self):
self.assertEqual(self.settings.allKeys(), [])
self.settings.setValue('key1', 'core1', section=QgsSettings.Core)
self.settings.setValue('key2', 'core2', section=QgsSettings.Core)
self.settings.setValue('key1', 'server1', section=QgsSettings.Server)
self.settings.setValue('key2', 'server2', section=QgsSettings.Server)
self.settings.setValue('key1', 'gui1', section=QgsSettings.Gui)
self.settings.setValue('key2', 'gui2', QgsSettings.Gui)
self.settings.setValue('key1', 'plugins1', section=QgsSettings.Plugins)
self.settings.setValue('key2', 'plugins2', section=QgsSettings.Plugins)
self.settings.setValue('key1', 'misc1', section=QgsSettings.Misc)
self.settings.setValue('key2', 'misc2', section=QgsSettings.Misc)
self.settings.setValue('key1', 'auth1', section=QgsSettings.Auth)
self.settings.setValue('key2', 'auth2', section=QgsSettings.Auth)
self.settings.setValue('key1', 'app1', section=QgsSettings.App)
self.settings.setValue('key2', 'app2', section=QgsSettings.App)
self.settings.setValue('key1', 'provider1', section=QgsSettings.Providers)
self.settings.setValue('key2', 'provider2', section=QgsSettings.Providers)
# This is an overwrite of previous setting and it is intentional
self.settings.setValue('key1', 'auth1', section=QgsSettings.Auth)
self.settings.setValue('key2', 'auth2', section=QgsSettings.Auth)
# Test that the values are namespaced
self.assertEqual(self.settings.value('core/key1'), 'core1')
self.assertEqual(self.settings.value('core/key2'), 'core2')
self.assertEqual(self.settings.value('server/key1'), 'server1')
self.assertEqual(self.settings.value('server/key2'), 'server2')
self.assertEqual(self.settings.value('gui/key1'), 'gui1')
self.assertEqual(self.settings.value('gui/key2'), 'gui2')
self.assertEqual(self.settings.value('plugins/key1'), 'plugins1')
self.assertEqual(self.settings.value('plugins/key2'), 'plugins2')
self.assertEqual(self.settings.value('misc/key1'), 'misc1')
self.assertEqual(self.settings.value('misc/key2'), 'misc2')
# Test getters
self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Core), 'core1')
self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Core), 'core2')
self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Server), 'server1')
self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Server), 'server2')
self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Gui), 'gui1')
self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Gui), 'gui2')
self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Plugins), 'plugins1')
self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Plugins), 'plugins2')
self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Misc), 'misc1')
self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Misc), 'misc2')
self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Auth), 'auth1')
self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Auth), 'auth2')
self.assertEqual(self.settings.value('key1', None, section=QgsSettings.App), 'app1')
self.assertEqual(self.settings.value('key2', None, section=QgsSettings.App), 'app2')
self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Providers), 'provider1')
self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Providers), 'provider2')
# Test default values on Section getter
self.assertEqual(self.settings.value('key_not_exist', 'misc_not_exist', section=QgsSettings.Misc), 'misc_not_exist')
def test_contains(self):
self.assertEqual(self.settings.allKeys(), [])
self.addToDefaults('testqgissettings/name', 'qgisrocks1')
self.addToDefaults('testqgissettings/name2', 'qgisrocks2')
self.assertTrue(self.settings.contains('testqgissettings/name'))
self.assertTrue(self.settings.contains('testqgissettings/name2'))
self.settings.setValue('testqgissettings/name3', 'qgisrocks3')
self.assertTrue(self.settings.contains('testqgissettings/name3'))
def test_remove(self):
self.settings.setValue('testQgisSettings/temp', True)
self.assertEqual(self.settings.value('testQgisSettings/temp'), True)
self.settings.remove('testQgisSettings/temp')
self.assertEqual(self.settings.value('testQgisSettings/temp'), None)
# Test remove by using Section
self.settings.setValue('testQgisSettings/tempSection', True, section=QgsSettings.Core)
self.assertEqual(self.settings.value('testQgisSettings/tempSection', section=QgsSettings.Core), True)
self.settings.remove('testQgisSettings/tempSection', section=QgsSettings.Core)
self.assertEqual(self.settings.value('testQgisSettings/tempSection', section=QgsSettings.Core), None)
def test_enumValue(self):
self.settings.setValue('enum', 'LayerUnits')
self.assertEqual(self.settings.enumValue('enum', QgsTolerance.Pixels), QgsTolerance.LayerUnits)
self.settings.setValue('enum', 'dummy_setting')
self.assertEqual(self.settings.enumValue('enum', QgsTolerance.Pixels), QgsTolerance.Pixels)
self.assertEqual(type(self.settings.enumValue('enum', QgsTolerance.Pixels)), QgsTolerance.UnitType)
def test_setEnumValue(self):
self.settings.setValue('enum', 'LayerUnits')
self.assertEqual(self.settings.enumValue('enum', QgsTolerance.Pixels), QgsTolerance.LayerUnits)
self.settings.setEnumValue('enum', QgsTolerance.Pixels)
self.assertEqual(self.settings.enumValue('enum', QgsTolerance.Pixels), QgsTolerance.Pixels)
def test_flagValue(self):
pointAndLine = QgsMapLayerProxyModel.Filters(QgsMapLayerProxyModel.PointLayer | QgsMapLayerProxyModel.LineLayer)
pointAndPolygon = QgsMapLayerProxyModel.Filters(QgsMapLayerProxyModel.PointLayer | QgsMapLayerProxyModel.PolygonLayer)
self.settings.setValue('flag', 'PointLayer|PolygonLayer')
self.assertEqual(self.settings.flagValue('flag', pointAndLine), pointAndPolygon)
self.settings.setValue('flag', 'dummy_setting')
self.assertEqual(self.settings.flagValue('flag', pointAndLine), pointAndLine)
self.assertEqual(type(self.settings.flagValue('enum', pointAndLine)), QgsMapLayerProxyModel.Filters)
def test_overwriteDefaultValues(self):
"""Test that unchanged values are not stored"""
self.globalsettings.setValue('a_value_with_default', 'a value')
self.globalsettings.setValue('an_invalid_value', QVariant())
self.assertEqual(self.settings.value('a_value_with_default'), 'a value')
self.assertEqual(self.settings.value('an_invalid_value'), QVariant())
# Now, set them with the same current value
self.settings.setValue('a_value_with_default', 'a value')
self.settings.setValue('an_invalid_value', QVariant())
# Check
pure_settings = QSettings(self.settings.fileName(), QSettings.IniFormat)
self.assertFalse('a_value_with_default' in pure_settings.allKeys())
self.assertFalse('an_invalid_value' in pure_settings.allKeys())
# Set a changed value
self.settings.setValue('a_value_with_default', 'a new value')
self.settings.setValue('an_invalid_value', 'valid value')
# Check
self.assertTrue('a_value_with_default' in pure_settings.allKeys())
self.assertTrue('an_invalid_value' in pure_settings.allKeys())
self.assertEqual(self.settings.value('a_value_with_default'), 'a new value')
self.assertEqual(self.settings.value('an_invalid_value'), 'valid value')
# Re-set to original values
self.settings.setValue('a_value_with_default', 'a value')
self.settings.setValue('an_invalid_value', QVariant())
self.assertEqual(self.settings.value('a_value_with_default'), 'a value')
self.assertEqual(self.settings.value('an_invalid_value'), QVariant())
# Check if they are gone
pure_settings = QSettings(self.settings.fileName(), QSettings.IniFormat)
self.assertFalse('a_value_with_default' not in pure_settings.allKeys())
self.assertFalse('an_invalid_value' not in pure_settings.allKeys())
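# --- Editor's illustrative aside (not part of the QGIS test suite) ---
# The pattern exercised above, outside a test harness: point QgsSettings at a
# global defaults ini, then read keys the user has not overridden. The path
# and organization/application names below are placeholders.
def _qgssettings_usage_sketch():
    QgsSettings.setGlobalSettingsPath('/path/to/global_settings.ini')
    settings = QgsSettings('myorg', 'myapp')
    # Falls back to the global ini, then to the supplied default.
    return settings.value('some/key', 'fallback')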
if __name__ == '__main__':
unittest.main()
| pblottiere/QGIS | tests/src/python/test_qgssettings.py | Python | gpl-2.0 | 23,013 | 0.003525 |
import os
import time
from .common import FileDownloader
from ..utils import (
compat_urllib_request,
compat_urllib_error,
ContentTooShortError,
encodeFilename,
sanitize_open,
format_bytes,
)
class HttpFD(FileDownloader):
_TEST_FILE_SIZE = 10241
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
stream = None
# Do not include the Accept-Encoding header
headers = {'Youtubedl-no-compression': 'True'}
if 'user_agent' in info_dict:
headers['Youtubedl-user-agent'] = info_dict['user_agent']
if 'http_referer' in info_dict:
headers['Referer'] = info_dict['http_referer']
basic_request = compat_urllib_request.Request(url, None, headers)
request = compat_urllib_request.Request(url, None, headers)
is_test = self.params.get('test', False)
if is_test:
request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))
# Establish possible resume length
if os.path.isfile(encodeFilename(tmpfilename)):
resume_len = os.path.getsize(encodeFilename(tmpfilename))
else:
resume_len = 0
open_mode = 'wb'
if resume_len != 0:
if self.params.get('continuedl', False):
self.report_resuming_byte(resume_len)
request.add_header('Range', 'bytes=%d-' % resume_len)
open_mode = 'ab'
else:
resume_len = 0
count = 0
retries = self.params.get('retries', 0)
while count <= retries:
# Establish connection
try:
data = self.ydl.urlopen(request)
break
except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
# Unexpected HTTP error
raise
elif err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
data = self.ydl.urlopen(basic_request)
content_length = data.info()['Content-Length']
except (compat_urllib_error.HTTPError, ) as err:
if err.code < 500 or err.code >= 600:
raise
else:
# Examine the reported length
if (content_length is not None and
(resume_len - 100 < int(content_length) < resume_len + 100)):
# The file had already been fully downloaded.
# Explanation to the above condition: in issue #175 it was revealed that
# YouTube sometimes adds or removes a few bytes from the end of the file,
# changing the file size slightly and causing problems for some users. So
# I decided to implement a suggested change and consider the file
# completely downloaded if the file size differs less than 100 bytes from
# the one in the hard drive.
self.report_file_already_downloaded(filename)
self.try_rename(tmpfilename, filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
})
return True
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
resume_len = 0
open_mode = 'wb'
break
# Retry
count += 1
if count <= retries:
self.report_retry(count, retries)
if count > retries:
self.report_error(u'giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
# Range HTTP header may be ignored/unsupported by a webserver
# (e.g. extractor/scivee.py, extractor/bambuser.py).
# However, for a test we still would like to download just a piece of a file.
# To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
# block size when downloading a file.
if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
data_len = self._TEST_FILE_SIZE
if data_len is not None:
data_len = int(data_len) + resume_len
min_data_len = self.params.get("min_filesize", None)
max_data_len = self.params.get("max_filesize", None)
if min_data_len is not None and data_len < min_data_len:
self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
if max_data_len is not None and data_len > max_data_len:
self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
return False
data_len_str = format_bytes(data_len)
byte_counter = 0 + resume_len
block_size = self.params.get('buffersize', 1024)
start = time.time()
while True:
# Download and write
before = time.time()
data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
after = time.time()
if len(data_block) == 0:
break
byte_counter += len(data_block)
# Open file just in time
if stream is None:
try:
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
assert stream is not None
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
self.report_error(u'unable to open for writing: %s' % str(err))
return False
try:
stream.write(data_block)
except (IOError, OSError) as err:
self.to_stderr(u"\n")
self.report_error(u'unable to write data: %s' % str(err))
return False
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
# Progress message
speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
if data_len is None:
eta = percent = None
else:
percent = self.calc_percent(byte_counter, data_len)
eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
self.report_progress(percent, data_len_str, speed, eta)
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': data_len,
'tmpfilename': tmpfilename,
'filename': filename,
'status': 'downloading',
'eta': eta,
'speed': speed,
})
if is_test and byte_counter == data_len:
break
# Apply rate limit
self.slow_down(start, byte_counter - resume_len)
if stream is None:
self.to_stderr(u"\n")
self.report_error(u'Did not get any data blocks')
return False
stream.close()
self.report_finish(data_len_str, (time.time() - start))
if data_len is not None and byte_counter != data_len:
raise ContentTooShortError(byte_counter, int(data_len))
self.try_rename(tmpfilename, filename)
# Update file modification time
if self.params.get('updatetime', True):
info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': filename,
'status': 'finished',
})
return True
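# --- Editor's illustrative aside (not part of youtube-dl) ---
# The resume logic above boils down to an HTTP Range request plus append-mode
# writes. A stripped-down, standard-library-only (Python 3) sketch of the same
# idea; URL and filename are placeholders:
def _resume_download_sketch(url, tmpfilename, block_size=1024):
    import os
    import urllib.request
    resume_len = os.path.getsize(tmpfilename) if os.path.isfile(tmpfilename) else 0
    req = urllib.request.Request(url)
    if resume_len:
        # Ask the server only for the bytes we do not have yet.
        req.add_header('Range', 'bytes=%d-' % resume_len)
    mode = 'ab' if resume_len else 'wb'
    with urllib.request.urlopen(req) as resp, open(tmpfilename, mode) as stream:
        while True:
            block = resp.read(block_size)
            if not block:
                break
            stream.write(block)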
| riking/youtube-dl | youtube_dl/downloader/http.py | Python | unlicense | 8,667 | 0.002308 |
from jx_elasticsearch.es52.painless._utils import Painless, LIST_TO_PIPE
from jx_elasticsearch.es52.painless.add_op import AddOp
from jx_elasticsearch.es52.painless.and_op import AndOp
from jx_elasticsearch.es52.painless.basic_add_op import BasicAddOp
from jx_elasticsearch.es52.painless.basic_eq_op import BasicEqOp
from jx_elasticsearch.es52.painless.basic_index_of_op import BasicIndexOfOp
from jx_elasticsearch.es52.painless.basic_mul_op import BasicMulOp
from jx_elasticsearch.es52.painless.basic_starts_with_op import BasicStartsWithOp
from jx_elasticsearch.es52.painless.basic_substring_op import BasicSubstringOp
from jx_elasticsearch.es52.painless.boolean_op import BooleanOp
from jx_elasticsearch.es52.painless.case_op import CaseOp
from jx_elasticsearch.es52.painless.coalesce_op import CoalesceOp
from jx_elasticsearch.es52.painless.concat_op import ConcatOp
from jx_elasticsearch.es52.painless.count_op import CountOp
from jx_elasticsearch.es52.painless.date_op import DateOp
from jx_elasticsearch.es52.painless.div_op import DivOp
from jx_elasticsearch.es52.painless.eq_op import EqOp
from jx_elasticsearch.es52.painless.es_script import EsScript
from jx_elasticsearch.es52.painless.exists_op import ExistsOp
from jx_elasticsearch.es52.painless.exp_op import ExpOp
from jx_elasticsearch.es52.painless.find_op import FindOp
from jx_elasticsearch.es52.painless.first_op import FirstOp
from jx_elasticsearch.es52.painless.floor_op import FloorOp
from jx_elasticsearch.es52.painless.gt_op import GtOp
from jx_elasticsearch.es52.painless.gte_op import GteOp
from jx_elasticsearch.es52.painless.in_op import InOp
from jx_elasticsearch.es52.painless.integer_op import IntegerOp
from jx_elasticsearch.es52.painless.is_number_op import IsNumberOp
from jx_elasticsearch.es52.painless.leaves_op import LeavesOp
from jx_elasticsearch.es52.painless.length_op import LengthOp
from jx_elasticsearch.es52.painless.literal import Literal
from jx_elasticsearch.es52.painless.lt_op import LtOp
from jx_elasticsearch.es52.painless.lte_op import LteOp
from jx_elasticsearch.es52.painless.max_op import MaxOp
from jx_elasticsearch.es52.painless.min_op import MinOp
from jx_elasticsearch.es52.painless.missing_op import MissingOp
from jx_elasticsearch.es52.painless.mod_op import ModOp
from jx_elasticsearch.es52.painless.mul_op import MulOp
from jx_elasticsearch.es52.painless.ne_op import NeOp
from jx_elasticsearch.es52.painless.not_left_op import NotLeftOp
from jx_elasticsearch.es52.painless.not_op import NotOp
from jx_elasticsearch.es52.painless.number_op import NumberOp
from jx_elasticsearch.es52.painless.or_op import OrOp
from jx_elasticsearch.es52.painless.prefix_op import PrefixOp
from jx_elasticsearch.es52.painless.string_op import StringOp
from jx_elasticsearch.es52.painless.sub_op import SubOp
from jx_elasticsearch.es52.painless.suffix_op import SuffixOp
from jx_elasticsearch.es52.painless.tuple_op import TupleOp
from jx_elasticsearch.es52.painless.union_op import UnionOp
from jx_elasticsearch.es52.painless.variable import Variable
from jx_elasticsearch.es52.painless.when_op import WhenOp
from jx_elasticsearch.es52.painless.false_op import FalseOp, false_script
from jx_elasticsearch.es52.painless.true_op import TrueOp, true_script
from jx_elasticsearch.es52.painless.null_op import NullOp, null_script
# Register every *Op class imported above with the Painless dialect; vars()
# hands register_ops this module's namespace, so newly added op modules only
# need to be imported here to be picked up.
Painless.register_ops(vars())
| klahnakoski/SpotManager | vendor/jx_elasticsearch/es52/painless/__init__.py | Python | mpl-2.0 | 3,355 | 0.000596 |
# Building inheritance
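# Editor's note: the original snippet assumes a Person base class defined
# earlier in the course material. A minimal stand-in (its attribute and
# method names are assumptions) so the classes below can actually run:
class Person(object):
    def __init__(self, name):
        self.name = name
    def getName(self):
        return self.name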
class MITPerson(Person):
nextIdNum = 0 #next ID number to assign
def __init__(self, name):
Person.__init__(self, name) #initialize Person attributes
# new MITPerson attribute: a unique ID number
self.idNum = MITPerson.nextIdNum
MITPerson.nextIdNum += 1
def getIdNum(self):
return self.idNum
def __lt__(self, other):
return self.idNum < other.idNum
class Student(MITPerson):
pass
class UG(Student): #UG = undergraduate
def __init__(self, name, classYear):
MITPerson.__init__(self, name)
self.year = classYear
def getClass(self): # getter method
return self.year
class Grad(Student):
pass
class TransferStudent(Student):
pass
def isStudent(obj):
return isinstance(obj, Student)
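# Editor's illustrative usage (not part of the original file): ID numbers are
# handed out in creation order, and __lt__ compares instances by them.
p1 = UG('Ana', 2027)
p2 = Grad('Ben')
print(p1.getIdNum(), p2.getIdNum())   # 0 1
print(isStudent(p1), p1 < p2)         # True True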
| teichopsia-/python_practice | old_class_material/MITPerson_class.py | Python | mpl-2.0 | 903 | 0.026578 |