| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
silenci/neutron | neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py | Python | apache-2.0 | 17,661 | 0.001019 |
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as n_constants
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
target = oslo_messaging.Target(version='1.1')
def __init__(self, context, agent, sg_agent):
super(SriovNicSwitchRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def port_update(self, context, **kwargs):
LOG.debug("port_update received")
port = kwargs.get('port')
# Put the port mac address in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
mac = port['mac_address']
pci_slot = None
if port.get('binding:profile'):
pci_slot = port['binding:profile'].get('pci_slot')
if pci_slot:
self.agent.updated_devices.add((mac, pci_slot))
LOG.debug("port_update RPC received for port: %(id)s with MAC "
"%(mac)s and PCI slot %(pci_slot)s slot",
{'id': port['id'], 'mac': mac, 'pci_slot': pci_slot})
else:
LOG.debug("No PCI Slot for port %(id)s with MAC %(mac)s; "
"skipping", {'id': port['id'], 'mac': mac,
'pci_slot': pci_slot})
class SriovNicSwitchAgent(object):
def __init__(self, physical_devices_mappings, exclude_devices,
polling_interval):
self.polling_interval = polling_interval
self.conf = cfg.CONF
self.setup_eswitch_mgr(physical_devices_mappings,
exclude_devices)
configurations = {'device_mappings': physical_devices_mappings}
self.agent_state = {
'binary': 'neutron-sriov-nic-agent',
'host': self.conf.host,
'topic': n_constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
'start_flag': True}
# Stores port update notifications for processing in the main loop
self.updated_devices = set()
self.mac_to_port_id_mapping = {}
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc)
self._setup_rpc()
self.ext_manager = self._create_agent_extension_manager(
self.connection)
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
# Initialize iteration counter
self.iter_num = 0
def _setup_rpc(self):
self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
# Handle updates from service
self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers,
start_listening=False)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
devices = len(self.eswitch_mgr.get_assigned_devices_info())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def _create_agent_extension_manager(self, connection):
ext_manager.register_opts(self.conf)
mgr = ext_manager.AgentExtensionsManager(self.conf)
mgr.initialize(connection, 'sriov')
return mgr
def setup_eswitch_mgr(self, device_mappings, exclude_devices={}):
self.eswitch_mgr = esm.ESwitchManager()
self.eswitch_mgr.discover_devices(device_mappings, exclude_devices)
def scan_devices(self, registered_devices, updated_devices):
curr_devices = self.eswitch_mgr.get_assigned_devices_info()
device_info = {}
device_info['current'] = curr_devices
device_info['added'] = curr_devices - registered_devices
# we don't want to process updates for devices that don't exist
device_info['updated'] = updated_devices & curr_devices
# we need to clean up after devices are removed
device_info['removed'] = registered_devices - curr_devices
return device_info
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.prepare_devices_filter(device_info.get('added'))
if device_info.get('updated'):
self.sg_agent.refresh_firewall()
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
|
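A minimal sketch (the (mac, pci_slot) tuples below are invented, not taken from the file above) of the set arithmetic that scan_devices() performs to classify devices as added, updated, or removed:

```python
# Hypothetical device tuples illustrating scan_devices() above.
registered = {("fa:16:3e:aa:bb:01", "0000:03:10.1"), ("fa:16:3e:aa:bb:02", "0000:03:10.2")}
current = {("fa:16:3e:aa:bb:01", "0000:03:10.1"), ("fa:16:3e:aa:bb:03", "0000:03:10.3")}
updated = {("fa:16:3e:aa:bb:01", "0000:03:10.1"), ("fa:16:3e:aa:bb:99", "0000:03:10.9")}

added = current - registered      # devices that appeared since the last scan
removed = registered - current    # devices that disappeared
updated_kept = updated & current  # drop updates for devices that no longer exist
print(added, removed, updated_kept)
```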
chimmu/hailuo | acceptor.py | Python | gpl-2.0 | 474 | 0.006329 |
from conn import Connection
import dispatch
import socket
class Acceptor(Connection):
def __init__(self, port):
self.dispatcher = dispatch.Dispatch(1)
self.dispatcher.start()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.sock.bind(("127.0.0.1", port))
self.sock.listen(1024)
def handleRead(self):
cli, addr = self.sock.accept()
# cli.setblocking(0)
self.dispatcher.dispatch(cli)
|
xuehao/stickpython | Flask_Web_Development/chapter_02/2c/hello_2c.py | Python | mit | 320 | 0.003125 |
from flask import Flask
from flask.ext.script import Manager
app = Flask(__name__)
manager = Manager(app)
@app.route('/')
def index():
return '<h1>Hello World!</h1>'
@app.route('/user/<name>')
def user(name):
return '<h1>Hello, {name}!</h1>'.format(**locals())
if __name__ == '__main__':
manager.run()
|
sugimotokun/VirtualCurrencySplunk | bin/scripts/vc_usd_nt.py | Python | apache-2.0 | 1,321 | 0.004542 |
#
# Copyright (c) 2017 Sugimoto Takaaki
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import urllib
import json
from collections import OrderedDict
# dictionary of api url
d = OrderedDict()
d['btc']='https://api.cryptonator.com/api/ticker/btc-usd'
d['ltc']='https://api.cryptonator.com/api/ticker/ltc-usd'
d['doge']='https://api.cryptonator.com/api/ticker/doge-usd'
d['xrp']='https://api.cryptonator.com/api/ticker/xrp-usd'
d['eth']='https://api.cryptonator.com/api/ticker/eth-usd'
d['mona']='https://api.cryptonator.com/api/ticker/mona-usd'
outputString = ""
for url in d.values():
sock = urllib.urlopen(url)
jsonString = sock.read()
sock.close()
jsonCurrency = json.loads(jsonString)
price = jsonCurrency['ticker']['price']
outputString = outputString + price + " "
print outputString
|
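The script above relies on Python 2 idioms (urllib.urlopen, the print statement). A rough Python 3 sketch of a single price fetch, assuming the same endpoint and the same response shape ({'ticker': {'price': ...}}):

```python
# Python 3 equivalent of one fetch from the loop above (endpoint and JSON
# shape assumed, not verified).
import json
from urllib.request import urlopen

with urlopen('https://api.cryptonator.com/api/ticker/btc-usd') as sock:
    ticker = json.loads(sock.read())
print(ticker['ticker']['price'])
```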
tanglei528/nova | nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py | Python | apache-2.0 | 19,168 | 0.000313 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import mock
import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova import db
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
FAKE_INST_TYPE = {'id': 1,
'vcpus': VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'memory_mb': MEMORY_MB,
'name': 'fakeflavor',
'flavorid': 'foo',
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'swap': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'disabled': False,
'is_public': True,
'extra_specs': {'foo': 'bar'}}
def get_fake_db_instance(start, end, instance_id, tenant_id,
vm_state=vm_states.ACTIVE):
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, FAKE_INST_TYPE))
# NOTE(mriedem): We use fakes.stub_instance since it sets the fields
# needed on the db instance for converting it to an object, but we still
# need to override system_metadata to use our fake flavor.
inst = fakes.stub_instance(
id=instance_id,
uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
image_ref='1',
project_id=tenant_id,
user_id='fakeuser',
display_name='name',
flavor_id=FAKE_INST_TYPE['id'],
launched_at=start,
terminated_at=end,
vm_state=vm_state,
memory_mb=MEMORY_MB,
vcpus=VCPUS,
root_gb=ROOT_GB,
ephemeral_gb=EPHEMERAL_GB,)
inst['system_metadata'] = sys_meta
return inst
def fake_instance_get_active_by_window_joined(context, begin, end,
project_id, host):
return [get_fake_db_instance(START,
STOP,
x,
"faketenant_%s" % (x / SERVERS))
for x in xrange(TENANTS * SERVERS)]
@mock.patch.object(db, 'instance_get_active_by_window_joined',
fake_instance_get_active_by_window_joined)
class SimpleTenantUsageTest(test.TestCase):
def setUp(self):
super(SimpleTenantUsageTest, self).setUp()
self.admin_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=True)
self.user_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=False)
self.alt_user_context = context.RequestContext('fakeadmin_0',
'faketenant_1',
is_admin=False)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Simple_tenant_usage'])
def _test_verify_index(self, start, stop):
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage?start=%s&end=%s' %
(start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.admin_context,
init_only=('os-simple-tenant-usage',)))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usages = res_dict['tenant_usages']
for i in xrange(TENANTS):
self.assertEqual(int(usages[i]['total_hours']),
SERVERS * HOURS)
self.assertEqual(int(usages[i]['total_local_gb_usage']),
SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
self.assertEqual(int(usages[i]['total_memory_mb_usage']),
SERVERS * MEMORY_MB * HOURS)
self.assertEqual(int(usages[i]['total_vcpus_usage']),
SERVERS * VCPUS * HOURS)
self.assertFalse(usages[i].get('server_usages'))
def test_verify_index(self):
self._test_verify_index(START, STOP)
def test_verify_index_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_index(START, future)
def test_verify_show(self):
self._test_verify_show(START, STOP)
def test_verify_show_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_show(START, future)
def _get_tenant_usages(self, detailed=''):
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage?'
'detailed=%s&start=%s&end=%s' %
(detailed, START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.admin_context,
init_only=('os-simple-tenant-usage',)))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
return res_dict['tenant_usages']
def test_verify_detailed_index(self):
usages = self._get_tenant_usages('1')
for i in xrange(TENANTS):
servers = usages[i]['server_usages']
for j in xrange(SERVERS):
self.assertEqual(int(servers[j]['hours']), HOURS)
def test_verify_simple_index(self):
usages = self._get_tenant_usages(detailed='0')
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def test_verify_simple_index_empty_param(self):
# NOTE(lzyeval): 'detailed=&start=..&end=..'
usages = self._get_tenant_usages()
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def _test_verify_show(self, start, stop):
tenant_id = 0
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_context,
init_only=('os-simple-tenant-usage',)))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
|
jorvis/biocode | sandbox/jorvis/correct_RNAs_missing_genes.py | Python | mit | 2,324 | 0.009897 |
#!/usr/bin/env python3
"""
This housekeeping script reads a GFF3 file and writes a new one, adding a 'gene'
row for any RNA feature which doesn't have one. The coordinates of the RNA will
be copied.
The initial use-case here was a GFF file dumped from WebApollo which had this issue.
In this particular use case, the orphan mRNAs have ID attributes but no Parent
though this is corrected.
INPUT EXAMPLE:
###
ChromosomeII_BmicrotiR1 IGS mRNA 1467897 1468187 . + . Name=ChromosomeII_BmicrotiR1:1467871-1468187;ID=101D714C468A44840D49A6FAAD27AFE5
ChromosomeII_BmicrotiR1 IGS exon 1467897 1468187 . + . Name=DE1443B2DABA5DEDBDEBE79EB433EEB8;Parent=101D714C468A44840D49A6FAAD27AFE5;ID=DE1443B2DABA5DEDBDEBE79EB433EEB8
ChromosomeII_BmicrotiR1 IGS CDS 1467897 1468187 . + 0 Name=101D714C468A44840D49A6FAAD27AFE5-CDS;Parent=101D714C468A44840D49A6FAAD27AFE5;ID=101D714C468A44840D49A6FAAD27AFE5-CDS
Author: Joshua Orvis
"""
import argparse
from biocode import gff
def main():
parser = argparse.ArgumentParser( description='Adds gene features for RNAs which lack them')
## output file to be written
parser.add_argument('-i', '--input', type=str, required=True, help='Path to the input GFF3 file' )
parser.add_argument('-o', '--output', type=str, required=True, help='Output GFF3 file to write' )
args = parser.parse_args()
infile = open(args.input)
ofh = open(args.output, 'wt')
for line in infile:
if line.startswith('#'):
ofh.write(line)
continue
line = line.rstrip()
cols = line.split("\t")
if len(cols) != 9:
ofh.write("{0}\n".format(line) )
continue
id = gff.column_9_value(cols[8], 'ID')
parent = gff.column_9_value(cols[8], 'Parent')
if cols[2].endswith('RNA') and parent is None:
gene_cols = list(cols)
gene_cols[2] = 'gene'
gene_cols[8] = gff.set_column_9_value(gene_cols[8], 'ID', "{0}.gene".format(id))
ofh.write("{0}\n".format("\t".join(gene_cols)) )
cols[8] = gff.set_column_9_value(cols[8], 'Parent', "{0}.gene".format(id))
ofh.write("{0}\n".format("\t".join(cols)) )
else:
ofh.write("{0}\n".format(line) )
if __name__ == '__main__':
main()
|
aeklant/scipy | scipy/stats/tests/test_rank.py | Python | bsd-3-clause | 7,448 | 0.000403 |
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from scipy.stats import rankdata, tiecorrect
class TestTieCorrect(object):
def test_empty(self):
"""An empty array requires no correction, should return 1.0."""
ranks = np.array([], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_one(self):
"""A single element requires no correction, should return 1.0."""
ranks = np.array([1.0], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_no_correction(self):
"""Arra
|
ys with no ties require no correction."""
ranks = np.arange(2.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
ranks = np.arange(3.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_basic(self):
"""Check a few basic examples of the tie correction factor."""
# One tie of two elements
ranks = np.array([1.0, 2.5, 2.5])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of two elements (same as above, but tie is not at the end)
ranks = np.array([1.5, 1.5, 3.0])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of three elements
ranks = np.array([1.0, 3.0, 3.0, 3.0])
c = tiecorrect(ranks)
T = 3.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# Two ties, lengths 2 and 3.
ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
c = tiecorrect(ranks)
T1 = 2.0
T2 = 3.0
N = ranks.size
expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
assert_equal(c, expected)
def test_overflow(self):
ntie, k = 2000, 5
a = np.repeat(np.arange(k), ntie)
n = a.size # ntie * k
out = tiecorrect(rankdata(a))
assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))
class TestRankData(object):
def test_empty(self):
"""stats.rankdata([]) should return an empty array."""
a = np.array([], dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([], dtype=np.float64))
r = rankdata([])
assert_array_equal(r, np.array([], dtype=np.float64))
def test_one(self):
"""Check stats.rankdata with an array of length 1."""
data = [100]
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
r = rankdata(data)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
def test_basic(self):
"""Basic tests of stats.rankdata."""
data = [100, 10, 50]
expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [40, 10, 30, 10, 50]
expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [20, 20, 20, 10, 10, 10]
expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
# The docstring states explicitly that the argument is flattened.
a2d = a.reshape(2, 3)
r = rankdata(a2d)
assert_array_equal(r, expected)
def test_rankdata_object_string(self):
min_rank = lambda a: [1 + sum(i < j for i in a) for j in a]
max_rank = lambda a: [sum(i <= j for i in a) for j in a]
ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)])
def average_rank(a):
return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
def dense_rank(a):
b = np.unique(a)
return [1 + sum(i < j for i in b) for j in a]
rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
average=average_rank, dense=dense_rank)
def check_ranks(a):
for method in 'min', 'max', 'dense', 'ordinal', 'average':
out = rankdata(a, method=method)
assert_array_equal(out, rankf[method](a))
val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
check_ranks(np.random.choice(val, 200))
check_ranks(np.random.choice(val, 200).astype('object'))
val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
check_ranks(np.random.choice(val, 200).astype('object'))
def test_large_int(self):
data = np.array([2**60, 2**60+1], dtype=np.uint64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, 2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, -2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [2.0, 1.0])
def test_big_tie(self):
for n in [10000, 100000, 1000000]:
data = np.ones(n, dtype=int)
r = rankdata(data)
expected_rank = 0.5 * (n + 1)
assert_array_equal(r, expected_rank * data,
"test failed with n=%d" % n)
_cases = (
# values, method, expected
([], 'average', []),
([], 'min', []),
([], 'max', []),
([], 'dense', []),
([], 'ordinal', []),
#
([100], 'average', [1.0]),
([100], 'min', [1.0]),
([100], 'max', [1.0]),
([100], 'dense', [1.0]),
([100], 'ordinal', [1.0]),
#
([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
#
([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
#
([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
#
([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
#
([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
)
def test_cases():
for values, method, expected in _cases:
r = rankdata(values, method=method)
assert_array_equal(r, expected)
|
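A worked instance of the tie-correction factor used in test_basic above, computed by hand for one tied pair among three values (this only restates the formula already present in the tests):

```python
# ranks [1.0, 2.5, 2.5]: one tie group of size T = 2 among N = 3 values.
T, N = 2.0, 3
factor = 1.0 - (T**3 - T) / (N**3 - N)  # (8 - 2) / (27 - 3) = 0.25, so factor = 0.75
print(factor)  # 0.75, matching tiecorrect([1.0, 2.5, 2.5])
```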
iamahuman/angr | angr/analyses/propagator/engine_base.py | Python | bsd-2-clause | 1,026 | 0.001949 |
import logging
from ...engines.light import SimEngineLight
from ...errors import SimEngineError
l = logging.getLogger(name=__name__)
class SimEnginePropagatorBase(SimEngineLight): # pylint:disable=abstract-method
def __init__(self, stack_pointer_tracker=None, project=None):
super().__init__()
# Used in the VEX engine
self._project = project
self.base_state = None
self._load_callback = None
# Used in the AIL engine
self._stack_pointer_tracker = stack_pointer_tracker
def process(self, state, *args, **kwargs):
self.project = kwargs.pop('project', None)
self.base_state = kwargs.pop('base_state', None)
self._load_callback = kwargs.pop('load_callback', None)
try:
self._process(state, None, block=kwargs.pop('block', None))
except SimEngineError as ex:
if kwargs.pop('fail_fast', False) is True:
raise ex
l.error(ex, exc_info=True)
return self.state
|
gangadhar-kadam/verve_test_erp | erpnext/manufacturing/doctype/production_order/test_production_order.py | Python | agpl-3.0 | 5,084 | 0.023013 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, get_datetime
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
from erpnext.stock.doctype.stock_entry import test_stock_entry
from erpnext.projects.doctype.time_log.time_log import OverProductionLoggedError
class TestProductionOrder(unittest.TestCase):
def check_planned_qty(self):
set_perpetual_inventory(0)
planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0
pro_doc = frappe.copy_doc(test_records[0])
pro_doc.insert()
pro_doc.submit()
# add raw materials to stores
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="Stores - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="Stores - _TC", qty=100, incoming_rate=100)
# from stores to wip
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Material Transfer for Manufacture", 4))
for d in s.get("items"):
d.s_warehouse = "Stores - _TC"
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-02"
s.insert()
s.submit()
# from wip to fg
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 4))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-03"
s.insert()
s.submit()
self.assertEqual(frappe.db.get_value("Production Order", pro_doc.name,
"produced_qty"), 4)
planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
self.assertEqual(planned1 - planned0, 6)
return pro_doc
def test_over_production(self):
from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
pro_doc = self.check_planned_qty()
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-04"
s.insert()
self.assertRaises(StockOverProductionError, s.submit)
def test_make_time_log(self):
from erpnext.manufacturing.doctype.production_order.production_order import make_time_log
from frappe.utils import cstr
from frappe.utils import time_diff_in_hours
prod_order = frappe.get_doc({
"doctype": "Production Order",
"production_item": "_Test FG Item 2",
"bom_no": "BOM/_Test FG Item 2/001",
"qty": 1,
"wip_warehouse": "_Test Warehouse - _TC",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"company": "_Test Company",
"planned_start_date": "2014-11-25 00:00:00"
})
prod_order.set_production_order_operations()
prod_order.insert()
prod_order.submit()
d = prod_order.operations[0]
d.completed_qty = flt(d.completed_qty)
time_log = make_time_log(prod_order.name, cstr(d.idx) + ". " + d.operation, \
d.planned_start_time, d.planned_end_time, prod_order.qty - d.completed_qty,
operation_id=d.name)
self.assertEqual(prod_order.name, time_log.production_order)
self.assertEqual((prod_order.qty - d.completed_qty), time_log.completed_qty)
self.assertEqual(time_diff_in_hours(d.planned_end_time, d.planned_start_time),time_log.hours)
time_log.save()
time_log.submit()
manufacturing_settings = frappe.get_doc({
"doctype": "Manufacturing Settings",
"allow_production_on_holidays": 0
})
manufacturing_settings.save()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Completed")
self.assertEqual(prod_order.operations[0].completed_qty, prod_order.qty)
self.assertEqual(get_datetime(prod_order.operations[0].actual_start_time), get_datetime(time_log.from_time))
self.assertEqual(get_datetime(prod_order.operations[0].actual_end_time), get_datetime(time_log.to_time))
self.assertEqual(prod_order.operations[0].actual_operation_time, 60)
self.assertEqual(prod_order.operations[0].actual_operating_cost, 100)
time_log.cancel()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Pending")
self.assertEqual(flt(prod_order.operations[0].completed_qty), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operation_time), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operating_cost), 0)
time_log2 = frappe.copy_doc(time_log)
time_log2.update({
"completed_qty": 10,
"from_time": "2014-11-26 00:00:00",
"to_time": "2014-11-26 00:00:00",
"docstatus": 0
})
self.assertRaises(OverProductionLoggedError, time_log2.save)
test_records = frappe.get_test_records('Production Order')
|
chaowu2009/stereo-vo | tools/test_ORB.py | Python | mit | 621 | 0.022544 |
import numpy as np
import cv2
from matplotlib import pylab as plt
# Ref: http://www.pyimagesearch.com/2015/07/16/where-did-sift-and-surf-go-in-opencv-3/
picNumber = 1
filename = "/home/cwu/project/stereo-calibration/calib_imgs/3/left/left_" + str(picNumber) +".jpg"
img = cv2.imread(filename)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
orb = cv2.ORB_create()
# find the keypoints with STAR
kp = orb.detect(img,None)
# compute the descriptors with BRIEF
kp, des = orb.compute(img, kp)
img = cv2.drawKeypoints(img,kp,None,(0,255,0),4)
cv2.imshow('img',img)
cv2.waitKey(1000)
cv2.imwrite('orb_keypoints.jpg',img)
|
christianwgd/mezzanine | mezzanine/galleries/migrations/0001_initial.py | Python | bsd-2-clause | 1,889 | 0.004235 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('pages', '__first__'),
]
operations = [
migrations.CreateModel(
name='Gallery',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page', on_delete=models.CASCADE)),
('content', mezzanine.core.fields.RichTextField(verbose_name='Content')),
('zip_import', models.FileField(help_text="Upload a zip file containing images, and they'll be imported into this gallery.", upload_to='galleries', verbose_name='Zip import', blank=True)),
],
options={
'ordering': ('_order',),
'verbose_name': 'Gallery',
'verbose_name_plural': 'Galleries',
},
bases=('pages.page', models.Model),
),
migrations.CreateModel(
name='GalleryImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('_order', models.IntegerField(null=True, verbose_name='Order')),
('file', mezzanine.core.fields.FileField(max_length=200, verbose_name='File')),
('description', models.CharField(max_length=1000, verbose_name='Description', blank=True)),
('gallery', models.ForeignKey(related_name='images', to='galleries.Gallery', on_delete=models.CASCADE)),
],
options={
'ordering': ('_order',),
'verbose_name': 'Image',
'verbose_name_plural': 'Images',
},
bases=(models.Model,),
),
]
|
jbzdarkid/Random | mismatched.py | Python | apache-2.0 | 1,737 | 0.011514 |
# -*- coding: utf-8 -*-
from wikitools.api import APIRequest
from wikitools.wiki import Wiki
from wikitools.page import Page
from urllib2 import quote
pairs = [
['"', '"'],
['(', ')'],
['[', ']'],
['{', '}'],
['<!--', '-->'],
['<', '>'],
['<gallery', '</gallery>'],
['<includeonly>', '</includeonly>'],
['<noinclude>', '</noinclude>'],
['<onlyinclude>', '</onlyinclude>'],
['<small>', '</small>'],
['<table>', '</table>'],
['<td>', '</td>'],
['<tr>', '</tr>'],
]
wiki = Wiki('http://wiki.teamfortress.com/w/api.php')
# Returns a list of unmatched element indices.
def find_mismatch(text, pair):
problems = []
for i, char in enumerate(text):
if char == pair[0]:
problems.append(i)
if char == pair[1]:
try:
problems.pop()
except IndexError:
return [i]
return problems
params = {
'action': 'query',
'list': 'allpages',
'apfilterredir': 'nonredirects',
'aplimit': '500',
}
titles = set()
req = APIRequest(wiki, params)
for result in req.queryGen():
for article in result['query']['allpages']:
titles.add(article['title'])
titles = list(titles)
titles.sort()
print 'Found', len(titles), 'pages'
for title in titles:
page = Page(wiki, title)
page.getWikiText()
text = page.getWikiText().lower()
printed_link = False
for pair in pairs:
if text.count(pair[0]) != text.count(pair[1]):
if not printed_link:
print '='*80
print 'https://wiki.teamfortress.com/w/index.php?action=edit&title=%s' % quote(title.encode('utf-8'))
printed_link = True
indices = find_mismatch(text, pair)
print '-'*80
print pair
for index in indices:
print '-'*80
print text[index-100:index+100]
|
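A quick, hypothetical check of find_mismatch() defined above (the input strings are invented): it returns the indices of unmatched opening characters, or the index of the first closer that has nothing to match:

```python
print(find_mismatch("((a)", ['(', ')']))  # [0]  -> the first '(' is never closed
print(find_mismatch("a)", ['(', ')']))    # [1]  -> ')' arrives with nothing to close
```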
js850/pele | pele/takestep/displace.py | Python | gpl-3.0 | 2,126 | 0.01317 |
'''
Created on Jun 6, 2012
@author: vr274
'''
import numpy as np
from generic import TakestepSlice, TakestepInterface
from pele.utils import rotations
__all__ = ["RandomDisplacement", "UniformDisplacement",
"RotationalDisplacement", "RandomCluster"]
class RandomDisplacement(TakestepSlice):
'''Random displacement on each individual coordinate
RandomDisplacement is the most basic step taking routine. It simply
displaces each coordinate by a random value.
Parameters
----------
stepsize : float
magnitude of random displacement
'''
def __init__(self, stepsize=1.0):
TakestepSlice.__init__(self, stepsize=stepsize)
def takeStep(self, coords, **kwargs):
coords[self.srange] += np.random.uniform(low=-self.stepsize, high=self.stepsize, size=coords[self.srange].shape)
class UniformDisplacement(TakestepSlice):
'''Displace each atom by a uniform random vector
The routine generates a proper uniform random unit vector to displace
atoms.
'''
def takeStep(self, coords, **kwargs):
c = coords[self.srange]
for x in c.reshape(c.size/3,3):
x += self.stepsize * rotations.vector_random_uniform_hypersphere(3)
class RotationalDisplacement(TakestepSlice):
'''Random rotation for angle axis vector
RotationalDisplacement performs a proper random rotation. If the coordinate array contains
positions and orientations, make sure to specify the correct slice for the angle axis
coordinates.
'''
def takeStep(self, coords, **kwargs):
"""
take a random orientational step
"""
c = coords[self.srange]
for x in c.reshape(c.size/3,3):
rotations.takestep_aa(x, self.stepsize)
class RandomCluster(TakestepInterface):
'''Generate a random configuration
'''
def __init__(self, volume=1.0):
self.volume = volume
def takeStep(self, coords, **kwargs):
coords[:] = np.random.random(coords.shape) * (self.volume**(1./3.))
|
plotly/python-api | packages/python/plotly/plotly/validators/scatterpolar/marker/_sizemode.py | Python | mit | 537 | 0.001862 |
import _plotly_utils.basevalidators
class SizemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="sizemode", parent_name="scatterpolar.marker", **kwargs
):
super(SizemodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["diameter", "area"]),
**kwargs
)
|
joaormatos/anaconda | Anaconda/pyglet/canvas/win32.py | Python | gpl-3.0 | 3,404 | 0.00235 |
#!/usr/bin/python
# $Id:$
from base import Display, Screen, ScreenMode, Canvas
from pyglet.libs.win32 import _kernel32, _user32, types, constants
from pyglet.libs.win32.constants import *
from pyglet.libs.win32.types import *
class Win32Display(Display):
def get_screens(self):
screens = []
def enum_proc(hMonitor, hdcMonitor, lprcMonitor, dwData):
r = lprcMonitor.contents
width = r.right - r.left
height = r.bottom - r.top
screens.append(
Win32Screen(self, hMonitor, r.left, r.top, width, height))
return True
enum_proc_type = WINFUNCTYPE(BOOL, HMONITOR, HDC, POINTER(RECT), LPARAM)
enum_proc_ptr = enum_proc_type(enum_proc)
_user32.EnumDisplayMonitors(NULL, NULL, enum_proc_ptr, 0)
return screens
class Win32Screen(Screen):
_initial_mode = None
def __init__(self, display, handle, x, y, width, height):
super(Win32Screen, self).__init__(display, x, y, width, height)
self._handle = handle
def get_matching_configs(self, template):
canvas = Win32Canvas(self.display, 0, _user32.GetDC(0))
configs = template.match(canvas)
# XXX deprecate config's being screen-specific
for config in configs:
config.screen = self
return configs
def get_device_name(self):
info = MONITORINFOEX()
info.cbSize = sizeof(MONITORINFOEX)
_user32.GetMonitorInfoW(self._handle, byref(info))
return info.szDevice
def get_modes(self):
device_name = self.get_device_name()
i = 0
modes = []
while True:
mode = DEVMODE()
mode.dmSize = sizeof(DEVMODE)
r = _user32.EnumDisplaySettingsW(device_name, i, byref(mode))
if not r:
break
modes.append(Win32ScreenMode(self, mode))
i += 1
return modes
def get_mode(self):
mode = DEVMODE()
mode.dmSize = sizeof(DEVMODE)
_user32.EnumDisplaySettingsW(self.get_device_name(),
ENUM_CURRENT_SETTINGS,
byref(mode))
return Win32ScreenMode(self, mode)
def set_mode(self, mode):
assert mode.screen is self
if not self._initial_mode:
self._initial_mode = self.get_mode()
r = _user32.ChangeDisplaySettingsExW(self.get_device_name(),
byref(mode._mode),
None,
CDS_FULLSCREEN,
None)
if r == DISP_CHANGE_SUCCESSFUL:
self.width = mode.width
self.height = mode.height
def restore_mode(self):
if self._initial_mode:
self.set_mode(self._initial_mode)
class Win32ScreenMode(ScreenMode):
def __init__(self, screen, mode):
super(Win32ScreenMode, self).__init__(screen)
self._mode = mode
self.width = mode.dmPelsWidth
self.height = mode.dmPelsHeight
self.depth = mode.dmBitsPerPel
self.rate = mode.dmDisplayFrequency
class Win32Canvas(Canvas):
def __init__(self, display, hwnd, hdc):
super(Win32Canvas, self).__init__(display)
self.hwnd = hwnd
self.hdc = hdc
|
cadyyan/codeeval | python/40_self_describing_numbers.py | Python | gpl-3.0 | 379 | 0.007916 |
import re
import sys
def is_self_describing(n):
for i in range(len(n)):
c = n[i]
if int(c) != len(re.findall(str(i), n)):
return False
return True
with open(sys.argv[1], 'r') as fh:
for line in fh.readlines():
line = line.strip()
if line == '':
continue
print 1 if is_self_describing(line) else 0
|
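A short, hypothetical check of the predicate defined above: 2020 is self-describing because it contains two '0's, zero '1's, two '2's and zero '3's, matching its digits read positionally; 2021 is not:

```python
print(is_self_describing("2020"))   # True
print(is_self_describing("2021"))   # False
```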
anhstudios/swganh | data/scripts/templates/object/mobile/shared_dressed_rebel_brigadier_general_rodian_female_01.py | Python | mit | 476 | 0.046218 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_rebel_brigadier_general_rodian_female_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","rodian_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
glidernet/ogn-python | app/main/matplotlib_service.py | Python | agpl-3.0 | 1,400 | 0.002857 |
from app import db
from app.model import DirectionStatistic
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
def create_range_figure2(sender_id):
fig = Figure()
axis = fig.add_subplot(1, 1, 1)
xs = range(100)
ys = [random.randint(1, 50) for x in xs]
axis.plot(xs, ys)
return fig
def create_range_figure(sender_id):
sds = db.session.query(DirectionStatistic) \
.filter(DirectionStatistic.sender_id == sender_id) \
.order_by(DirectionStatistic.directions_count.desc()) \
.limit(1) \
.one()
fig = Figure()
direction_data = sds.direction_data
max_range = max([r['max_range'] / 1000.0 for r in direction_data])
theta = np.array([i['direction'] / 180 * np.pi for i in direction_data])
radii = np.array([i['max_range'] / 1000 if i['max_range'] > 0 else 0 for i in direction_data])
width = np.array([13 / 180 * np.pi for i in direction_data])
colors = plt.cm.viridis(radii / max_range)
ax = fig.add_subplot(111, projection='polar')
ax.bar(theta, radii, width=width, bottom=0.0, color=colors, edgecolor='b', alpha=0.5)
#ax.set_rticks([0, 25, 50, 75, 100, 125, 150])
ax.set_theta_zero_location("N")
ax.set_theta_direction(-1)
fig.suptitle(f"Range between sender '{sds.sender.name}' and receiver '{sds.receiver.name}'")
return fig
|
souravbadami/oppia | core/domain/visualization_registry.py | Python | apache-2.0 | 2,447 | 0 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for visualizations."""
import inspect
from extensions.visualizations import models
class Registry(object):
"""Registry of all visualizations."""
# Dict mapping visualization class names to their classes.
visualizations_dict = {}
@classmethod
def _refresh_registry(cls):
"""Clears and adds new visualization instances to the registry."""
cls.visualizations_dict.clear()
# Add new visualization instances to the registry.
for name, clazz in inspect.getmembers(
models, predicate=inspect.isclass):
if name.endswith('_test') or name == 'BaseVisualization':
continue
ancestor_names = [
base_class.__name__ for base_class in inspect.getmro(clazz)]
if 'BaseVisualization' in ancestor_names:
cls.visualizations_dict[clazz.__name__] = clazz
@classmethod
def get_visualization_class(cls, visualization_id):
"""Gets a visualization class by its id (which is also its class name).
The registry will refresh if the desired class is not found. If it's
still not found after the refresh, this method will throw an error.
"""
if visualization_id not in cls.visualizations_dict:
cls._refresh_registry()
if visualization_id not in cls.visualizations_dict:
raise TypeError(
'\'%s\' is not a valid visualization id.' % visualization_id)
return cls.visualizations_dict[visualization_id]
@classmethod
def get_all_visualization_ids(cls):
"""Gets a visualization class by its id
(which is also its class name).
"""
if not cls.visualizations_dict:
cls._refresh_registry()
return cls.visualizations_dict.keys()
|
OptimoJoe/Optizelle | src/python/Optizelle/Unconstrained/Algorithms.py | Python | bsd-2-clause | 863 | 0.011587 |
__all__ = [
"getMin"
]
__doc__ = "Different algorithms used for optimization"
import Optizelle.Unconstrained.State
import Optizelle.Unconstrained.Functions
from Optizelle.Utility import *
from Optizelle.Properties import *
from Optizelle.Functions import *
def getMin(X, msg, fns, state, smanip=None):
"""Solves an unconstrained optimization problem
Basic solve: getMin(X,msg,fns,state)
Solve with a state manipulator: getMin(X,msg,fns,state,smanip)
"""
if smanip is None:
smanip = StateManipulator()
# Check the arguments
checkVectorSpace("X",X)
checkMessaging("msg",msg)
Optizelle.Unconstrained.Functions.checkT("fns",fns)
Optizelle.Unconstrained.State.checkT("state",state)
checkStateManipulator("smanip",smanip)
# Call the optimization
UnconstrainedAlgorithmsGetMin(X,msg,fns,state,smanip)
|
google/timesketch | api_client/python/timesketch_api_client/user.py | Python | apache-2.0 | 3,300 | 0 |
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API client library."""
import logging
from . import error
from . import resource
logger = logging.getLogger('timesketch_api.user')
class User(resource.BaseResource):
"""User object."""
def __init__(self, api):
"""Initializes the user object."""
self._object_data = None
resource_uri = 'users/me/'
super().__init__(api, resource_uri)
def _get_data(self):
"""Returns dict from the first object of the resource data."""
if self._object_data:
return self._object_data
data = self.data
objects = data.get('objects')
if objects:
self._object_data = objects[0]
else:
self._object_data = {}
return self._object_data
def change_password(self, new_password):
"""Change the password for the user.
Args:
new_password (str): String with the password.
Raises:
ValueError: If there was an error.
Returns:
Boolean: Whether the password was successfully modified.
"""
if not new_password:
raise ValueError('No new password supplied.')
if not isinstance(new_password, str):
raise ValueError('Password needs to be a string value.')
data = {'password': new_password}
resource_url = f'{self.api.api_root}/{self.resource_uri}'
response = self.api.session.post(resource_url, json=data)
return error.check_return_status(response, logger)
@property
def groups(self):
"""Property that returns the groups the user belongs to."""
data = self._get_data()
groups = data.get('groups', [])
return [x.get('name', '') for x in groups]
@property
def is_active(self):
"""Property that returns bool indicating whether the user is active."""
data = self._get_data()
return data.get('active', True)
@property
def is_admin(self):
"""Property that returns bool indicating whether the user is admin."""
data = self._get_data()
return data.get('admin', False)
@property
def username(self):
"""Property that returns back the username of the current user."""
data = self._get_data()
return data.get('username', 'Unknown')
def __str__(self):
"""Returns a string representation of the username."""
user_strings = [self.username]
if self.is_active:
user_strings.append('[active]')
else:
user_strings.append('[inactive]')
if self.is_admin:
user_strings.append('<is admin>')
return ' '.join(user_strings)
|
LaurentCabaret/pyVhdl2Sch | tools/tools.py | Python | bsd-2-clause | 177 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
class Options:
def __init__(self):
self.color = "black"
self.verbose = False
pass
|
Prashant-Surya/addons-server | src/olympia/stats/tests/test_views.py | Python | bsd-3-clause | 38,979 | 0 |
# -*- coding: utf-8 -*-
import csv
import datetime
import os
import shutil
import json
from django.http import Http404
from django.test.client import RequestFactory
import mock
from pyquery import PyQuery as pq
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.urlresolvers import reverse
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon, AddonUser
from olympia.api.tests.utils import APIKeyAuthTestCase
from olympia.bandwagon.models import Collection
from olympia.stats import views, tasks
from olympia.stats import search
from olympia.stats.models import (
CollectionCount, DownloadCount, GlobalStat, ThemeUserCount, UpdateCount,
ThemeUpdateCount)
from olympia.stats.management.commands import save_stats_to_file
from olympia.users.models import UserProfile
class StatsTest(TestCase):
fixtures = ['stats/test_views.json', 'stats/test_models.json']
def setUp(self):
"""Setup some reasonable testing defaults."""
super(StatsTest, self).setUp()
# Default url_args to an addon and range with data.
self.url_args = {'start': '20090601', 'end': '20090930', 'addon_id': 4}
self.url_args_theme = {'start': '20090601', 'end': '20090930',
'addon_id': 6}
# Most tests don't care about permissions.
self.login_as_admin()
def login_as_admin(self):
self.client.logout()
self.client.login(username='jbalogh@mozilla.com', password='password')
def login_as_visitor(self):
self.client.logout()
self.client.login(username='nobodyspecial@mozilla.com',
password='password')
def get_view_response(self, view, **kwargs):
view_args = self.url_args.copy()
head = kwargs.pop('head', False)
view_args.update(kwargs)
url = reverse(view, kwargs=view_args)
if head:
return self.client.head(url, follow=True)
return self.client.get(url, follow=True)
def views_gen(self, **kwargs):
# common set of views
for series in views.SERIES:
for group in views.SERIES_GROUPS:
view = 'stats.%s_series' % series
args = kwargs.copy()
args['group'] = group
yield (view, args)
def public_views_gen(self, **kwargs):
# all views are potentially public, except for contributions
for view, args in self.views_gen(**kwargs):
if not view.startswith('stats.contributions'):
yield (view, args)
def private_views_gen(self, **kwargs):
# only contributions views are always private
for view, args in self.views_gen(**kwargs):
if view.startswith('stats.contributions'):
yield (view, args)
def _check_it(self, views, status):
for view, kwargs in views:
response = self.get_view_response(view, head=True, **kwargs)
assert response.status_code == status
class TestUnlistedAddons(StatsTest):
def setUp(self):
super(TestUnlistedAddons, self).setUp()
Addon.objects.get(pk=4).update(is_listed=False)
def test_no_stats_for_unlisted_addon(self):
"""All the views for the stats return 404 for unlisted addons."""
self.login_as_visitor()
self._check_it(self.public_views_gen(format='json'), 404)
self._check_it(self.private_views_gen(format='json'), 404)
def test_stats_for_unlisted_addon_owner(self):
"""All the views for the stats return 404 for unlisted addons owner."""
self.login_as_admin()
self._check_it(self.public_views_gen(format='json'), 200)
self._check_it(self.private_views_gen(format='json'), 200)
class ESStatsTest(StatsTest, amo.tests.ESTestCase):
"""Test class with some ES setup."""
def setUp(self):
super(ESStatsTest, self).setUp()
self.empty_index('stats')
self.index()
def index(self):
updates = UpdateCount.objects.values_list('id', flat=True)
tasks.index_update_counts(list(updates))
downloads = DownloadCount.objects.values_list('id', flat=True)
tasks.index_download_counts(list(downloads))
user_counts = ThemeUserCount.objects.values_list('id', flat=True)
tasks.index_theme_user_counts(list(user_counts))
self.refresh('stats')
def csv_eq(self, response, expected):
content = csv.DictReader(
# Drop lines that are comments.
filter(lambda row: row[0] != '#', response.content.splitlines()))
expected = csv.DictReader(
# Strip any extra spaces from the expected content.
line.strip() for line in expected.splitlines())
assert tuple(content) == tuple(expected)
class TestSeriesSecurity(StatsTest):
"""Tests to make sure all restricted data remains restricted."""
mock_es = True # We're checking only headers, not content.
def test_private_addon_no_groups(self):
# Logged in but no groups
self.login_as_visitor()
self._check_it(self.views_gen(format='json'), 403)
def test_private_addon_stats_group(self):
# Logged in with stats group.
user = UserProfile.objects.get(email='nobodyspecial@mozilla.com')
group = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=user, group=group)
self.login_as_visitor()
self._check_it(self.public_views_gen(format='json'), 200)
self._check_it(self.private_views_gen(format='json'), 403)
def test_private_addon_contrib_stats_group(self):
# Logged in with stats and contrib stats group.
user = UserProfile.objects.get(email='nobodyspecial@mozilla.com')
group1 = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=user, group=group1)
group2 = Group.objects.create(name='Revenue Stats',
rules='RevenueStats:View')
GroupUser.objects.create(user=user, group=group2)
self.login_as_visitor()
self._check_it(self.public_views_gen(format='json'), 200)
self._check_it(self.private_views_gen(format='json'), 200)
def test_private_addon_anonymous(self):
# Not logged in
self.client.logout()
self._check_it(self.views_gen(format='json'), 403)
def test_public_addon_no_groups(self):
# Logged in but no groups
self.login_as_visitor()
self._check_it(self.public_views_gen(addon_id=5, format='json'), 200)
self._check_it(self.private_views_gen(addon_id=5, format='json'), 403)
def test_public_addon_stats_group(self):
# Logged in with stats group.
user = UserProfile.objects.get(email='nobodyspecial@mozilla.com')
group = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=user, group=group)
self.login_as_visitor()
self._check_it(self.public_views_gen(addon_id=5, format='json'), 200)
self._check_it(self.private_views_gen(addon_id=5, format='json'), 403)
def test_public_addon_contrib_stats_group(self):
# Logged in with stats and contrib stats group.
user = UserProfile.objects.get(email='nobodyspecial@mozilla.com')
group1 = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=user, group=group1)
group2 = Group.objects.create(name='Revenue Stats',
rules='RevenueStats:View')
GroupUser.objects.create(user=user, group=group2)
self.login_as_visitor()
self._check_it(self.public_views_gen(addon_id=5, format='json'), 200)
self._check_it(self.private_views_gen(addon_id=5, format='json'), 200)
def test_public_addon_anonymous(self):
# Not logged in
self.client.logout()
self._check_it(self.public_views_gen(addon_id=5, format='json'), 200)
self._check_it(self.private_views_gen(addon_id=5, format='json'), 403)
class TestCSVs(ESStatsTest):
"""Tests for
|
hultberg/ppinnlevering | core/migrations/0016_auto_20151001_0714.py | Python | apache-2.0 | 843 | 0.001186 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0015_auto_20150928_0850'),
]
operations = [
migrations.CreateModel(
name='UserVote',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('bidrag', models.ForeignKey(to='core.Bidrag')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='uservote',
unique_together=set([('bidrag', 'user')]),
),
]
|
gunan/tensorflow | tensorflow/python/keras/layers/preprocessing/discretization.py | Python | apache-2.0 | 4,879 | 0.005329 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras preprocessing layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
INTEGER = "int"
BINARY = "binary"
class Discretization(Layer):
"""Buckets data into discrete ranges.
This layer will place each element of its input data into one of several
  contiguous ranges and output either an integer index or a one-hot vector
indicating which range each element was placed in.
What happens in `adapt()`: The dataset is examined and sliced.
Input shape:
Any `tf.Tensor` or `tf.RaggedTensor` of dimension 2 or higher.
Output shape:
The same as the input shape if `output_mode` is 'int', or
`[output_shape, num_buckets]` if `output_mode` is 'binary'.
Attributes:
bins: Optional boundary specification. Bins include the left boundary and
exclude the right boundary, so `bins=[0., 1., 2.]` generates bins
`(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`.
output_mode: One of 'int', 'binary'. Defaults to 'int'.
"""
def __init__(self, bins, output_mode=INTEGER, **kwargs):
super(Discretization, self).__init__(**kwargs)
self._supports_ragged_inputs = True
self.bins = bins
self.output_mode = output_mode
def get_config(self):
config = {
"bins": self.bins,
"output_mode": self.output_mode,
}
base_config = super(Discretization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
if self.output_mode == INTEGER:
return input_shape
else:
return tensor_shape.TensorShape([dim for dim in input_shape] +
[len(self.bins)])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = dtypes.int64
if isinstance(input_spec, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=output_dtype)
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def call(self, inputs):
if ragged_tensor.is_ragged(inputs):
integer_buckets = ragged_functional_ops.map_flat_values(
math_ops._bucketize, inputs, boundaries=self.bins) # pylint: disable=protected-access
# Ragged map_flat_values doesn't touch the non-values tensors in the
# ragged composite tensor. If this op is the only op a Keras model,
# this can cause errors in Graph mode, so wrap the tensor in an identity.
integer_buckets = array_ops.identity(integer_buckets)
elif isinstance(inputs, sparse_tensor.SparseTensor):
integer_buckets = math_ops._bucketize( # pylint: disable=protected-access
inputs.values,
boundaries=self.bins)
else:
integer_buckets = math_ops._bucketize(inputs, boundaries=self.bins) # pylint: disable=protected-access
if self.output_mode == INTEGER:
if isinstance(inputs, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(
indices=array_ops.identity(inputs.indices),
values=integer_buckets,
dense_shape=array_ops.identity(inputs.dense_shape))
return integer_buckets
else:
if isinstance(inputs, sparse_tensor.SparseTensor):
raise ValueError("`output_mode=binary` is not supported for "
"sparse input")
# The 'bins' array is the set of boundaries between the bins. We actually
# have 'len(bins)+1' outputs.
# TODO(momernick): This will change when we have the ability to adapt().
return array_ops.one_hot(integer_buckets, depth=len(self.bins) + 1)
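# Hedged usage sketch (added for illustration; not part of the original module).
# With bins=[0., 1., 2.] the layer emits bucket indices 0..3; each boundary closes
# its bucket on the right, so [-1.5, 1.0, 3.4, 0.5] -> [0, 2, 3, 1].
if __name__ == "__main__":
  from tensorflow.python.framework import constant_op
  _demo_layer = Discretization(bins=[0., 1., 2.])
  print(_demo_layer(constant_op.constant([[-1.5, 1.0, 3.4, 0.5]])))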
|
rays/ipodderx-core
|
khashmir/khash.py
|
Python
|
mit
| 3,533
| 0.01019
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
from sha import sha
from random import randint
#this is ugly, hopefully os.entropy will be in 2.4
try:
from entropy import entropy
except ImportError:
def entropy(n):
s = ''
for i in range(n):
s += chr(randint(0,255))
return s
def intify(hstr):
"""20 bit hash, big-endian -> long python integer"""
assert len(hstr) == 20
return long(hstr.encode('hex'), 16)
def stringify(num):
"""long int -> 20-character string"""
str = hex(num)[2:]
if str[-1] == 'L':
str = str[:-1]
if len(str) % 2 != 0:
str = '0' + str
str = str.decode('hex')
return (20 - len(str)) *'\x00' + str
def distance(a, b):
"""distance between two 160-bit hashes expressed as 20-character strings"""
return intify(a) ^ intify(b)
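# Hedged worked example (added for illustration; not part of the original file).
# The XOR metric is zero only for identical ids, is symmetric, and satisfies
# distance(a, c) == distance(a, b) ^ distance(b, c), properties the tests below rely on.
def _distance_example():
    a = '\x00' * 20
    b = '\x00' * 19 + '\x01'
    assert distance(a, b) == 1
    assert distance(a, b) == distance(b, a)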
def newID():
"""returns a new pseudorandom globally unique ID string"""
h = sha()
h.update(entropy(20))
return h.digest()
def newIDInRange(min, max):
return stringify(randRange(min,max))
def randRange(min, max):
return min + intify(newID()) % (max - min)
def newTID():
    return randRange(-2**30, 2**30)
### Test Cases ###
import unittest
class NewID(unittest.TestCase):
def testLength(self):
self.assertEqual(len(newID()), 20)
    def testHundreds(self):
for x in xrange(100):
            self.testLength()
class Intify(unittest.TestCase):
known = [('\0' * 20, 0),
('\xff' * 20, 2L**160 - 1),
]
def testKnown(self):
for str, value in self.known:
self.assertEqual(intify(str), value)
def testEndianessOnce(self):
h = newID()
while h[-1] == '\xff':
h = newID()
k = h[:-1] + chr(ord(h[-1]) + 1)
self.assertEqual(intify(k) - intify(h), 1)
def testEndianessLots(self):
for x in xrange(100):
self.testEndianessOnce()
class Distance(unittest.TestCase):
known = [
(("\0" * 20, "\xff" * 20), 2**160L -1),
((sha("foo").digest(), sha("foo").digest()), 0),
((sha("bar").digest(), sha("bar").digest()), 0)
]
def testKnown(self):
for pair, dist in self.known:
self.assertEqual(distance(pair[0], pair[1]), dist)
    def testCommutative(self):
for i in xrange(100):
x, y, z = newID(), newID(), newID()
self.assertEqual(distance(x,y) ^ distance(y, z), distance(x, z))
class RandRange(unittest.TestCase):
def testOnce(self):
a = intify(newID())
b = intify(newID())
if a < b:
c = randRange(a, b)
self.assertEqual(a <= c < b, 1, "output out of range %d %d %d" % (b, c, a))
else:
c = randRange(b, a)
assert b <= c < a, "output out of range %d %d %d" % (b, c, a)
def testOneHundredTimes(self):
for i in xrange(100):
self.testOnce()
if __name__ == '__main__':
unittest.main()
|
dbaynard/pynomo
|
examples/ex_compound_nomo_1.py
|
Python
|
gpl-3.0
| 4,466
| 0.031572
|
"""
ex_compound_nomo_1.py
Compound nomograph: (A+B)/E=F/(CD)
Copyright (C) 2007-2009 Leif Roschier
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
sys.path.insert(0, "..")
from pynomo.nomographer import *
# type 1
A_params={
'u_min':0.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$A$',
'tick_levels':2,
'tick_text_levels':1,
}
B_params={
'u_min':0.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$B$',
'tick_levels':2,
'tick_text_levels':1,
}
R1a_params={
'u_min':0.0,
'u_max':10.0,
'function':lambda u:-u,
'title':'',
'tick_levels':0,
'tick_text_levels':0,
'tag':'r1'
}
block_1_params={
'block_type':'type_1',
'width':10.0,
'height':10.0,
'f1_params':A_params,
'f2_params':B_params,
'f3_params':R1a_params,
'isopleth_values':[[1,7,'x']]
}
# type 4
R1b_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$R_1$',
        'tick_levels':0,
        'tick_text_levels':0,
'tick_side':'right',
'title_draw_center':True,
'title_opposite_tick':False,
'tag':'r1'
}
E_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$E$',
'tick_levels':3,
'tick_text_levels':1,
'tick_side':'right',
'title_draw_center':True,
'title_opposite_tick':False,
}
F_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$F$',
'tick_levels':3,
'tick_text_levels':1,
'tick_side':'left',
'title_draw_center':True,
'title_opposite_tick':True,
}
R2a_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$R_2$',
'tick_levels':0,
'tick_text_levels':0,
'tick_side':'left',
'title_draw_center':True,
'title_opposite_tick':False,
'tag':'r2'
}
block_2_params={
'block_type':'type_4',
'f1_params':R1b_params,
'f2_params':E_params,
'f3_params':F_params,
'f4_params':R2a_params,
'mirror_x':True,
'isopleth_values':[['x',9,4,'x']]
}
# type 2 N
R2b_params={
'u_min':0.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$$',
'tick_levels':0,
'tick_text_levels':0,
'tag':'r2'
}
C_params={
'u_min':0.5,
'u_max':5.0,
'function':lambda u:u,
'title':r'$C$',
'tick_levels':3,
'tick_text_levels':1,
'tick_side':'left',
'scale_type':'linear smart',
}
D_params={
'u_min':1.0,
'u_max':10.0,
'function':lambda u:u,
'title':r'$D$',
'tick_levels':3,
'tick_text_levels':1,
}
block_3_params={
'block_type':'type_2',
'width':10.0,
'height':10.0,
'f1_params':R2b_params,
'f2_params':C_params,
'f3_params':D_params,
'mirror_y':True,
'isopleth_values':[['x',1,'x']]
}
main_params={
'filename':'ex_compound_nomo_1.pdf',
'paper_height':10.0,
'paper_width':10.0,
'block_params':[block_1_params,block_2_params,block_3_params],
'transformations':[('rotate',0.01),('scale paper',)],
}
Nomographer(main_params)
|
Gargamel1989/Seasoning-old
|
Seasoning/general/admin.py
|
Python
|
gpl-3.0
| 103
| 0.009709
|
from django.contrib import admin
from general.models import StaticPage
admin.site.register(StaticPage)
|
imk1/IMKTFBindingCode
|
getSequencesForSNPs.py
|
Python
|
mit
| 2,698
| 0.021497
|
import sys
from Bio import SeqIO
SNPTOPEAKFILENAME = sys.argv[1]
GENOMEFILENAME = sys.argv[2]
DISTANCE = int(sys.argv[3])
BINDALLELESEQFILENAME = sys.argv[4]
NONBINDALLELEFILENAME = sys.argv[5]
FIRSTPEAKCOL = int(sys.argv[6]) # 0-INDEXED
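# Hedged usage sketch (added for illustration; the file names are hypothetical):
#   python getSequencesForSNPs.py snp_to_peak.txt genome.fa 500 \
#       bind_alleles.txt nonbind_alleles.txt 4
# The SNP-to-peak file is tab-separated with the SNP chromosome and position in
# columns 0-1, the binding and non-binding alleles in columns 2-3, and the peak
# chromosome and position starting at the 0-indexed column given last.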
def getSNPInfo(SNPToPeakLine):
# Get the SNP and peak location from the current line
if SNPToPeakLine == "":
# At the end of the SNP to peak file, so stop
return [("", 0), ("", ""), ("", 0)]
SNPToPeakLineElements = SNPToPeakLine.split("\t")
return [(SNPToPeakLineElements[0], int(SNPToPeakLineElements[1])), (SNPToPeakLineElements[2], SNPToPeakLineElements[3]), (SNPToPeakLineElements[FIRSTPEAKCOL], int(SNPToPeakLineElements[FIRSTPEAKCOL+1]))]
def getSequencesForSNPs():
# For each SNP, get the sequence of its peak +/- distances with the binding and non-binding alleles
SNPToPeakFile = open(SNPTOPEAKFILENAME)
[SNPLocation, SNPAlleles, peakLocation] = getSNPInfo(SNPToPeakFile.readline().strip())
lastPeakLocation = ("", 0)
bindAlleleSeq = ""
nonBindAlleleSeq = ""
bindAlleleSeqFile = open(BINDALLELESEQFILENAME, 'w+')
nonBindAlleleSeqFile = open(NONBINDALLELEFILENAME, 'w+')
numSharingPeak = 0
for seqRecord in SeqIO.parse(GENOMEFILENAME, "fasta"):
# Iterate through the chromosomes and get the sequences surrounding each SNP in each chromosome
# Combine SNPs that are in the same peak, and ASSUME THAT THEY ARE IN LD AND THE BINDING ALLELES CORRESPOND TO EACH OTHER
while seqRecord.id == SNPLocation[0]:
# Iterate through all SNPs on the current chromosome
if peakLocation != lastPeakLocation:
# At a new peak
if lastPeakLocation[0] != "":
# Record the last peak
bindAlleleSeqFile.write("".join(bindAlleleSeq).upper() + "\n")
nonBindAlleleSeqFile.write("".join(nonBindAlleleSeq).upper() + "\n")
bindAlleleSeq = list(str(seqRecord.seq[peakLocation[1] - DISTANCE:peakLocation[1] + DISTANCE - 1]))
nonBindAlleleSeq = list(str(seqRecord.seq[peakLocation[1] - DISTANCE:peakLocation[1] + DISTANCE - 1]))
else:
numSharingPeak = numSharingPeak + 1
            SNPLocationInSeq = DISTANCE - (peakLocation[1] - SNPLocation[1]) - 1
            bindAlleleSeq[SNPLocationInSeq] = SNPAlleles[0]
            nonBindAlleleSeq[SNPLocationInSeq] = SNPAlleles[1]
            lastPeakLocation = peakLocation
[SNPLocation, SNPAlleles, peakLocation] = getSNPInfo(SNPToPeakFile.readline().strip())
print numSharingPeak
bindAlleleSeqFile.write("".join(bindAlleleSeq).upper() + "\n")
nonBindAlleleSeqFile.write("".join(nonBindAlleleSeq).upper() + "\n")
SNPToPeakFile.close()
bindAlleleSeqFile.close()
nonBindAlleleSeqFile.close()
if __name__=="__main__":
getSequencesForSNPs()
|
matbra/radio_fearit
|
build/lib/python3.3/site-packages/pocketsphinx-0.0.9-py3.3-linux-x86_64.egg/pocketsphinx/pocketsphinx.py
|
Python
|
gpl-3.0
| 17,246
| 0.010495
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
This documentation was automatically generated using original comments in
Doxygen format. As some C types and data structures cannot be directly mapped
into Python types, some non-trivial type conversion could have place.
Basically a type is replaced with another one that has the closest match, and
sometimes one argument of generated function comprises several arguments of the
original function (usually two).
Functions having error code as the return value and returning effective
value in one of its arguments are transformed so that the effective value is
returned in a regular fashion and run-time exception is being thrown in case of
negative error code.
"""
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
            fp, pathname, description = imp.find_module('_pocketsphinx', [dirname(__file__)])
except ImportError:
import _pocketsphinx
return _pocketsphinx
if fp is not None:
try:
                _mod = imp.load_module('_pocketsphinx', fp, pathname, description)
finally:
fp.close()
return _mod
_pocketsphinx = swig_import_helper()
del swig_import_helper
else:
import _pocketsphinx
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import sphinxbase
class Hypothesis(object):
"""Proxy of C Hypothesis struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
hypstr = _swig_property(_pocketsphinx.Hypothesis_hypstr_get, _pocketsphinx.Hypothesis_hypstr_set)
best_score = _swig_property(_pocketsphinx.Hypothesis_best_score_get, _pocketsphinx.Hypothesis_best_score_set)
prob = _swig_property(_pocketsphinx.Hypothesis_prob_get, _pocketsphinx.Hypothesis_prob_set)
def __init__(self, *args):
"""__init__(Hypothesis self, char const * hypstr, int best_score, int prob) -> Hypothesis"""
this = _pocketsphinx.new_Hypothesis(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pocketsphinx.delete_Hypothesis
__del__ = lambda self : None;
Hypothesis_swigregister = _pocketsphinx.Hypothesis_swigregister
Hypothesis_swigregister(Hypothesis)
class Segment(object):
"""Proxy of C Segment struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
word = _swig_property(_pocketsphinx.Segment_word_get, _pocketsphinx.Segment_word_set)
ascore = _swig_property(_pocketsphinx.Segment_ascore_get, _pocketsphinx.Segment_ascore_set)
lscore = _swig_property(_pocketsphinx.Segment_lscore_get, _pocketsphinx.Segment_lscore_set)
lback = _swig_property(_pocketsphinx.Segment_lback_get, _pocketsphinx.Segment_lback_set)
prob = _swig_property(_pocketsphinx.Segment_prob_get, _pocketsphinx.Segment_prob_set)
start_frame = _swig_property(_pocketsphinx.Segment_start_frame_get, _pocketsphinx.Segment_start_frame_set)
end_frame = _swig_property(_pocketsphinx.Segment_end_frame_get, _pocketsphinx.Segment_end_frame_set)
def fromIter(*args):
"""fromIter(ps_seg_t * itor) -> Segment"""
return _pocketsphinx.Segment_fromIter(*args)
fromIter = staticmethod(fromIter)
__swig_destroy__ = _pocketsphinx.delete_Segment
__del__ = lambda self : None;
def __init__(self):
"""__init__(Segment self) -> Segment"""
this = _pocketsphinx.new_Segment()
try: self.this.append(this)
except: self.this = this
Segment_swigregister = _pocketsphinx.Segment_swigregister
Segment_swigregister(Segment)
def Segment_fromIter(*args):
"""Segment_fromIter(ps_seg_t * itor) -> Segment"""
return _pocketsphinx.Segment_fromIter(*args)
class NBest(object):
"""Proxy of C NBest struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
hypstr = _swig_property(_pocketsphinx.NBest_hypstr_get, _pocketsphinx.NBest_hypstr_set)
score = _swig_property(_pocketsphinx.NBest_score_get, _pocketsphinx.NBest_score_set)
def fromIter(*args):
"""fromIter(ps_nbest_t * itor) -> NBest"""
return _pocketsphinx.NBest_fromIter(*args)
fromIter = staticmethod(fromIter)
def hyp(self):
"""hyp(NBest self) -> Hypothesis"""
return _pocketsphinx.NBest_hyp(self)
__swig_destroy__ = _pocketsphinx.delete_NBest
__del__ = lambda self : None;
def __init__(self):
"""__init__(NBest self) -> NBest"""
this = _pocketsphinx.new_NBest()
try: self.this.append(this)
except: self.this = this
NBest_swigregister = _pocketsphinx.NBest_swigregister
NBest_swigregister(NBest)
def NBest_fromIter(*args):
"""NBest_fromIter(ps_nbest_t * itor) -> NBest"""
return _pocketsphinx.NBest_fromIter(*args)
class SegmentIterator(object):
"""Proxy of C SegmentIterator struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
ptr = _swig_property(_pocketsphinx.SegmentIterator_ptr_get, _pocketsphinx.SegmentIterator_ptr_set)
def __init__(self, *args):
"""__init__(SegmentIterator self, ps_seg_t * ptr) -> SegmentIterator"""
this = _pocketsphinx.new_SegmentIterator(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pocketsphinx.delete_SegmentIterator
__del__ = lambda self : None;
def next(self):
"""next(SegmentIterator self) -> Segment"""
return _pocketsphinx.SegmentIterator_next(self)
def __next__(self):
"""__next__(SegmentIterator self) -> Segment"""
return _pocketsphinx.SegmentIterator___next__(self)
SegmentIterator_swigregister = _pocketsphinx.SegmentIterator_swigregister
SegmentIterator_swigregister(SegmentIterator)
class NBestIterator(object):
"""Proxy of C NBestIterator struct"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
ptr = _swig_property(_pocketsphinx.NBestIterator_ptr_get, _pocketsphinx.NBestIterator_ptr_set)
def __init__(self, *args):
"""__init__(NBestIterator self, ps_nbest_t * ptr) -> NBestIter
|
pagarme/pagarme-python
|
tests/resources/dictionaries/subscription_dictionary.py
|
Python
|
mit
| 1,513
| 0
|
from pagarme import card
from pagarme import plan
from tests.resources import pagarme_test
from tests.resources.dictionaries import card_dictionary
from tests.resources.dictionaries import customer_dictionary
from tests.resources.dictionaries import plan_dictionary
from tests.resources.dictionaries import transaction_dictionary
CARD = card.create(card_dictionary.VALID_CARD)
NO_TRIAL_PLAN = plan.create(plan_dictionary.NO_TRIAL_PLAN)
POSTBACK_URL = pagarme_test.create_postback_url()
BOLETO_PERCENTAGE_SPLIT_RULE_SUBSCRIPTION = {
"plan_id": NO_TRIAL_PLAN['id'],
"customer": customer_dictionary.CUSTOMER,
"payment_method": "boleto",
"postback_url": POSTBACK_URL,
"split_rules": transaction_dictionary.SPLIT_RULE_PERCENTAGE
}
BOLETO_SUBSCRIPTION = {
"plan_id": NO_TRIAL_PLAN['id'],
"customer": customer_dictionary.CUSTOMER,
"payment_method": "boleto
|
",
"postback_url": POSTBACK_URL
}
CHARGES = {
"charges": "1"
}
CREDIT_CARD_PERCENTAGE_SPLIT_RULE_SUBSCRIPTION = {
"plan_id": NO_TRIAL_PLAN['id'],
"customer": customer_dictionary.CUSTOMER,
"card_id": CARD['id'],
"payment_method": "credit_card",
"postback_url": POSTBACK_URL,
"split_rules": transaction_dictionary.SPLIT_RULE_PERCENTAGE
}
CREDIT_CARD_SUBSCRIPTION = {
"plan_id": NO_TRIAL_PLAN['id'],
"customer": customer_dic
|
tionary.CUSTOMER,
"card_id": CARD['id'],
"payment_method": "credit_card",
"postback_url": POSTBACK_URL
}
UPDATE = {
"payment_method": "boleto"
}
|
etingof/pyasn1-modules
|
tests/test_rfc7292.py
|
Python
|
bsd-2-clause
| 8,295
| 0.000362
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc7292
class PKCS12TestCase(unittest.TestCase):
pfx_pem_text = """\
MIIJ0wIBAzCCCY8GCSqGSIb3DQEHAaCCCYAEggl8MIIJeDCCBggGCSqGSIb3DQEHAaCCBfkE
ggX1MIIF8TCCBe0GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAjuq0/+
0pyutQICB9AEggTYZe/mYBpmkDvKsve4EwIVwo1TNv4ldyx1qHZW2Ih6qQCY+Nv1Mnv9we0z
UTl4p3tQzCPWXnrSA82IgOdotLIez4YwXrgiKhcIkSSL+2yCmAoM+qkjiAIKq+l3UJ6Xhafe
2Kg4Ek/0RkHpe6GwjTtdefkpXpZgccMEopOtKQMLJWsDM7p77x/amn6yIk2tpskKqUY/4n8Y
xEiTWcRtTthYqZQIt+q94nKLYpt0o880SVOfvdEqp5KII7cTg60GJL+n6oN6hmP0bsAMvnk9
1f8/lFKMi9tsNU/KnUhbDVpjJwBQkhgbqBx6GdtoqSLSlYNPVM0wlntwm1JhH4ybiQ5sNzqO
7FlWC5bcYwkvOlx1gGrshY5jK/WjbA4paBpxSkgobJReirY9BeqITnvokXlub4tehHhM20Ik
42pKa3kGaHmowvzflxqE+oysW5Oa9XbZxBCfkOMJ70o4hqa+n66+E/uKcN9NbKbTo3zt3xdt
6ypOwHb74t5OcWaGx3EZsw0n0/V+WoLSpXOBwpx08+1yh7LV29aNQ0oEzVVkF6YYRQZtdIMe
s3xB2i6sjLal21ntk7iBzMJwVoi524SAZ/oW8SuDAn1c93AWWwKZLALv5V3FZ2pDiQXArcfz
DH2d5HJyNx7OlvKzNgEngwSyEC1XbjnOsZVUqGFENuDTa/brH4oEJHEkyWTyDudrz8iCEO80
e1PE4qqJ5CllN0CSVWqz4CxGDFIQXzR6ohn8f3dR3+DAaLYvAjBVMLJjk7+nfnB2L0HpanhT
Fz9AxPPIDf5pBQQwM14l8wKjEHIyfqclupeKNokBUr1ykioPyCr3nf4Rqe0Z4EKIY4OCpW6n
hrkWHmvF7OKR+bnuSk3jnBxjSN0Ivy5q9q3fntYrhscMGGR73umfi8Z29tM1vSP9jBZvirAo
geGf/sfOI0ewRvJf/5abnNg/78Zyk8WmlAHVFzNGcM3u3vhnNpTIVRuUyVkdSmOdbzeSfmqQ
2HPCEdC9HNm25KJt1pD6v6aP3Tw7qGl+tZyps7VB2i+a+UGcwQcClcoXcPSdG7Z1gBTzSr84
MuVPYlePuo1x+UwppSK3rM8ET6KqhGmESH5lKadvs8vdT6c407PfLcfxyAGzjH091prk2oRJ
xB3oQAYcKvkuMcM6FSLJC263Dj+pe1GGEexk1AoysYe67tK0sB66hvbd92HcyWhW8/vI2/PM
bX+OeEb7q+ugnsP+BmF/btWXn9AxfUqNWstyInKTn+XpqFViMIOG4e2xC4u/IvzG3VrTWUHF
4pspH3k7GB/EOLvtbsR0uacBFlsColJy0FaWT9rrdueU3YEiIRCC8LGi1XpUa8f5adeBKWN+
eRTrrF4o7uoNeGlnwZ7ebnb7k18Q0GRzzzTZPoMM4L703svfE/eNYWFHLY4NDQKSYgeum365
WAfZpHOX7YOc6oRGrGB+QuGoyikTTDO8xpcEmb8vDz4ZwHhN0PS056LNJeMoI0A/5DJb3e10
i1txlM48sbZBuIEIeixr52nwG4LuxqXGqShKaTfOrFxHjx4kI4/dp9dN/k8TGFsLWjuIgMJI
6nRHbWrxB3F0XKXagtLLep1MDwDwAuCyiW2YC0JzRvsJViIgjDA+eiHX0O6/8xiK9dzMQpIz
TVHSEqFlhORp0DGB2zATBgkqhkiG9w0BCRUxBgQEAQAAADBXBgkqhkiG9w0BCRQxSh5IADMA
ZgA3ADEAYQBmADYANQAtADEANgA4ADcALQA0ADQANABhAC0AOQBmADQANgAtAGMAOABiAGUA
MQA5ADQAYwAzAGUAOABlMGsGCSsGAQQBgjcRATFeHlwATQBpAGMAcgBvAHMAbwBmAHQAIABF
AG4AaABhAG4AYwBlAGQAIABDAHIAeQBwAHQAbwBnAHIAYQBwAGgAaQBjACAAUAByAG8AdgBp
AGQAZQByACAAdgAxAC4AMDCCA2gGCSqGSIb3DQEHAaCCA1kEggNVMIIDUTCCA00GCyqGSIb3
DQEMCgEDoIIDJTCCAyEGCiqGSIb3DQEJFgGgggMRBIIDDTCCAwkwggHxoAMCAQICEDbt9oc6
oQinRwE1826MiBEwDQYJKoZIhvcNAQEFBQAwFDESMBAGA1UEAxMJYW5vbnltb3VzMCAXDTE2
MDcxOTIyMDAwMVoYDzIxMTYwNjI1MjIwMDAxWjAUMRIwEAYDVQQDEwlhbm9ueW1vdXMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC8trBCTBjXXA4OgSO5nRTOU5T86ObCgc71
J2oCuUigSddcTDzebaD0wcyAgf101hAdwMKQ9DvrK0nGvm7FAMnnUuVeATafKgshLuUTUUfK
jx4Xif4LoS0/ev4BiOI5a1MlIRZ7T5Cyjg8bvuympzMuinQ/j1RPLIV0VGU2HuDxuuP3O898
GqZ3+F6Al5CUcwmOX9zCs91JdN/ZFZ05SXIpHQuyPSPUX5Vy8F1ZeJ8VG3nkbemfFlVkuKQq
vteL9mlT7z95rVZgGB3nUZL0tOB68eMcffA9zUksOmeTi5M6jnBcNeX2Jh9jS3YYd+IEliZm
mggQG7kPta8f+NqezL77AgMBAAGjVTBTMBUGA1UdJQQOMAwGCisGAQQBgjcKAwQwLwYDVR0R
BCgwJqAkBgorBgEEAYI3FAIDoBYMFGFub255bW91c0B3aW5kb3dzLXgAMAkGA1UdEwQCMAAw
DQYJKoZIhvcNAQEFBQADggEBALh+4qmNPzC6M8BW9/SC2ACQxxPh06GQUGx0D+GLYnp61ErZ
OtKyKdFh+uZWpu5vyYYAHCLXP7VdS/JhJy677ynAPjXiC/LAzrTNvGs74HDotD966Hiyy0Qr
ospFGiplHGRA5vXA2CiKSX+0HrVkN7rhk5PYkc6R+/cdosd+QZ8lkEa9yDWc5l//vWEbzwVy
mJf/PRf8NTkWAK6SPV7Y37j1mhkJjOH9VkRxNrd6kcihRa4u0ImXaXEsec77ER0so31DKCrP
m+rqZPj9NZSIYP3sMGJ4Bmm/n2YRdeaUzTdocfD3TRnKxs65DSgpiSq1gmtsXM7jAPs/Egrg
tbWEypgxFTATBgkqhkiG9w0BCRUxBgQEAQAAADA7MB8wBwYFKw4DAhoEFKVgj/32UdEyuQcB
rqr03dPnboinBBSU7mxdpB5LTCvorCI8Tk5OMiUzjgICB9A=
"""
def setUp(self):
self.asn1Spec = rfc7292.PFX()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pfx_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertEqual(3, asn1Object['version'])
oid = asn1Object['macData']['mac']['digestAlgorithm']['algorithm']
self.assertEqual(univ.ObjectIdentifier('1.3.14.3.2.26'), oid)
md_hex = asn1Object['macData']['mac']['digest'].prettyPrint()
self.assertEqual('0xa5608ffdf651d132b90701aeaaf4ddd3e76e88a7', md_hex)
self.assertEqual(
rfc5652.id_data, asn1Object['authSafe']['contentType'])
data, rest = der_decoder(
asn1Object['authSafe']['content'], asn1Spec=univ.OctetString())
self.assertFalse(rest)
authsafe, rest = der_decoder(data, asn1Spec=rfc7292.AuthenticatedSafe())
self.assertFalse(rest)
self.assertTrue(authsafe.prettyPrint())
self.assertEqual(data, der_encoder(authsafe))
for ci in authsafe:
self.assertEqual(rfc5652.id_data, ci['contentType'])
data, rest = der_decoder(ci['content'], asn1Spec=univ.OctetString())
self.assertFalse(rest)
sc, rest = der_decoder(data, asn1Spec=rfc7292.SafeContents())
self.assertFalse(rest)
self.assertTrue(sc.prettyPrint())
self.assertEqual(data, der_encoder(sc))
for sb in sc:
if sb['bagId'] in rfc7292.pkcs12BagTypeMap:
bv, rest = der_decoder(
sb['bagValue'],
asn1Spec=rfc7292.pkcs12BagTypeMap[sb['bagId']])
self.assertFalse(rest)
self.assertTrue(bv.prettyPrint())
self.assertEqual(sb['bagValue'], der_encoder(bv))
for attr in sb['bagAttributes']:
if attr['attrType'] in rfc5652.cmsAttributesMap:
av, rest = der_decoder(
attr['attrValues'][0],
asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
self.assertFalse(rest)
self.assertTrue(av.prettyPrint())
self.assertEqual(
attr['attrValues'][0], der_encoder(av))
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.pfx_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
digest_alg = asn1Object['macData']['mac']['digestAlgorithm']
        self.assertFalse(digest_alg['parameters'].hasValue())
authsafe, rest = der_decoder(
asn1Object['authSafe']['content'],
asn1Spec=rfc7292.AuthenticatedSafe(),
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(authsafe.prettyPrint())
self.assertEqual(
            asn1Object['authSafe']['content'], der_encoder(authsafe))
for ci in authsafe:
self.assertEqual(rfc5652.id_data, ci['contentType'])
sc, rest = der_decoder(
ci['content'], asn1Spec=rfc7292.SafeContents(),
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(sc.prettyPrint())
self.assertEqual(ci['content'], der_encoder(sc))
for sb in sc:
if sb['bagId'] == rfc7292.id_pkcs8ShroudedKeyBag:
bv = sb['bagValue']
enc_alg = bv['encryptionAlgorithm']['algorithm']
self.assertEqual(
rfc7292.pbeWithSHAAnd3_KeyTripleDES_CBC, enc_alg)
enc_alg_param = bv['encryptionAlgorithm']['parameters']
self.assertEqual(2000, enc_alg_param['iterations'])
suite = unittest.TestLoader().loadTest
|
jigarkb/CTCI
|
LeetCode/070-E-ClimbingStairs.py
|
Python
|
mit
| 877
| 0.00114
|
# You are climbing a stair case. It takes n steps to reach to the top.
#
# Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
#
# Note: Given n will be a positive integer.
#
# Example 1:
#
# Input: 2
# Output: 2
# Explanation: There are two ways to climb to the top.
# 1. 1 step + 1 step
# 2. 2 steps
#
# Example 2:
#
# Input: 3
# Output: 3
# Explanation: There are three ways to climb to the top.
# 1. 1 step + 1 step + 1 step
# 2. 1 step + 2 steps
# 3. 2 steps + 1 step
class Solution(object):
def climbStairs(self, n):
"""
|
:type n: int
:rtype: int
"""
table = [1, 2]
i = 2
while i < n:
table.append(table[i-1] + table[i-2])
i += 1
return table[n-1]
# Note:
# The naive recursion branches into two subtrees (take 1 step or take 2 steps)
# and adds their counts; the table above memoizes this Fibonacci-style recurrence
# bottom-up so each subproblem is computed only once.
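# Hedged sanity check (added for illustration): the table follows the recurrence
# ways(n) = ways(n-1) + ways(n-2) with ways(1) = 1 and ways(2) = 2, so the values
# for n = 1..5 are 1, 2, 3, 5, 8.
if __name__ == "__main__":
    assert Solution().climbStairs(5) == 8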
|
xyloeric/pi
|
piExp/pi/views.py
|
Python
|
bsd-3-clause
| 101
| 0.029703
|
from django.http import HttpResponse
def hello_world(request):
return HttpResponse("Hello, w
|
orld.")
|
if1live/easylinker
|
easylinker/cli.py
|
Python
|
mit
| 658
| 0.004559
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import sys
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf-8')
from . import config
from . import parsers
def main():
if len(sys.argv) == 2:
filename = sys.argv[1]
filename = parsers.to_unicode(filename)
parsers.run(filename)
|
else:
msg = 'Usage: {} <metadata>'.format(sys.argv[0])
print(msg)
        print('\nPredefined Variables')
for k, v in config.PREDEFINED_VARIABLE_TABLE.items():
print('{}\t: {}'.format(k, v))
if __name__ == '__main__':
main()
|
masayukig/tempest
|
tempest/api/image/v2/admin/test_images.py
|
Python
|
apache-2.0
| 2,341
| 0
|
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):
    @decorators.related_bug('1420008')
@decorators.idempotent_id('646a6eaa-135f-4493-a0af-12583021224e')
def test_create_image_owner_param(self):
# NOTE: Create image with owner different from tenant owner by
# using "owner" parameter requires an admin privileges.
random_id = data_utils.rand_uuid_hex()
image = self.admin_client.create_image(
container_format='bare', disk_format='raw', owner=random_id)
|
self.addCleanup(self.admin_client.delete_image, image['id'])
image_info = self.admin_client.show_image(image['id'])
self.assertEqual(random_id, image_info['owner'])
@decorators.related_bug('1420008')
@decorators.idempotent_id('525ba546-10ef-4aad-bba1-1858095ce553')
def test_update_image_owner_param(self):
random_id_1 = data_utils.rand_uuid_hex()
image = self.admin_client.create_image(
container_format='bare', disk_format='raw', owner=random_id_1)
self.addCleanup(self.admin_client.delete_image, image['id'])
created_image_info = self.admin_client.show_image(image['id'])
random_id_2 = data_utils.rand_uuid_hex()
self.admin_client.update_image(
image['id'], [dict(replace="/owner", value=random_id_2)])
updated_image_info = self.admin_client.show_image(image['id'])
self.assertEqual(random_id_2, updated_image_info['owner'])
self.assertNotEqual(created_image_info['owner'],
updated_image_info['owner'])
|
mccdaq/mcculw
|
examples/ui/DaqDevDiscovery01.py
|
Python
|
mit
| 5,694
| 0
|
"""
File: DaqDevDiscovery01.py
Library Call Demonstrated: mcculw.ul.get_daq_device_inventory()
mcculw.ul.create_daq_device()
mcculw.ul.release_daq_device()
Purpose: Discovers DAQ devices and assigns board number to
the detected devices.
Demonstration: Displays the detected DAQ devices and flashes the
LED of the selected device.
Other Library Calls: mcculw.ul.ignore_instacal()
|
mcculw.ul.flash_led()
"""
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
import tkinter as tk
from tkinter import StringVar
from tkinter.ttk import Combobox # @UnresolvedImport
from mcculw import ul
from mcculw.enums import InterfaceType
from mcculw.ul import ULError
try:
from ui_examples_util import UIExample, show_ul_error
except ImportError:
    from .ui_examples_util import UIExample, show_ul_error
class DaqDevDiscovery01(UIExample):
def __init__(self, master):
super(DaqDevDiscovery01, self).__init__(master)
self.board_num = 0
self.device_created = False
# Tell the UL to ignore any boards configured in InstaCal
ul.ignore_instacal()
self.create_widgets()
def discover_devices(self):
self.inventory = ul.get_daq_device_inventory(InterfaceType.ANY)
if len(self.inventory) > 0:
combobox_values = []
for device in self.inventory:
combobox_values.append(str(device))
self.devices_combobox["values"] = combobox_values
self.devices_combobox.current(0)
self.status_label["text"] = (str(len(self.inventory))
+ " DAQ Device(s) Discovered")
self.devices_combobox["state"] = "readonly"
self.flash_led_button["state"] = "normal"
else:
self.devices_combobox["values"] = [""]
self.devices_combobox.current(0)
self.status_label["text"] = "No Devices Discovered"
self.devices_combobox["state"] = "disabled"
self.flash_led_button["state"] = "disabled"
def flash_led(self):
try:
# Flash the device LED
ul.flash_led(self.board_num)
except ULError as e:
show_ul_error(e)
def selected_device_changed(self, *args): # @UnusedVariable
selected_index = self.devices_combobox.current()
inventory_count = len(self.inventory)
if self.device_created:
# Release any previously configured DAQ device from the UL.
ul.release_daq_device(self.board_num)
self.device_created = False
if inventory_count > 0 and selected_index < inventory_count:
descriptor = self.inventory[selected_index]
# Update the device ID label
self.device_id_label["text"] = descriptor.unique_id
# Create the DAQ device from the descriptor
# For performance reasons, it is not recommended to create
# and release the device every time hardware communication is
# required. Instead, create the device once and do not release
# it until no additional library calls will be made for this
# device
ul.create_daq_device(self.board_num, descriptor)
self.device_created = True
def create_widgets(self):
'''Create the tkinter UI'''
main_frame = tk.Frame(self)
main_frame.pack(fill=tk.X, anchor=tk.NW)
discover_button = tk.Button(main_frame)
discover_button["text"] = "Discover DAQ Devices"
discover_button["command"] = self.discover_devices
discover_button.pack(padx=3, pady=3)
self.status_label = tk.Label(main_frame)
self.status_label["text"] = "Status"
self.status_label.pack(anchor=tk.NW, padx=3, pady=3)
results_group = tk.LabelFrame(self, text="Discovered Devices")
results_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3)
self.selected_device_textvar = StringVar()
self.selected_device_textvar.trace('w', self.selected_device_changed)
self.devices_combobox = Combobox(
results_group, textvariable=self.selected_device_textvar)
self.devices_combobox["state"] = "disabled"
self.devices_combobox.pack(fill=tk.X, padx=3, pady=3)
device_id_frame = tk.Frame(results_group)
device_id_frame.pack(anchor=tk.NW)
device_id_left_label = tk.Label(device_id_frame)
device_id_left_label["text"] = "Device Identifier:"
device_id_left_label.grid(row=0, column=0, sticky=tk.W, padx=3, pady=3)
self.device_id_label = tk.Label(device_id_frame)
self.device_id_label.grid(row=0, column=1, sticky=tk.W, padx=3, pady=3)
self.flash_led_button = tk.Button(results_group)
self.flash_led_button["text"] = "Flash LED"
self.flash_led_button["command"] = self.flash_led
self.flash_led_button["state"] = "disabled"
self.flash_led_button.pack(padx=3, pady=3)
button_frame = tk.Frame(self)
button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)
quit_button = tk.Button(button_frame)
quit_button["text"] = "Quit"
quit_button["command"] = self.master.destroy
quit_button.grid(row=0, column=1, padx=3, pady=3)
# Start the example if this module is being run
if __name__ == "__main__":
# Start the example
DaqDevDiscovery01(master=tk.Tk()).mainloop()
|
culturagovbr/sistema-nacional-cultura
|
apiv2/filters.py
|
Python
|
agpl-3.0
| 6,975
| 0.001434
|
from django.db.models import Q
from django_filters import rest_framework as filters
from adesao.models import SistemaCultura, UFS
from planotrabalho.models import Componente
class SistemaCulturaFilter(filters.FilterSet):
ente_federado = filters.CharFilter(
field_name='ente_federado__nome__unaccent', lookup_expr='icontains')
estado_sigla = filters.CharFilter(method='sigla_filter')
cnpj_prefeitura = filters.CharFilter(
field_name='sede__cnpj', lookup_expr='contains')
situacao_adesao = filters.CharFilter(
field_name='estado_processo', lookup_expr='exact')
data_adesao = filters.DateFilter(
field_name='data_publicacao_acordo')
data_adesao_min = filters.DateFilter(
field_name='data_publicacao_acordo', lookup_expr=('gte'))
data_adesao_max = filters.DateFilter(
field_name='data_publicacao_acordo', lookup_expr=('lte'))
data_componente_min = filters.DateFilter(
field_name='data_componente_acordo', lookup_expr=('gte'),
method='data_componente_min')
data_componente_max = filters.DateFilter(
field_name='data_componente_acordo', lookup_expr=('lte'),
method='data_componente_max')
data_lei_min = filters.DateFilter(
field_name='legislacao__data_publicacao', lookup_expr=('gte'))
data_lei_max = filters.DateFilter(
field_name='legislacao__data_publicacao', lookup_expr=('lte'))
data_orgao_gestor_min = filters.DateFilter(
field_name='orgao_gestor__data_publicacao', lookup_expr=('gte'))
data_orgao_gestor_max = filters.DateFilter(
field_name='orgao_gestor__data_publicacao', lookup_expr=('lte'))
data_orgao_gestor_cnpj_min = filters.DateFilter(
field_name='orgao_gestor__comprovante_cnpj__data_envio', lookup_expr=('gte'))
data_orgao_gestor_cnpj_max = filters.DateFilter(
field_name='orgao_gestor__comprovante_cnpj__data_envio', lookup_expr=('lte'))
data_conselho_min = filters.DateFilter(
field_name='conselho__data_publicacao', lookup_expr=('gte'))
data_conselho_max = filters.DateFilter(
field_name='conselho__data_publicacao', lookup_expr=('lte'))
data_conselho_lei_min = filters.DateFilter(
field_name='conselho__lei__data_publicacao', lookup_expr=('gte'))
data_conselho_lei_max = filters.DateFilter(
field_name='conselho__lei__data_publicacao', lookup_expr=('lte'))
data_fundo_cultura_min = filters.DateFilter(
field_name='legislacao__data_publicacao', lookup_expr=('gte'))
data_fundo_cultura_max = filters.DateFilter(
field_name='legislacao__data_publicacao', lookup_expr=('lte'))
data_fundo_cultura_cnpj_min = filters.DateFilter(
        field_name='fundo_cultura__comprovante_cnpj__data_envio', lookup_expr=('gte'))
data_fundo_cultura_cnpj_max = filters.DateFilter(
        field_name='fundo_cultura__comprovante_cnpj__data_envio', lookup_expr=('lte'))
data_plano_min = filters.DateFilter(
field_name='plano__data_publicacao', lookup_expr=('gte'))
data_plano_max = filters.DateFilter(
field_name='plano__data_publicacao', lookup_expr=('lte'))
data_plano_meta_min = filters.DateFilter(
field_name='plano__metas__data_envio', lookup_expr=('gte'))
data_plano_meta_max = filters.DateFilter(
field_name='plano__metas__data_envio', lookup_expr=('lte'))
orgao_gestor_dados_bancarios = filters.BooleanFilter(method='gestor_dados_bancarios_filter')
fundo_cultura_dados_bancarios = filters.BooleanFilter(method='fundo_cultura_dados_bancarios_filter')
situacao_lei_sistema = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='legislacao__situacao',
to_field_name='situacao'
)
situacao_orgao_gestor = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='orgao_gestor__situacao',
to_field_name='situacao'
)
situacao_conselho_cultural = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='conselho__situacao',
to_field_name='situacao'
)
situacao_fundo_cultura = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='fundo_cultura__situacao',
to_field_name='situacao'
)
situacao_plano_cultura = filters.ModelMultipleChoiceFilter(
queryset=Componente.objects.all(),
field_name='plano__situacao',
to_field_name='situacao'
)
municipal = filters.BooleanFilter(method='municipal_filter')
estadual = filters.BooleanFilter(method='estadual_filter')
class Meta:
model = SistemaCultura
exclude = (
'oficio_cadastrador',
'oficio_prorrogacao_prazo',)
def gestor_dados_bancarios_filter(self, queryset, name, value):
queryset = queryset.exclude(orgao_gestor__banco='').exclude(orgao_gestor__agencia='').exclude(orgao_gestor__conta='').exclude(orgao_gestor__banco__isnull=True).exclude(orgao_gestor__agencia__isnull=True).exclude(orgao_gestor__conta__isnull=True)
return queryset
def fundo_cultura_dados_bancarios_filter(self, queryset, name, value):
queryset = queryset.exclude(fundo_cultura__banco='').exclude(fundo_cultura__agencia='').exclude(fundo_cultura__conta='').exclude(fundo_cultura__banco__isnull=True).exclude(fundo_cultura__agencia__isnull=True).exclude(fundo_cultura__conta__isnull=True)
return queryset
def sigla_filter(self, queryset, name, value):
try:
inverseUf = {value: key for key, value in UFS.items()}
cod_ibge = inverseUf[value.upper()]
except Exception:
cod_ibge = value
return queryset.filter(Q(ente_federado__cod_ibge__startswith=cod_ibge))
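    # Hedged example (added note): a request like ?estado_sigla=sp is upper-cased,
    # looked up in the inverted UFS mapping to obtain the state's IBGE code, and
    # then matched against ente_federado.cod_ibge as a prefix; if the lookup fails,
    # the raw value is used as the code itself.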
def estadual_filter(self, queryset, name, value):
pular_filtro = self.checar_filtro_municipal_estadual_ativos()
if(pular_filtro):
return queryset
if value:
queryset = queryset.filter(ente_federado__cod_ibge__lte=100)
return queryset
def municipal_filter(self, queryset, name, value):
pular_filtro = self.checar_filtro_municipal_estadual_ativos()
if(pular_filtro):
return queryset
if value:
queryset = queryset.filter(ente_federado__cod_ibge__gt=100)
return queryset
def checar_filtro_municipal_estadual_ativos(self):
try:
estadual_filter = self.data.getlist('estadual')[0]
municipal_filter = self.data.getlist('municipal')[0]
except IndexError:
return False
if(estadual_filter == 'true' and municipal_filter == 'true'):
return True
return False
class PlanoTrabalhoFilter(SistemaCulturaFilter):
class Meta:
model = SistemaCultura
exclude = (
'oficio_cadastrador',
'oficio_prorrogacao_prazo',)
|
yephper/django
|
django/db/models/query_utils.py
|
Python
|
bsd-3-clause
| 13,827
| 0.000579
|
"""
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
from __future__ import unicode_literals
import inspect
from collections import namedtuple
from django.core.exceptions import FieldDoesNotExist
from django.db.backends import utils
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2m direct')
class InvalidQuery(Exception):
"""
The query passed to raw isn't a safe query to use with raw.
"""
pass
class QueryWrapper(object):
"""
    A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
contains_aggregate = False
def __init__(self, sql, params):
self.data = sql, list(params)
def as_sql(self, compiler=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + list(kwargs.items()))
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
def clone(self):
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(self, reuse, allow_joins=allow_joins, split_subq=False)
query.promote_joins(joins)
return clause
@classmethod
def _refs_aggregate(cls, obj, existing_aggregates):
if not isinstance(obj, tree.Node):
aggregate, aggregate_lookups = refs_aggregate(obj[0].split(LOOKUP_SEP), existing_aggregates)
if not aggregate and hasattr(obj[1], 'refs_aggregate'):
return obj[1].refs_aggregate(existing_aggregates)
return aggregate, aggregate_lookups
for c in obj.children:
aggregate, aggregate_lookups = cls._refs_aggregate(c, existing_aggregates)
if aggregate:
return aggregate, aggregate_lookups
return False, ()
def refs_aggregate(self, existing_aggregates):
if not existing_aggregates:
return False
return self._refs_aggregate(self, existing_aggregates)
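# Hedged usage sketch (added for illustration; the Entry model and its fields are
# assumptions, not part of this module): Q objects are combined with & and |, and
# negated with ~, before being handed to a queryset, e.g.
#   Entry.objects.filter(Q(headline__startswith='What') & ~Q(pub_date__year=2005))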
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
def __get__(self, instance, cls=None):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
non_deferred_model = instance._meta.proxy_for_model
opts = non_deferred_model._meta
assert instance is not None
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
f = opts.get_field(self.field_name)
except FieldDoesNotExist:
f = [f for f in opts.fields if f.attname == self.field_name][0]
name = f.name
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance, name)
if val is None:
instance.refresh_from_db(fields=[self.field_name])
val = getattr(instance, self.field_name)
data[self.field_name] = val
return data[self.field_name]
def __set__(self, instance, value):
"""
Deferred loading attributes can be set normally (which means there will
        never be a database lookup involved).
"""
instance.__dict__[self.field_name] = value
def _check_parent_chain(self, instance, name):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be fetched
field is a primary key field.
"""
opts = instance._meta
f = opts.get_field(name)
link_field = opts.get_ancestor_link(f.model)
if f.primary_key and f != link_field:
return getattr(instance, link_field.attname)
return None
class RegisterLookupMixin(object):
def _get_lookup(self, lookup_name):
try:
return self.class_lookups[lookup_name]
except KeyError:
# To allow for inheritance, check parent class' class_lookups.
for parent in inspect.getmro(self.__class__):
if 'class_lookups' not in parent.__dict__:
continue
if lookup_name in parent.class_lookups:
return parent.class_lookups[lookup_name]
except AttributeError:
# This class didn't have any class_lookups
pass
return None
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@classmethod
|
def register_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
        if 'class_lookups' not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
return lookup
@classmethod
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
def select_related_des
|
googleinterns/audio_synthesis
|
experiments/representation_study/train_spec_gan.py
|
Python
|
apache-2.0
| 3,424
| 0.003505
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training Script for SpecGAN on a waveform dataset.
This follows the original SpecGAN training,
where the magnitude spectra are normalized
to sit between -1 and 1.
"""
import os
import tensorflow as tf
import numpy as np
from tensorflow.keras import activations, utils
from audio_synthesis.structures import spec_gan
from audio_synthesis.models import wgan
from audio_synthesis.datasets import waveform_dataset
from audio_synthesis.utils import waveform_save_helper as save_helper
# Setup Paramaters
D_UPDATES_PER_G = 5
Z_DIM = 64
BATCH_SIZE = 64
EPOCHS = 1800
SAMPLING_RATE = 16000
GRIFFIN_LIM_ITERATIONS = 16
FFT_FRAME_LENGTH = 512
FFT_FRAME_STEP = 128
LOG_MAGNITUDE = True
Z_IN_SHAPE = [4, 8, 1024]
SPECTOGRAM_IMAGE_SHAPE = [-1, 128, 256, 1]
CHECKPOINT_DIR = '_results/representation_study/SpeechMNIST/SpecGAN_HR/training_checkpoints/'
RESULT_DIR = '_results/representation_study/SpeechMNIST/SpecGAN_HR/audio/'
DATASET_PATH = 'data/SpeechMNIST_1850.npz'
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
print('Num GPUs Available: ', len(tf.config.experimental.list_physical_devices('GPU')))
raw_dataset, magnitude_stats, _ =\
waveform_dataset.get_magnitude_phase_dataset(
DATASET_PATH, FFT_FRAME_LENGTH, FFT_FRAME_STEP, LOG_MAGNITUDE
)
raw_dataset = raw_dataset[:, :, :, 0] # Remove the phase information
normalized_raw_dataset = []
pb_i = utils.Progbar(len(raw_dataset))
for data_point in raw_dataset:
normalized_raw_dataset.append(waveform_dataset.normalize(
data_point, *magnitude_stats
))
pb_i.add(1)
normalized_raw_dataset = np.array(normalized_raw_dataset)
generator = spec_gan.Generator(activation=activations.tanh, in_shape=Z_IN_SHAPE)
discriminator = spec_gan.Discriminator(input_shape=SPECTOGRAM_IMAGE_SHAPE)
generator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9)
get_waveform = lambda magnitude:\
save_helper.get_waveform_from_normalized_magnitude(
magnitude, magnitude_stats, GRIFFIN_LIM_ITERATIONS, FFT_FRAME_LENGTH,
FFT_FRAME_STEP, LOG_MAGNITUDE
)
    save_examples = lambda epoch, real, generated:\
save_helper.save_wav_data(
epoch, real, generated, SAMPLING_RATE, RESULT_DIR, get_waveform
)
spec_gan_model = wgan.WGAN(
normalized_raw_dataset, generator, [discriminator], Z_DIM, generator_optimizer,
        discriminator_optimizer, discriminator_training_ratio=D_UPDATES_PER_G,
batch_size=BATCH_SIZE, epochs=EPOCHS, checkpoint_dir=CHECKPOINT_DIR,
fn_save_examples=save_examples
)
spec_gan_model.restore('ckpt-129', 1290)
spec_gan_model.train()
if __name__ == '__main__':
main()
|
spektom/incubator-airflow
|
airflow/providers/google/marketing_platform/operators/search_ads.py
|
Python
|
apache-2.0
| 7,440
| 0.00121
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Search Ads operators.
"""
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Optional
from airflow import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.marketing_platform.hooks.search_ads import GoogleSearchAdsHook
from airflow.utils.decorators import apply_defaults
class GoogleSearchAdsInsertReportOperator(BaseOperator):
"""
Inserts a report request into the reporting system.
.. seealso:
For API documentation check:
https://developers.google.com/search-ads/v2/reference/reports/request
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsInsertReportOperator`
:param report: Report to be generated
:type report: Dict[str, Any]
:param api_version: The version of the api that will be requested for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report",)
template_ext = (".json",)
@apply_defaults
def __init__(
self,
report: Dict[str, Any],
api_version: str = "v2",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.report = report
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
def execute(self, context: Dict):
hook = GoogleSearchAdsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
)
self.log.info("Generating Search Ads report")
response = hook.insert_report(report=self.report)
report_id = response.get("id")
self.xcom_push(context, key="report_id", value=report_id)
self.log.info("Report generated, id: %s", report_id)
return response
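# Hedged DAG usage sketch (added for illustration; the task id and the report body
# below are assumptions, not taken from this module):
#   generate_report = GoogleSearchAdsInsertReportOperator(
#       report={
#           "reportScope": {"agencyId": "<AGENCY_ID>"},
#           "reportType": "account",
#           "columns": [{"columnName": "agency"}],
#           "downloadFormat": "csv",
#           "maxRowsPerFile": 1000000,
#           "statisticsCurrency": "usd",
#       },
#       task_id="generate_report",
#   )
# The operator pushes the generated report id to XCom under the key "report_id",
# which the download operator below can pick up before fetching the report files.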
class GoogleSearchAdsDownloadReportOperator(BaseOperator):
"""
Downloads a report to GCS bucket.
.. seealso:
For API documentation check:
https://developers.google.com/search-ads/v2/reference/reports/getFile
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsGetfileReportOperator`
:param report_id: ID of the report.
:type report_id: str
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param report_name: The report name to set when uploading the local file. If not provided then
report_id is used.
:type report_name: str
:param gzip: Option to compress local file or file data for upload
:type gzip: bool
:param api_version: The version of the api that will be requested for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
        request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report_name", "report_id", "bucket_name")
@apply_defaults
def __init__(
self,
report_id: str,
bucket_name: str,
report_name: Optional[str] = None,
gzip: bool = True,
chunk_size: int = 10 * 1024 * 1024,
api_version: str = "v2",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.report_id = report_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.chunk_size = chunk_size
self.gzip = gzip
self.bucket_name = self._set_bucket_name(bucket_name)
self.report_name = report_name
def _resolve_file_name(self, name: str) -> str:
csv = ".csv"
gzip = ".gz"
if not name.endswith(csv):
name += csv
if self.gzip:
name += gzip
return name
@staticmethod
def _set_bucket_name(name: str) -> str:
bucket = name if not name.startswith("gs://") else name[5:]
return bucket.strip("/")
@staticmethod
def _handle_report_fragment(fragment: bytes) -> bytes:
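        # Drop everything up to the first newline (the repeated header row) from
        # every fragment after the first, so the concatenated report keeps a
        # single header.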
fragment_records = fragment.split(b"\n", 1)
if len(fragment_records) > 1:
return fragment_records[1]
return b""
def execute(self, context: Dict):
hook = GoogleSearchAdsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to
)
# Resolve file name of the report
report_name = self.report_name or self.report_id
report_name = self._resolve_file_name(report_name)
response = hook.get(report_id=self.report_id)
if not response['isReportReady']:
raise AirflowException('Report {} is not ready yet'.format(self.report_id))
# Resolve report fragments
fragments_count = len(response["files"])
# Download chunks of report's data
self.log.info("Downloading Search Ads report %s", self.report_id)
with NamedTemporaryFile() as temp_file:
for i in range(fragments_count):
byte_content = hook.get_file(
report_fragment=i, report_id=self.report_id
)
fragment = (
byte_content
if i == 0
else self._handle_report_fragment(byte_content)
)
temp_file.write(fragment)
temp_file.flush()
gcs_hook.upload(
bucket_name=self.bucket_name,
object_name=report_name,
gzip=self.gzip,
filename=temp_file.name,
)
self.xcom_push(context, key="file_name", value=report_name)
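# Example (hypothetical): a minimal sketch of downloading the report created by
# the insert task sketched above. ``report_id`` is a template field, so it can
# be pulled from the XCom value pushed in GoogleSearchAdsInsertReportOperator;
# the task_id and bucket name are illustrative placeholders.
def _example_download_report_task(dag):
    return GoogleSearchAdsDownloadReportOperator(
        task_id="download_search_ads_report",
        report_id="{{ task_instance.xcom_pull('insert_search_ads_report', key='report_id') }}",
        bucket_name="example-search-ads-bucket",
        dag=dag,
    )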
|
Painatalman/python101
|
sources/101_test.py
|
Python
|
apache-2.0
| 519
| 0.003854
|
from fruits import validate_fruit
fruits = ["banana", "lemon", "apple", "orange", "batman"]
print fruits
def list_fruits(fruits, byName=True):
if byName:
        # WARNING: this won't make a copy of the list and return it. It will change the list FOREVER
fruits.sort()
for index, fruit in enumerate(fruits):
if validate_fruit(fruit):
print "Fruit nr %d is %s" % (index, fruit)
else:
print "This %s is no fr
|
uit!" % (fruit)
list_fruits(fruits)
print fruits
|
epssy/hue
|
desktop/libs/libzookeeper/src/libzookeeper/models.py
|
Python
|
apache-2.0
| 1,274
| 0.007064
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from kazoo.client import KazooClient
from libzookeeper.conf import PRINCIPAL_NAME
def get_children_data(ensemble, namespace, read_only=True):
zk = KazooClient(hosts=ensemble, read_only=read_only, sasl_server_principal=PRINCIPAL_NAME.get())
zk.start()
children_data = []
children = zk.get_children(namespace)
for node in children:
data, stat = zk.get("%s/%s" % (namespace, node))
children_data.append(data)
zk.stop()
  return children_data
|
tlevine/django-inplaceedit
|
testing/run_tests.py
|
Python
|
lgpl-3.0
| 1,189
| 0.000841
|
#!/usr/bin/env python
# Copyright (c) 2010-2013 by Yaco Sistemas <goinnn@gmail.com> or <pmartin@yaco.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import django
import os
import sys
from django.conf import ENVIRONMENT_VARIABLE
from django.core import management
if len(sys.argv) == 1:
os.environ[ENVIRONMENT_VARIABLE] = 'testing.settings'
else:
os.environ[ENVIRONMENT_VARIABLE] = sys.argv[1]
if django.VERSION[0] == 1 and django.VERSION[1] <= 5:
    management.call_command('test', 'unit_tests')
else:
management.call_command('test', 'testing.unit_tests')
|
seanxwzhang/LeetCode
|
Airbnb/preference_list.py
|
Python
|
mit
| 788
| 0.007003
|
# Each person has a preference ordering; produce an overall ordering that does not violate any individual's preferences. Solved with a topological sort (https://instant.1point3acres.com/thread/207601)
import itertools
import collections
def preferenceList1(prefList): # topological sort 1
pairs = []
for lis in prefList:
for left, right in zip(lis, lis[1:]):
            pairs += (left, right),
allItems, res = set(itertools.chain(*pairs)), []
while pairs:
free = allItems - set(zip(*pairs)[1])
if not free:
            return None
res += list(free)
pairs = filter(free.isdisjoint, pairs)
allItems -= free
return res + list(allItems)
print(preferenceList1([[1, 2, 3, 4], ['a', 'b', 'c', 'd'], ['a', 1, 8], [2, 'b', 'e'], [3, 'c']]))
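# A Python 3 friendly sketch of the same Kahn-style topological sort, assuming
# hashable items; added because zip(*pairs)[1] and filter(free.isdisjoint, pairs)
# above rely on Python 2 semantics (indexable zip, list-returning filter).
def preference_list_py3(pref_lists):
    successors = collections.defaultdict(set)   # item -> items that must come after it
    indegree = collections.defaultdict(int)      # item -> number of unmet predecessors
    items = set(itertools.chain(*pref_lists))
    for lis in pref_lists:
        for left, right in zip(lis, lis[1:]):
            if right not in successors[left]:
                successors[left].add(right)
                indegree[right] += 1
    queue = collections.deque(x for x in items if indegree[x] == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for nxt in successors[node]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    return order if len(order) == len(items) else None  # None when a cycle exists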
|
pattisdr/osf.io
|
api_tests/nodes/views/test_view_only_query_parameter.py
|
Python
|
apache-2.0
| 15,854
| 0.000442
|
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
PrivateLinkFactory,
)
from osf.utils import permissions
@pytest.fixture()
def admin():
return AuthUserFactory()
@pytest.fixture()
def base_url():
return '/{}nodes/'.format(API_BASE)
@pytest.fixture()
def read_contrib():
return AuthUserFactory()
@pytest.fixture()
def write_contrib():
return AuthUserFactory()
@pytest.fixture()
def valid_contributors(admin, read_contrib, write_contrib):
return [
admin._id,
read_contrib._id,
write_contrib._id,
]
@pytest.fixture()
def private_node_one(admin, read_contrib, write_contrib):
private_node_one = ProjectFactory(
is_public=False,
creator=admin,
title='Private One')
private_node_one.add_contributor(
read_contrib, permissions=[
permissions.READ], save=True)
private_node_one.add_contributor(
write_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
save=True)
return private_node_one
@pytest.fixture()
def private_node_one_anonymous_link(private_node_one):
private_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
private_node_one_anonymous_link.nodes.add(private_node_one)
private_node_one_anonymous_link.save()
return private_node_one_anonymous_link
@pytest.fixture()
def private_node_one_private_link(private_node_one):
private_node_one_private_link = PrivateLinkFactory(anonymous=False)
private_node_one_private_link.nodes.add(private_node_one)
private_node_one_private_link.save()
return private_node_one_private_link
@pytest.fixture()
def private_node_one_url(private_node_one):
return '/{}nodes/{}/'.format(API_BASE, private_node_one._id)
@pytest.fixture()
def private_node_two(admin, read_contrib, write_contrib):
private_node_two = ProjectFactory(
is_public=False,
creator=admin,
title='Private Two')
private_node_two.add_contributor(
read_contrib, permissions=[permissions.READ], save=True)
private_node_two.add_contributor(
write_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
save=True)
return private_node_two
@pytest.fixture()
def private_node_two_url(private_node_two):
return '/{}nodes/{}/'.format(API_BASE, private_node_two._id)
@pytest.fixture()
def public_node_one(admin, read_contrib, write_contrib):
public_node_one = ProjectFactory(
is_public=True, creator=admin, title='Public One')
public_node_one.add_contributor(
read_contrib, permissions=[permissions.READ], save=True)
public_node_one.add_contributor(
write_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
save=True)
return public_node_one
@pytest.fixture()
def public_node_one_anonymous_link(public_node_one):
public_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
public_node_one_anonymous_link.nodes.add(public_node_one)
public_node_one_anonymous_link.save()
return public_node_one_anonymous_link
@pytest.fixture()
def public_node_one_private_link(public_node_one):
public_node_one_private_link = PrivateLinkFactory(anonymous=False)
public_node_one_private_link.nodes.add(public_node_one)
public_node_one_private_link.save()
return public_node_one_private_link
@pytest.fixture()
def public_node_one_url(public_node_one):
return '/{}nodes/{}/'.format(API_BASE, public_node_one._id)
@pytest.fixture()
def public_node_two(admin, read_contrib, write_contrib):
public_node_two = ProjectFactory(
is_public=True, creator=admin, title='Public Two')
public_node_two.add_contributor(
read_contrib, permissions=[permissions.READ], save=True)
public_node_two.add_contributor(
write_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
save=True)
return public_node_two
@pytest.fixture()
def public_node_two_url(public_node_two):
return '/{}nodes/{}/'.format(API_BASE, public_node_two._id)
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
@pytest.mark.usefixtures(
'admin',
'read_contrib',
'write_contrib',
'valid_contributors',
'private_node_one',
'private_node_one_anonymous_link',
'private_node_one_private_link',
'private_node_one_url',
'private_node_two',
'private_node_two_url',
'public_node_one',
'public_node_one_anonymous_link',
'public_node_one_private_link',
'public_node_one_url',
'public_node_two',
'public_node_two_url')
class TestNodeDetailViewOnlyLinks:
def test_private_node(
self, app, admin, read_contrib, valid_contributors,
private_node_one, private_node_one_url,
private_node_one_private_link,
private_node_one_anonymous_link,
public_node_one_url,
public_node_one_private_link,
public_node_one_anonymous_link):
# test_private_node_with_link_works_when_using_link
res_normal = app.get(private_node_one_url, auth=read_contrib.auth)
assert res_normal.status_code == 200
res_linked = app.get(
private_node_one_url,
{'view_only': private_node_one_private_link.key})
assert res_linked.status_code == 200
assert res_linked.json['data']['attributes']['current_user_permissions'] == [
'read']
# Remove any keys that will be different for view-only responses
res_normal_json = res_normal.json
res_linked_json = res_linked.json
user_can_comment = res_normal_json['data']['attributes'].pop(
'current_user_can_comment')
view_only_can_comment = res_linked_json['data']['attributes'].pop(
'current_user_can_comment')
assert user_can_comment
assert not view_only_can_comment
# test_private_node_with_link_unauthorized_when_not_using_link
res = app.get(private_node_one_url, expect_errors=True)
assert res.status_code == 401
# test_private_node_with_link_anonymous_does_not_expose_contributor_id
res = app.get(private_node_one_url, {
'view_only': private_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
embeds = res.json['data'].get('embeds', None)
assert embeds is None or 'contributors' not in embeds
# test_private_node_with_link_non_anonymous_does_expose_contributor_id
res = app.get(private_node_one_url, {
'view_only': private_node_one_private_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
# test_private_node_logged_in_with_anonymous_link_does_not_expose_contributor_id
res = app.get(private_node_one_url, {
'view_only': private_node_one_private_link.key,
'embed': 'contributors',
}, auth=admin.auth)
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
        # test_public_node_with_link_anonymous_does_not_expose_user_id
res = app.get(public_node_one_url, {
'view_only': public_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
embeds = res.json['data'].get('embeds', None)
assert embeds is None or 'contributors' not in embeds
# test_public_node_with_link_non_anonymous_does_expose_contributor_id
res = app.get(public_node_one_url, {
'view_only': public_node_one_private_link.key,
'embed': 'contributors',
})
|
mosajjal/mitmproxy
|
test/mitmproxy/net/test_wsgi.py
|
Python
|
mit
| 3,186
| 0.000942
|
from io import BytesIO
import sys
from mitmproxy.net import wsgi
from mitmproxy.net.http import Headers
def tflow():
headers = Headers(test=b"value")
req = wsgi.Request("http", "GET", "/", "HTTP/1.1", headers, "")
return wsgi.Flow(("127.0.0.1", 8888), req)
class ExampleApp:
def __init__(self):
self.called = False
def __call__(self, environ, start_response):
self.called = True
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
return [b'Hello', b' world!\n']
class TestWSGI:
def test_make_environ(self):
w = wsgi.WSGIAdaptor(None, "foo", 80, "version")
tf = tflow()
assert w.make_environ(tf, None)
tf.request.path = "/foo?bar=voing"
r = w.make_environ(tf, None)
assert r["QUERY_STRING"] == "bar=voing"
def test_serve(self):
ta = ExampleApp()
w = wsgi.WSGIAdaptor(ta, "foo", 80, "version")
f = tflow()
f.request.host = "foo"
f.request.port = 80
wfile = BytesIO()
err = w.serve(f, wfile)
assert ta.called
assert not err
val = wfile.getvalue()
assert b"Hello world" in val
assert b"Server:" in val
def _serve(self, app):
w = wsgi.WSGIAdaptor(app, "foo", 80, "version")
f = tflow()
f.request.host = "foo"
f.request.port = 80
wfile = BytesIO()
w.serve(f, wfile)
return wfile.getvalue()
def test_serve_empty_body(self):
def app(environ, start_response):
status = '200 OK'
response_headers = [('Foo', 'bar')]
start_response(status, response_headers)
return []
assert self._serve(app)
def test_serve_double_start(self):
def app(environ, start_response):
try:
raise ValueError("foo")
except:
sys.exc_info()
status = '200 OK'
            response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
start_response(status, response_headers)
assert b"Internal Server Error" in self._serve(app)
def test_serve_single_err(self):
def app(environ, start_response):
try:
raise ValueError("foo")
except:
ei = sys.exc_info()
status = '200 OK'
            response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers, ei)
yield b""
assert b"Internal Server Error" in self._serve(app)
def test_serve_double_err(self):
def app(environ, start_response):
try:
raise ValueError("foo")
except:
ei = sys.exc_info()
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
yield b"aaa"
start_response(status, response_headers, ei)
yield b"bbb"
assert b"Internal Server Error" in self._serve(app)
|
mgrygoriev/CloudFerry
|
cloudferrylib/os/actions/transport_ephemeral.py
|
Python
|
apache-2.0
| 7,851
| 0
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import hashlib
import os
from fabric.api import env
from fabric.api import run
from fabric.api import settings
from oslo_config import cfg
from cloudferrylib.base.action import action
from cloudferrylib.os.actions import task_transfer
from cloudferrylib.utils.utils import forward_agent
from cloudferrylib.utils import utils as utl
from cloudferrylib.utils import qemu_img as qemu_img_util
CONF = cfg.CONF
CLOUD = 'cloud'
BACKEND = 'backend'
CEPH = 'ceph'
ISCSI = 'iscsi'
COMPUTE = 'compute'
INSTANCES = 'instances'
INSTANCE_BODY = 'instance'
INSTANCE = 'instance'
DIFF = 'diff'
EPHEMERAL = 'ephemeral'
DIFF_OLD = 'diff_old'
EPHEMERAL_OLD = 'ephemeral_old'
PATH_DST = 'path_dst'
HOST_DST = 'host_dst'
PATH_SRC = 'path_src'
HOST_SRC = 'host_src'
BACKING_FILE_DST = 'backing_file_dst'
TEMP = 'temp'
FLAVORS = 'flavors'
TRANSPORTER_MAP = {CEPH: {CEPH: 'SSHCephToCeph',
ISCSI: 'SSHCephToFile'},
ISCSI: {CEPH: 'SSHFileToCeph',
ISCSI: 'SSHFileToFile'}}
class TransportEphemeral(action.Action):
# TODO constants
def run(self, info=None, **kwargs):
info = copy.deepcopy(info)
# Init before run
new_info = {
utl.INSTANCES_TYPE: {
}
}
# Get next one instance
for instance_id, instance in info[utl.INSTANCES_TYPE].iteritems():
is_ephemeral = instance[utl.INSTANCE_BODY]['is_ephemeral']
one_instance = {
utl.INSTANCES_TYPE: {
instance_id: instance
}
}
if is_ephemeral:
self.copy_ephemeral(self.src_cloud,
self.dst_cloud,
one_instance)
new_info[utl.INSTANCES_TYPE].update(
one_instance[utl.INSTANCES_TYPE])
return {
'info': new_info
}
@staticmethod
def delete_remote_file_on_compute(path_file, host_cloud,
host_instance):
with settings(host_string=host_cloud,
connection_attempts=env.connection_attempts):
with forward_agent(env.key_filename):
run("ssh -oStrictHostKeyChecking=no %s 'rm -rf %s'" %
(host_instance, path_file))
def copy_data_via_ssh(self, src_cloud, dst_cloud, info, body, resources,
types):
dst_storage = dst_cloud.resources[resources]
src_compute = src_cloud.resources[resources]
src_backend = src_compute.config.compute.backend
dst_backend = dst_storage.config.compute.backend
ssh_driver = (CONF.migrate.copy_backend
if CONF.migrate.direct_compute_transfer
else TRANSPORTER_MAP[src_backend][dst_backend])
transporter = task_transfer.TaskTransfer(
self.init,
ssh_driver,
resource_name=types,
resource_root_name=body)
transporter.run(info=info)
def copy_ephemeral(self, src_cloud, dst_cloud, info):
dst_storage = dst_cloud.resources[utl.COMPUTE_RESOURCE]
src_compute = src_cloud.resources[utl.COMPUTE_RESOURCE]
src_backend = src_compute.config.compute.backend
dst_backend = dst_storage.config.compute.backend
if (src_backend == CEPH) and (dst_backend == ISCSI):
self.copy_ephemeral_ceph_to_iscsi(src_cloud, dst_cloud, info)
elif (src_backend == ISCSI) and (dst_backend == CEPH):
self.copy_ephemeral_iscsi_to_ceph(src_cloud, info)
else:
self.copy_data_via_ssh(src_cloud,
dst_cloud,
info,
utl.EPHEMERAL_BODY,
utl.COMPUTE_RESOURCE,
utl.INSTANCES_TYPE)
self.rebase_diff(dst_cloud, info)
def copy_ephemeral_ceph_to_iscsi(self, src_cloud, dst_cloud, info):
transporter = task_transfer.TaskTransfer(
self.init,
TRANSPORTER_MAP[ISCSI][ISCSI],
resource_name=utl.INSTANCES_TYPE,
resource_root_name=utl.EPHEMERAL_BODY)
instances = info[utl.INSTANCES_TYPE]
temp_src = src_cloud.cloud_config.cloud.temp
host_dst = dst_cloud.cloud_config.cloud.ssh_host
qemu_img_dst = dst_cloud.qemu_img
qemu_img_src = src_cloud.qemu_img
temp_path_src = temp_src + "/%s" + utl.DISK_EPHEM
for inst_id, inst in instances.iteritems():
path_src_id_temp = temp_path_src % inst_id
host_compute_dst = inst[EPHEMERAL][HOST_DST]
inst[EPHEMERAL][
BACKING_FILE_DST] = qemu_img_dst.detect_backing_file(
inst[EPHEMERAL][PATH_DST], host_compute_dst)
self.delete_remote_file_on_compute(inst[EPHEMERAL][PATH_DST],
host_dst,
host_compute_dst)
qemu_img_src.convert(
utl.QCOW2,
'rbd:%s' % inst[EPHEMERAL][PATH_SRC], path_src_id_temp)
inst[EPHEMERAL][PATH_SRC] = path_src_id_temp
transporter.run(info=info)
for inst_id, inst in instances.iteritems():
host_compute_dst = inst[EPHEMERAL][HOST_DST]
qemu_img_dst.diff_rebase(inst[EPHEMERAL][BACKING_FILE_DST],
inst[EPHEMERAL][PATH_DST],
host_compute_dst)
def copy_ephemeral_iscsi_to_ceph(self, src_cloud, info):
instances = info[utl.INSTANCES_TYPE]
qemu_img_src = src_cloud.qemu_img
transporter = task_transfer.TaskTransfer(
self.init,
TRANSPORTER_MAP[ISCSI][CEPH],
resource_name=utl.INSTANCES_TYPE,
resource_root_name=utl.EPHEMERAL_BODY)
for inst_id, inst in instances.iteritems():
path_src = inst[EPHEMERAL][PATH_SRC]
path_src_temp_raw = path_src + "." + utl.RAW
host_src = inst[EPHEMERAL][HOST_SRC]
qemu_img_src.convert(utl.RAW,
path_src,
path_src_temp_raw,
host_src)
inst[EPHEMERAL][PATH_SRC] = path_src_temp_raw
transporter.run(info=info)
@staticmethod
def rebase_diff(dst_cloud, info):
for instance_id, obj in info[utl.INSTANCES_TYPE].items():
image_id = obj['instance']['image_id']
new_backing_file = hashlib.sha1(image_id).hexdigest()
diff = obj['diff']
host = diff['host_dst']
qemu_img = qemu_img_util.QemuImg(dst_cloud.config.dst,
dst_cloud.config.migrate,
host)
diff_path = diff['path_dst']
backing_path = qemu_img.detect_backing_file(diff_path, None)
backing_dir = os.path.dirname(backing_path)
new_backing_path = os.path.join(backing_dir, new_backing_file)
qemu_img.diff_rebase(new_backing_path, diff_path)
|
rexthompson/axwx
|
axwx/wu_metadata_scraping.py
|
Python
|
mit
| 5,613
| 0
|
"""
Weather Underground PWS Metadata Scraping Module
Code to scrape PWS network metadata
"""
import pandas as pd
import urllib3
from bs4 import BeautifulSoup as BS
import numpy as np
import requests
# import time
def scrape_station_info(state="WA"):
"""
A script to scrape the station information published at the following URL:
https://www.wunderground.com/weatherstation/ListStations.asp?
selectedState=WA&selectedCountry=United+States&MR=1
:param state: US State by which to subset WU Station table
:return: numpy array with station info
"""
url = "https://www.wunderground.com/" \
"weatherstation/ListStations.asp?selectedState=" \
+ state + "&selectedCountry=United+States&MR=1"
raw_site_content = requests.get(url).content
soup = BS(raw_site_content, 'html.parser')
list_stations_info = soup.find_all("tr")
all_station_info = np.array(['id', 'neighborhood', 'city', 'type', 'lat',
'lon', 'elevation'])
for i in range(1, len(list_stations_info)): # start at 1 to omit headers
station_info = str(list_stations_info[i]).splitlines()
# pull out station info
station_id = station_info[1].split('ID=')[1].split('"')[0]
station_neighborhood = station_info[2].split('<td>')[1]
station_neighborhood = station_neighborhood.split('\xa0')[0]
station_city = station_info[3].split('<td>')[1].split('\xa0')[0]
station_type = station_info[4].split('station-type">')[1]
station_type = station_type.split('\xa0')[0]
station_id = station_id.strip()
station_neighborhood = station_neighborhood.strip()
station_city = station_city.strip()
station_type = station_type.strip()
# grab the latitude, longitude, and elevation metadata
lat, lon, elev = scrape_lat_lon_fly(station_id)
# put all data into an array
header = [station_id, station_neighborhood, station_city, station_type,
lat, lon, elev]
head_len = len(header)
all_station_info = np.vstack([all_station_info, header])
all_station_info = pd.DataFrame(all_station_info)
all_station_info.columns = all_station_info.ix[0, :]
# do some dataframe editing
all_station_info = all_station_info.drop(all_station_info
.index[0]).reset_index()
all_station_info = all_station_info.drop(all_station_info.columns[0],
axis=1)
return(all_station_info.to_csv('./data/station_data_from_FUN.csv'))
def scrape_lat_lon_fly(stationID):
"""
Add latitude, longitude and elevation data to the stationID that is
inputted as the argument to the function. Boom.
:param stationID: str
a unique identifier for the weather underground personal
weather station
:return: (latitude,longitude,elevation) as a tuple. Double Boom.
"""
http = urllib3.PoolManager(maxsize=10, block=True,
cert_reqs='CERT_REQUIRED')
try:
url = 'https://api.wunderground.com/weatherstation/' \
'WXDailyHistory.asp?ID={0}&format=XML'.format(stationID)
r = http.request('GET', url, preload_content=False)
soup = BS(r, 'xml')
        lat = soup.find_all('latitude')[0].get_text()
long = soup.find_all('longitude')[0].get_text()
elev = soup.find_all('elevation')[0].get_text()
return(lat, long, elev)
except Exception as err:
lat = 'NA'
long = 'NA'
elev = 'NA'
return(lat, long, elev)
def subset_stations_by_coords(station_data, lat_range, lon_range):
"""
Subset station metadata by latitude and longitude
:param station_data_csv: str or Pandas.DataFrame
filename of csv with station metadata (from scrape_lat_lon)
or
Pandas.DataFrame with station metadata (from scrape_lat_lon)
:param lat_range: 2-element list
min and max latitude range, e.g. [47.4, 47.8]
:param lon_range: 2-element list
min and max longitude range, e.g. [-122.5, -122.2]
:return: pandas.DataFrame with station metadata subset by lat/lon bounds
"""
lat_range.sort()
lon_range.sort()
if isinstance(station_data, str):
df = pd.read_csv(station_data, index_col=1)
df = df.dropna(subset=["Latitude", "Longitude"])
elif isinstance(station_data, pd.DataFrame):
df = station_data
else:
pass
# TODO: add exception here if type not supported
df = df[(df["Latitude"] >= lat_range[0]) &
(df["Latitude"] <= lat_range[1]) &
(df["Longitude"] >= lon_range[0]) &
(df["Longitude"] <= lon_range[1])]
return df
def get_station_ids_by_coords(station_data_csv, lat_range, lon_range):
"""
Wrapper around subset_stations_by_coords; returns just the IDs of the
stations in a box
:param station_data_csv: str
filename of csv with station metadata (from scrape_lat_lon)
:param lat_range: 2-element list
min and max latitude range, e.g. [47.4, 47.8]
:param lon_range: 2-element list
min and max longitude range, e.g. [-122.5, -122.2]
:return: list of station IDs (strings)
"""
df = subset_stations_by_coords(station_data_csv, lat_range, lon_range)
return list(df.index)
# TESTING
# station_data_csv = "data/station_data.csv"
# lat_range = [47.4, 47.8]
# lon_range = [-122.5, -122.2]
# print(get_station_ids_by_coords(station_data_csv, lat_range, lon_range))
|
nophead/Skeinforge50plus
|
skeinforge_application/skeinforge_plugins/craft_plugins/export.py
|
Python
|
agpl-3.0
| 20,837
| 0.017853
|
"""
This page is in the table of contents.
Export is a craft tool to pick an export plugin, add information to the file name, and delete comments.
The export manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Export
==Operation==
The default 'Activate Export' checkbox is on. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Add Descriptive Extension===
Default is off.
When selected, key profile values will be added as an extension to the gcode file. For example:
test.04hx06w_03fill_2cx2r_33EL.gcode
would mean:
* . (Carve section.)
* 04h = 'Layer Height (mm):' 0.4
* x
* 06w = 0.6 width i.e. 0.4 times 'Edge Width over Height (ratio):' 1.5
* _ (Fill section.)
* 03fill = 'Infill Solidity (ratio):' 0.3
* _ (Multiply section; if there is one column and one row then this section is not shown.)
* 2c = 'Number of Columns (integer):' 2
* x
* 2r = 'Number of Rows (integer):' 2.
* _ (Speed section.)
* 33EL = 'Feed Rate (mm/s):' 33.0 and 'Flow Rate Setting (float):' 33.0. If either value has a positive value after the decimal place then this is also shown, but if it is zero it is hidden. Also, if the values differ (which they shouldn't with 5D volumetrics) then each should be displayed separately. For example, 35.2E30L = 'Feed Rate (mm/s):' 35.2 and 'Flow Rate Setting (float):' 30.0.
===Add Profile Extension===
Default is off.
When selected, the current profile will be added to the file extension. For example:
test.my_profile_name.gcode
===Add Timestamp Extension===
Default is off.
When selected, the current date and time is added as an extension in format YYYYmmdd_HHMMSS (so it is sortable if one has many files). For example:
test.my_profile_name.20110613_220113.gcode
===Also Send Output To===
Default is empty.
Defines the output name for sending to a file or pipe. A common choice is stdout to print the output in the shell screen. Another common choice is stderr. With the empty default, nothing will be done. If the value is anything else, the output will be written to that file name.
===Analyze Gcode===
Default is on.
When selected, the penultimate gcode will be sent to the analyze plugins to be analyzed and viewed.
===Comment Choice===
Default is 'Delete All Comments'.
====Do Not Delete Comments====
When selected, export will not delete comments. Crafting comments slow down the processing in many firmware types, which leads to pauses and therefore a lower quality print.
====Delete Crafting Comments====
When selected, export will delete the time consuming crafting comments, but leave the initialization comments. Since the crafting comments are deleted, there are no pauses during extrusion. The remaining initialization comments provide some useful information for the analyze tools.
====Delete All Comments====
When selected, export will delete all comments. The comments are not necessary to run a fabricator. Some printers do not support comments at all so the safest way is choose this option.
===Export Operations===
Export presents the user with a choice of the export plugins in the export_plugins folder. The chosen plugin will then modify the gcode or translate it into another format. There is also the "Do Not Change Output" choice, which will not change the output. An export plugin is a script in the export_plugins folder which has the getOutput function, the globalIsReplaceable variable and if it's output is not replaceable, the writeOutput function.
===File Extension===
Default is gcode.
Defines the file extension added to the name of the output file. The output file will be named as originalname_export.extension so if you are processing XYZ.stl the output will by default be XYZ_export.gcode
===Name of Replace File===
Default is replace.csv.
When export is exporting the code, if there is a tab separated file with the name of the "Name of Replace File" setting, it will replace the string in the first column by its replacement in the second column. If there is nothing in the second column, the first column string will be deleted, if this leads to an empty line, the line will be deleted. If there are replacement columns after the second, they will be added as extra lines of text. There is an example file replace_example.csv to demonstrate the tab separated format, which can be edited in a text editor or a spreadsheet.
Export looks for the alteration file in the alterations folder in the .skeinforge folder in the home directory. Export does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder.
===Save Penultimate Gcode===
Default is off.
When selected, export will save the gcode file with the suffix '_penultimate.gcode' just before it is exported. This is useful because the code after it is exported could be in a form which the viewers can not display well.
==Examples==
The following examples export the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and export.py.
> python export.py
This brings up the export dialog.
> python export.py Screw Holder Bottom.stl
The export tool is parsing the file:
Screw Holder Bottom.stl
..
The export tool has created the file:
.. Screw Holder Bottom_export.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_analyze
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import cStringIO
import os
import sys
import time
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Gary Hodgson <http://garyhodgson.com/reprap/2011/06/hacking-skeinforge-export-module/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedTextFromText(gcodeText, repository=None):
'Export a gcode linear move text.'
	if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'export'):
return gcodeText
if repository == None:
repository = settings.getReadRepository(ExportRepository())
if not repository.activateExport.value:
return gcodeText
return ExportSkein().getCraftedGcode(repository, gcodeText)
def getDescriptionCarve(lines):
'Get the description for carve.'
descriptionCarve = ''
layerThicknessString = getSettingString(lines, 'carve', 'Layer Height')
	if layerThicknessString != None:
descriptionCarve += layerThicknessString.replace('.', '') + 'h'
edgeWidthString = getSettingString(lines, 'carve', 'Edge Width over Height')
if edgeWidthString != None:
descriptionCarve += 'x%sw' % str(float(edgeWidthString) * float(layerThicknessString)).replace('.', '')
return descriptionCarve
def getDescriptionFill(lines):
'Get the description for fill.'
activateFillString = getSettingString(lines, 'fill', 'Activate Fill')
if activateFillString == None or activateFillString == 'False':
return ''
infillSolidityString = getSettingString(lines, 'fill', 'Infill Solidity')
return '_' + infillSolidityString.replace('.', '') + 'fill'
def getDescriptionMultiply(lines):
'Get the description for multiply.'
activateMultiplyString = getSettingString(lines, 'multiply', 'Activate Multiply')
if activateMultiplyString == None or activateMultiplyString == 'False':
return ''
columnsString = getSettingString(lines, 'multipl
|
ModernMT/MMT
|
cli/utils/osutils.py
|
Python
|
apache-2.0
| 2,402
| 0.001665
|
import logging
import os
import shutil
import subprocess
DEVNULL = open(os.devnull, 'wb')
class ShellError(Exception):
def __init__(self, command, err_no, message=None):
self.command = command
self.errno = err_no
self.message = message
def __str__(self):
string = "Command '%s' failed with exit code %d" % (self.command, self.errno)
if self.message is not None:
            string += ': ' + repr(self.message)
return string
def __repr__(self):
return self.__str__()
def shell_exec(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, background=False, env=None):
str_cmd = cmd if isinstance(cmd, str) else ' '.join(cmd)
    logging.getLogger('shell_exec').debug(str_cmd)
message = None
if background:
if stdout == subprocess.PIPE:
stdout = DEVNULL
if stderr == subprocess.PIPE:
stderr = DEVNULL
elif stdin is not None and isinstance(stdin, str):
message = stdin
stdin = subprocess.PIPE
process = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr, shell=isinstance(cmd, str), env=env)
stdout_dump = None
stderr_dump = None
return_code = 0
if message is not None or stdout == subprocess.PIPE or stderr == subprocess.PIPE:
stdout_dump, stderr_dump = process.communicate(message)
return_code = process.returncode
elif not background:
return_code = process.wait()
if background:
return process
else:
if stdout_dump is not None:
stdout_dump = stdout_dump.decode('utf-8')
if stderr_dump is not None:
stderr_dump = stderr_dump.decode('utf-8')
if return_code != 0:
raise ShellError(str_cmd, return_code, stderr_dump)
else:
return stdout_dump, stderr_dump
def mem_size(megabytes=True):
mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
return mem_bytes / (1024. ** 2) if megabytes else mem_bytes
def lc(filename):
with open(filename) as stream:
count = 0
for _ in stream:
count += 1
return count
def cat(files, output, buffer_size=10 * 1024 * 1024):
with open(output, 'wb') as blob:
for f in files:
with open(f, 'rb') as source:
shutil.copyfileobj(source, blob, buffer_size)
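# Example (hypothetical): a minimal sketch of the two shell_exec modes defined
# above; the commands are placeholders.
def _example_shell_exec():
    # Foreground call: returns decoded stdout/stderr once the command exits,
    # or raises ShellError on a non-zero exit code.
    out, err = shell_exec(['echo', 'hello'])
    # Background call: returns the Popen handle immediately.
    proc = shell_exec(['sleep', '5'], background=True)
    proc.terminate()
    return out, err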
|
rezoo/chainer
|
chainer/optimizers/rmsprop.py
|
Python
|
mit
| 4,921
| 0
|
import numpy
from chainer.backends import cuda
from chainer import optimizer
_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.lr = 0.01
_default_hyperparam.alpha = 0.99
_default_hyperparam.eps = 1e-8
_default_hyperparam.eps_inside_sqrt = False
class RMSpropRule(optimizer.UpdateRule):
"""Update rule for RMSprop.
See :class:`~chainer.optimizers.RMSprop` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
alpha (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eps_inside_sqrt (bool): When ``True``, gradient will be divided by
:math:`\\sqrt{ms + eps}` where ``ms`` is the mean square. When
``False`` (default), gradient will be divided by
:math:`\\sqrt{ms} + eps` instead.
This option may be convenient for users porting code from other
frameworks;
see `#4754 <https://github.com/chainer/chainer/issues/4754>`__ for
details.
"""
def __init__(self, parent_hyperparam=None, lr=None, alpha=None, eps=None,
eps_inside_sqrt=None):
super(RMSpropRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
            self.hyperparam.lr = lr
if alpha is not None:
self.hyperparam.alpha = alpha
if eps is not None:
self.hyperparam.eps = eps
if eps_inside_sqrt is not None:
self.hyperparam.eps_inside_sqrt = eps_inside_sqrt
def init_state(self, param):
xp = cuda.get_array_module(param.data)
with cuda.get_device_from_array(param.data):
self.state['ms'] = xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
eps = grad.dtype.type(hp.eps)
if hp.eps != 0 and eps == 0:
raise ValueError(
'eps of RMSprop optimizer is too small for {} ({})'.format(
grad.dtype.name, hp.eps))
ms = self.state['ms']
ms *= hp.alpha
ms += (1 - hp.alpha) * grad * grad
if hp.eps_inside_sqrt:
denom = numpy.sqrt(ms + eps)
else:
denom = numpy.sqrt(ms) + eps
param.data -= hp.lr * grad / denom
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
eps = grad.dtype.type(hp.eps)
if eps == 0:
raise ValueError(
'eps of RMSprop optimizer is too small for {} ({})'.format(
grad.dtype.name, hp.eps))
if hp.eps_inside_sqrt:
denom = 'sqrt(ms + eps)'
else:
denom = 'sqrt(ms) + eps'
kernel = cuda.elementwise(
'T grad, T lr, T alpha, T eps',
'T param, T ms',
'''ms = alpha * ms + (1 - alpha) * grad * grad;
param -= lr * grad / ({});'''.format(denom),
'rmsprop')
kernel(grad, self.hyperparam.lr, self.hyperparam.alpha,
eps, param.data, self.state['ms'])
class RMSprop(optimizer.GradientMethod):
"""RMSprop optimizer.
See: T. Tieleman and G. Hinton (2012). Lecture 6.5 - rmsprop, COURSERA:
Neural Networks for Machine Learning.
Args:
lr (float): Learning rate.
alpha (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eps_inside_sqrt (bool): When ``True``, gradient will be divided by
:math:`\\sqrt{ms + eps}` where ``ms`` is the mean square. When
``False`` (default), gradient will be divided by
:math:`\\sqrt{ms} + eps` instead.
This option may be convenient for users porting code from other
frameworks;
see `#4754 <https://github.com/chainer/chainer/issues/4754>`__ for
details.
"""
def __init__(self, lr=_default_hyperparam.lr,
alpha=_default_hyperparam.alpha, eps=_default_hyperparam.eps,
eps_inside_sqrt=_default_hyperparam.eps_inside_sqrt):
super(RMSprop, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.alpha = alpha
self.hyperparam.eps = eps
self.hyperparam.eps_inside_sqrt = eps_inside_sqrt
lr = optimizer.HyperparameterProxy('lr')
alpha = optimizer.HyperparameterProxy('alpha')
eps = optimizer.HyperparameterProxy('eps')
eps_inside_sqrt = optimizer.HyperparameterProxy('eps_inside_sqrt')
def create_update_rule(self):
return RMSpropRule(self.hyperparam)
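# Example (hypothetical): a minimal sketch of attaching the optimizer to a
# link; ``model`` is assumed to be a chainer.Link, and the hyperparameter
# values simply restate the documented defaults for illustration.
def _example_setup(model):
    opt = RMSprop(lr=0.01, alpha=0.99, eps=1e-8)
    opt.setup(model)
    return opt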
|
TheWardoctor/Wardoctors-repo
|
script.module.urlresolver/lib/urlresolver/plugins/tudou.py
|
Python
|
apache-2.0
| 2,082
| 0.004803
|
"""
Kodi urlresolver plugin
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class TudouResolver(UrlResolver):
name = 'Tudou'
domains = ['tudou.com']
pattern = '(?://|\.)(tudou\.com)/programs/view/([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
swf = re.findall('(http.+?\.swf)', html)[0]
sid = re.findall('areaCode\s*:\s*"(\d+)', html)[0]
oid = re.findall('"k"\s*:\s*(\d+)', html)[0]
f_url = 'http://v2.tudou.com/f?id=%s&sid=%s&hd=3&sj=1' % (oid, sid)
headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': swf}
html = self.net.http_GET(f_url, headers=headers).content
url = re.findall('>(http.+?)<', html)[0]
url = url.replace('&', '&')
video = self.net.http_HEAD(url, headers=headers).get_headers()
video = [i for i in video if 'video' in i]
if not video:
raise ResolverError('File not found')
url += '|%s' % urllib.urlencode(headers)
return url
raise ResolverError('Unable to locate link')
def get_url(self, host, media_id):
return 'http://www.tudou.com/programs/view/%s/' % media_id
|
zonca/pycfitsio
|
pycfitsio/__init__.py
|
Python
|
gpl-3.0
| 234
| 0.012821
|
import warnings
from .file import File, open, read, create, write, CfitsioError
try:
from healpix import read_map, read_mask
except:
    warnings.warn('Cannot import read_map and read_mask if healpy is not installed')
pass
|
pwil3058/darning
|
darning/cli/subcmd_select.py
|
Python
|
gpl-2.0
| 1,953
| 0.00768
|
### Copyright (C) 2010 Peter Williams <peter_ono@users.sourceforge.net>
###
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; version 2 of the License only.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Select/display which patch guards are in force."""
import sys
from . import cli_args
from . import db_utils
from . import msg
PARSER = cli_args.SUB_CMD_PARSER.add_parser(
"select",
description=_("Display/select which patch guards are in force."),
epilog=_("""When invoked with no arguments the currently selected guards are listed."""),
)
GROUP = PARSER.add_mutually_exclusive_group()
GROUP.add_argument(
"-n", "--none",
help=_("Disable all guards."),
dest="opt_none",
action="store_true",
)
GROUP.add_argument(
"-s", "--set",
help=_("the list of guards to be enabled/selected."),
dest="guards",
metavar="guard",
action="append",
)
def run_select(args):
"""Execute the "select" sub command using the supplied args"""
PM = db_utils.get_pm_db()
db_utils.set_report_context(verbose=True)
if args.opt_none:
return PM.do_select_guards(None)
elif args.guards:
return PM.do_select_guards(args.guards)
else:
selected_guards = PM.get_selected_guards()
for guard in sorted(selected_guards):
sys.stdout.write(guard + "\n")
return 0
PARSER.set_defaults(run_cmd=run_select)
|
mahak/neutron
|
neutron/privileged/agent/linux/__init__.py
|
Python
|
apache-2.0
| 1,208
| 0
|
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ctypes
from ctypes import util as ctypes_util
_CDLL = None
def get_cdll():
global _CDLL
if not _CDLL:
# NOTE(ralonsoh): from https://docs.python.org/3.6/library/
# ctypes.html#ctypes.PyDLL: "Instances of this class behave like CDLL
# instances, except that the Python GIL is not released during the
# function call, and after the function execution the Python error
# flag is checked."
# Check https://bugs.launchpad.net/neutron/+bug/1870352
_CDLL = ctypes.PyDLL(ctypes_util.find_library('c'), use_errno=True)
return _CDLL
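# Example (hypothetical): a minimal sketch of calling into libc through the
# cached handle; getpid() is used only because it takes no arguments, and
# use_errno=True above lets callers inspect ctypes.get_errno() after a failure.
def _example_libc_getpid():
    libc = get_cdll()
    return libc.getpid()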
|
asedunov/intellij-community
|
python/testData/refactoring/unwrap/tryUnwrap_before.py
|
Python
|
apache-2.0
| 61
| 0.081967
|
try:
#comment
    x = 1<caret>
y = 2
except:
pass
|
|
polyanskiy/refractiveindex.info-scripts
|
scripts/Djurisic 1999 - Graphite-o.py
|
Python
|
gpl-3.0
| 2,735
| 0.025077
|
# -*- coding: utf-8 -*-
# Author: Mikhail Polyanskiy
# Last modified: 2017-04-02
# Original data: Djurišić and Li 1999, https://doi.org/10.1063/1.369370
import numpy as np
import matplotlib.pyplot as plt
# LD model parameters - Normal polarization (ordinary)
ωp = 27
εinf = 1.070
f0 = 0.014
Γ0 = 6.365
ω0 = 0
α0 = 0
f1 = 0.073
Γ1 = 4.102
ω1 = 0.275
α1 = 0.505
f2 = 0.056
Γ2 = 7.328
ω2 = 3.508
α2 = 7.079
f3 = 0.069
Γ3 = 1.414
ω3 = 4.451
α3 = 0.362
f4 = 0.005
Γ4 = 0.46 # 0.046 in the original paper!
ω4 = 13.591
α4 = 7.426
f5 = 0.262
Γ5 = 1.862
ω5 = 14.226
α5 = 3.82e-4
f6 = 0.460
Γ6 = 11.922
ω6 = 15.550
α6 = 1.387
f7 = 0.200
Γ7 = 39.091
ω7 = 32.011
α7 = 28.963
def LD(ω):
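    # Lorentz-Drude-type oscillator sum with a Gaussian frequency-dependent
    # damping, evaluated term by term below:
    #   eps(w) = eps_inf - sum_j f_j * wp**2 / ((w**2 - w_j**2) + 1j*w*Gamma_j(w))
    #   Gamma_j(w) = Gamma_j * exp(-alpha_j * ((w - w_j) / Gamma_j)**2)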
ε = εinf;
Γ = Γ0*np.exp(-α0*((ω-ω0)/Γ0)**2)
ε -= f0*ωp**2 / ((ω**2-ω0**2)+1j*ω*Γ)
Γ = Γ1*np.exp(-α1*((ω-ω1)/Γ1)**2)
ε -= f1*ωp**2 / ((ω**2-ω1**2)+1j*ω*Γ)
Γ = Γ2*np.exp(-α2*((ω-ω2)/Γ2)**2)
ε -= f2*ωp**2 / ((ω**2-ω2**2)+1j*ω*Γ)
Γ = Γ3*np.exp(-α3*((ω-ω3)/Γ3)**2)
ε -= f3*ωp**2 / ((ω**2-ω3**2)+1j*ω*Γ)
Γ = Γ4*np.exp(-α4*((ω-ω4)/Γ4)**2)
ε -= f4*ωp**2 / ((ω**2-ω4**2)+1j*ω*Γ)
Γ = Γ5*np.exp(-α5*((ω-ω5)/Γ5)**2)
ε -= f5*ωp**2 / ((ω**2-ω5**2)+1j*ω*Γ)
Γ = Γ6*np.exp(-α6*((ω-ω6)/Γ6)**2)
ε -= f6*ωp**2 / ((ω**2-ω6**2)+1j*ω*Γ)
Γ = Γ7*np.exp(-α7*((ω-ω7)/Γ7)**2)
ε -= f7*ωp**2 / ((ω**2-ω7**2)+1j*ω*Γ)
return ε
ev_min=0.12
ev_max=40
npoints=1000
eV = np.linspace(ev_min, ev_max, npoints)
μm = 4.13566733e-1*2.99792458/eV
ε = LD(eV)
n = (ε**.5).real
k = (ε**.5).imag
#============================ DATA OUTPUT =================================
file = open('out.txt', 'w')
for i in range(npoints-1, -1, -1):
file.write('\n {:.4e} {:.4e} {:.4e}'.format(μm[i],n[i],k[i]))
file.close()
#=============================== PLOT =====================================
plt.rc('font', family='Arial', size='14')
#plot ε vs eV
plt.figure(1)
plt.plot(eV, ε.real, label="ε1")
plt.plot(eV, ε.imag, label="ε2")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs eV
plt.figure(2)
plt.plot(eV, n, label="n")
plt.plot(eV, k, label="k")
plt.xlabel('Photon energy (eV)')
plt.ylabel('n, k')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs μm
plt.figure(3)
plt.plot(μm, n, label="n")
plt.plot(μm, k, label="k")
plt.xlabel('Wavelength (μm)')
plt.ylabel('n, k')
plt.xscale('log')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
|
alazo/ase
|
pec/migrations/0007_auto_20170601_1557.py
|
Python
|
agpl-3.0
| 653
| 0.001531
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-01 15:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pec', '0006_auto_20170601_0719'),
]
operations = [
migrations.AddField(
model_name='cours',
name='type',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='cours',
name='objectifs_evaluateurs',
field=models.ManyToManyField(blank=True, to='pec.ObjectifEvaluateur'),
),
]
|
drtyrsa/django-cached-modelforms
|
cached_modelforms/tests/test_fields.py
|
Python
|
bsd-2-clause
| 6,699
| 0.001941
|
# -*- coding:utf-8 -*-
from django import forms
try:
from django.utils.encoding import smart_unicode as smart_text
except ImportError:
from django.utils.encoding import smart_text
from cached_modelforms.tests.utils import SettingsTestCase
from cached_modelforms.tests.models import SimpleModel
from cached_modelforms import (
CachedModelChoiceField, CachedModelMultipleChoiceField)
class TestFields(SettingsTestCase):
def setUp(self):
self.settings_manager.set(INSTALLED_APPS=('cached_modelforms.tests',))
self.obj1 = SimpleModel.objects.create(name='name1')
self.obj2 = SimpleModel.objects.create(name='name2')
self.obj3 = SimpleModel.objects.create(name='name3')
self.cached_list = [self.obj1, self.obj2, self.obj3]
class FormSingle(forms.Form):
obj = CachedModelChoiceField(
objects=lambda:self.cached_list,
required=False
)
class FormMultiple(forms.Form):
obj = CachedModelMultipleChoiceField(
objects=lambda:self.cached_list,
required=False
)
self.FormSingle = FormSingle
self.FormMultiple = FormMultiple
def test_modelchoicefield_objects_arg(self):
'''
Test, how the field accepts different types of ``objects`` argument.
'''
as_list = CachedModelChoiceField(objects=lambda:self.cached_list)
as_iterable = CachedModelChoiceField(
objects=lambda:iter(self.cached_list)
)
list_of_tuples = [(x.pk, x) for x in self.cached_list]
as_list_of_tuples = CachedModelChoiceField(
objects=lambda:list_of_tuples
)
as_dict = CachedModelChoiceField(objects=lambda:dict(list_of_tuples))
choices_without_empty_label = as_list.choices[:]
if as_list.empty_label is not None:
choices_without_empty_label.pop(0)
        # make sure all of the ``choices`` attrs are the same
self.assertTrue(
as_list.choices ==
as_iterable.choices ==
as_list_of_tuples.choices ==
as_dict.choices
)
# same for ``objects``
self.assertTrue(
as_list.objects ==
as_iterable.objects ==
as_list_of_tuples.objects ==
as_dict.objects
)
        # ``objects`` should be a dict as ``{smart_text(pk1): obj1, ...}``
self.assertEqual(
set(as_list.objects.keys()),
set(smart_text(x.pk) for x in self.cached_list)
)
self.assertEqual(set(as_list.objects.values()), set(self.cached_list))
# ``choices`` should be a list as ``[(smart_text(pk1), smart_text(obj1)), ...]``
self.assertEqual(
choices_without_empty_label,
[(smart_text(x.pk), smart_text(x)) for x in self.cached_list]
)
def test_modelmultiplechoicefield_objects_arg(self):
'''
Test, how the field accepts different types of ``objects`` argument.
'''
as_list = CachedModelMultipleChoiceField(
objects=lambda:self.cached_list
)
as_iterable = CachedModelMultipleChoiceField(
objects=lambda:iter(self.cached_list)
)
list_of_tuples = [(x.pk, x) for x in self.cached_list]
as_list_of_tuples = CachedModelMultipleChoiceField(
objects=lambda:list_of_tuples
)
as_dict = CachedModelMultipleChoiceField(objects=dict(list_of_tuples))
# make sure all of the ``choices`` attrs are the same
self.assertTrue(
as_list.choices ==
as_iterable.choices ==
as_list_of_tuples.choices ==
as_dict.choices)
# same for ``objects``
self.assertTrue(
as_list.objects ==
as_iterable.objects ==
as_list_of_tuples.objects ==
as_dict.objects)
# ``objects`` should be a dict as ``{smart_text(pk1): obj1, ...}``
self.assertEqual(
set(as_list.objects.keys()),
set(smart_text(x.pk) for x in self.cached_list)
)
self.assertEqual(set(as_list.objects.values()), set(self.cached_list))
# ``choices`` should be a list as ``[(smart_text(pk1), smart_text(obj1)), ...]``
self.assertEqual(
as_list.choices,
[(smart_text(x.pk), smart_text(x)) for x in self.cached_list]
)
def test_modelchoicefield_behavior(self):
'''
Test, how the field handles data in form.
'''
# some value
form = self.FormSingle({'obj': smart_text(self.obj1.pk)})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['obj'], self.obj1)
# no value
form = self.FormSingle({})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['obj'], None)
# invalid value
form = self.FormSingle({'obj': '-1'})
self.assertFalse(form.is_valid())
self.assertTrue(form._errors['obj'])
def test_modelmultiplechoicefield_behavior(self):
'''
Test, how the field handles data in form.
'''
# some value
form = self.FormMultiple({'obj': [smart_text(self.obj1.pk), smart_text(self.obj2.pk)]})
self.assertTrue(form.is_valid())
self.assertEqual(set(form.cleaned_data['obj']), set([self.obj1, self.obj2]))
# no value
form = self.FormMultiple({})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['obj'], [])
# invalid value
form = self.FormMultiple({'obj': [smart_text(self.obj1.pk), '-1']})
self.assertFalse(form.is_valid())
self.assertTrue(form._errors['obj'])
# invalid list
form = self.FormMultiple({'obj': '-1'})
self.assertFalse(form.is_valid())
self.assertTrue(form._errors['obj'])
def test_modelchoicefield_objects_assignment(self):
field = CachedModelChoiceField(objects=self.cached_list)
field2 = CachedModelChoiceField(objects=self.cached_list[:2])
field.objects = self.cached_list[:2]
self.assertEqual(field.objects, field2.objects)
self.assertEqual(field.choices, field2.choices)
def test_modelmultiplechoicefield_objects_assignment(self):
field = CachedModelMultipleChoiceField(objects=self.cached_list)
field2 = CachedModelMultipleChoiceField(objects=self.cached_list[:2])
field.objects = self.cached_list[:2]
self.assertEqual(field.objects, field2.objects)
self.assertEqual(field.choices, field2.choices)
|
gmist/gae-de-init
|
main/apps/user/views.py
|
Python
|
mit
| 7,664
| 0.010569
|
# coding: utf-8
import copy
from google.appengine.ext import ndb
import flask
from apps import auth
from apps.auth import helpers
from core import task
from core import util
import config
import forms
import models
bp = flask.Blueprint(
'user',
__name__,
url_prefix='/user',
template_folder='templates',
)
###############################################################################
# User List
###############################################################################
@bp.route('/', endpoint='list')
@auth.admin_required
def user_list():
user_dbs, user_cursor, prev_cursor = models.User.get_dbs(
email=util.param('email')
)
permissions = list(forms.UserUpdateForm._permission_choices)
permissions += util.param('permissions', list) or []
return flask.render_template(
'user/admin/list.html',
html_class='user-list',
title='User List',
user_dbs=user_dbs,
next_url=util.generate_next_url(user_cursor),
prev_url=util.generate_next_url(prev_cursor),
permissions=sorted(set(permissions)),
api_url=flask.url_for('api.users')
)
@bp.route('/<int:user_id>/update/', methods=['GET', 'POST'], endpoint='update')
@auth.admin_required
def user_update(user_id):
user_db = models.User.get_by_id(user_id)
if not user_db:
flask.abort(404)
form = forms.UserUpdateForm(obj=user_db)
for permission in user_db.permissions:
form.permissions.choices.append((permission, permission))
form.permissions.choices = sorted(set(form.permissions.choices))
if form.validate_on_submit():
if not util.is_valid_username(form.username.data):
form.username.errors.append('This username is invalid.')
elif not models.User.is_username_available(form.username.data, user_db.key):
form.username.errors.append('This username is already taken.')
else:
form.populate_obj(user_db)
if auth.current_user_id() == user_db.key.id():
user_db.admin = True
user_db.active = True
user_db.put()
return flask.redirect(flask.url_for(
'user.list', order='-modified', active=user_db.active,
))
return flask.render_template(
'user/admin/update.html',
title=user_db.name,
html_class='user-update',
form=form,
user_db=user_db,
api_url=flask.url_for('api.user', key=user_db.key.urlsafe())
)
@bp.route('/verify_email/<token>/')
@auth.login_required
def verify_email(token):
user_db = auth.current_user_db()
if user_db.token != token:
flask.flash('That link is either invalid or expired.', category='danger')
return flask.redirect(flask.url_for('user.profile_update'))
user_db.verified = True
user_db.token = util.uuid()
user_db.put()
flask.flash('Hooray! Your email is now verified.', category='success')
return flask.redirect(flask.url_for('user.profile_update'))
@bp.route('/merge/', methods=['GET', 'POST'])
@auth.admin_required
def merge():
user_keys = util.param('user_keys', list)
if not user_keys:
flask.abort(400)
user_db_keys = [ndb.Key(urlsafe=k) for k in user_keys]
user_dbs = ndb.get_multi(user_db_keys)
if len(user_dbs) < 2:
flask.abort(400)
user_dbs.sort(key=lambda user_db: user_db.created)
merged_user_db = user_dbs[0]
auth_ids = []
permissions = []
is_admin = False
is_active = False
for user_db in user_dbs:
auth_ids.extend(user_db.auth_ids)
permissions.extend(user_db.permissions)
is_admin = is_admin or user_db.admin
is_active = is_active or user_db.active
if user_db.key.urlsafe() == util.param('user_key'):
merged_user_db = user_db
auth_ids = sorted(list(set(auth_ids)))
permissions = sorted(list(set(permissions)))
merged_user_db.permissions = permissions
merged_user_db.admin = is_admin
merged_user_db.active = is_active
merged_user_db.verified = False
form_obj = copy.deepcopy(merged_user_db)
form_obj.user_key = merged_user_db.key.urlsafe()
form_obj.user_keys = ','.join(user_keys)
form = forms.UserMergeForm(obj=form_obj)
if form.validate_on_submit():
form.populate_obj(merged_user_db)
merged_user_db.auth_ids = auth_ids
merged_user_db.put()
deprecated_keys = [k for k in user_db_keys if k != merged_user_db.key]
merge_user_dbs(merged_user_db, deprecated_keys)
return flask.redirect(
flask.url_for('user.update', user_id=merged_user_db.key.id()),
)
return flask.render_template(
'user/admin/merge.html',
title='Merge Users',
html_class='user-merge',
user_dbs=user_dbs,
merged_user_db=merged_user_db,
form=form,
auth_ids=auth_ids,
api_url=flask.url_for('api.users', user_keys=','.join(user_keys))
)
@ndb.transactional(xg=True)
def merge_user_dbs(user_db, deprecated_keys):
# TODO: Merge possible user data before handling deprecated users
deprecated_dbs = ndb.get_multi(deprecated_keys)
for deprecated_db in deprecated_dbs:
deprecated_db.auth_ids = []
deprecated_db.active = False
deprecated_db.verified = False
if not deprecated_db.username.startswith('_'):
deprecated_db.username = '_%s' % deprecated_db.username
ndb.put_multi(deprecated_dbs)
@bp.route('/profile/')
@auth.login_required
def profile():
user_db = auth.current_user_db()
return flask.render_template(
'user/profile/index.html',
title=user_db.name,
html_class='profile-view',
user_db=user_db,
has_json=True,
api_url=flask.url_for('api.user', key=user_db.key.urlsafe()),
)
@bp.route('/profile/update/', methods=['GET', 'POST'])
@auth.login_required
def profile_update():
user_db = auth.current_user_db()
form = forms.ProfileUpdateForm(obj=user_db)
if form.validate_on_submit():
email = form.email.data
if email and not user_db.is_email_available(email, user_db.key):
form.email.errors.append('This email is already taken.')
if not form.errors:
send_verification = not user_db.token or user_db.email != email
form.populate_obj(user_db)
if send_verification:
user_db.verified = False
task.verify_email_notification(user_db)
user_db.put()
      return flask.redirect(flask.url_for('pages.welcome'))
  return flask.render_template(
'user/profile/update.html',
title=user_db.name,
html_class='profile',
form=form,
user_db=user_db,
)
@bp.route('/profile/password/', methods=['GET', 'POST'])
@auth.login_required
def profile_password():
if not config.CONFIG_DB.has_email_authentication:
flask.abort(418)
user_db = auth.current_user_db()
form = forms.ProfilePasswordForm(obj=user_db)
if form.validate_on_submit():
errors = False
old_password = form.old_password.data
new_password = form.new_password.data
if new_password or old_password:
if user_db.password_hash:
if helpers.password_hash(user_db, old_password) != user_db.password_hash:
form.old_password.errors.append('Invalid current password')
errors = True
if not errors and old_password and not new_password:
form.new_password.errors.append('This field is required.')
errors = True
if not (form.errors or errors):
user_db.password_hash = helpers.password_hash(user_db, new_password)
flask.flash('Your password has been changed.', category='success')
if not (form.errors or errors):
user_db.put()
return flask.redirect(flask.url_for('user.profile'))
return flask.render_template(
'user/profile/password.html',
title=user_db.name,
html_class='profile-password',
form=form,
user_db=user_db,
)
|
iuscommunity/dmirr
|
src/dmirr.hub/dmirr/hub/lib/geo.py
|
Python
|
gpl-2.0
| 735
| 0.013605
|
from django.conf import settings
from geopy import distance, geocoders
import pygeoip
def get_geodata_by_ip(addr):
    gi = pygeoip.GeoIP(settings.GEO_CITY_FILE, pygeoip.MEMORY_CACHE)
geodata = gi.record_by_addr(addr)
return geodata
def get_geodata_by_region(*args):
gn = geocoders.GeoNames()
return gn.geocode(' '.join(args), exactly_one=False)[0]
def get_distance(location1, location2):
"""
    Calculate distance between two locations, given the (lat, long) of each.
Required Arguments:
location1
A tuple of (lat, long).
location2
A tuple of (lat, long).
"""
return distance.distance(location1, location2).miles
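# A minimal usage sketch (not part of the original module; the coordinates below
# are arbitrary examples): geopy computes the great-circle distance in miles
# between two (lat, long) tuples, which is all get_distance() does.
if __name__ == '__main__':
    new_york = (40.7128, -74.0060)
    london = (51.5074, -0.1278)
    print(get_distance(new_york, london))  # roughly 3,460 miles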
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/admin/modules_handler.py
|
Python
|
bsd-3-clause
| 934
| 0.001071
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A handler that displays servers and their instances."""
from google.appengine.tools.devappserver2.admin import admin_request_handler
class ModulesHandler(admin_request_handler.AdminRequestHandler):
def get(self):
values = {'modules': self.dispatcher.modules}
self.response.write(self.render('modules.html', values))
|
rck109d/projectEuler
|
src/euler/p3.py
|
Python
|
lgpl-3.0
| 433
| 0
|
def isPrime(num):
if num <= 1:
return False
i = 2
while i < num / 2 + 1:
if num % i == 0:
return False
i += 1
return True
big = 600851475143
test = 1
while test < big:
test += 1
if big % test == 0:
print(test, ' divides evenly')
div = big / test
print('candidate ', div)
if isPrime(div):
print('found ', div)
            break
|
ajpotato214/Finance-Data-Scraper-API
|
finance_data_scraper/scrapers/finviz.py
|
Python
|
mit
| 4,390
| 0.0082
|
#!/usr/bin/python3
from scrapers.scrape import scrape_page
# if you want to use this scraper without the RESTful api webservice then
# change this import: from scrape import scrape_page
import re
try:
import pandas as pd
pandasImported = True
except ImportError:
pandasImported = False
BASE_URL = "http://finviz.com/quote.ashx?t="
VALUE_NAMES_XPATH = '//*[@class="snapshot-td2-cp"]/text()'
VALUES_XPATH = '//*[@class="snapshot-td2"]/b/text() | //*[@class="snapshot-td2"]/b/*/text()'
def get_statistics_table(page):
"""
    This function will return, as a Python dictionary, the financial statistics table from a stock's
    finviz page, if it exists
:param page: HTML tree structure based on the html markup of the scraped web page.
:return: a dictionary of all the financial statistics listed on a stock's finviz page, otherwise will
return a empty dictionary
"""
value_names = page.xpath(VALUE_NAMES_XPATH)
values = page.xpath(VALUES_XPATH)
values = [value if value != "-" else None for value in values]
table = dict(zip(value_names, values))
return table
def get_statistic(ticker_symbol, stat_name, page=None):
"""
This function will get the associated financial statistic from the corresponding finviz page given the
statistic's name and the ticker symbol
:param ticker_symbol: The ticker symbol of the interested stock (e.g., "AAPL", "GOOG", "MSFT")
:param stat_name: The name of the interested financial statistic (e.g., "P/E", "Price", "Volume").
An exhaustive list of available financial statistics can be found on a stock's finviz page
:param page: HTML tree structure based on the html markup of the scraped web page. If one is not passed in the
function will scrape the page
:return: the value of the interested financial statistic if it exists, otherwise None
"""
if page is None:
page = scrape_page(BASE_URL + ticker_symbol)
table = get_statistics_table(page)
if stat_name in table.keys() and table[stat_name]:
return table[stat_name]
else:
return None
def get_all_statistics(ticker_symbol, page=None):
"""
    This function will get all the associated financial statistics from the corresponding finviz page
    given the ticker symbol
    :param ticker_symbol: The ticker symbol of the interested stock (e.g., "AAPL", "GOOG", "MSFT")
:param page: HTML tree structure based on the html markup of the scraped page. If one is not passed in the
function will scrape the page
:return: a dictionary of all the financial statistics listed on a stock's finviz page, otherwise None
"""
if page is None:
page = scrape_page(BASE_URL + ticker_symbol)
table = get_statistics_table(page)
if table:
return table
else:
return None
def get_all_statistics_series(ticker_symbol):
"""
Return pandas Series of ticker symbol. Try to convert to numeric.
"""
if not pandasImported:
raise Exception("Pandas not installed.")
d = get_all_statistics(ticker_symbol)
new_dict = {}
for k,v in d.items():
if v == None:
continue
if ('%' in v) and (v.index('%') == (len(v)-1)):
# percent
new_dict[k + '(%)'] = float(v[:-1])
elif (k == '52W Range'):
m = re.match('([0-9\.\-]+) - ([0-9\.\-]+)',v)
new_dict['52W Low'] = float(m.group(1))
new_dict['52W High'] = float(m.group(2))
else:
try:
# remove any commas
v = re.sub(',','',v)
v = re.sub('B','E9',v) # expoentiate billions
v = re.sub('M','E6',v)
v = re.sub('K','E3',v)
new_dict[k] = float(v)
except ValueError:
new_dict[k] = v
return pd.Series(new_dict)
def get_all_statistics_df(symbol_list):
"""Return a dataframe for a list of symbols.
"""
series = []
for s in symbol_list:
        series.append(get_all_statistics_series(s))
    return pd.DataFrame(series, index=symbol_list)
if __name__ == "__main__":
# Test Cases
print(get_statistic("AAPL", "P/E"))
print(get_statistic("AAPL", "Inst Own"))
print(get_statistic("AAPL", "Change"))
print(get_statistic("AAPL", "This should return None"))
print(get_all_statistics("AAPL"))
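    # Hedged addition (not in the original test cases): the pandas helpers above
    # can be exercised the same way when pandas is available; "AAPL" and "MSFT"
    # are only illustrative tickers and require finviz to be reachable.
    if pandasImported:
        print(get_all_statistics_series("AAPL"))
        print(get_all_statistics_df(["AAPL", "MSFT"]))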
|
tomekby/miscellaneous
|
jira-invoices/calculator.py
|
Python
|
mit
| 6,443
| 0.003889
|
from w3lib.html import remove_tags
from requests import session, codes
from bs4 import BeautifulSoup
# Net/gross calculator for student under 26 years
class Student:
_hours = 0
_wage = 0
_tax_rate = 18
_cost = 20
def __init__(self, hours, wage, cost):
self._hours = hours
self._wage = wage
self._cost = cost
def _get_real_tax_rate(self):
tax_from = (100 - self._cost) / 100
return tax_from * self._tax_rate / 100
def get_net(self):
return self._wage * self._hours
def get_gross(self):
value = self.get_net() / (1 - self._get_real_tax_rate())
return int(value + 0.5)
def get_tax_base(self):
return self.get_gross() - self.get_cost()
def get_cost(self):
return self.get_gross() - self.get_gross() * (100 - self._cost) / 100
def get_tax(self):
return self.get_gross() - self.get_net()
def get_cost_percentage(self):
return self._cost
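# A minimal usage sketch (not part of the original module): for 10 hours at a
# net rate of 30 with 20% deductible costs, the gross is recovered by reversing
# an 18% tax applied to the remaining 80% of the amount, i.e. net / (1 - 0.8 * 0.18).
#
#   student = Student(hours=10, wage=30, cost=20)
#   student.get_net()    # 300
#   student.get_gross()  # int(300 / 0.856 + 0.5) == 350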
# Net/gross calculator using web client with optional fallback
class WebCalculator:
_data = None
_calculator = None
_cost = 0
def __init__(self, hours, wage, cost):
from tools import Config
self._cost = cost
self._data = Config.get_calculator_bot().parse(hours * wage, 1 if cost == 50 else 0)
# Check if bot returned some data
if self._data == None:
self._calculator = Config.get_fallback_calculator()(hours, wage, cost)
def get_net(self):
if self._data == None:
return self._calculator.get_net()
return self._data['net']
def get_gross(self):
if self._data == None:
return self._calculator.get_gross()
return self._data['gross']
def get_tax_base(self):
if self._data == None:
return self._calculator.get_tax_base()
return self._data['tax_base']
def get_cost(self):
if self._data == None:
return self._calculator.get_cost()
        return self._data['cost']
def get_tax(self):
if self._data == None:
return self._calculator.get_tax()
return self._data['tax']
def get_cost_percentage(self):
return self._cost
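# Usage sketch (assumption, not from the original module): WebCalculator mirrors
# Student's accessor interface, so callers can use either one transparently; the
# online bot configured in tools.Config is tried first, and the offline fallback
# calculator is constructed only when the bot returns no data.
#
#   calc = WebCalculator(hours=10, wage=30, cost=20)
#   calc.get_gross(), calc.get_tax()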
# Bot finding invoice values on wfirma.pl calculator page
class WfirmaPlBot:
_url = 'https://poradnik.wfirma.pl/staff_contract_headers/evaluate/errand'
# Send needed data
@staticmethod
def parse(net, copyright):
from tools import Config
# Prepare data for request
form_data = Config.get('wfirma.pl')
header_data = {
'quota_type': form_data['quota_type'],
'quota': net,
'company_incidental': form_data['company_incidental'],
}
form_data['copyright'] = copyright
with session() as c:
# convert data to format viable for url-encoding
data = {}
for k, v in form_data.items():
data['data[StaffContractErrand][%s]' % k] = v
for k, v in header_data.items():
data['data[StaffContractHeader][%s]' % k] = v
# Send the request to the server
try:
request = c.post(WfirmaPlBot._url, data=data, timeout=3)
except:
print('Przekroczono maksymalny czas oczekiwania na odpowiedź serwera')
return None
# There was some error (most likely server-side), so use offline fallback
if request.status_code != codes.ok:
print('Wystąpił błąd podczas pobierania danych do rachunku')
return None
return WfirmaPlBot._parse_results(request.text)
# Parse data returned on request
@staticmethod
def _parse_results(request_body):
# extract wanted data
soup = BeautifulSoup(request_body.replace('\n', ''), 'xml')
interesting_columns = soup.findAll('td')[1:15:2]
# convert to floats
interesting_columns = list(map(lambda x: float(x.get_text().replace(' ', '').replace(',', '.')), interesting_columns))
column_names = [
'net', 'gross', 'all_cost', 'insurance_base', 'cost', 'tax_base', 'tax',
]
result = {}
for i in range(0, 7):
result[column_names[i]] = interesting_columns[i]
return result
# @todo the deductible cost rate ("koszty uzyskania") cannot be set here
class KalkulatoryNfBot:
_url = 'http://kalkulatory.nf.pl/kalkulator/wynagrodzenie/zlecenie'
# Send needed data
@staticmethod
def parse(net, copyright):
return None
from tools import Config
form_data = Config.get('kalkulatory.nf.pl')
form_data = {**form_data, **{
'stawka': 'net',
'kwota': net,
'_method': 'POST',
}}
with session() as c:
# Fix data format
data = {}
for k, v in form_data.items():
data['data[Calculator][%s]' % k] = v
# Try to make a request
try:
request = c.post(KalkulatoryNfBot._url, data=data, timeout=3)
except:
print('Przekroczono maksymalny czas oczekiwania na odpowiedź serwera')
return None
# There was some error (most likely server-side), so use offline fallback
if request.status_code != codes.ok:
print('Wystąpił błąd podczas pobierania danych do rachunku')
return None
return KalkulatoryNfBot._parse_results(request.text)
# Parse data returned on request
@staticmethod
def _parse_results(request_body):
# extract wanted data
soup = BeautifulSoup(request_body)
table = soup.select('div.calc-body.clr')[0].find_next_sibling().findAll('td')[4:]
        del table[3:7]  # remove unneeded columns
table = list(map(lambda x: float(x.get_text().replace(' zł', '').replace(' ', '').replace(',', '.')), table))
column_names = [
'cost', 'tax_base', 'tax', 'gross', 'net'
]
result = {}
for i in range(0, 5):
result[column_names[i]] = table[i]
return result
|
Arkapravo/morse-0.6
|
src/morse/middleware/socket_mw.py
|
Python
|
bsd-3-clause
| 6,797
| 0.00206
|
import logging; logger = logging.getLogger("morse." + __name__)
import socket
import select
import json
import morse.core.middleware
from functools import partial
from morse.core import services
class MorseSocketServ:
def __init__(self, port, component_name):
# List of socket clients
self._client_sockets = []
self._message_size = 1024
self._component_name = component_name
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._server.bind((str(socket.INADDR_ANY), port))
self._server.listen(1)
logger.info("Socket Mw Server now listening on port " + str(port) + \
" for component " + str(component_name) + ".")
def __del__(self):
""" Terminate the ports used to accept requests """
if self._client_sockets:
logger.info("Closing client sockets...")
for s in self._client_sockets:
s.close()
if self._server:
logger.info("Shutting down connections to server...")
self._server.shutdown(socket.SHUT_RDWR)
logger.info("Closing socket server...")
self._server.close()
del self._server
def main_export(self, encode, component_instance):
sockets = self._client_sockets + [self._server]
try:
inputready, outputready, exceptready = select.select(sockets, sockets, [], 0)
except select.error:
pass
except socket.error:
pass
if self._server in inputready:
sock, addr = self._server.accept()
self._client_sockets.append(sock)
if outputready != []:
message = encode(component_instance)
for o in outputready:
try:
o.send(message)
except socket.error:
self.close_socket(o)
def main_read(self, decode, component_instance):
sockets = self._client_sockets + [self._server]
try:
inputready, outputready, exceptready = select.select(sockets, [], [], 0)
except select.error:
pass
except socket.error:
pass
for i in inputready:
if i == self._server:
sock, addr = self._server.accept()
if self._client_sockets != []:
                    logger.warning("More than one client connected to an actuator!")
self._client_sockets.append(sock)
else:
try:
msg = i.recv(self._message_size)
logger.debug("received msg %s" % msg)
if msg == b'':
self.close_socket(i)
else:
component_instance.local_data = decode(msg)
except socket.error as detail:
self.close_socket(i)
def close_socket(self, sock):
self._client_sockets.remove(sock)
try:
sock.close()
except socket.error as error_info:
            logger.warning("Socket error caught while closing: " + str(error_info))
class MorseSocketClass(morse.core.middleware.MorseMiddlewareClass):
""" External communication using sockets. """
def __init__(self):
""" Initialize the socket connections """
# Call the constructor of the parent class
super(self.__class__,self).__init__()
# port -> MorseSocketServ
self._server_dict = {}
# component name (string) -> Port (int)
self._component_nameservice = {}
self._base_port = 60000
# Register two special services in the socket service manager:
# TODO To use a new special component instead of 'simulation',
# uncomment the line :-)
# bge.logic.morse_services.register_request_manager_mapping("streams", "SocketRequestManager")
services.do_service_registration(self.list_streams, 'simulation')
services.do_service_registration(self.get_stream_port, 'simulation')
services.do_service_registration(self.get_all_stream_ports, 'simulation')
def list_streams(self):
""" List all publish streams.
"""
return list(self._component_nameservice.keys())
def get_stream_port(self, name):
""" Get stream port for stream name.
"""
port = -1
try:
port = self._component_nameservice[name]
except KeyError:
pass
return port
    def get_all_stream_ports(self):
""" Get stream ports for all streams.
"""
return self._component_nameservice
def register_component(self, component_name, component_instance, mw_data):
""" Open the port used to communicate by the specified component.
"""
# Create a socket server for this component
serv = MorseSocketServ(self._base_port, component_name)
        self._server_dict[self._base_port] = serv
self._component_nameservice[component_name] = self._base_port
self._base_port = self._base_port + 1
# Extract the information for this middleware
# This will be tailored for each middleware according to its needs
function_name = mw_data[1]
fun = self._check_function_exists(function_name)
if fun != None:
# Choose what to do, depending on the function being used
# Data read functions
if function_name == "read_message":
component_instance.input_functions.append(partial(MorseSocketServ.main_read, serv, fun))
# Data write functions
elif function_name == "post_message":
component_instance.output_functions.append(partial(MorseSocketServ.main_export, serv, fun))
# If the function is external and has already been loaded before
else:
# Pass by mw_data the generated server
mw_data.append(serv)
self._add_method(mw_data, component_instance)
else:
# Pass by mw_data the generated server
mw_data.append(serv)
self._add_method(mw_data, component_instance)
def post_message(self, component_instance):
return (json.dumps(component_instance.local_data) + '\n').encode()
def read_message(self, msg):
return json.loads(msg.decode('utf-8'))
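    # Wire-format sketch (illustrative, not part of the original middleware): data
    # is exchanged as newline-delimited JSON, so an external client could consume a
    # published stream roughly like this (the port comes from get_stream_port):
    #
    #   import json, socket
    #   sock = socket.create_connection(("localhost", 60000))
    #   line = sock.makefile().readline()
    #   sample = json.loads(line)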
def print_open_sockets(self):
""" Display a list of all currently opened sockets."""
logger.info("Socket Mid: Currently opened sockets:")
        for name, sock in self._server_dict.iteritems():
            logger.info("   - Port name '{0}' = '{1}'".format(name, sock))
|
nanomolina/MusicWeb
|
src/Music/apps/interpreter/migrations/0011_auto_20141215_0030.py
|
Python
|
mit
| 600
| 0.001667
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('interpreter', '0010_auto_20141215_0027'),
]
operations = [
migrations.RemoveField(
model_name='band',
name='members',
),
migrations.AddField(
            model_name='artist',
            name='band',
            field=models.ManyToManyField(related_name='members', null=True, to='interpreter.Band', blank=True),
preserve_default=True,
),
]
|
miur/miur
|
OLD/miur/cursor/dispatch.py
|
Python
|
gpl-3.0
| 1,733
| 0.000577
|
#
# SPDX-FileCopyrightText: 2016 Dmytro Kolomoiets <amerlyq@gmail.com> and contributors.
#
# SPDX-License-Identifier: GPL-3.0-only
#
from miur.cursor import state, update, message as msg
class Dispatcher:
"""Apply actions to any unrelated global states"""
def _err_wrong_cmd(self):
        # Move err processing to 'update.py' (make more symmetrical)
# _log.error("Wrong cmd: {}".format(cmd))
raise NotImplementedError
def focus_node_next(self):
if state.cursor is not None and state.entries is not None:
state.cursor = min(state.cursor + 1, len(state.entries) - 1)
def focus_node_prev(self):
        if state.cursor is not None and state.entries is not None:
state.cursor = max(state.cursor - 1, 0)
def focus_node_beg(self):
if state.entries is not None:
state.cursor = 0
def focus_node_end(self):
if state.entries is not None:
state.cursor = len(state.entries) - 1
def shift_node_parent(self):
# DEV: combine these multiple queue in single request to *core*
# state.path =
# TEMP: apply directly to global state
# TEMP: send msg and wait until fully processed (send-recv-apply)
update.handle(msg.NodeGetParentMsg())
update.handle(msg.ListNodeMsg())
state.cursor = 0 if state.entries else None
def shift_node_current(self):
if state.cursor is None or state.entries is None:
return
# WARN: must send both (p, e) for *core*
# => to check if (p, e) is still available in fs
update.handle(msg.NodeGetChildMsg())
update.handle(msg.ListNodeMsg())
state.cursor = 0 if state.entries else None
|
sl0/adm6
|
tests/test_03_filter6.py
|
Python
|
gpl-3.0
| 102,052
| 0.003439
|
#!/usr/bin/env python
#encoding:utf8
#
# file: filter6_tests.py
# author: sl0
# date: 2013-03-06
#
import unittest
from adm6.filter6 import IP6_Filter, Ip6_Filter_Rule
from sys import stdout
from os.path import expanduser as homedir
from ipaddr import IPv6Network
from os import getenv as get_env
home_dir_replacement = get_env("HOME")
rule = {}
class Ip6_Filter_Rule_tests(unittest.TestCase):
"""
some tests for class Ip6_Filter_Rule
"""
def test_01_create_Filter_Rule(self):
"""
fr-01 create Filter_Rule object
"""
my_err = False
try:
f = Ip6_Filter_Rule(rule)
except:
my_err = True
self.assertFalse(my_err)
self.assertFalse(f['i_am_d'])
self.assertFalse(f['i_am_s'])
self.assertFalse(f['travers'])
self.assertFalse(f['insec'])
self.assertFalse(f['noif'])
self.assertFalse(f['nonew'])
self.assertFalse(f['nostate'])
self.assertEqual(f['sport'], u'1024:')
self.assertEqual(['Rule-Nr', 'Pair-Nr', 'RuleText'], f.CommentList)
self.assertEqual(['Output', 'debuglevel'], f.NeverDisplay)
displaylist = ['Rule-Nr', 'Pair-Nr', 'System-Name', 'System-Forward',
'OS', 'Asymmetric', 'RuleText', 'Source', 'Destin', 'Protocol',
'sport', 'dport', 'Action', 'nonew', 'noif', 'nostate', 'insec',
'i_am_s', 'i_am_d', 'travers', 'source-if', 'source-rn',
'src-linklocal', 'src-multicast', 'destin-if', 'destin-rn',
'dst-linklocal', 'dst-multicast', ]
self.assertEqual(displaylist, f.DisplayList)
#f['debuglevel'] = True
#print f
def test_02_produce_for_invalid_os_name(self):
"""
fr-02 produce for invalid os name
"""
my_err = False
try:
fr = Ip6_Filter_Rule(rule)
except:
my_err = True
fr['OS'] = 'Invalid os name'
self.assertRaises(ValueError, fr.produce ,stdout)
def test_03_produce_for_linux_as_traversed(self):
"""
fr-03 produce for linux as traversed host
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
print "M:", fr.msg
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_04_produce_for_openbsd(self):
"""
fr-04 produce for OpenBSD
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
#expect = """# n o t y e t i m p l e m e n t e d !"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_05_produce_for_bsd(self):
"""
fr-05 produce for BSD
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
            fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'BSD'
except:
my_err = True
fr.produce(ofile)
expect = "# IPF is n o t y e t i m p l e m e n t e d !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_06_produce_for_opensolaris(self):
"""
fr-06 produce for OpenSolaris
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenSolaris'
except:
my_err = True
fr.produce(ofile)
expect = "# IPF is n o t y e t i m p l e m e n t e d !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_07_produce_for_wxp(self):
"""
fr-07 produce for WXP
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Win-XP-SP3'
except:
my_err = True
fr.produce(ofile)
expect = "# System should not forward until redesigned"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_08_repr_with_debuglevel(self):
"""
fr-08 repr with debuglevel
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action
|
pomarec/core
|
arkos/connections.py
|
Python
|
gpl-3.0
| 3,011
| 0
|
"""
Classes and functions for interacting with system management daemons.
arkOS Core
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
import ldap
import ldap.modlist
import xmlrpc.client
from .utilities import errors
from dbus import SystemBus, Interface
class ConnectionsManager:
"""Manages arkOS connections to system-level processes via their APIs."""
def __init__(self, config, secrets):
self.config = config
self.secrets = secrets
def connect(self):
"""Initialize the connec
|
tions."""
self.connect_services()
self.connect_ldap()
def connect_services(self):
self.DBus = SystemBus()
self.SystemD = self.SystemDConnect(
"/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager")
self.Supervisor = supervisor_connect()
def connect_ldap(self):
self.LDAP = ldap_connect(
config=self.config, passwd=self.secrets.get("ldap")
)
def SystemDConnect(self, path, interface):
systemd = self.DBus.get_object("org.freedesktop.systemd1", path)
return Interface(systemd, dbus_interface=interface)
def ldap_connect(
uri="", rootdn="", dn="cn=admin", config=None, passwd="",
conn_type=""):
"""
Initialize a connection to arkOS LDAP.
:param str uri: LDAP host URI
:param str rootdn: Root DN
:param str dn: User DN
:param Config config: arkOS config to use for default values
:param str passwd: Password to use to validate credentials
:returns: LDAP connection object
"""
if not all([uri, rootdn, dn]) and not config:
raise errors.InvalidConfigError("No LDAP values passed")
uri = uri or config.get("general", "ldap_uri")
rootdn = rootdn or config.get("general", "ldap_rootdn")
conn_type = conn_type or config.get("general", "ldap_conntype")
if conn_type == "dynamic":
c = ldap.ldapobject.ReconnectLDAPObject(
uri, retry_max=3, retry_delay=5.0)
else:
c = ldap.initialize(uri)
try:
c.simple_bind_s("{0},{1}".format(dn, rootdn), passwd)
except ldap.INVALID_CREDENTIALS:
raise errors.ConnectionError("LDAP", "Invalid username/password")
except Exception as e:
raise errors.ConnectionError("LDAP") from e
if dn != "cn=admin":
data = c.search_s("cn=admins,ou=groups,{0}".format(rootdn),
ldap.SCOPE_SUBTREE, "(objectClass=*)",
["member"])[0][1]["member"]
if "{0},{1}".format(dn, rootdn) not in data:
raise errors.ConnectionError("LDAP", "Not an administrator")
return c
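# Usage sketch (the URI and DNs below are placeholders, not arkOS defaults): when
# no Config object is supplied, uri, rootdn and conn_type must all be given
# explicitly; binding with a user DN then doubles as a credential check, since a
# bad password raises errors.ConnectionError.
#
#   conn = ldap_connect(uri="ldap://localhost", rootdn="dc=example,dc=com",
#                       dn="uid=alice,ou=users", passwd="secret",
#                       conn_type="static")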
def supervisor_connect():
"""
Initialize a connection to Supervisor via XML-RPC API.
:returns: XML-RPC connection object
"""
try:
s = xmlrpc.client.Server("http://localhost:9001/RPC2")
return s.supervisor
except Exception as e:
raise errors.ConnectionError("Supervisor") from e
|
lab132/PyBake
|
PyBake/commands/basketCommand.py
|
Python
|
mit
| 1,990
| 0.009045
|
"""Commands for argparse for basket command"""
import textwrap
from PyBake import Path
from PyBake.commands import command
@command("basket")
class BasketModuleManager:
"""Module Manager for Basket"""
longDescription = textwrap.dedent(
"""
Retrieves pastries from the shop.
""")
def createArguments(self, basketParser):
basketParser.add_argument("shoppingList",
nargs="?",
default=Path("shoppingList.py"),
type=Path,
help="The shopping list script that describes which pastries are required. "
"Default: 'shoppingList.py'")
basketParser.add_argument("--force-download",
dest="force",
action="append_const",
const="download",
help="Download all required pastries, whether they exist locally already or not.")
basketParser.add_argument("--force-install",
dest="force",
action="append_const",
const="install",
help="Perform an install, regardless whether the pastry is already installed or not.")
basketParser.add_argument("--force",
dest="force",
action="append_const",
const="all",
help="Implies --force-download and --force-install.")
basketParser.set_defaults(func=execute_basket)
def execute_basket(args):
"""Execute the `basket` command."""
from PyBake import log
force = args.force or []
del args.force
args.forceDownload = any(arg in ("all", "download") for arg in force)
args.forceInstall = any(arg in ("all", "install") for arg in force)
log.debug(args)
from PyBake import basket
return basket.run(**vars(args))
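# Illustrative invocation (hypothetical command name; the real console entry
# point is not shown here): the --force-* flags accumulate into ``args.force``,
# so the call below would re-download every pastry while leaving the install
# step at its default behaviour.
#
#   pybake basket shoppingList.py --force-download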
|
|
ivanamihalek/tcga
|
icgc/60_nextgen_production/65_reactome_tree.py
|
Python
|
gpl-3.0
| 5,057
| 0.024916
|
#! /usr/bin/python3
#
# This source code is part of icgc, an ICGC processing pipeline.
#
# Icgc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Icgc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
# Contact: ivana.mihalek@gmail.com
#
# some pathways do not have the associated genes listed, probably by mistake
# examples:
# R-HSA-1483171 | Synthesis of BMP
# R-HSA-2408499 | Formation of selenosugars for excretion
from icgc_utils.common_queries import quotify
from icgc_utils.reactome import *
from config import Config
############
def print_genes(cursor, gene_ids, depth):
if len(gene_ids)<1:
print("\t"*depth, "no genes listed")
return
#print("\t"*depth, "print genes here")
gene_id_string = ",".join([quotify(z) for z in gene_ids])
qry = "select ensembl_gene_id, approved_name from hgnc where ensembl_gene_id in (%s)" % gene_id_string
gene_names = dict(hard_landing_search(cursor, qry))
qry = "select ensembl_gene_id, approved_symbol from hgnc where ensembl_gene_id in (%s)" % gene_id_string
gene_symbols = dict(hard_landing_search(cursor, qry))
for gene in gene_ids:
print("\t"*depth, gene_symbols.get(gene,""), gene_names.get(gene,""))
return
##############
def characterize_subtree(cursor, graph, pthwy_id, gene_groups, depth, verbose=True):
# this is the whole subtree
# children = [node for node in nx.dfs_preorder_nodes(graph, pthwy_id)]
	# A successor of n is a node m such that there exists a directed edge from n to m.
children = [node for node in graph.successors(pthwy_id)]
if len(children)==0: return False
node_id_string = ",".join([quotify(z) for z in children])
qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
children_names = hard_landing_search(cursor, qry_template % node_id_string)
for child_id, child_name in children_names:
# number_of_genes = genes related to nodes without descendants
genes = genes_in_subgraph(cursor, graph, child_id)
if verbose: print("\t"*depth, child_id, child_name, len(genes))
if len(genes)<100:
if verbose: print_genes(cursor, genes, depth+1)
gene_groups[child_name] = genes
continue
if not characterize_subtree(cursor, graph, child_id, gene_groups, depth+1, verbose=verbose): # no further subdivisions
if verbose: print_genes(cursor, genes, depth+1)
gene_groups[child_name] = genes
continue
return True
#########################################
import numpy as np
from matplotlib import pyplot as plt
def hist_plot(gene_groups):
data = [len(gene_list) for gene_list in list(gene_groups.values())]
# fixed bin size
bins = np.arange(0, 505, 5) # fixed bin size
plt.xlim(0,500)
plt.hist(data, bins=bins, alpha=0.5)
# plt.title('')
plt.xlabel('number of genes in group (bin size = 5)')
plt.ylabel('number of groups')
#
plt.show()
####################################################
def main():
verbose = False
db = connect_to_mysql(Config.mysql_conf_file)
cursor = db.cursor()
switch_to_db(cursor, 'icgc')
# are there children with multiple parents? Yes. So I need some kind of
	# directed graph, rather than a tree.
qry = "select child, count(distinct parent) as ct from reactome_hierarchy "
qry += "group by child having ct>1"
ret = search_db(cursor, qry)
print("number of children with multiple parents:", len(ret))
# feed the parent/child pairs as edges into graph
graph = build_reactome_graph(cursor, verbose=True)
# candidate roots
zero_in_degee_nodes = get_roots(graph)
node_id_string = ",".join([quotify(z) for z in zero_in_degee_nodes])
qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
root_names = hard_landing_search(cursor, qry_template% node_id_string)
gene_groups = {}
for pthwy_id, name in root_names:
if "disease" in name.lower(): continue
if verbose: print(pthwy_id, name)
characterize_subtree(cursor, graph, pthwy_id, gene_groups, 1, verbose=verbose)
print("\n===========================")
max_group=0
for group, genes in gene_groups.items():
groupsize = len(genes)
if max_group< groupsize: max_group=groupsize
print (group, len(genes))
print("\n===========================")
print("number of groups", len(gene_groups))
print("largest group", max_group)
print("\n===========================")
for pthwy_name, genes in gene_groups.items():
if len(genes)<=150: continue
print("\n",pthwy_name, len(genes))
#print_genes(cursor, genes, 1)
#hist_plot(gene_groups)
cursor.close()
db.close()
#########################################
if __name__ == '__main__':
main()
|
chromium2014/src
|
tools/telemetry/telemetry/util/find_dependencies.py
|
Python
|
bsd-3-clause
| 9,256
| 0.010372
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import imp
import logging
import modulefinder
import optparse
import os
import sys
import zipfile
from telemetry import benchmark
from telemetry.core import command_line
from telemetry.core import discover
from telemetry.core import util
from telemetry.page import cloud_storage
from telemetry.util import bootstrap
from telemetry.util import path_set
DEPS_FILE = 'bootstrap_deps'
def _InDirectory(subdirectory, directory):
subdirectory = os.path.realpath(subdirectory)
directory = os.path.realpath(directory)
common_prefix = os.path.commonprefix([subdirectory, directory])
return common_prefix == directory
def FindBootstrapDependencies(base_dir):
deps_file = os.path.join(base_dir, DEPS_FILE)
if not os.path.exists(deps_file):
return []
deps_paths = bootstrap.ListAllDepsPaths(deps_file)
return set(
os.path.realpath(os.path.join(util.GetChromiumSrcDir(), os.pardir, path))
for path in deps_paths)
def FindPythonDependencies(module_path):
logging.info('Finding Python dependencies of %s' % module_path)
# Load the module to inherit its sys.path modifications.
imp.load_source(
os.path.splitext(os.path.basename(module_path))[0], module_path)
# Analyze the module for its imports.
finder = modulefinder.ModuleFinder()
finder.run_script(module_path)
# Filter for only imports in Chromium.
for module in finder.modules.itervalues():
# If it's an __init__.py, module.__path__ gives the package's folder.
module_path = module.__path__[0] if module.__path__ else module.__file__
if not module_path:
continue
module_path = os.path.realpath(module_path)
if not _InDirectory(module_path, util.GetChromiumSrcDir()):
continue
yield module_path
def FindPageSetDependencies(base_dir):
logging.info('Finding page sets in %s' % base_dir)
# Add base_dir to path so our imports relative to base_dir will work.
sys.path.append(base_dir)
tests = discover.DiscoverClasses(base_dir, base_dir, benchmark.Benchmark,
index_by_class_name=True)
for test_class in tests.itervalues():
test_obj = test_class()
# Ensure the test's default options are set if needed.
parser = optparse.OptionParser()
test_obj.AddCommandLineArgs(parser)
options = optparse.Values()
for k, v in parser.get_default_values().__dict__.iteritems():
options.ensure_value(k, v)
# Page set paths are relative to their runner script, not relative to us.
util.GetBaseDir = lambda: base_dir
# TODO: Loading the page set will automatically download its Cloud Storage
# deps. This is really expensive, and we don't want to do this by default.
page_set = test_obj.CreatePageSet(options)
# Add all of its serving_dirs as dependencies.
for serving_dir in page_set.serving_dirs:
yield serving_dir
for page in page_set:
if page.is_file:
yield page.serving_dir
def FindExcludedFiles(files, options):
def MatchesConditions(path, conditions):
for condition in conditions:
if condition(path):
return True
return False
# Define some filters for files.
def IsHidden(path):
for pathname_component in path.split(os.sep):
if pathname_component.startswith('.'):
return True
return False
def IsPyc(path):
return os.path.splitext(path)[1] == '.pyc'
def IsInCloudStorage(path):
return os.path.exists(path + '.sha1')
def MatchesExcludeOptions(path):
for pattern in options.exclude:
if (fnmatch.fnmatch(path, pattern) or
          fnmatch.fnmatch(os.path.basename(path), pattern)):
return True
return False
# Collect filters we're going to use to exclude files.
exclude_conditions = [
IsHidden,
IsPyc,
IsInCloudStorage,
MatchesExcludeOptions,
]
# Check all the files against the filters.
for path in files:
    if MatchesConditions(path, exclude_conditions):
yield path
def FindDependencies(paths, options):
# Verify arguments.
for path in paths:
if not os.path.exists(path):
raise ValueError('Path does not exist: %s' % path)
dependencies = path_set.PathSet()
# Including __init__.py will include Telemetry and its dependencies.
# If the user doesn't pass any arguments, we just have Telemetry.
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(util.GetTelemetryDir(), 'telemetry', '__init__.py')))
dependencies |= FindBootstrapDependencies(util.GetTelemetryDir())
# Add dependencies.
for path in paths:
base_dir = os.path.dirname(os.path.realpath(path))
dependencies.add(base_dir)
dependencies |= FindBootstrapDependencies(base_dir)
dependencies |= FindPythonDependencies(path)
if options.include_page_set_data:
dependencies |= FindPageSetDependencies(base_dir)
# Remove excluded files.
dependencies -= FindExcludedFiles(set(dependencies), options)
return dependencies
def ZipDependencies(paths, dependencies, options):
base_dir = os.path.dirname(os.path.realpath(util.GetChromiumSrcDir()))
with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
# Add dependencies to archive.
for path in dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(path, base_dir))
zip_file.write(path, path_in_archive)
# Add symlinks to executable paths, for ease of use.
for path in paths:
link_info = zipfile.ZipInfo(
os.path.join('telemetry', os.path.basename(path)))
link_info.create_system = 3 # Unix attributes.
# 010 is regular file, 0111 is the permission bits rwxrwxrwx.
link_info.external_attr = 0100777 << 16 # Octal.
relative_path = os.path.relpath(path, base_dir)
link_script = (
'#!/usr/bin/env python\n\n'
'import os\n'
'import sys\n\n\n'
'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
% relative_path)
zip_file.writestr(link_info, link_script)
# Add gsutil to the archive, if it's available. The gsutil in
# depot_tools is modified to allow authentication using prodaccess.
# TODO: If there's a gsutil in telemetry/third_party/, bootstrap_deps
# will include it. Then there will be two copies of gsutil at the same
# location in the archive. This can be confusing for users.
gsutil_path = os.path.realpath(cloud_storage.FindGsutil())
if cloud_storage.SupportsProdaccess(gsutil_path):
gsutil_base_dir = os.path.join(os.path.dirname(gsutil_path), os.pardir)
gsutil_dependencies = path_set.PathSet()
gsutil_dependencies.add(os.path.dirname(gsutil_path))
# Also add modules from depot_tools that are needed by gsutil.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'boto'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'fancy_urllib'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'retry_decorator'))
gsutil_dependencies -= FindExcludedFiles(
set(gsutil_dependencies), options)
# Also add upload.py to the archive from depot_tools, if it is available.
# This allows us to post patches without requiring a full depot_tools
# install. There's no real point in including upload.py if we do not
# also have gsutil, which is why this is inside the gsutil block.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'upload.py'))
for path in gsutil_dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(util.GetTelemetryDir(), base_dir),
'third_party', os.path.relpath(path, gsutil_base_dir))
zip_file.write(path, path_in_archive)
class FindDependenciesCommand(command_line.OptparseCommand):
"""Prints all dependencies"""
@classmethod
def AddCommandLineArgs(cls, parser):
parser.add_option(
'-v', '--verbose', action='c
|
rcmorano/gecosws-config-assistant
|
firstboot/serverconf/ServerConf.py
|
Python
|
gpl-2.0
| 5,970
| 0.00067
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Antonio Hernández <ahernandez@emergya.com>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <devmaster@guadalinex.org>"
__license__ = "GPL-2"
import firstboot.serverconf
from ChefConf import ChefConf
from GCCConf import GCCConf
from AuthConf import AuthConf
from DateSyncConf import DateSyncConf
from UsersConf import UsersConf
class Singleton:
"""
A non-thread-safe helper class to ease implementing singletons.
This should be used as a decorator -- not a metaclass -- to the
class that should be a singleton.
The decorated class can define one `__init__` function that
takes only the `self` argument. Other than that, there are
no restrictions that apply to the decorated class.
To get the singleton instance, use the `Instance` method. Trying
to use `__call__` will result in a `TypeError` being raised.
Limitations: The decorated class cannot be inherited from.
"""
def __init__(self, decorated):
self._decorated = decorated
def Instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError('Singletons must be accessed through `Instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
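# Usage sketch (illustrative, mirrors the docstring above): decorate the class,
# then always go through Instance(); calling the decorated name directly raises.
#
#   @Singleton
#   class AppConfig:
#       def __init__(self):
#           self.loaded = True
#
#   AppConfig.Instance() is AppConfig.Instance()   # True
#   AppConfig()                                    # raises TypeError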
@Singleton
class ServerConf():
# Version of the configuration JSON file
def __init__(self):
self._data = {}
self.VERSION = '0.2.0'
self._data['gem_repo'] = 'http://rubygems.org'
self._data['version'] = self.VERSION
self._data['organization'] = ''
self._chef_conf = ChefConf()
self._gcc_conf = GCCConf()
self._auth_conf = AuthConf()
self._ntp_conf = DateSyncConf()
self._users_conf = UsersConf()
def load_data(self, conf):
msg = 'ServerConf: Key "%s" not found in the configuration file.'
try:
v = conf['version']
if v != self.VERSION:
print 'WARNING: ServerConf and AUTOCONFIG_JSON version mismatch!'
except KeyError as e:
print msg % ('version',)
try:
self.set_organization(conf['organization'])
except KeyError as e:
print msg % ('organization',)
try:
self.set_notes(conf['notes'])
except KeyError as e:
print msg % ('notes',)
try:
self.set_gem_repo(conf['gem_repo'])
except KeyError as e:
print msg % ('gem_repo',)
try:
self._chef_conf.load_data(conf['chef'])
except KeyError as e:
print msg % ('chef',)
try:
self._gcc_conf.load_data(conf['gcc'])
except KeyError as e:
print msg % ('gcc',)
try:
self._auth_conf.load_data(conf['auth'])
except KeyError as e:
print msg % ('auth',)
try:
self._ntp_conf.load_data(conf['uri_ntp'])
except KeyError as e:
print msg % ('ntp',)
def validate(self):
valid = len(self._data['version']) > 0 \
and self._chef_conf.validate() \
and self._auth_conf.validate() \
and self._ntp_conf.validate() \
and self._gcc_conf.validate()
return valid
def set_gem_repo(self, repo):
self._data['gem_repo'] = repo
return self
def get_gem_repo(self):
return self._data['gem_repo'].encode('utf-8')
def get_version(self):
return self._data['version'].encode('utf-8')
def set_version(self, version):
self._data['version'] = version
return self
def get_organization(self):
return self._data['organization'].encode('utf-8')
def set_organization(self, organization):
self._data['organization'] = organization
return self
def get_notes(self):
return self._data['notes'].encode('utf-8')
def set_notes(self, notes):
self._data['notes'] = notes
return self
def get_auth_conf(self):
return self._auth_conf
def get_chef_conf(self):
return self._chef_conf
def get_ntp_conf(self):
return self._ntp_conf
def get_gcc_conf(self):
return self._gcc_conf
def get_users_conf(self):
return self._users_conf
def set_auth_conf(self, auth_conf):
self._auth_conf = auth_conf
return self
def set_chef_conf(self, chef_conf):
self._chef_conf = chef_conf
return self
def set_ntp_conf(self, ntp_conf):
self._ntp_conf = ntp_conf
return self
def set_gcc_conf(self, gcc_conf):
self._gcc_conf = gcc_conf
return gcc_conf
def set_users_conf(self, user_conf):
self._users_conf = user_conf
return self
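# Shape of the JSON document consumed by ServerConf.load_data(), reconstructed
# from the keys accessed above; the values are placeholders, and the nested
# chef/gcc/auth sections follow their own *Conf.load_data() schemas not shown here.
#
#   {
#     "version": "0.2.0",
#     "organization": "Example Org",
#     "notes": "",
#     "gem_repo": "http://rubygems.org",
#     "uri_ntp": "pool.ntp.org",
#     "chef": {...}, "gcc": {...}, "auth": {...}
#   }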
|
axlt2002/script.light.imdb.ratings.update
|
resources/core/update_main.py
|
Python
|
gpl-3.0
| 10,373
| 0.050998
|
# -*- coding: utf-8 -*-
#############################
# Light IMDb Ratings Update #
# by axlt2002 #
#############################
# changes by dziobak #
#############################
import xbmc, xbmcgui
import sys
if sys.version_info >= (2, 7): import json as jSon
else: import simplejson as jSon
from common import *
from imdb_scraper import parse_IMDb_page
from tvdb_scraper import get_IMDb_ID_from_theTVDB
from tmdb_scraper import get_IMDb_ID_from_TMDb
from thread import start_new_thread, allocate_lock
max_threads = int(NumberOfThreads) - 1 #0 - 1 thread, 1 - 2 threads ...
num_threads = 0
def thread_parse_IMDb_page(dType, dbID, IMDb, Title, Rating, Votes, TVDB, TMDB, season, episode, lock, flock):
#movie: MovieID, IMDb, Title, Rating, Votes, Top250
#tvshow: TVShowID, IMDb, Title, Rating, Votes, TVDB, TMDB
    #episode: EpisodeID, IMDb, Title, Rating, Votes, TVDB, TMDB, season, episode
    global num_threads
if IMDb == None or IMDb == "" or "tt" not in IMDb: IMDb = None
Top250 = None
if dType == "movie":
Top250 = TVDB
if Top250 == None: Top250 = 0
TVDB = None
defaultLog( addonLanguage(32507) % ( Title, IMDb, TVDB, TMDB ) )
if IMDb == None:
if dType == "tvshow" or dType == "episode":
(IMDb, statusInfo) = get_IMDb_ID_from_theTVDB(dType, TVDB)
if IMDb == None:
(IMDb, add_statusInfo) = get_IMDb_ID_from_TMDb(dType, TMDB, season, episode)
statusInfo = statusInfo + "\n" + add_statusInfo
elif dType == "movie":
statusInfo = "Missing IMDb ID"
if IMDb == None:
defaultLog( addonLanguage(32503) % ( Title ) )
flock.acquire()
try:
statusLog( Title + ":\n" + statusInfo )
finally:
flock.release()
lock.acquire()
num_threads -= 1
lock.release()
return
(updatedRating, updatedVotes, updatedTop250, statusInfo) = parse_IMDb_page(IMDb)
if updatedRating == None:
defaultLog( addonLanguage(32503) % ( Title ) )
flock.acquire()
try:
statusLog( Title + ":\n" + statusInfo )
finally:
flock.release()
else:
Rating = str( float( ( "%.1f" % Rating ) ) )
Votes = '{:,}'.format( int ( Votes ) )
defaultLog( addonLanguage(32499) % ( Rating, Votes, Top250 ) )
if (dType != "movie"):
updatedTop250 = None
if Rating != updatedRating or ( Votes != updatedVotes and \
((dType == "movie" and IncludeMoviesVotes == "true" ) or ((dType == "tvshow" or dType == "episode") and IncludeTVShowsVotes == "true")) or \
( dType == "movie" and (Top250 != updatedTop250) and IncludeMoviesTop250 == "true" )):
if (dType == "movie"):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.SetMovieDetails","params":{"movieid":' + str( dbID ) + ',"rating":' + str( updatedRating ) + ',"votes":"' + str( updatedVotes ) + '","top250":' + str( updatedTop250 ) + '},"id":1}'
elif (dType == "tvshow"):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.SetTVShowDetails","params":{"tvshowid":' + str( dbID ) + ',"rating":' + str( updatedRating ) + ',"votes":"' + str( updatedVotes ) + '","uniqueid": {"imdb": "' + IMDb + '"}},"id":1}'
elif (dType == "episode"):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.SetEpisodeDetails","params":{"episodeid":' + str( dbID ) + ',"rating":' + str( updatedRating ) + ',"votes":"' + str( updatedVotes ) + '","uniqueid": {"imdb": "' + IMDb + '"}},"id":1}'
debugLog( "JSON Query: " + jSonQuery )
jSonResponse = xbmc.executeJSONRPC( jSonQuery )
jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' )
debugLog( "JSON Response: " + jSonResponse )
defaultLog( addonLanguage(32500) % ( Title, str( updatedRating ), str( updatedVotes ), str( updatedTop250 ) ) )
else:
defaultLog( addonLanguage(32502) % ( Title ) )
lock.acquire()
num_threads -= 1
lock.release()
return
class Movies:
def __init__( self ):
defaultLog( addonLanguage(32255) )
statusLog( "\n" + "--> " + addonLanguage(32255).rsplit(' ', 1)[0] )
if ShowNotifications == "true":
doNotify( addonLanguage(32255), 5000 )
xbmc.sleep(5000)
self.AllMovies = []
self.getDBMovies()
self.lock = allocate_lock()
self.flock = allocate_lock()
self.doUpdate()
defaultLog( addonLanguage(32258) )
if ShowNotifications == "true":
doNotify( addonLanguage(32258), 5000 )
xbmc.sleep(5000)
def getDBMovies( self ):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.GetMovies","params":{"properties":["imdbnumber","rating","votes","top250","playcount"]},"id":1}'
debugLog( "JSON Query: " + jSonQuery )
jSonResponse = xbmc.executeJSONRPC( jSonQuery )
jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' )
debugLog( "JSON Response: " + jSonResponse )
jSonResponse = jSon.loads( jSonResponse )
try:
if jSonResponse['result'].has_key( 'movies' ):
for item in jSonResponse['result']['movies']:
MovieID = item.get('movieid'); IMDb = item.get('imdbnumber'); Title = item.get('label');
Rating = item.get('rating'); Votes = item.get('votes'); Top250 = item.get('top250'); Watched = item.get('playcount');
self.AllMovies.append( ( MovieID, IMDb, Title, Rating, Votes, Top250, Watched ) )
except: pass
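	# Hedged illustration of the JSON-RPC reply parsed above -- only the fields
	# this addon requests are shown and every value is made up:
	#   {"id": 1, "jsonrpc": "2.0", "result": {"movies": [
	#       {"movieid": 1, "label": "Some Movie", "imdbnumber": "tt0000000",
	#        "rating": 7.4, "votes": "12345", "top250": 0, "playcount": 1}]}}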
def doUpdate( self ):
global num_threads
AllMovies = len( self.AllMovies ); Counter = 0;
if ShowProgress == "true":
Progress = xbmcgui.DialogProgressBG()
Progress.create( addonLanguage(32261) )
for Movie in self.AllMovies:
while num_threads > max_threads:
xbmc.sleep(500)
if ShowProgress == "true":
Counter = Counter + 1
Progress.update( (Counter*100)/AllMovies, addonLanguage(32261), Movie[2] )
if int(Movie[6]) > 0 and ExcludeWatched == "true":
defaultLog( addonLanguage(32504) % ( Movie[2] ) )
continue
start_new_thread(thread_parse_IMDb_page,("movie",Movie[0],Movie[1],Movie[2],Movie[3],Movie[4],Movie[5],"","","",self.lock,self.flock))
self.lock.acquire()
num_threads += 1
self.lock.release()
while num_threads > 0:
xbmc.sleep(500)
if ShowProgress == "true":
Progress.close()
class TVShows:
def __init__( self ):
defaultLog( addonLanguage(32256) )
statusLog( "\n" + "--> " + addonLanguage(32256).rsplit(' ', 1)[0] )
if ShowNotifications == "true":
doNotify( addonLanguage(32256), 5000 )
xbmc.sleep(5000)
self.AllTVShows = []
self.getDBTVShows()
self.lock = allocate_lock()
self.flock = allocate_lock()
self.doUpdateTVShows()
defaultLog( addonLanguage(32259) )
if ShowNotifications == "true":
doNotify( addonLanguage(32259), 5000 )
xbmc.sleep(5000)
def getDBTVShows( self ):
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.GetTVShows","params":{"properties":["uniqueid","rating","votes","playcount"]},"id":1}'
debugLog( "JSON Query: " + jSonQuery )
jSonResponse = xbmc.executeJSONRPC( jSonQuery )
jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' )
debugLog( "JSON Response: " + jSonResponse )
jSonResponse = jSon.loads( jSonResponse )
try:
if jSonResponse['result'].has_key( 'tvshows' ):
for item in jSonResponse['result']['tvshows']:
TVShowID = item.get('tvshowid'); unique_id = item.get('uniqueid'); imdb_id = unique_id.get('imdb'); Title = item.get('label');
Rating = item.get('rating'); Votes = item.get('votes'); tvdb_id = unique_id.get('tvdb'); Watched = item.get('playcount'); tmdb_id = unique_id.get('tmdb');
self.AllTVShows.append( ( TVShowID, imdb_id, Title, Rating, Votes, tvdb_id, Watched, tmdb_id ) )
except: pass
def doUpdateEpisodes( self, tvshowid, tvshowtitle, tvshowtmdb_id, PCounter ):
global num_threads
jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.GetEpisodes","params":{"tvshowid":' + str( tvshowid ) + ', "properties":["uniqueid","rating","votes","playcount","episode","season"]},"id":1}'
debugLog( "JSON Query: " + jSonQuery )
jSonResponse = xbmc.executeJSONRPC( jSonQuery )
jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' )
debugLog( "JSON Response: " + jSonResponse )
jSonResponse = jSon.loads( jSonResponse )
try:
if jSonResponse['result'].has_key( 'episodes' ):
for item in jSonResponse['result']['episodes']:
while num_threads > max_threads:
xbmc.sleep(500)
EpisodeID = item.get('episodeid'); unique_id = item.get
|
fintech-circle/edx-platform
|
lms/envs/bok_choy.py
|
Python
|
agpl-3.0
| 8,553
| 0.002923
|
"""
Settings for Bok Choy tests that are used when running LMS.
Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running the tests
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and also serving static files
from the same directory.
"""
import os
from path import Path as path
from tempfile import mkdtemp
from openedx.core.release import RELEASE_LINE
CONFIG_ROOT = path(__file__).abspath().dirname()
TEST_ROOT = CONFIG_ROOT.dirname().dirname() / "test_root"
########################## Prod-like settings ###################################
# These should be as close as possible to the settings we use in production.
# As in prod, we read in environment and auth variables from JSON files.
# Unlike in prod, we use the JSON files stored in this repo.
# This is a convenience for ensuring (a) that we can consistently find the files
# and (b) that the files are the same in Jenkins as in local dev.
os.environ['SERVICE_VARIANT'] = 'bok_choy'
os.environ['CONFIG_ROOT'] = CONFIG_ROOT
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
######################### Testing overrides ####################################
# Redirect to the test_root folder within the repo
GITHUB_REPO_ROOT = (TEST_ROOT / "data").abspath()
LOG_DIR = (TEST_ROOT / "log").abspath()
# Configure modulestore to use the test folder within the repo
update_module_store_settings(
MODULESTORE,
module_store_options={
'fs_root': (TEST_ROOT / "data").abspath(),
},
xml_store_options={
'data_dir': (TEST_ROOT / "data").abspath(),
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
############################ STATIC FILES #############################
# Enable debug so that static assets are served by Django
DEBUG = True
# Serve static files at /static directly from the staticfiles directory under test root
# Note: optimized files for testing are generated with settings from test_static_optimized
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATICFILES_DIRS = [
(TEST_ROOT / "staticfiles" / "lms").abspath(),
]
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = TEST_ROOT / "uploads"
# Webpack loader must use webpack output setting
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = TEST_ROOT / "staticfiles" / "lms" / "webpack-stats.json"
# Don't use compression during tests
PIPELINE_JS_COMPRESSOR = None
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
BLOCK_STRUCTURES_SETTINGS = dict(
# We have CELERY_ALWAYS_EAGER set to True, so there's no asynchronous
# code running and the celery routing is unimportant.
# It does not make sense to retry.
TASK_MAX_RETRIES=0,
    # course publish task delay is irrelevant because the task is run synchronously
COURSE_PUBLISH_TASK_DELAY=0,
    # retry delay is irrelevant because we never retry
TASK_DEFAULT_RETRY_DELAY=0,
)
###################### Grade Downloads ######################
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': os.path.join(mkdtemp(), 'edx-s3', 'grades'),
}
# Configure the LMS to use our stub XQueue implementation
XQUEUE_INTERFACE['url'] = 'http://localhost:8040'
# Configure the LMS to use our stub EdxNotes implementation
EDXNOTES_PUBLIC_API = 'http://localhost:8042/api/v1'
EDXNOTES_INTERNAL_API = 'http://localhost:8042/api/v1'
EDXNOTES_CONNECT_TIMEOUT = 10 # time in seconds
EDXNOTES_READ_TIMEOUT = 10 # time in seconds
NOTES_DISABLED_TABS = []
# Silence noisy logs
import logging
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('edxmako.shortcuts', logging.ERROR),
('dd.dogapi', logging.ERROR),
('edx.discussion', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Enable milestones app
FEATURES['MILESTONES_APP'] = True
# Enable oauth authentication, which we test.
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
# Enable pre-requisite course
FEATURES['ENABLE_PREREQUISITE_COURSES'] = True
# Enable Course Discovery
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
# Enable student notes
FEATURES['ENABLE_EDXNOTES'] = True
# Enable teams feature
FEATURES['ENABLE_TEAMS'] = True
# Enable custom content licensing
FEATURES['LICENSING'] = True
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
# Open up endpoint for faking Software Secure responses
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
FEATURES['ENABLE_ENROLLMENT_TRACK_USER_PARTITION'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE_PORT = 9080
YOUTUBE['TEST_TIMEOUT'] = 5000
YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
############################# SECURITY SETTINGS ################################
# Default to advanced security in common.py, so tests can rese
|
t here to use
# a simpler security model
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
FEATURES['ENABLE_MOBILE_REST_API'] = True  # Enable the mobile REST API
FEATURES['ENABLE_VIDEO_BUMPER'] = True # Show video bumper in LMS
FEATURES['SHOW_BUMPER_PERIODICITY'] = 1
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
|
# Enable courseware search for tests
FEATURES['ENABLE_COURSEWARE_SEARCH'] = True
# Enable dashboard search for tests
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
# Enable the discussion home panel, which includes a subscription on/off setting for discussion digest emails.
FEATURES['ENABLE_DISCUSSION_HOME_PANEL'] = True
# Enable support for OpenBadges accomplishments
FEATURES['ENABLE_OPENBADGES'] = True
# Use MockSearchEngine as the search engine for test scenarios
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Path at which to store the mock index
MOCK_SEARCH_BACKING_FILE = (
TEST_ROOT / "index_file.dat"
).abspath()
# Verify student settings
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
# this secret key should be the same as cms/envs/bok_choy.py's
SECRET_KEY = "very_secret_bok_choy_key"
# Set dummy values for profile image settings.
PROFILE_IMAGE_BACKEND = {
'class': 'storages.backends.overwrite.OverwriteStorage',
'options': {
'location': os.path.join(MEDIA_ROOT, 'profile-images/'),
'base_url': os.path.join(MEDIA_URL, 'profile-images/'),
},
}
# Make sure we test with the extended history table
FEATURES['ENABLE_CSMH_EXTENDED'] = True
INSTALLED_APPS += ('coursewarehistoryextended',)
BADGING_BACKEND = 'lms.djangoapps.badges.backends.tests.dummy_backend.DummyBackend'
# Configure the LMS to use our stub eCommerce implementation
ECOMMERCE_API_URL = 'http://localhost:8043/api/v2/'
LMS_ROOT_URL = "http://localhost:8000"
if RELEASE_LINE == "master":
# On master, acceptance tests use edX books, not the default Open edX books.
HELP_TOKENS_BOOKS = {
'learner': 'http://edx.readthedocs.io/projects/edx-guide-for-students',
'course_author': 'http://edx.readthedocs.io/projects/edx-partner-course-staff',
}
# TODO: TNL-6546: Remove this waffle and flag code.
from django.db.utils import ProgrammingError
from waffle.models import Flag
try:
flag, created = Flag.objects.get_or_create(name='unified
|
gonczor/ServerPy
|
Setup/settings.py
|
Python
|
gpl-2.0
| 142
| 0
|
import os
ADDRESS = '127.0.0.1'
PORT =
|
12345
BACKUP_DIR = 'Backup'
BASE
|
_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
|
Drakulix/knex
|
evalData/testdata_insertion.py
|
Python
|
mit
| 897
| 0.00223
|
import os
import requests
if __name__ == "__main__":
session = requests.Session()
data = {"email": "admin@knex.com", "password": "admin"}
session.post("http://localhost:5000/api/users/login", data=data)
for file in os.listdir("."):
if file.endswith(".json"):
text = open(file, "r").read()
res = session.post("http://localhost:5000/api/projects", data=text.encode('utf-8'),
headers={'Content-Type': 'application/json'})
print(file + " " + str(res))
|
elif file.endswith(".j
|
son5"):
text = open(file, "r").read()
res = session.post("http://localhost:5000/api/projects", data=text.encode('utf-8'),
headers={'Content-Type': 'application/json5'})
print(file + " " + str(res))
session.get("http://localhost:5000/api/users/logout")
|
danimajo/pineapple_pdf
|
werkzeug/wrappers.py
|
Python
|
mit
| 76,131
| 0.000276
|
# -*- coding: utf-8 -*-
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
unicode data. Incoming form data parsed by the response object will be
    decoded into a unicode object if possible and if it makes sense.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from datetime import datetime, timedelta
from werkzeug.http import HTTP_STATUS_CODES, \
parse_accept_header, parse_cache_control_header, parse_etags, \
parse_date, generate_etag, is_resource_modified, unquote_etag, \
quote_etag, parse_set_header, parse_authorization_header, \
parse_www_authenticate_header, remove_entity_headers, \
parse_options_header, dump_options_header, http_date, \
parse_if_range_header, parse_cookie, dump_cookie, \
parse_range_header, parse_content_range_header, dump_header
from werkzeug.urls import url_decode, iri_to_uri, url_join
from werkzeug.formparser import FormDataParser, default_stream_factory
from werkzeug.utils import cached_property, environ_property, \
header_property, get_content_type
from werkzeug.wsgi import get_current_url, get_host, \
ClosingIterator, get_input_stream, get_content_length
from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
ResponseCacheControl, RequestCacheControl, CallbackDict, \
ContentRange, iter_multi_items
from werkzeug._internal import _get_environ
from werkzeug._compat import to_bytes, string_types, text_type, \
integer_types, wsgi_decoding_dance, wsgi_get_bytes, \
to_unicode, to_native, BytesIO
def _run_wsgi_app(*args):
"""This function replaces itself to ensure that the test module is not
imported unless required. DO NOT USE!
"""
global _run_wsgi_app
from werkzeug.test import run_wsgi_app as _run_wsgi_app
return _run_wsgi_app(*args)
def _warn_if_string(iterable):
"""Helper for the response objects to check if the iterable returned
to the WSGI server is not a string.
"""
if isinstance(iterable, string_types):
from warnings import warn
warn(Warning('response iterable was set to a string. This appears '
'to work but means that the server will send the '
                     'data to the client char by char. This is almost '
'never intended behavior, use response.data to assign '
'strings to the response object.'), stacklevel=2)
def _assert_not_shallow(request):
if request.shallow:
raise RuntimeError('A shallow request tried to consume '
'form data. If you really want to do '
'that, set `shallow` to False.')
def _iter_encoded(iterable, charset):
for item in iterable:
if isinstance(item, text_type):
yield item.encode(charset)
else:
yield item
class BaseRequest(object):
"""Very basic request object. This does not implement advanced stuff like
entity tag parsing or cache controls. The request object is created with
the WSGI environment as first argument and will add itself to the WSGI
environment as ``'werkzeug.request'`` unless it's created with
`populate_request` set to False.
There are a couple of mixins available that add additional functionality
to the request object, there is also a class called `Request` which
subclasses `BaseRequest` and all the important mixins.
It's a good idea to create a custom subclass of the :class:`BaseRequest`
and add missing functionality either via mixins or direct implementation.
|
    Here is an example of such a subclass::
from werkzeug.wrappers import BaseRequest, ETagRequestMixin
|
class Request(BaseRequest, ETagRequestMixin):
pass
Request objects are **read only**. As of 0.5 modifications are not
allowed in any place. Unlike the lower level parsing functions the
request object will use immutable objects everywhere possible.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Per default the request object will be added to the WSGI
environment as `werkzeug.request` to support the debugging system.
If you don't want that, set `populate_request` to `False`.
    If `shallow` is `True` the environment is initialized as a shallow
object around the environ. Every operation that would modify the
environ in any way (such as consuming form data) raises an exception
unless the `shallow` attribute is explicitly set to `False`. This
is useful for middlewares where you don't want to consume the form
data by accident. A shallow request is not populated to the WSGI
environment.
.. versionchanged:: 0.5
        read-only mode was enforced by using immutable classes for all
data.
"""
#: the charset for the request, defaults to utf-8
charset = 'utf-8'
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = 'replace'
#: the maximum content length. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length = None
#: the maximum form field size. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size = None
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key. alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class = ImmutableList
#: the type to be used for dict values from the incoming WSGI environment.
#: By default an
#: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
#: (for example for :attr:`cookies`).
#:
#: .. versionadded:: 0.6
dict_storage_class = ImmutableTypeConversionDict
    #: The form data parser that should be used. Can be replaced to cus
|
sbesson/snoopycrimecop
|
test/integration/Sandbox.py
|
Python
|
gpl-2.0
| 5,211
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 University of Dundee & Open Micr
|
oscopy Environment
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PAR
|
TICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from builtins import str
from builtins import range
from builtins import object
import os
import uuid
import shutil
import logging
import tempfile
from scc.git import get_github, get_token_or_user
from subprocess import Popen
sandbox_url = "https://github.com/ome/snoopys-sandbox.git"
class SandboxTest(object):
def setup_method(self, method):
# Basic logging configuration so if a test fails we can see
# the statements at WARN or ERROR at least.
logging.basicConfig()
self.method = method.__name__
self.cwd = os.getcwd()
self.token = get_token_or_user(local=False)
self.gh = get_github(self.token, dont_ask=True)
self.user = self.gh.get_login()
self.path = tempfile.mkdtemp("", "sandbox-", ".")
self.path = os.path.abspath(self.path)
try:
with open(os.devnull, 'w') as dev_null:
p = Popen(["git", "clone", "-q", sandbox_url, self.path],
stdout=dev_null, stderr=dev_null)
assert p.wait() == 0
self.sandbox = self.gh.git_repo(self.path)
self.origin_remote = "origin"
except Exception:
try:
shutil.rmtree(self.path)
finally:
# Return to cwd regardless.
os.chdir(self.cwd)
raise
# If we succeed, then we change to this dir.
os.chdir(self.path)
def shortDescription(self):
return None
def init_submodules(self):
"""
Fetch submodules after cloning the repository
"""
try:
with open(os.devnull, 'w') as dev_null:
p = Popen(["git", "submodule", "update", "--init"],
stdout=dev_null, stderr=dev_null)
assert p.wait() == 0
except Exception:
os.chdir(self.path)
raise
def uuid(self):
"""
Return a string representing a uuid.uuid4
"""
return str(uuid.uuid4())
def fake_branch(self, head="master", commits=None):
"""
Return a local branch with a list of commits, defaults to a single
commit adding a unique file
"""
name = self.uuid()
if commits is None:
commits = [(name, "hi")]
self.sandbox.new_branch(name, head=head)
for n in range(len(commits)):
fname, txt = commits[n]
fname = os.path.join(self.path, fname)
with open(fname, 'w') as f:
f.write(txt)
self.sandbox.add(fname)
self.sandbox.commit("%d: Writing %s" % (n, name))
self.sandbox.get_status()
return name
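        # Hedged usage sketch (file names and contents are illustrative): create
        # a branch off master that adds two files, push it, then open a PR.
        #
        #   branch = self.fake_branch(head="master",
        #                             commits=[("a.txt", "aaa"), ("b.txt", "bbb")])
        #   self.push_branch(branch)
        #   pr = self.open_pr(branch, base="master")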
def add_remote(self):
"""
Add the remote of the authenticated Github user
"""
if self.user not in self.sandbox.list_remotes():
remote_url = "https://%s:x-oauth-basic@github.com/%s/%s.git" \
% (self.token, self.user, self.sandbox.origin.name)
self.sandbox.add_remote(self.user, remote_url)
def rename_origin_remote(self, new_name):
"""
Rename the remote used for the upstream repository
"""
self.sandbox.call("git", "remote", "rename", self.origin_remote,
new_name)
self.origin_remote = new_name
def push_branch(self, branch):
"""
Push a local branch to GitHub
"""
self.add_remote()
self.sandbox.push_branch(branch, remote=self.user)
def open_pr(self, branch, base, description=None):
"""
Push a local branch and open a PR against the selected base
"""
self.push_branch(branch)
if description is None:
description = ("This is a call to Sandbox.open_pr by %s" %
self.method)
new_pr = self.sandbox.origin.open_pr(
title="test %s" % branch,
description=description,
base=base,
head="%s:%s" % (self.user, branch))
return new_pr
def teardown_method(self, method):
try:
self.sandbox.cleanup()
finally:
try:
shutil.rmtree(self.path)
finally:
# Return to cwd regardless.
os.chdir(self.cwd)
|
sokanu/frame
|
images/storage.py
|
Python
|
mit
| 3,547
| 0.004229
|
from django.conf import settings
from images.models import S3Connection
from shutil import copyfileobj
import tinys3
import os
import urllib
class LocalStorage(object):
def __init__(self, filename):
self.filename = filename
def get_file_data(self):
"""
Returns the raw data for the specified file
"""
image_path = os.path.join(settings.MEDIA_ROOT, self.filename)
# TODO: do you need to close this?
data = open(image_path, 'r').read()
return data
def get_remote_path(self):
"""
Builds a relative remote path by combining the MEDIA_URL setting and the filename
"""
return '%s%s' % (settings.MEDIA_URL, self.filename)
def store(self, file_instance, content_type=None):
"""
Copy over the `file_instance` to the local storage
"""
image_path = os.path.join(settings.MEDIA_ROOT, self.filename)
with open(image_path, 'w') as fw:
copyfileobj(file_instance, fw)
@staticmethod
def create_argument_slug(arguments_dict):
"""
Converts an arguments dictionary into a
|
string that can be stored in a filename
"""
# TODO: is there a possible bug if an invalid key/value is presented?
args_list = ['%s-%s' % (key, value) for key, value in arguments_dict.items()]
return '--'.join(args_list)
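    # Rough illustration of the slug produced above (dict ordering is not
    # guaranteed, so the segments may come out in either order):
    #   LocalStorage.create_argument_slug({'width': 100, 'crop': 'center'})
    #   -> 'width-100--crop-center'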
class S3Storage(LocalStorage
|
):
def __init__(self, *args, **kwargs):
"""
Overrides the LocalStorage and initializes a shared S3 connection
"""
super(S3Storage, self).__init__(*args, **kwargs)
self.conn = tinys3.Connection(self.S3_ACCESS_KEY, self.S3_SECRET_KEY, default_bucket=self.S3_BUCKET, tls=True)
def get_remote_path(self):
"""
Returns an absolute remote path for the filename from the S3 bucket
"""
return 'https://%s.%s/%s' % (self.conn.default_bucket, self.conn.endpoint, self.filename)
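        # Illustrative value only -- the real host depends on the bucket name and
        # on tinys3's configured endpoint, e.g. something like
        # "https://my-bucket.s3.amazonaws.com/photo.jpg".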
def get_file_data(self):
"""
Returns the raw data for the specific file, downloading it from S3
"""
path = self.get_remote_path()
data = urllib.urlopen(path).read()
return data
def store(self, file_instance, content_type=None):
"""
Copy over the `file_instance` from memory to S3
"""
self.conn.upload(self.filename, file_instance, content_type=content_type)
@property
def S3_BUCKET(self):
"""
Returns the S3_BUCKET. Checks local environment variables first, database-stored settings second
"""
return os.environ.get('S3_BUCKET', self.database_settings.bucket)
@property
def S3_ACCESS_KEY(self):
"""
Returns the S3_ACCESS_KEY. Checks local environment variables first, database-stored settings second
"""
return os.environ.get('S3_ACCESS_KEY', self.database_settings.access_key)
@property
def S3_SECRET_KEY(self):
"""
Returns the S3_SECRET_KEY. Checks local environment variables first, database-stored settings second
"""
return os.environ.get('S3_SECRET_KEY', self.database_settings.secret_key)
@property
def database_settings(self):
"""
        Pulls an S3Connection instance, which contains S3 connection settings, from the database. Result is cached locally
"""
if not getattr(self, '__database_settings', None):
self.__database_settings = S3Connection.objects.get()
return self.__database_settings
|
new-player/share_projects
|
share_projects/profiles/models.py
|
Python
|
mit
| 1,201
| 0.000833
|
import os
import uuid
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
def avatar_upload(instance, filename):
ext = filename.split(".")[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return os.path.join("avatars", filename)
class Profile(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=75, blank=True)
avatar = models.ImageField(upload_to=avatar_upload, blank=True)
bio = models.TextField(blank=True)
affiliation = models.CharField(max_length=100, blank=True)
location = models.CharField(max_length=100, blank=True)
website = models.CharField(max_length=250, blank=True)
twitter_username = models.CharF
|
ield("Twitter Username", max_length=100, blank=True)
created_at = models.DateTimeField(default=timezone.now)
modif
|
ied_at = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
self.modified_at = timezone.now()
return super(Profile, self).save(*args, **kwargs)
@property
def display_name(self):
if self.name:
return self.name
else:
return self.user.username
|
greatfireball/PorthoMCL
|
porthomclPairsBestHit.py
|
Python
|
gpl-3.0
| 10,894
| 0.026161
|
#!/usr/bin/python
from datetime import datetime
from collections import namedtuple
import sys, os
import gzip
import random, math
from optparse import OptionParser
options = None
## User for Orthology
best_query_taxon_score = {}
## Used for the Paralogy
BestInterTaxonScore = {}
BetterHit = {}
# class SimilarSequenceLine:
# def __init__(self, line):
# column = line.strip().split('\t')
# self.query_id = column[0]
# (self.query_taxon, self.query_seq) = column[0].split('|')
# self.subject_id = column[1]
# (self.subject_taxon,self.subject_seq) = column[1].split('|')
# self.evalue_mant = float(column[2])
# self.evalue_exp = int(column[3])
# #self.percent_ident = column[4]
# self.percent_match = float(column[4])
class SimilarSequenceLine(namedtuple('SimilarSequenceLine', 'query_id,query_taxon,query_seq,subject_id,subject_taxon,subject_seq,evalue_mant,evalue_exp,percent_match')):
__slots__ = ()
@classmethod
def _fromLine(cls, line, new=tuple.__new__, len=len):
'Make a new SimilarSequenceLine object from a sequence or iterable'
column = line.strip().split('\t')
(query_taxon, query_seq) = column[0].split('|')
(subject_taxon, subject_seq) = column[1].split('|')
iterable = (column[0], query_taxon, query_seq, column[1], subject_taxon, subject_seq, float(
|
column[2]), int(column[3]), float(column[4]))
result = new(cls, iterable)
if len(result) != 9:
raise TypeError('Expected 9 arguments, got %d' % len(result))
return result
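	# Hedged example of the tab-separated similarity line parsed above
	# ("taxon|sequence" ids, e-value mantissa, e-value exponent, percent match;
	# all values are made up):
	#   s = SimilarSequenceLine._fromLine("taxA|g1\ttaxB|g7\t2.0\t-30\t85.5")
	#   # s.query_taxon == "taxA", s.subject_seq == "g7", s.evalue_exp == -30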
|
def readTaxonList(filename):
taxon_list = []
taxon_list_file = open(filename)
for line in taxon_list_file:
line = line.strip()
if line:
taxon_list += [line]
taxon_list_file.close()
return taxon_list
def memory_usage_resource():
import resource
rusage_denom = 1024.
if sys.platform == 'darwin':
		# ... it seems that in OSX the output is in different units ...
rusage_denom = rusage_denom * rusage_denom
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
return round(mem, 0)
def log(s):
global options
print >> sys.stderr, s
if options.logfile:
l = open(options.logfile, 'a')
l.write(s+'\n')
l.close()
def writeStoOutputFiles(s, out_bh_file):
global best_query_taxon_score, BestInterTaxonScore, options
try:
(cutoff_exp, cutoff_mant) = best_query_taxon_score[(s.query_id, s.subject_taxon)]
if (
s.query_taxon != s.subject_taxon and
s.evalue_exp < options.evalueExponentCutoff and
s.percent_match > options.percentMatchCutoff and
(s.evalue_mant < 0.01 or s.evalue_exp==cutoff_exp and s.evalue_mant==cutoff_mant)
):
out_bh_file.write('{0}\t{1}\t{2}\t{3}\n'.format(s.query_seq, s.subject_id, s.evalue_exp, s.evalue_mant))
except KeyError:
pass
if options.outInParalogTempFolder:
try:
(cutoff_exp, cutoff_mant) = BestInterTaxonScore[s.query_id]
if (s.query_taxon == s.subject_taxon and
s.query_id != s.subject_id and
s.evalue_exp <= options.evalueExponentCutoff and
s.percent_match >= options.percentMatchCutoff and
(s.evalue_mant < 0.01 or s.evalue_exp<cutoff_exp or (s.evalue_exp == cutoff_exp and s.evalue_mant<=cutoff_mant))
):
# try:
# BetterHit[(s.query_seq, s.subject_seq)] += [(s.evalue_exp, s.evalue_mant)]
# except KeyError:
BetterHit[(s.query_seq, s.subject_seq)] = (s.evalue_exp, s.evalue_mant)
except KeyError:
			# Include the ones that have no recorded best inter-taxon score (KeyError above)
if (
s.query_taxon == s.subject_taxon and
(options.keepOrthoMCLBug or s.query_id != s.subject_id) and #### THIS IS an OrthoMCL bug
s.evalue_exp <= options.evalueExponentCutoff and
s.percent_match >= options.percentMatchCutoff
):
# try:
# BetterHit[(s.query_seq, s.subject_seq)] += [(s.evalue_exp, s.evalue_mant)]
# except KeyError:
BetterHit[(s.query_seq, s.subject_seq)] = (s.evalue_exp, s.evalue_mant)
if __name__ == '__main__':
usage = "This is STEP 5.1 of PorthoMCL.\n\nusage: %prog options\n"
parser = OptionParser(usage)
parser.add_option("-t", "--taxonlist", dest="taxonlistfile", help="A single column file containing the list of taxon to work with")
parser.add_option("-x", "--index", dest="index", help="An integer number identifying which taxon to work on [1-size_of_taxon_list]", type='int')
	parser.add_option('-s', '--inSimSeq', dest='inSimSeq', help='Input folder that contains split similar sequence files (ss files)')
	parser.add_option('-b', '--outBestHitFolder', dest='outBestHitFolder', help='folder that will store Best Hit files (If not set, current folder)')
parser.add_option('-q', '--outInParalogTempFolder', dest='outInParalogTempFolder', help='folder to generate best InParalogTemp evalue scores (pt files) (required only for Paralogs)')
parser.add_option("-l", "--logfile", dest="logfile", help="log file (optional, if not supplied STDERR will be used)")
	parser.add_option('', '--evalueExponentCutoff', dest='evalueExponentCutoff', help='evalue Exponent Cutoff (a negative value, default=-5)', default=-5, type='int')
parser.add_option('', '--percentMatchCutoff', dest='percentMatchCutoff', help='percent Match Cutoff (integer value, default=50)', default=50, type='int')
parser.add_option('', '--cacheInputFile', dest='cacheInputFile', help='Cache input file or read it again. (Only use if I/O is very slow)', default=False, action="store_true")
parser.add_option('', '--keepOrthoMCLBug', dest='keepOrthoMCLBug', help='Keep the OrthoMCL bug in creating Temporary Paralogs files (pt files) where self hits are included', default=False, action="store_true")
#
(options, args) = parser.parse_args()
if len(args) != 0 or not options.taxonlistfile or not options.inSimSeq or not options.index:
parser.error("incorrect arguments.\n\t\tUse -h to get more information or refer to the MANUAL.md")
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(1, 'reading taxon list', options.index, '', memory_usage_resource(), datetime.now()))
taxon_list = readTaxonList(options.taxonlistfile)
if options.index <= 0 or options.index > len(taxon_list):
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format('ERROR', 'Error in index', options.index, '', memory_usage_resource(), datetime.now()))
exit()
taxon1s = taxon_list[options.index - 1]
if options.cacheInputFile:
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format('OPTION', 'Caching Input files', options.index, taxon1s, memory_usage_resource(), datetime.now()))
log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(2, 'Reading similar sequences (ss file)', options.index, taxon1s, memory_usage_resource(), datetime.now()))
if options.outBestHitFolder and not os.path.exists(options.outBestHitFolder):
os.makedirs(options.outBestHitFolder)
if options.outInParalogTempFolder and not os.path.exists(options.outInParalogTempFolder):
os.makedirs(options.outInParalogTempFolder)
input_file_cache = []
with open(os.path.join(options.inSimSeq, taxon1s+'.ss.tsv')) as input_file:
for line in input_file:
ss = SimilarSequenceLine._fromLine(line)
if options.cacheInputFile:
input_file_cache += [ss]
if ss.query_taxon != ss.subject_taxon:
try:
best_query_taxon_score[(ss.query_id, ss.subject_taxon)] += [(ss.evalue_mant, ss.evalue_exp)]
except:
best_query_taxon_score[(ss.query_id, ss.subject_taxon)] = [(ss.evalue_mant, ss.evalue_exp)]
for (query_id,subject_taxon) in best_query_taxon_score:
evalues = best_query_taxon_score[(query_id, subject_taxon)]
min_exp = sys.maxint #min(evalues, key = lambda t: t[1])
min_mants = []
for (evalue_mant, evalue_exp) in evalues:
if evalue_exp < min_exp:
min_exp = evalue_exp
min_mants += [evalue_mant]
if evalue_mant == 0 and evalue_exp == 0:
min_mants += [evalue_mant]
best_query_taxon_score[(query_id,subject_taxon)] = (min_exp, min(min_mants))
if options.outInParalogTempFolder:
# log('{2} | Best Hit | {0} | {1} | * | {3} MB | {4}'.format(3 , 'Creating bestQueryTaxonScore (q-t file)', options.index, memory_usage_resource(), datetime.now() ))
# with open(os.path.join(options.outQueryTaxonScoreFolder, taxon1s+'.q-t.tsv'), 'w') as out_file:
# for (query_id,subject_taxon) in sorted(
|
fonnesbeck/geopandas
|
geopandas/plotting.py
|
Python
|
bsd-3-clause
| 10,488
| 0.003337
|
from __future__ import print_function
import numpy as np
from six import next
from six.moves import xrange
def plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1):
""" Plot a single Polygon geometry """
from descartes.patch import PolygonPatch
a = np.asarray(poly.exterior)
# without Descartes, we could make a Patch of exterior
ax.add_patch(PolygonPatch(poly, facecolor=facecolor, alpha=alpha))
ax.plot(a[:, 0], a[:, 1], color=edgecolor, linewidth=linewidth)
for p in poly.interiors:
x, y = zip(*p.coords)
ax.plot(x, y, color=edgecolor, linewidth=linewidth)
def plot_multipolygon(ax, geom, facecolor='red', alpha=0.5, linewidth=1):
""" Can safely call with either Polygon or Multipolygon geometry
"""
if geom.type == 'Polygon':
plot_polygon(ax, geom, facecolor=facecolor, alpha=alpha, linewidth=linewidth)
elif geom.type == 'MultiPolygon':
for poly in geom.geoms:
plot_polygon(ax, poly, facecolor=facecolor, alpha=alpha, linewidth=linewidth)
def plot_linestring(ax, geom, color='black', linewidth=1):
""" Plot a single LineString geometry """
a = np.array(geom)
ax.plot(a[:,0], a[:,1], color=color, linewidth=linewidth)
def plot_multilinestring(ax, geom, color='red', linewidth=1):
""" Can safely call with either LineString or MultiLineString geometry
"""
if geom.type == 'LineString':
plot_linestring(ax, geom, color=color, linewidth=linewidth)
elif geom.type == 'MultiLineString':
for line in geom.geoms:
plot_linestring(ax, line, color=color, linewidth=linewidth)
def plot_point(ax, pt, marker='o', markersize=2):
""" Plot a single Point geometry """
ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, linewidth=0)
def gencolor(N, colormap='Set1'):
"""
Color generator intended to work with one of the ColorBrewer
qualitative color scales.
Suggested values of colormap are the following:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
(although any matplotlib colormap will work).
"""
from matplotlib import cm
# don't use more than 9 discrete colors
n_colors = min(N, 9)
cmap = cm.get_cmap(colormap, n_colors)
colors = cmap(range(n_colors))
for i in xrange(N):
yield colors[i % n_colors]
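# Minimal usage sketch: draw colors for three geometries from the default
# 'Set1' map (the generator cycles after at most 9 distinct colors).
#   colors = gencolor(3)
#   first, second, third = next(colors), next(colors), next(colors)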
def plot_series(s, colormap='Set1', alpha=0.5, linewidth=1.0, axes=None):
""" Plot a GeoSeries
Generate a plot of a GeoSeries geometry with matplotlib.
Parameters
----------
Series
The GeoSeries to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
colormap : str (default 'Set1')
The name of a colormap recognized by matplotlib. Any
colormap will work, but categorical colormaps are
generally recommended. Examples of useful discrete
colormaps include:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
alpha : float (default 0.5)
Alpha value for polygon fill regions. Has no effect for
lines or points.
linewidth : float (default 1.0)
Line width for geometries.
axes : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
Returns
-------
matplotlib axes instance
"""
import matplotlib.pyplot as plt
    if axes is None:
fig = plt.gcf()
fig.add_subplot(111, aspect='equal')
ax = plt.gca()
else:
ax = axes
color = gencolor(len(s), colormap=colormap)
for geom in s:
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
plot_multipolygon(ax, geom, facecolor=next(color), alpha=alpha, linewidth=linewidth)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=next(color), linewidth=linewidth)
elif geom.type == 'Point':
plot_point(ax, geom)
plt.draw()
return ax
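# Hedged usage sketch for plot_series (assumes shapely and geopandas are
# importable; the geometries are illustrative):
#   from shapely.geometry import Point, Polygon
#   from geopandas import GeoSeries
#   gs = GeoSeries([Point(0, 0), Polygon([(0, 0), (1, 0), (1, 1)])])
#   ax = plot_series(gs, colormap='Set1', alpha=0.5)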
def plot_dataframe(s, column=None, colormap=None, alpha=0.5, linewidth=1.0,
categorical=False, legend=False, axes=None, scheme=None,
vmin=None, vmax=None,
k=5):
""" Plot a GeoDataFrame
Generate a plot of a GeoDataFrame with matplotlib. If a
column is specified, the plot coloring will be based on values
in that column. Otherwise, a categorical plot of the
geometries in the `geometry` column will be generated.
Parameters
----------
GeoDataFrame
The GeoDataFrame to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
column : str (default None)
The name of the column to be plotted.
categorical : bool (default False)
If False, colormap will reflect numerical values of the
column being plotted. For non-numerical columns (or if
column=None), this will be set to True.
colormap : str (default 'Set1')
The name of a colormap recognized by matplotlib.
alpha : float (default 0.5)
Alpha value for polygon fill regions. Has no effect for
lines or points.
linewidth : float (default 1.0)
Line width for geometries.
legend : bool (default False)
Plot a legend (Experimental; currently for categorical
plots only)
axes : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
scheme : pysal.esda.mapclassify.Map_Classifier
Choropleth classification schemes
vmin : float
Minimum value for color map
vmax : float
Maximum value for color map
k : int (default 5)
Number of classes (ignored if scheme is None)
Returns
-------
matplotlib axes instance
"""
import matplotlib.pyplot as plt
from matplotlib.line
|
s import Line2D
from matplotlib.colors import Normalize
from matplotlib import cm
if column is None:
return plot_series(s.geometry, colormap=colormap, alpha=alpha,
linewidth=linewidth, axes=axes)
else:
if s[column].dtype is np.dtype('O'):
categorical = True
if categorical:
if colormap is None:
colormap
|
= 'Set1'
categories = list(set(s[column].values))
categories.sort()
valuemap = dict([(k, v) for (v, k) in enumerate(categories)])
values = [valuemap[k] for k in s[column]]
else:
values = s[column]
if scheme is not None:
values = __pysal_choro(values, scheme, k=k)
cmap = norm_cmap(values, colormap, Normalize, cm, mn=vmin, mx=vmax)
        if axes is None:
fig = plt.gcf()
fig.add_subplot(111, aspect='equal')
ax = plt.gca()
else:
ax = axes
for geom, value in zip(s.geometry, values):
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
plot_multipolygon(ax, geom, facecolor=cmap.to_rgba(value),
alpha=alpha, linewidth=linewidth)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=cmap.to_rgba(value), linewidth=linewidth)
# TODO: color point geometries
elif geom.type == 'Point':
plot_point(ax, geom)
if legend:
if categorical:
patches = []
for value, cat in enumerate(categories):
patches.append(Line2D([0], [0], linestyle="none",
marker="o", alpha=alpha,
markersize=10, markerfacecolor=cmap.to_rgba(value)))
|
NetApp/manila
|
manila/tests/share/drivers/test_ganesha.py
|
Python
|
apache-2.0
| 13,337
| 0
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import os
import ddt
import mock
from oslo_config import cfg
from manila import exception
from manila.share import configuration as config
from manila.share.drivers import ganesha
from manila import test
from manila.tests import fake_share
CONF = cfg.CONF
fake_basepath = '/fakepath'
fake_export_name = 'fakename--fakeaccid'
fake_output_template = {
'EXPORT': {
'Export_Id': 101,
'Path': '/fakepath/fakename',
'Pseudo': '/fakepath/fakename--fakeaccid',
'Tag': 'fakeaccid',
'CLIENT': {
'Clients': '10.0.0.1'
},
'FSAL': 'fakefsal'
}
}
@ddt.ddt
class GaneshaNASHelperTestCase(test.TestCase):
"""Tests GaneshaNASHElper."""
def setUp(self):
super(GaneshaNASHelperTestCase, self).setUp()
CONF.set_default('ganesha_config_path', '/fakedir0/fakeconfig')
CONF.set_default('ganesha_db_path', '/fakedir1/fake.db')
CONF.set_default('ganesha_export_dir', '/fakedir0/export.d')
CONF.set_default('ganesha_export_template_dir',
'/fakedir2/faketempl.d')
CONF.set_default('ganesha_service_name', 'ganesha.fakeservice')
self._execute = mock.Mock(return_value=('', ''))
self.fake_conf = config.Configuration(None)
self.fake_conf_dir_path = '/fakedir0/exports.d'
self._helper = ganesha.GaneshaNASHelper(
self._execute, self.fake_conf, tag='faketag')
self._helper.ganesha = mock.Mock()
self._helper.export_template = {'key': 'value'}
self.share = fake_share.fake_share()
self.access = fake_share.fake_access()
def test_load_conf_dir(self):
fake_template1 = {'key': 'value1'}
fake_template2 = {'key': 'value2'}
fake_ls_dir = ['fakefile0.conf', 'fakefile1.json', 'fakefile2.txt']
mock_ganesha_utils_patch = mock.Mock()
def fake_patch_run(tmpl1, tmpl2):
mock_ganesha_utils_patch(
copy.deepcopy(tmpl1), copy.deepcopy(tmpl2))
tmpl1.update(tmpl2)
self.mock_object(ganesha.os, 'listdir',
mock.Mock(return_value=fake_ls_dir))
self.mock_object(ganesha.LOG, 'info')
self.mock_object(ganesha.ganesha_manager, 'parseconf',
mock.Mock(side_effect=[fake_template1,
fake_template2]))
self.mock_object(ganesha.ganesha_utils, 'patch',
mock.Mock(side_effect=fake_patch_run))
with mock.patch('six.moves.builtins.open',
mock.mock_open()) as mockopen:
mockopen().read.side_effect = ['fakeconf0', 'fakeconf1']
ret = self._helper._load_conf_dir(self.fake_conf_dir_path)
ganesha.os.listdir.assert_called_once_with(
self.fake_conf_dir_path)
ganesha.LOG.info.assert_called_once_with(
mock.ANY, self.fake_conf_dir_path)
mockopen.assert_has_calls([
mock.call('/fakedir0/exports.d/fakefile0.conf'),
mock.call('/fakedir0/exports.d/fakefile1.json')],
any_order=True)
ganesha.ganesha_manager.parseconf.assert_has_calls([
mock.call('fakeconf0'), mock.call('fakeconf1')])
mock_ganesha_utils_patch.assert_has_calls([
mock.call({}, fake_template1),
mock.call(fake_template1, fake_template2)])
self.assertEqual(fake_template2, ret)
def test_load_conf_dir_no_conf_dir_mu
|
st_exist_false(self):
self.mock_object(
ganesha.os, 'listdir',
mock.Mock(side_effect=OSError(errno.ENOENT,
os.strerror(errno.ENOENT))))
self.mock_object(ganesha.LOG, 'info')
self.mock_object(ganesha.ganesha_manager, 'parseconf')
|
self.mock_object(ganesha.ganesha_utils, 'patch')
with mock.patch('six.moves.builtins.open',
mock.mock_open(read_data='fakeconf')) as mockopen:
ret = self._helper._load_conf_dir(self.fake_conf_dir_path,
must_exist=False)
ganesha.os.listdir.assert_called_once_with(
self.fake_conf_dir_path)
ganesha.LOG.info.assert_called_once_with(
mock.ANY, self.fake_conf_dir_path)
self.assertFalse(mockopen.called)
self.assertFalse(ganesha.ganesha_manager.parseconf.called)
self.assertFalse(ganesha.ganesha_utils.patch.called)
self.assertEqual({}, ret)
def test_load_conf_dir_error_no_conf_dir_must_exist_true(self):
self.mock_object(
ganesha.os, 'listdir',
mock.Mock(side_effect=OSError(errno.ENOENT,
os.strerror(errno.ENOENT))))
self.assertRaises(OSError, self._helper._load_conf_dir,
self.fake_conf_dir_path)
ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path)
def test_load_conf_dir_error_conf_dir_present_must_exist_false(self):
self.mock_object(
ganesha.os, 'listdir',
mock.Mock(side_effect=OSError(errno.EACCES,
os.strerror(errno.EACCES))))
self.assertRaises(OSError, self._helper._load_conf_dir,
self.fake_conf_dir_path, must_exist=False)
ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path)
def test_load_conf_dir_error(self):
self.mock_object(
ganesha.os, 'listdir',
mock.Mock(side_effect=RuntimeError('fake error')))
self.assertRaises(RuntimeError, self._helper._load_conf_dir,
self.fake_conf_dir_path)
ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path)
def test_init_helper(self):
mock_template = mock.Mock()
mock_ganesha_manager = mock.Mock()
self.mock_object(ganesha.ganesha_manager, 'GaneshaManager',
mock.Mock(return_value=mock_ganesha_manager))
self.mock_object(self._helper, '_load_conf_dir',
mock.Mock(return_value=mock_template))
self.mock_object(self._helper, '_default_config_hook')
ret = self._helper.init_helper()
ganesha.ganesha_manager.GaneshaManager.assert_called_once_with(
self._execute, 'faketag',
ganesha_config_path='/fakedir0/fakeconfig',
ganesha_export_dir='/fakedir0/export.d',
ganesha_db_path='/fakedir1/fake.db',
ganesha_service_name='ganesha.fakeservice')
self._helper._load_conf_dir.assert_called_once_with(
'/fakedir2/faketempl.d', must_exist=False)
self.assertFalse(self._helper._default_config_hook.called)
self.assertEqual(mock_ganesha_manager, self._helper.ganesha)
self.assertEqual(mock_template, self._helper.export_template)
self.assertIsNone(ret)
def test_init_helper_conf_dir_empty(self):
mock_template = mock.Mock()
mock_ganesha_manager = mock.Mock()
self.mock_object(ganesha.ganesha_manager, 'GaneshaManager',
mock.Mock(return_value=mock_ganesha_manager))
self.mock_object(self._helper, '_load_conf_dir',
mock.Mock(return_value={}))
self.mock_object(self._helper, '_default_config_hook',
mock.Mock
|
adamnovak/hgvm-builder
|
setup.py
|
Python
|
apache-2.0
| 2,450
| 0.003673
|
# setup.py: based off setup.py for toil-vg, modified to install this pipeline
# instead.
import sys
import os
# Get the local version.py and not any other version module
execfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "version.py"))
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
kwargs = dict(
name='hgvm-builder',
version=version,
description="Human Genome Variation Map construction kit",
author='Adam Novak',
author_email='anovak@soe.ucsc.edu',
url="https://github.com/BD2KGenomics/hgvm-builder",
install_requires=[package + ver for package, ver in required_versions.iteritems()],
dependency_links = dependency_links,
tests_require=['pytest==2.8.3'],
package_dir={'': 'src'},
packages=find_packages('src'),
entry_points={
'console_scripts': [
'build-hgvm = hgvmbuilder.build:entrypoint',
'copy-hgvm = hgvmbuilder.parallelcopy:entrypoint',
'import-sam-hgvm = hgvmbuilder.importsam:entrypoint'
]})
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
# Sanitize command line arguments to avoid confusing Toil code
# attempting to parse them
sys.arg
|
v[1:] = []
errno = pytest.main(self.pytest_args)
sys.exit(errno)
kwargs['cmdclass'] = {'test': PyTest}
setup(**kwargs)
# When we run setup, tell the user they need a good Toil with cloud support
print("""
Thank you for installing the hgvm-builder pipeline!
If you want to run this Toil-based pipeline on a clu
|
ster in a cloud, please
install Toil with the appropriate extras. For example, to install AWS/EC2
support, run
pip install toil[aws,mesos]{}
on every EC2 instance. For Microsoft Azure, deploy your cluster using the Toil
template at
https://github.com/BD2KGenomics/toil/tree/master/contrib/azure
For more information, please refer to Toil's documentation at
http://toil.readthedocs.io/en/latest/installation.html
To start building HGVMs, run
build-hgvm --help 2>&1 | less
""".format(required_versions['toil']))
|
pblottiere/QGIS
|
tests/src/python/test_qgsmultiedittoolbutton.py
|
Python
|
gpl-2.0
| 2,332
| 0
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMultiEditToolButton.
.. note:: This program is free software;
|
you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Found
|
ation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '16/03/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA switch sip api
from qgis.gui import QgsMultiEditToolButton
from qgis.testing import start_app, unittest
start_app()
class TestQgsMultiEditToolButton(unittest.TestCase):
def test_state_logic(self):
"""
Test that the logic involving button states is correct
"""
w = QgsMultiEditToolButton()
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
# set is changed should update state to changed
w.setIsChanged(True)
self.assertEqual(w.state(), QgsMultiEditToolButton.Changed)
w.setIsChanged(False)
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
# resetting changes should fall back to default state
w.setIsChanged(True)
w.resetChanges()
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
# setting changes committed should result in default state
w.setIsChanged(True)
w.changesCommitted()
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
# Test with mixed values
w.setIsMixed(True)
self.assertEqual(w.state(), QgsMultiEditToolButton.MixedValues)
# changed state takes priority over mixed state
w.setIsChanged(True)
self.assertEqual(w.state(), QgsMultiEditToolButton.Changed)
w.setIsChanged(False)
# should reset to mixed state
self.assertEqual(w.state(), QgsMultiEditToolButton.MixedValues)
# resetting changes should fall back to mixed state
w.setIsChanged(True)
w.resetChanges()
self.assertEqual(w.state(), QgsMultiEditToolButton.MixedValues)
# setting changes committed should result in default state
w.setIsChanged(True)
w.changesCommitted()
self.assertEqual(w.state(), QgsMultiEditToolButton.Default)
if __name__ == '__main__':
unittest.main()
|
DarKnight24/owtf
|
framework/db/mapping_manager.py
|
Python
|
bsd-3-clause
| 3,644
| 0.001647
|
import os
import json
import logging
import ConfigParser
from framework.db import models
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import MappingDBInterface
from framework.lib.exceptions import InvalidMappingReference
class MappingDB(BaseComponent, MappingDBInterface):
COMPONENT_NAME = "mapping_db"
def __init__(self):
"""
        The mapping_types attribute contains the unique mapping types in memory
"""
self.register_in_service_locator()
self.config = self.get_component("config")
self.db = self.get_component("db")
self.mapping_types = []
self.error_handler = self.get_component("error_handler")
def init(self):
self.LoadMappingDBFromFile(self.config.get_profile_path("MAPPING_PROFILE"))
def LoadMappingDBFromFile(self, file_path):
"""
This needs to be a list instead of a dictionary to preserve order in
python < 2.7
"""
file_path = self.config.select_user_or_default_config_path(file_path)
logging.info("Loading Mapping from: %s..", file_path)
config_parser = ConfigParser.RawConfigParser()
# Otherwise all the keys are converted to lowercase xD
config_parser.optionxform = str
if not os.path.isfile(file_path): # check if the mapping file exists
self.error_handler.FrameworkAbort("Mapping file not found at: %s" % file_path)
config_parser.read(file_path)
for owtf_code in config_parser.sections():
mappings = {}
category = None
for mapping_type, data in config_parser.items(owtf_code):
if mapping_type != 'category':
if mapping_type not in self.mapping_types:
self.mapping_types.append(mapping_type)
mapped_code, mapped_name = data.split('_____')
mappings[mapping_type] = [mapped_code, mapped_name]
else:
category = data
self.db.session.merge(models.Mapping(owtf_code=owtf_code, mappings=json.dumps(mappings), category=category))
self.db.session.commit()
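        # Hedged sketch of the mapping-profile format consumed above: each
        # section name is an OWTF code, "category" is kept as-is, and every
        # other option maps a type to "<code>_____<name>" (identifiers below
        # are made up):
        #
        #   [OWTF-XY-001]
        #   category = WEB
        #   OWASP_V4 = OTG-INFO-001_____Search Engine Discovery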
def DeriveMappingDict(self, obj):
if obj:
pdict = dic
|
t(obj.__dict__)
|
pdict.pop("_sa_instance_state", None)
# If output is present, json decode it
if pdict.get("mappings", None):
pdict["mappings"] = json.loads(pdict["mappings"])
return pdict
def DeriveMappingDicts(self, obj_list):
dict_list = []
for obj in obj_list:
dict_list.append(self.DeriveMappingDict(obj))
return dict_list
def GetMappingTypes(self):
"""
In memory data saved when loading db
"""
return self.mapping_types
def GetMappings(self, mapping_type):
if mapping_type in self.mapping_types:
mapping_objs = self.db.session.query(models.Mapping).all()
mappings = {}
for mapping_dict in self.DeriveMappingDicts(mapping_objs):
if mapping_dict["mappings"].get(mapping_type, None):
mappings[mapping_dict["owtf_code"]] = mapping_dict["mappings"][mapping_type]
return mappings
else:
raise InvalidMappingReference("InvalidMappingReference %s requested" % mapping_type)
def GetCategory(self, plugin_code):
category = self.db.session.query(models.Mapping.category).get(plugin_code)
# Getting the corresponding category back from db
return category
|
wary/zeppelin
|
python/src/main/resources/python/zeppelin_python.py
|
Python
|
apache-2.0
| 9,381
| 0.012685
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError, Py4JNetworkError
import warnings
import ast
import traceback
import warnings
import signal
import base64
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# for back compatibility
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(object):
""" A context impl that uses Py4j to communicate to JVM
"""
def __init__(self, z):
self.z = z
self.paramOption = gateway.jvm.org.apache.zeppelin.display.ui.OptionInput.ParamOption
self.javaList = gateway.jvm.java.util.ArrayList
self.max_result = 1000
self._displayhook = lambda *args: None
self._setup_matplotlib()
def getInterpreterContext(self):
return self.z.getCurrentInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.getGui().input(name, defaultValue)
def select(self, name, options, defaultValue=""):
javaOptions = gateway.new_array(self.paramOption, len(options))
i = 0
for tuple in options:
javaOptions[i] = self.paramOption(tuple[0], tuple[1])
i += 1
return self.z.getGui().select(name, defaultValue, javaOptions)
def checkbox(self, name, options, defaultChecked=[]):
javaOptions = gateway.new_array(self.paramOption, len(options))
i = 0
for tuple in options:
javaOptions[i] = self.paramOption(tuple[0], tuple[1])
i += 1
javaDefaultCheck = self.javaList()
for check in defaultChecked:
javaDefaultCheck.append(check)
return self.z.getGui().checkbox(name, javaDefaultCheck, javaOptions)
def show(self, p, **kwargs):
if hasattr(p, '__name__') and p.__name__ == "matplotlib.pyplot":
self.show_matplotlib(p, **kwargs)
elif type(p).__name__ == "DataFrame": # does not play well with sub-classes
# `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
# and so a dependency on pandas
self.show_dataframe(p, **kwargs)
elif hasattr(p, '__call__'):
p() #error reporting
def show_dataframe(self, df, show_index=False, **kwargs):
"""Pretty prints DF using Table Display System
"""
limit = len(df) > self.max_result
header_buf = StringIO("")
if show_index:
idx_name = str(df.index.name) if df.index.name is not None else ""
header_buf.write(idx_name + "\t")
header_buf.write(str(df.columns[0]))
for col in df.columns[1:]:
header_buf.write("\t")
header_buf.write(str(col))
header_buf.write("\n")
body_buf = StringIO("")
rows = df.head(self.max_result).values if limit else df.values
index = df.index.values
for idx, row in zip(index, rows):
if show_index:
body_buf.write("%html <strong>{}</strong>".format(idx))
body_buf.write("\t")
body_buf.write(str(row[0]))
for cell in row[1:]:
body_buf.write("\t")
body_buf.write(str(cell))
body_buf.write("\n")
body_buf.seek(0); header_buf.seek(0)
#TODO(bzz): fix it, so it shows red notice, as in Spark
print("%table " + header_buf.read() + body_buf.read()) # +
# ("\n<font color=red>Results are limited by {}.</font>" \
# .format(self.max_result) if limit else "")
#)
body_buf.close(); header_buf.close()
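    # Hedged illustration (added comment): for a small two-column DataFrame the
    # string printed above would look roughly like
    #   %table col_a\tcol_b\n1\tx\n2\ty\n
    # i.e. Zeppelin's table display system expects tab-separated cells and
    # newline-separated rows after the "%table " prefix.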
def show_matplotlib(self, p, fmt="png", width="auto", height="auto",
**kwargs):
"""Matplotlib show function
"""
        if fmt == "png":
            img = BytesIO()
            p.savefig(img, format=fmt)
            img_str = b"data:image/png;base64,"
            img_str += base64.b64encode(img.getvalue().strip())
            img_tag = "<img src={img} style='width={width};height:{height}'>"
            # Decoding is necessary for Python 3 compatibility
img_str = img_str.decode("ascii")
img_str = img_tag.format(img=img_str, width=width, height=height)
elif fmt == "svg":
img = StringIO()
p.savefig(img, format=fmt)
img_str = img.getvalue()
else:
raise ValueError("fmt must be 'png' or 'svg'")
html = "%html <div style='width:{width};height:{height}'>{img}<div>"
print(html.format(width=width, height=height, img=img_str))
img.close()
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72,
fontsize=10, interactive=True, format='png')
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def handler_stop_signals(sig, frame):
sys.exit("Got signal : " + str(sig))
signal.signal(signal.SIGINT, handler_stop_signals)
host = "127.0.0.1"
if len(sys.argv) >= 3:
host = sys.argv[2]
_zcUserQueryNameSpace = {}
client = GatewayClient(address=host, port=int(sys.argv[1]))
#gateway = JavaGateway(client, auto_convert = True)
gateway = JavaGateway(client)
intp = gateway.entry_point
intp.onPythonScriptInitialized(os.getpid())
java_import(gateway.jvm, "org.apache.zeppelin.display.Input")
z = __zeppelin__ = PyZeppelinContext(intp)
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
_zcUserQueryNameSpace["z"] = z
output = Logger()
sys.stdout = output
#sys.stderr = output
while True :
req = intp.getStatements()
if req == None:
break
try:
stmts = req.statements().split("\n")
final_code = []
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
for s in stmts:
if s == None:
continue
# skip comment
s_stripped = s.strip()
if len(s_stripped) == 0 or s_stripped.startswith("#"):
continue
final_code.append(s)
if final_code:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
code = compile('\n'.join(final_code), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nho
|
c3nav/c3nav
|
src/c3nav/editor/models/__init__.py
|
Python
|
apache-2.0
| 200
| 0
|
from c3nav.editor.models.changedobject import ChangedObject # noqa
from c3nav.editor.models.changeset import ChangeSet # noqa
from c3nav.editor.models.changesetupdate import ChangeSetUpdate # noqa
|
hzlf/openbroadcast.ch
|
app/remoteauth/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 3,171
| 0.00473
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('remote_id', models.IntegerField(null=True, blank=True)),
('remote_uri', models.CharField(max_length=256, null=True, blank=True)),
('profile_uri', models.CharField(max_length=256, null=True, blank=True)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
CSE-SOE-CUSAT/NOSLab
|
CSA/unsorted/username/client.py
|
Python
|
mit
| 1,125
| 0.013333
|
import socket
from heapq import heappush, heappop, heapify
from collections import defaultdict
##defbig
def encode(symb2freq):
    """Huffman encode the given dict mapping symbols to weights"""
heap = [[wt, [sym, ""]] for sym, wt in symb2freq.items()]
heapify(heap)
while len(heap) > 1:
lo = heappop(heap)
hi = heappop(heap)
for pair in lo[1:]:
pair[1] = '1' + pair[1]
for pair in hi[1:]:
pair[1] = '0' + pair[1]
heappush(heap, [lo[0] + hi[0]] + lo[1:] + hi[1:])
return sorted(heappop(heap)[1:], key=lambda p: (len(p[-1]), p))
##defend
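## Illustrative note (added, not in the original script): for the sample text
## 'mississippi river' used below, encode() returns [symbol, bitstring] pairs
## sorted by code length, e.g. roughly [['i', '00'], ['s', '01'], ['r', '110'], ...];
## the exact bit patterns depend on how ties are broken in the heap.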
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 1743
s.connect((host, port))
s.send("#BEGIN")
s.send("!")
f = open('a.txt', 'r')
#for line in f.readlines():
txt = 'mississippi river'
symb2freq = defaultdict(int)
for ch in txt:
symb2freq[ch] += 1
huff = encode(symb2freq)
for p in huff:
s.send("{0},{1},{2}".format(p[0], symb2freq[p[0]], p[1]))
s.send("#END")
s.close()
##############3/////////////
|
hail-is/hail
|
auth/auth/auth.py
|
Python
|
mit
| 25,475
| 0.002198
|
import asyncio
import json
import logging
import os
from typing import List, Optional
import aiohttp
import aiohttp_session
import uvloop
from aiohttp import web
from prometheus_async.aio.web import server_stats # type: ignore
from gear import (
Database,
Transaction,
check_csrf_token,
create_session,
maybe_parse_bearer_header,
monitor_endpoints_middleware,
rest_authenticated_developers_only,
rest_authenticated_users_only,
setup_aiohttp_session,
transaction,
web_authenticated_developers_only,
web_authenticated_users_only,
web_maybe_authenticated_user,
)
from gear.cloud_config import get_global_config
from hailtop import httpx
from hailtop.config import get_deploy_config
from hailtop.hail_logging import AccessLogger
from hailtop.tls import internal_server_ssl_context
from hailtop.utils import secret_alnum_string
from web_common import render_template, set_message, setup_aiohttp_jinja2, setup_common_static_routes
from .exceptions import (
AuthUserError,
DuplicateLoginID,
DuplicateUsername,
EmptyLoginID,
InvalidType,
InvalidUsername,
MultipleExistingUsers,
MultipleUserTypes,
PreviouslyDeletedUser,
UnknownUser,
)
from .flow import get_flow_client
log = logging.getLogger('auth')
uvloop.install()
CLOUD = get_global_config()['cloud']
ORGANIZATION_DOMAIN = os.environ['HAIL_ORGANIZATION_DOMAIN']
deploy_config = get_deploy_config()
routes = web.RouteTableDef()
async def user_from_login_id(db, login_id):
users = [x async for x in db.select_and_fetchall("SELECT * FROM users WHERE login_id = %s;", login_id)]
if len(users) == 1:
return users[0]
assert len(users) == 0, users
return None
async def users_with_username_or_login_id(tx: Transaction, username: str, login_id: Optional[str]) -> List[dict]:
where_conditions = ['username = %s']
where_args = [username]
if login_id is not None:
where_conditions.append('login_id = %s')
where_args.append(login_id)
existing_users = [
x
async for x in tx.execute_and_fetchall(
f"SELECT * FROM users WHERE {' OR '.join(where_conditions)} LOCK IN SHARE MODE;", where_args
)
]
return existing_users
async def check_valid_new_user(tx: Transaction, username, login_id, is_developer, is_service_account) -> Optional[dict]:
if not isinstance(username, str):
raise InvalidType('username', username, 'str')
if login_id is not None and not isinstance(login_id, str):
raise InvalidType('login_id', login_id, 'str')
if not isinstance(is_developer, bool):
raise InvalidType('is_developer', is_developer, 'bool')
if not isinstance(is_service_account, bool):
raise InvalidType('is_service_account', is_service_account, 'bool')
if is_developer and is_service_account:
raise MultipleUserTypes(username)
if not is_service_account and not login_id:
raise EmptyLoginID(username)
    if not username or not all(c for c in username if c.isalnum()):
raise InvalidUsername(username)
existing_users = await users_with_username_or_login_id(tx, username, login_id)
if len(existing_users) > 1:
raise MultipleExistingUsers(username, login_id)
if len(existing_users) == 1:
        existing_user = existing_users[0]
expected_username = existing_user['username']
expected_login_id = existing_user['login_id']
if username != expected_username:
raise DuplicateLoginID(expected_username, login_id)
if login_id != expected_login_id:
raise DuplicateUsername(username, expected_login_id)
if existing_user['state'] in ('deleting', 'deleted'):
raise PreviouslyDeletedUser(username)
return existing_user
return None
async def insert_new_user(
db: Database, username: str, login_id: Optional[str], is_developer: bool, is_service_account: bool
) -> bool:
@transaction(db)
async def _insert(tx):
existing_user = await check_valid_new_user(tx, username, login_id, is_developer, is_service_account)
if existing_user is not None:
return False
await tx.execute_insertone(
'''
INSERT INTO users (state, username, login_id, is_developer, is_service_account)
VALUES (%s, %s, %s, %s, %s);
''',
('creating', username, login_id, is_developer, is_service_account),
)
await _insert() # pylint: disable=no-value-for-parameter
return True
def cleanup_session(session):
def _delete(key):
if key in session:
del session[key]
_delete('pending')
_delete('login_id')
_delete('next')
_delete('caller')
_delete('session_id')
_delete('flow')
@routes.get('/healthcheck')
async def get_healthcheck(request): # pylint: disable=W0613
return web.Response()
@routes.get('')
@routes.get('/')
async def get_index(request): # pylint: disable=unused-argument
return aiohttp.web.HTTPFound(deploy_config.external_url('auth', '/login'))
@routes.get('/creating')
@web_maybe_authenticated_user
async def creating_account(request, userdata):
db = request.app['db']
session = await aiohttp_session.get_session(request)
if 'pending' in session:
login_id = session['login_id']
user = await user_from_login_id(db, login_id)
nb_url = deploy_config.external_url('notebook', '')
next_page = session.pop('next', nb_url)
cleanup_session(session)
if user is None:
set_message(session, f'Account does not exist for login id {login_id}.', 'error')
return aiohttp.web.HTTPFound(nb_url)
page_context = {'username': user['username'], 'state': user['state'], 'login_id': user['login_id']}
if user['state'] == 'deleting' or user['state'] == 'deleted':
return await render_template('auth', request, userdata, 'account-error.html', page_context)
if user['state'] == 'active':
session_id = await create_session(db, user['id'])
session['session_id'] = session_id
set_message(session, f'Account has been created for {user["username"]}.', 'info')
return aiohttp.web.HTTPFound(next_page)
assert user['state'] == 'creating'
session['pending'] = True
session['login_id'] = login_id
session['next'] = next_page
return await render_template('auth', request, userdata, 'account-creating.html', page_context)
return aiohttp.web.HTTPUnauthorized()
@routes.get('/creating/wait')
async def creating_account_wait(request):
session = await aiohttp_session.get_session(request)
if 'pending' not in session:
raise web.HTTPUnauthorized()
return await _wait_websocket(request, session['login_id'])
async def _wait_websocket(request, login_id):
app = request.app
db = app['db']
user = await user_from_login_id(db, login_id)
if not user:
return web.HTTPNotFound()
ws = web.WebSocketResponse()
await ws.prepare(request)
try:
count = 0
while count < 10:
try:
user = await user_from_login_id(db, login_id)
assert user
if user['state'] != 'creating':
log.info(f"user {user['username']} is no longer creating")
break
except asyncio.CancelledError:
raise
except Exception: # pylint: disable=broad-except
log.exception(f"/creating/wait: error while updating status for user {user['username']}")
await asyncio.sleep(1)
count += 1
if count >= 10:
log.info(f"user {user['username']} is still in state creating")
ready = user['state'] == 'active'
await ws.send_str(str(int(ready)))
return ws
finally:
await ws.close()
@routes.get('/signup')
async def signup(request):
next_page = request.query.get('next', deploy_config.external_url('notebook', ''))
flow_data = request.app['flow_client'].initiate_flow(deploy_config.external_url('auth', '/oauth2call
|
postvakje/sympy
|
sympy/physics/vector/frame.py
|
Python
|
bsd-3-clause
| 31,125
| 0.001157
|
from sympy import (diff, trigsimp, expand, sin, cos, solve, Symbol, sympify,
eye, symbols, Dummy, ImmutableMatrix as Matrix, MatrixBase)
from sympy.core.compatibility import string_types, range
from sympy.physics.vector.vector import Vector, _check_vector
__all__ = ['CoordinateSym', 'ReferenceFrame']
class CoordinateSym(Symbol):
"""
A coordinate symbol/base scalar associated wrt a Reference Frame.
Ideally, users should not instantiate this class. Instances of
this class must only be accessed through the corresponding frame
as 'frame[index]'.
CoordinateSyms having the same frame and index parameters are equal
(even though they may be instantiated separately).
Parameters
==========
name : string
The display name of the CoordinateSym
frame : ReferenceFrame
The reference frame this base scalar belongs to
index : 0, 1 or 2
The index of the dimension denoted by this coordinate variable
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, CoordinateSym
>>> A = ReferenceFrame('A')
>>> A[1]
A_y
>>> type(A[0])
<class 'sympy.physics.vector.frame.CoordinateSym'>
>>> a_y = CoordinateSym('a_y', A, 1)
>>> a_y == A[1]
True
"""
def __new__(cls, name, frame, index):
# We can't use the cached Symbol.__new__ because this class depends on
# frame and index, which are not passed to Symbol.__xnew__.
assumptions = {}
super(CoordinateSym, cls)._sanitize(assumptions, cls)
obj = super(CoordinateSym, cls).__xnew__(cls, name, **assumptions)
_check_frame(frame)
if index not in range(0, 3):
raise ValueError("Invalid index specified")
obj._id = (frame, index)
return obj
@property
def frame(self):
return self._id[0]
def __eq__(self, other):
#Check if the other object is a CoordinateSym of the same frame
#and same index
if isinstance(other, CoordinateSym):
if other._id == self._id:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return tuple((self._id[0].__hash__(), self._id[1])).__hash__()
class ReferenceFrame(object):
"""A reference frame in classical mechanics.
ReferenceFrame is a class used to represent a reference frame in classical
mechanics. It has a standard basis of three unit vectors in the frame's
x, y, and z directions.
It also can have a rotation relative to a parent frame; this rotation is
defined by a direction cosine matrix relating this frame's basis vectors to
the parent frame's basis vectors. It can also have an angular velocity
vector, defined in another frame.
"""
def __init__(self, name, indices=None, latexs=None, variables=None):
"""ReferenceFrame initialization method.
A ReferenceFrame has a set of orthonormal basis vectors, along with
orientations relative to other ReferenceFrames and angular velocities
relative to other ReferenceFrames.
Parameters
==========
indices : list (of strings)
If custom indices are desired for console, pretty, and LaTeX
printing, supply three as a list. The basis vectors can then be
accessed with the get_item method.
latexs : list (of strings)
If custom names are desired for LaTeX printing of each basis
vector, supply the names here in a list.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, vlatex
>>> N = ReferenceFrame('N')
>>> N.x
N.x
>>> O = ReferenceFrame('O', indices=('1', '2', '3'))
>>> O.x
O['1']
>>> O['1']
O['1']
>>> P = ReferenceFrame('P', latexs=('A1', 'A2', 'A3'))
>>> vlatex(P.x)
'A1'
"""
if not isinstance(name, string_types):
raise TypeError('Need to supply a valid name')
# The if statements below are for custom printing of basis-vectors for
# each frame.
# First case, when custom indices are supplied
if indices is not None:
if not isinstance(indices, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(indices) != 3:
raise ValueError('Supply 3 indices')
for i in indices:
if not isinstance(i, string_types):
raise TypeError('Indices must be strings')
self.str_vecs = [(name + '[\'' + indices[0] + '\']'),
(name + '[\'' + indices[1] + '\']'),
(name + '[\'' + indices[2] + '\']')]
self.pretty_vecs = [(name.lower() + u"_" + indices[0]),
(name.lower() + u"_" + indices[1]),
(name.lower() + u"_" + indices[2])]
self.latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[0])), (r"\mathbf{\hat{%s}_{%s}}" %
(name.lower(), indices[1])),
(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[2]))]
self.indices = indices
# Second case, when no custom indices are supplied
else:
self.str_vecs = [(name + '.x'), (name + '.y'), (name + '.z')]
self.pretty_vecs = [name.lower() + u"_x",
name.lower() + u"_y",
name.lower() + u"_z"]
self.latex_vecs = [(r"\mathbf{\hat{%s}_x}" % name.lower()),
(r"\mathbf{\hat{%s}_y}" % name.lower()),
(r"\mathbf{\hat{%s}_z}" % name.lower())]
self.indices = ['x', 'y', 'z']
# Different step, for custom latex basis vectors
if latexs is not None:
if not isinstance(latexs, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(latexs) != 3:
raise ValueError('Supply 3 indices')
for i in latexs:
if not isinstance(i, string_types):
raise TypeError('Latex entries must be strings')
self.latex_vecs = latexs
self.name = name
self._var_dict = {}
#The _dcm_dict dictionary will only store the dcms of parent-child
#relationships. The _dcm_cache dictionary will work as the dcm
#cache.
self._dcm_dict = {}
self._dcm_cache = {}
self._ang_vel_dict = {}
self._ang_acc_dict = {}
self._dlist = [self._dcm_dict, self._ang_vel_dict, self._ang_acc_dict]
self._cur = 0
self._x = Vector([(Matrix([1, 0, 0]), self)])
self._y = Vector([(Matrix([0, 1, 0]), self)])
self._z = Vector([(Matrix([0, 0, 1]), self)])
#Associate coordinate symbols wrt this frame
if variables is not None:
if not isinstance(variables, (tuple, list)):
raise TypeError('Supply the variable names as a list/tuple')
if len(variables) != 3:
raise ValueError('Supply 3 variable names')
for i in variables:
if not isinstance(i, string_types):
raise TypeError('Variable names must be strings')
else:
variables = [name + '_x', name + '_y', name + '_z']
self.varlist = (CoordinateSym(variables[0], self, 0), \
CoordinateSym(variables[1], self, 1), \
CoordinateSym(variables[2], self, 2))
def __getitem__(self, ind):
"""
Returns basis vector for the provided index, if the index is a string.
        If the index is a number, returns the coordinate variable
        corresponding to that index.
"""
        if not isinstance(ind, str):
            if ind < 3:
                return self.varlist[ind]
else:
|
paolodedios/pybuilder
|
src/main/python/pybuilder/_vendor/pkg_resources/_vendor/packaging/_musllinux.py
|
Python
|
apache-2.0
| 4,378
| 0.000457
|
"""PEP 656 support.
This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""
import contextlib
import functools
import operator
import os
import re
import struct
import subprocess
import sys
from typing import IO, Iterator, NamedTuple, Optional, Tuple
def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
"""Detect musl libc location by parsing the Python executable.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
f.seek(0)
try:
ident = _read_unpacked(f, "16B")
except struct.error:
return None
if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
return None
f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
try:
# e_fmt: Format for program header.
# p_fmt: Format for section header.
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, p_fmt, p_idx = {
1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
}[ident[4]]
except KeyError:
return None
else:
p_get = operator.itemgetter(*p_idx)
# Find the interpreter section and return its content.
try:
_, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
except struct.error:
return None
for i in range(e_phnum + 1):
f.seek(e_phoff + e_phentsize * i)
try:
p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
except struct.error:
return None
if p_type != 3: # Not PT_INTERP.
continue
f.seek(p_offset)
interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
if "musl" not in interpreter:
return None
return interpreter
return None
class _MuslVersion(NamedTuple):
major: int
minor: int
def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
@functools.lru_cache()
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
"""Detect currently-running musl runtime version.
This is done by checking the specified executable's dynamic linking
information, and invoking the loader to parse its output for a version
string. If the loader is musl, the output would be something like::
musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader
"""
with contextlib.ExitStack() as stack:
try:
f = stack.enter_context(open(executable, "rb"))
except IOError:
return None
ld = _parse_ld_musl_from_elf(f)
if not ld:
return None
proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
return _parse_musl_version(proc.stderr)
def platform_tags(arch: str) -> Iterator[str]:
"""Generate musllinux tags compatible to the current platform.
:param arch: Should be the part of platform tag after the ``linux_``
prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
prerequisite for the current platform to be musllinux-compatible.
:returns: An iterator of compatible musllinux tags.
"""
sys_musl = _get_musl_version(sys.executable)
if sys_musl is None: # Python not dynamically linked against musl.
return
for minor in range(sys_musl.minor, -1, -1):
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
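# Illustrative example (added comment): with musl 1.2 detected on an x86_64
# interpreter, this generator yields, in order, musllinux_1_2_x86_64,
# musllinux_1_1_x86_64 and musllinux_1_0_x86_64.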
if __name__ == "__main__": # pragma: no cover
import sysconfig
plat = sysconfig.get_platform()
assert plat.startswith("linux-"), "not linux"
print("plat:", plat)
print("musl:", _get_musl_version(sys.executable))
print("tags:", end=" ")
    for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
print(t, end="\n ")
|
pblottiere/QGIS
|
tests/src/python/test_qgssettings.py
|
Python
|
gpl-2.0
| 23,013
| 0.003525
|
# -*- coding: utf-8 -*-
"""
Test the QgsSettings class
Run with: ctest -V -R PyQgsSettings
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import tempfile
from qgis.core import QgsSettings, QgsTolerance, QgsMapLayerProxyModel
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QSettings, QVariant
from pathlib import Path
__author__ = 'Alessandro Pasotti'
__date__ = '02/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
start_app()
class TestQgsSettings(unittest.TestCase):
cnt = 0
def setUp(self):
self.cnt += 1
h, path = tempfile.mkstemp('.ini')
Path(path).touch()
assert QgsSettings.setGlobalSettingsPath(path)
self.settings = QgsSettings('testqgissettings', 'testqgissettings%s' % self.cnt)
self.globalsettings = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat)
self.globalsettings.sync()
assert os.path.exists(self.globalsettings.fileName())
def tearDown(self):
settings_file = self.settings.fileName()
settings_default_file = self.settings.globalSettingsPath()
del(self.settings)
try:
os.unlink(settings_file)
except:
pass
try:
os.unlink(settings_default_file)
except:
pass
def addToDefaults(self, key, value):
self.globalsettings.setValue(key, value)
self.globalsettings.sync()
def addArrayToDefaults(self, prefix, key, values):
defaults = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat) # NOQA
self.globalsettings.beginWriteArray(prefix)
i = 0
for v in values:
self.globalsettings.setArrayIndex(i)
self.globalsettings.setValue(key, v)
i += 1
self.globalsettings.endArray()
self.globalsettings.sync()
def addGroupToDefaults(self, prefix, kvp):
defaults = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat) # NOQA
self.globalsettings.beginGroup(prefix)
for k, v in kvp.items():
self.globalsettings.setValue(k, v)
self.globalsettings.endGroup()
self.globalsettings.sync()
def test_basic_functionality(self):
self.assertEqual(self.settings.value('testqgissettings/doesnotexists', 'notexist'), 'notexist')
self.settings.setValue('testqgissettings/name', 'qgisrocks')
self.settings.sync()
self.assertEqual(self.settings.value('testqgissettings/name'), 'qgisrocks')
def test_defaults(self):
self.assertIsNone(self.settings.value('testqgissettings/name'))
self.addToDefaults('testqgissettings/name', 'qgisrocks')
self.assertEqual(self.settings.value('testqgissettings/name'), 'qgisrocks')
def test_allkeys(self):
self.assertEqual(self.settings.allKeys(), [])
self.addToDefaults('testqgissettings/name', 'qgisrocks')
self.addToDefaults('testqgissettings/name2', 'qgisrocks2')
self.settings.setValue('nepoti/eman', 'osaple')
self.assertEqual(3, len(self.settings.allKeys()))
self.assertIn('testqgissettings/name', self.settings.allKeys())
self.assertIn('nepoti/eman', self.settings.allKeys())
self.assertEqual('qgisrocks', self.settings.value('testqgissettings/name'))
self.assertEqual('qgisrocks2', self.settings.value('testqgissettings/name2'))
self.assertEqual('qgisrocks', self.globalsettings.value('testqgissettings/name'))
self.assertEqual('osaple', self.settings.value('nepoti/eman'))
self.assertEqual(3, len(self.settings.allKeys()))
self.assertEqual(2, len(self.globalsettings.allKeys()))
def test_precedence_simple(self):
self.assertEqual(self.settings.allKeys(), [])
self.addToDefaults('testqgissettings/names/name1', 'qgisrocks1')
self.settings.setValue('testqgissettings/names/name1', 'qgisrocks-1')
self.assertEqual(self.settings.value('testqgissettings/names/name1'), 'qgisrocks-1')
def test_precedence_group(self):
"""Test if user can override a group value"""
self.assertEqual(self.settings.allKeys(), [])
self.addGroupToDefaults('connections-xyz', {
'OSM': 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png',
'OSM-b': 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png',
})
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Override edit
self.settings.beginGroup('connections-xyz')
self.settings.setValue('OSM', 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Check it again!
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Override remove: the global value will be resumed!!!
self.settings.beginGroup('connections-xyz')
self.settings.remove('OSM')
self.settings.endGroup()
# Check it again!
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Override remove: store a blank!
self.settings.beginGroup('connections-xyz')
self.settings.setValue('OSM', '')
self.settings.endGroup()
# Check it again!
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), '')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
# Override remove: store a None: will resume the global setting!
self.settings.beginGroup('connections-xyz')
self.settings.setValue('OSM', None)
self.settings.endGroup()
# Check it again!
self.settings.beginGroup('connections-xyz')
self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
self.settings.endGroup()
def test_uft8(self):
self.assertEqual(self.settings.allKeys(), [])
        self.addToDefaults('testqgissettings/names/namèé↓1', 'qgisrocks↓1')
self.assertEqual(self.settings.value('testqgissettings/names/namèé↓1'), 'qgisrocks↓1')
self.settings.setValue('testqgissettings/names/namèé↓2', 'qgisrocks↓2')
self.assertEqual(self.settings.value('testqgissettings/names/namèé↓2'), 'qgisrocks↓2')
        self.settings.setValue('testqgissettings/names/namèé↓1', 'qgisrocks↓-1')
self.assertEqual(self.settings.value('testqgissettings/names/namèé↓1'), 'qgisrocks↓-1')
def test_groups(self):
self.assertEqual(self.settings.allKeys(), [])
self.addToDefaults('testqgissettings/names/name1', 'qgisrocks1')
self.addToDefaults('testqgissettings/names/name2', 'qgisrocks2')
self.addToDefaults('testqgissettings/names/name3', 'qgisrocks3')
self.addToDefaults('testqgissettings/name', 'qgisrocks')
self.settings.beginGroup('testqgissettings')
self.assertEqual(self.settings.group(), 'testqgissettings')
self.assertEqual(['names'], self.settings.childGroups())
self.settings.setValue('surnames/name1', 'qgisrocks-1')
self.assertEqual(['surnames', 'n
|
riking/youtube-dl
|
youtube_dl/downloader/http.py
|
Python
|
unlicense
| 8,667
| 0.002308
|
import os
import time
from .common import FileDownloader
from ..utils import (
compat_urllib_request,
compat_urllib_error,
ContentTooShortError,
encodeFilename,
sanitize_open,
format_bytes,
)
class HttpFD(FileDownloader):
_TEST_FILE_SIZE = 10241
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
stream = None
# Do not include the Accept-Encoding header
headers = {'Youtubedl-no-compression': 'True'}
if 'user_agent' in info_dict:
headers['Youtubedl-user-agent'] = info_dict['user_agent']
if 'http_referer' in info_dict:
headers['Referer'] = info_dict['http_referer']
basic_request = compat_urllib_request.Request(url, None, headers)
request = compat_urllib_request.Request(url, None, headers)
is_test = self.params.get('test', False)
if is_test:
request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))
# Establish possible resume length
if os.path.isfile(encodeFilename(tmpfilename)):
resume_len = os.path.getsize(encodeFilename(tmpfilename))
else:
resume_len = 0
open_mode = 'wb'
if resume_len != 0:
if self.params.get('continuedl', False):
self.report_resuming_byte(resume_len)
request.add_header('Range', 'bytes=%d-' % resume_len)
open_mode = 'ab'
else:
resume_len = 0
count = 0
retries = self.params.get('retries', 0)
while count <= retries:
# Establish connection
try:
data = self.ydl.urlopen(request)
break
except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
# Unexpected HTTP error
raise
elif err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
data = self.ydl.urlopen(basic_request)
content_length = data.info()['Content-Length']
except (compat_urllib_error.HTTPError, ) as err:
if err.code < 500 or err.code >= 600:
raise
else:
# Examine the reported length
if (content_length is not None and
(resume_len - 100 < int(content_length) < resume_len + 100)):
# The file had already been fully downloaded.
# Explanation to the above condition: in issue #175 it was revealed that
# YouTube sometimes adds or removes a few bytes from the end of the file,
# changing the file size slightly and causing problems for some users. So
# I decided to implement a suggested change and consider the file
# completely downloaded if the file size differs less than 100 bytes from
# the one in the hard drive.
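                            # Worked example (added comment): if resume_len is 10000 and the server
                            # reports Content-Length 9950, the difference is below 100 bytes, so the
                            # partially downloaded file is treated as complete and simply renamed.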
self.report_file_already_downloaded(filename)
self.try_rename(tmpfilename, filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
})
return True
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
resume_len = 0
open_mode = 'wb'
break
# Retry
count += 1
if count <= retries:
self.report_retry(count, retries)
if count > retries:
self.report_error(u'giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
# Range HTTP header may be ignored/unsupported by a webserver
# (e.g. extractor/scivee.py, extractor/bambuser.py).
# However, for a test we still would like to download just a piece of a file.
        # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
        # block size when downloading a file.
if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
data_len = self._TEST_FILE_SIZE
if data_len is not None:
data_len = int(data_len) + resume_len
min_data_len = self.params.get("min_filesize", None)
max_data_len = self.params.get("max_filesize", None)
if min_data_len is not None and data_len < min_data_len:
self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
if max_data_len is not None and data_len > max_data_len:
self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
return False
data_len_str = format_bytes(data_len)
byte_counter = 0 + resume_len
block_size = self.params.get('buffersize', 1024)
start = time.time()
while True:
# Download and write
before = time.time()
data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
after = time.time()
if len(data_block) == 0:
break
byte_counter += len(data_block)
# Open file just in time
if stream is None:
try:
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
assert stream is not None
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
self.report_error(u'unable to open for writing: %s' % str(err))
return False
try:
stream.write(data_block)
except (IOError, OSError) as err:
self.to_stderr(u"\n")
self.report_error(u'unable to write data: %s' % str(err))
return False
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
# Progress message
speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
if data_len is None:
eta = percent = None
else:
percent = self.calc_percent(byte_counter, data_len)
eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
self.report_progress(percent, data_len_str, speed, eta)
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': data_len,
'tmpfilename': tmpfilename,
'filename': filename,
'status': 'downloading',
'eta': eta,
'speed': speed,
})
if is_test and byte_counter == data_len:
break
# Apply rate limit
self.slow_down(start, byte_counter - resume_len)
if stream is None:
self.to_stderr(u"\n")
self.report_error(u'Did not get any data blocks')
return False
stream.close()
self.report_finish(data_len_str, (time.time() - start))
if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
|
klahnakoski/SpotManager
|
vendor/jx_elasticsearch/es52/painless/__init__.py
|
Python
|
mpl-2.0
| 3,355
| 0.000596
|
from jx_elasticsearch.es52.painless._utils import Painless, LIST_TO_PIPE
from jx_elasticsearch.es52.painless.add_op import AddOp
from jx_elasticsearch.es52.painless.and_op import AndOp
from jx_elasticsearch.es52.painless.basic_add_op import BasicAddOp
from jx_elasticsearch.es52.painless.basic_eq_op import BasicEqOp
from jx_elasticsearch.es52.painless.basic_index_of_op import BasicIndexOfOp
from jx_elasticsearch.es52.painless.basic_mul_op import BasicMulOp
from jx_elasticsearch.es52.painless.basic_starts_with_op import BasicStartsWithOp
from jx_elasticsearch.es52.painless.basic_substring_op import BasicSubstringOp
from jx_elasticsearch.es52.painless.boolean_op import BooleanOp
from jx_elasticsearch.es52.painless.case_op import CaseOp
from jx_elasticsearch.es52.painless.coalesce_op import CoalesceOp
from jx_elasticsearch.es52.painless.concat_op import ConcatOp
from jx_elasticsearch.es52.painless.count_op import CountOp
from jx_elasticsearch.es52.painless.date_op import DateOp
from jx_elasticsearch.es52.painless.div_op import DivOp
from jx_elasticsearch.es52.painless.eq_op import EqOp
from jx_elasticsearch.es52.painless.es_script import EsScript
from jx_elasticsearch.es52.painless.exists_op import ExistsOp
from jx_elasticsearch.es52.painless.exp_op import ExpOp
from jx_elasticsearch.es52.painless.find_op import FindOp
from jx_elasticsearch.es52.painless.first_op import FirstOp
from jx_elasticsearch.es52.painless.floor_op import FloorOp
from jx_elasticsearch.es52.painless.gt_op import GtOp
from jx_elasticsearch.es52.painless.gte_op import GteOp
from jx_elasticsearch.es52.painless.in_op import InOp
from jx_elasticsearch.es52.painless.integer_op import IntegerOp
from jx_elasticsearch.es52.painless.is_number_op import IsNumberOp
from jx_elasticsearch.es52.painless.leaves_op import LeavesOp
from jx_elasticsearch.es52.painless.length_op import LengthOp
from jx_elasticsearch.es52.painless.literal import Literal
from jx_elasticsearch.es52.painless.lt_op import LtOp
from jx_elasticsearch.es52.painless.lte_op import LteOp
from jx_elasticsearch.es52.painless.max_op import MaxOp
from jx_elasticsearch.es52.painless.min_op import MinOp
from jx_elasticsearch.es52.painless.missing_op import MissingOp
from jx_elasticsearch.es52.painless.mod_op import ModOp
from jx_elasticsearch.es52.painless.mul_op import MulOp
from jx_elasticsearch.es52.painless.ne_op import NeOp
from jx_elasticsearch.es52.painless.not_left_op import NotLeftOp
from jx_elasticsearch.es52.painless.not_op import NotOp
from jx_elasticsearch.es52.painless.number_op import NumberOp
from jx_elasticsearch.es52.painless.or_op import OrOp
from jx_elasticsearch.es52.painless.prefix_op import PrefixOp
from jx_elasticsearch.es52.painless.string_op import StringOp
from jx_elasticsearch.es52.painless.sub_op import SubOp
from jx_elasticsearch.es52.painless.suffix_op import SuffixOp
from jx_elasticsearch.es52.painless.tuple_op import TupleOp
from jx_elasticsearch.es52.painless.union_op import UnionOp
from jx_elasticsearch.es52.painless.variable import Variable
from jx_elasticsearch.es52.painless.when_op import WhenOp
from jx_elasticsearch.es52.painless.false_op import FalseOp, false_script
from jx_elasticsearch.es52.painless.true_op import TrueOp, true_script
from jx_elasticsearch.es52.painless.null_op import NullOp, null_script
Painless.register_ops(vars())
|
teichopsia-/python_practice
|
old_class_material/MITPerson_class.py
|
Python
|
mpl-2.0
| 903
| 0.026578
|
# Building inheritance
class MITPerson(Person):
    nextIdNum = 0  # next ID number to assign
def __init__(self, name):
Person.__init__(self, name) #initialize Person attributes
        # new MITPerson attribute: a unique ID number
self.idNum = MITPerson.nextIdNum
MITPerson.nextIdNum += 1
def getIdNum(self):
return self.idNum
    def __lt__(self, other):
return self.idNum < other.idNum
class Student(MITPerson):
pass
class UG(Student): #UG = under graduate ###------
def __init__(self, name, classYear):
MITPerson.__init__(self, name)
self.year = classYear
    def getClass(self):  # getter method
        return self.year
class Grad(Student): ##----
pass
class TransferStudent(Student):
pass
def isStudent(obj):
return isinstance(obj, Student)
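# Illustrative usage (added comment; assumes the Person base class from the
# earlier course material is available):
#   p1 = UG('Ana', 2021); p2 = Grad('Bo')
#   p1.getIdNum(), p2.getIdNum()     # -> 0, 1 (IDs assigned in creation order)
#   isStudent(p1) and isStudent(p2)  # -> True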
|